Mirror of https://github.com/blakeblackshear/frigate.git
Merge branch 'dev' into updated-documentation
commit 6d596a372c
@@ -212,6 +212,7 @@ rcond
RDONLY
rebranded
referer
reindex
Reolink
restream
restreamed

@@ -518,8 +518,9 @@ semantic_search:
  enabled: False
  # Optional: Re-index embeddings database from historical tracked objects (default: shown below)
  reindex: False
  # Optional: Set device used to run embeddings, options are AUTO, CPU, GPU. (default: shown below)
  device: "AUTO"
  # Optional: Set the model size used for embeddings. (default: shown below)
  # NOTE: small model runs on CPU and large model runs on GPU
  model_size: "small"

# Optional: Configuration for AI generated tracked object descriptions
# NOTE: Semantic Search must be enabled for this to do anything.

@@ -29,15 +29,26 @@ If you are enabling the Search feature for the first time, be advised that Frigate

### Jina AI CLIP

The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.

The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Search page when clicking on the gray tracked object chip at the top left of each review item. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.

Differently weighted CLIP models are available and can be selected by setting the `model_size` config option:

:::tip

The CLIP models are downloaded in ONNX format, which means they will be accelerated using GPU hardware when available. This depends on the Docker build that is used. See [the object detector docs](../configuration/object_detectors.md) for more information.

:::

The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.
```yaml
semantic_search:
  enabled: True
  model_size: small
```

The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Search page when clicking on the gray tracked object chip at the top left of each review item. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.
- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
- Configuring the `small` model employs a quantized version of the model that uses much less RAM and runs faster on CPU with a very negligible difference in embedding quality.
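
For example, a config sketch opting into the full-size model (hypothetical values; per the notes above, the `large` model runs on the GPU when available):

```yaml
semantic_search:
  enabled: True
  model_size: large
```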

## Usage

@@ -11,9 +11,7 @@ class EventsSubLabelBody(BaseModel):


class EventsDescriptionBody(BaseModel):
    description: Union[str, None] = Field(
        title="The description of the event", min_length=1
    )
    description: Union[str, None] = Field(title="The description of the event")


class EventsCreateBody(BaseModel):

@@ -473,12 +473,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
        )

        thumb_result = context.search_thumbnail(search_event)
        thumb_ids = dict(
            zip(
                [result[0] for result in thumb_result],
                context.thumb_stats.normalize([result[1] for result in thumb_result]),
            )
        )
        thumb_ids = {result[0]: result[1] for result in thumb_result}
        search_results = {
            event_id: {"distance": distance, "source": "thumbnail"}
            for event_id, distance in thumb_ids.items()
@@ -486,15 +481,18 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
    else:
        search_types = search_type.split(",")

        # only save stats for multi-modal searches
        save_stats = "thumbnail" in search_types and "description" in search_types

        if "thumbnail" in search_types:
            thumb_result = context.search_thumbnail(query)
            thumb_ids = dict(
                zip(
                    [result[0] for result in thumb_result],
                    context.thumb_stats.normalize(
                        [result[1] for result in thumb_result]
                    ),
                )

            thumb_distances = context.thumb_stats.normalize(
                [result[1] for result in thumb_result], save_stats
            )

            thumb_ids = dict(
                zip([result[0] for result in thumb_result], thumb_distances)
            )
            search_results.update(
                {
@@ -505,12 +503,13 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())

        if "description" in search_types:
            desc_result = context.search_description(query)
            desc_ids = dict(
                zip(
                    [result[0] for result in desc_result],
                    context.desc_stats.normalize([result[1] for result in desc_result]),
                )

            desc_distances = context.desc_stats.normalize(
                [result[1] for result in desc_result], save_stats
            )

            desc_ids = dict(zip([result[0] for result in desc_result], desc_distances))

            for event_id, distance in desc_ids.items():
                if (
                    event_id not in search_results
@@ -927,27 +926,19 @@ def set_description(

    new_description = body.description

    if new_description is None or len(new_description) == 0:
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": "description cannot be empty",
                }
            ),
            status_code=400,
        )

    event.data["description"] = new_description
    event.save()

    # If semantic search is enabled, update the index
    if request.app.frigate_config.semantic_search.enabled:
        context: EmbeddingsContext = request.app.embeddings
        if len(new_description) > 0:
            context.update_description(
                event_id,
                new_description,
            )
        else:
            context.db.delete_embeddings_description(event_ids=[event_id])

    response_message = (
        f"Event {event_id} description is now blank"
@@ -1033,8 +1024,8 @@ def delete_event(request: Request, event_id: str):
    # If semantic search is enabled, update the index
    if request.app.frigate_config.semantic_search.enabled:
        context: EmbeddingsContext = request.app.embeddings
        context.db.delete_embeddings_thumbnail(id=[event_id])
        context.db.delete_embeddings_description(id=[event_id])
        context.db.delete_embeddings_thumbnail(event_ids=[event_id])
        context.db.delete_embeddings_description(event_ids=[event_id])
    return JSONResponse(
        content=({"success": True, "message": "Event " + event_id + " deleted"}),
        status_code=200,

@@ -581,12 +581,12 @@ class FrigateApp:
        self.init_recording_manager()
        self.init_review_segment_manager()
        self.init_go2rtc()
        self.start_detectors()
        self.init_embeddings_manager()
        self.bind_database()
        self.check_db_data_migrations()
        self.init_inter_process_communicator()
        self.init_dispatcher()
        self.start_detectors()
        self.init_embeddings_manager()
        self.init_embeddings_client()
        self.start_video_output_processor()
        self.start_ptz_autotracker()

@@ -15,6 +15,7 @@ from frigate.const import (
    INSERT_PREVIEW,
    REQUEST_REGION_GRID,
    UPDATE_CAMERA_ACTIVITY,
    UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
    UPDATE_EVENT_DESCRIPTION,
    UPDATE_MODEL_STATE,
    UPSERT_REVIEW_SEGMENT,
@@ -63,6 +64,9 @@ class Dispatcher:
        self.onvif = onvif
        self.ptz_metrics = ptz_metrics
        self.comms = communicators
        self.camera_activity = {}
        self.model_state = {}
        self.embeddings_reindex = {}

        self._camera_settings_handlers: dict[str, Callable] = {
            "audio": self._on_audio_command,
@@ -84,37 +88,25 @@ class Dispatcher:
        for comm in self.comms:
            comm.subscribe(self._receive)

        self.camera_activity = {}
        self.model_state = {}

    def _receive(self, topic: str, payload: str) -> Optional[Any]:
        """Handle receiving of payload from communicators."""
        if topic.endswith("set"):

        def handle_camera_command(command_type, camera_name, command, payload):
            try:
                # example /cam_name/detect/set payload=ON|OFF
                if topic.count("/") == 2:
                    camera_name = topic.split("/")[-3]
                    command = topic.split("/")[-2]
                if command_type == "set":
                    self._camera_settings_handlers[command](camera_name, payload)
                elif topic.count("/") == 1:
                    command = topic.split("/")[-2]
                    self._global_settings_handlers[command](payload)
            except IndexError:
                logger.error(f"Received invalid set command: {topic}")
                return
        elif topic.endswith("ptz"):
            try:
                # example /cam_name/ptz payload=MOVE_UP|MOVE_DOWN|STOP...
                camera_name = topic.split("/")[-2]
                elif command_type == "ptz":
                    self._on_ptz_command(camera_name, payload)
            except IndexError:
                logger.error(f"Received invalid ptz command: {topic}")
                return
        elif topic == "restart":
            except KeyError:
                logger.error(f"Invalid command type or handler: {command_type}")

        def handle_restart():
            restart_frigate()
        elif topic == INSERT_MANY_RECORDINGS:

        def handle_insert_many_recordings():
            Recordings.insert_many(payload).execute()
        elif topic == REQUEST_REGION_GRID:

        def handle_request_region_grid():
            camera = payload
            grid = get_camera_regions_grid(
                camera,
@@ -122,24 +114,25 @@ class Dispatcher:
                max(self.config.model.width, self.config.model.height),
            )
            return grid
        elif topic == INSERT_PREVIEW:

        def handle_insert_preview():
            Previews.insert(payload).execute()
        elif topic == UPSERT_REVIEW_SEGMENT:
            (
                ReviewSegment.insert(payload)
                .on_conflict(

        def handle_upsert_review_segment():
            ReviewSegment.insert(payload).on_conflict(
                conflict_target=[ReviewSegment.id],
                update=payload,
            )
                .execute()
            )
        elif topic == CLEAR_ONGOING_REVIEW_SEGMENTS:
            ReviewSegment.update(end_time=datetime.datetime.now().timestamp()).where(
                ReviewSegment.end_time == None
            ).execute()
        elif topic == UPDATE_CAMERA_ACTIVITY:

        def handle_clear_ongoing_review_segments():
            ReviewSegment.update(end_time=datetime.datetime.now().timestamp()).where(
                ReviewSegment.end_time.is_null(True)
            ).execute()

        def handle_update_camera_activity():
            self.camera_activity = payload
        elif topic == UPDATE_EVENT_DESCRIPTION:

        def handle_update_event_description():
            event: Event = Event.get(Event.id == payload["id"])
            event.data["description"] = payload["description"]
            event.save()
@@ -147,15 +140,31 @@ class Dispatcher:
                "event_update",
                json.dumps({"id": event.id, "description": event.data["description"]}),
            )
        elif topic == UPDATE_MODEL_STATE:

        def handle_update_model_state():
            if payload:
                model = payload["model"]
                state = payload["state"]
                self.model_state[model] = ModelStatusTypesEnum[state]
                self.publish("model_state", json.dumps(self.model_state))
        elif topic == "modelState":
            model_state = self.model_state.copy()
            self.publish("model_state", json.dumps(model_state))
        elif topic == "onConnect":

        def handle_model_state():
            self.publish("model_state", json.dumps(self.model_state.copy()))

        def handle_update_embeddings_reindex_progress():
            self.embeddings_reindex = payload
            self.publish(
                "embeddings_reindex_progress",
                json.dumps(payload),
            )

        def handle_embeddings_reindex_progress():
            self.publish(
                "embeddings_reindex_progress",
                json.dumps(self.embeddings_reindex.copy()),
            )

        def handle_on_connect():
            camera_status = self.camera_activity.copy()

            for camera in camera_status.keys():
@@ -170,6 +179,51 @@ class Dispatcher:
                }

            self.publish("camera_activity", json.dumps(camera_status))
            self.publish("model_state", json.dumps(self.model_state.copy()))
            self.publish(
                "embeddings_reindex_progress",
                json.dumps(self.embeddings_reindex.copy()),
            )

        # Dictionary mapping topic to handlers
        topic_handlers = {
            INSERT_MANY_RECORDINGS: handle_insert_many_recordings,
            REQUEST_REGION_GRID: handle_request_region_grid,
            INSERT_PREVIEW: handle_insert_preview,
            UPSERT_REVIEW_SEGMENT: handle_upsert_review_segment,
            CLEAR_ONGOING_REVIEW_SEGMENTS: handle_clear_ongoing_review_segments,
            UPDATE_CAMERA_ACTIVITY: handle_update_camera_activity,
            UPDATE_EVENT_DESCRIPTION: handle_update_event_description,
            UPDATE_MODEL_STATE: handle_update_model_state,
            UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress,
            "restart": handle_restart,
            "embeddingsReindexProgress": handle_embeddings_reindex_progress,
            "modelState": handle_model_state,
            "onConnect": handle_on_connect,
        }

        if topic.endswith("set") or topic.endswith("ptz"):
            try:
                parts = topic.split("/")
                if len(parts) == 3 and topic.endswith("set"):
                    # example /cam_name/detect/set payload=ON|OFF
                    camera_name = parts[-3]
                    command = parts[-2]
                    handle_camera_command("set", camera_name, command, payload)
                elif len(parts) == 2 and topic.endswith("set"):
                    command = parts[-2]
                    self._global_settings_handlers[command](payload)
                elif len(parts) == 2 and topic.endswith("ptz"):
                    # example /cam_name/ptz payload=MOVE_UP|MOVE_DOWN|STOP...
                    camera_name = parts[-2]
                    handle_camera_command("ptz", camera_name, "", payload)
            except IndexError:
                logger.error(
                    f"Received invalid {topic.split('/')[-1]} command: {topic}"
                )
                return
        elif topic in topic_handlers:
            return topic_handlers[topic]()
        else:
            self.publish(topic, payload, retain=False)
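
The refactor above replaces a long `elif topic == ...` chain with handler closures selected through a dict. A minimal, standalone sketch of the same dispatch pattern (handler and topic names here are illustrative, not Frigate's):

```python
# Dictionary dispatch: map a topic string to a zero-argument handler closure,
# falling back to a default when the topic is unknown.
def receive(topic: str, payload: str):
    def handle_restart():
        print("restarting...")

    def handle_update_activity():
        print(f"activity: {payload}")

    topic_handlers = {
        "restart": handle_restart,
        "update_activity": handle_update_activity,
    }

    if topic in topic_handlers:
        return topic_handlers[topic]()

    print(f"unhandled topic {topic}: {payload}")


receive("update_activity", '{"front_door": true}')
```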

@@ -22,7 +22,7 @@ class EmbeddingsResponder:

    def check_for_request(self, process: Callable) -> None:
        while True:  # load all messages that are queued
            has_message, _, _ = zmq.select([self.socket], [], [], 1)
            has_message, _, _ = zmq.select([self.socket], [], [], 0.1)

            if not has_message:
                break
@@ -54,8 +54,11 @@ class EmbeddingsRequestor:

    def send_data(self, topic: str, data: any) -> str:
        """Sends data and then waits for reply."""
        try:
            self.socket.send_json((topic, data))
            return self.socket.recv_json()
        except zmq.ZMQError:
            return ""

    def stop(self) -> None:
        self.socket.close()

@@ -39,7 +39,7 @@ class EventMetadataSubscriber(Subscriber):
        super().__init__(topic)

    def check_for_update(
        self, timeout: float = None
        self, timeout: float = 1
    ) -> Optional[tuple[EventMetadataTypeEnum, str, RegenerateDescriptionEnum]]:
        return super().check_for_update(timeout)


@@ -65,8 +65,11 @@ class InterProcessRequestor:

    def send_data(self, topic: str, data: any) -> any:
        """Sends data and then waits for reply."""
        try:
            self.socket.send_json((topic, data))
            return self.socket.recv_json()
        except zmq.ZMQError:
            return ""

    def stop(self) -> None:
        self.socket.close()

@@ -12,4 +12,6 @@ class SemanticSearchConfig(FrigateBaseModel):
    reindex: Optional[bool] = Field(
        default=False, title="Reindex all detections on startup."
    )
    device: str = Field(default="AUTO", title="Device Type")
    model_size: str = Field(
        default="small", title="The size of the embeddings model used."
    )

@@ -85,6 +85,7 @@ CLEAR_ONGOING_REVIEW_SEGMENTS = "clear_ongoing_review_segments"
UPDATE_CAMERA_ACTIVITY = "update_camera_activity"
UPDATE_EVENT_DESCRIPTION = "update_event_description"
UPDATE_MODEL_STATE = "update_model_state"
UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress"

# Stats Values

@@ -28,3 +28,26 @@ class SqliteVecQueueDatabase(SqliteQueueDatabase):
    def delete_embeddings_description(self, event_ids: list[str]) -> None:
        ids = ",".join(["?" for _ in event_ids])
        self.execute_sql(f"DELETE FROM vec_descriptions WHERE id IN ({ids})", event_ids)

    def drop_embeddings_tables(self) -> None:
        self.execute_sql("""
            DROP TABLE vec_descriptions;
        """)
        self.execute_sql("""
            DROP TABLE vec_thumbnails;
        """)

    def create_embeddings_tables(self) -> None:
        """Create vec0 virtual table for embeddings"""
        self.execute_sql("""
            CREATE VIRTUAL TABLE IF NOT EXISTS vec_thumbnails USING vec0(
                id TEXT PRIMARY KEY,
                thumbnail_embedding FLOAT[768] distance_metric=cosine
            );
        """)
        self.execute_sql("""
            CREATE VIRTUAL TABLE IF NOT EXISTS vec_descriptions USING vec0(
                id TEXT PRIMARY KEY,
                description_embedding FLOAT[768] distance_metric=cosine
            );
        """)

@@ -157,10 +157,14 @@ class ModelConfig(BaseModel):
        self._model_hash = file_hash.hexdigest()

    def create_colormap(self, enabled_labels: set[str]) -> None:
        """Get a list of colors for enabled labels."""
        colors = generate_color_palette(len(enabled_labels))

        self._colormap = {label: color for label, color in zip(enabled_labels, colors)}
        """Get a list of colors for enabled labels that aren't attributes."""
        enabled_trackable_labels = list(
            filter(lambda label: label not in self._all_attributes, enabled_labels)
        )
        colors = generate_color_palette(len(enabled_trackable_labels))
        self._colormap = {
            label: color for label, color in zip(enabled_trackable_labels, colors)
        }

    model_config = ConfigDict(extra="forbid", protected_namespaces=())


@@ -3,6 +3,7 @@ import os

import numpy as np
import openvino as ov
import openvino.properties as props
from pydantic import Field
from typing_extensions import Literal

@@ -34,6 +35,8 @@ class OvDetector(DetectionApi):
            logger.error(f"OpenVino model file {detector_config.model.path} not found.")
            raise FileNotFoundError

        os.makedirs("/config/model_cache/openvino", exist_ok=True)
        self.ov_core.set_property({props.cache_dir: "/config/model_cache/openvino"})
        self.interpreter = self.ov_core.compile_model(
            model=detector_config.model.path, device_name=detector_config.device
        )

@@ -19,7 +19,6 @@ from frigate.models import Event
from frigate.util.builtin import serialize
from frigate.util.services import listen

from .embeddings import Embeddings
from .maintainer import EmbeddingMaintainer
from .util import ZScoreNormalization

@@ -57,12 +56,6 @@ def manage_embeddings(config: FrigateConfig) -> None:
    models = [Event]
    db.bind(models)

    embeddings = Embeddings(config.semantic_search, db)

    # Check if we need to re-index events
    if config.semantic_search.reindex:
        embeddings.reindex()

    maintainer = EmbeddingMaintainer(
        db,
        config,
@@ -114,18 +107,24 @@ class EmbeddingsContext:
            query_embedding = row[0]
        else:
            # If no embedding found, generate it and return it
            query_embedding = serialize(
                self.requestor.send_data(
            data = self.requestor.send_data(
                EmbeddingsRequestEnum.embed_thumbnail.value,
                {"id": query.id, "thumbnail": query.thumbnail},
            )
                {"id": str(query.id), "thumbnail": str(query.thumbnail)},
            )

            if not data:
                return []

            query_embedding = serialize(data)
        else:
            query_embedding = serialize(
                self.requestor.send_data(
            data = self.requestor.send_data(
                EmbeddingsRequestEnum.generate_search.value, query
            )
            )

            if not data:
                return []

            query_embedding = serialize(data)

        sql_query = """
            SELECT
@@ -155,11 +154,14 @@ class EmbeddingsContext:
    def search_description(
        self, query_text: str, event_ids: list[str] = None
    ) -> list[tuple[str, float]]:
        query_embedding = serialize(
            self.requestor.send_data(
        data = self.requestor.send_data(
            EmbeddingsRequestEnum.generate_search.value, query_text
        )
        )

        if not data:
            return []

        query_embedding = serialize(data)

        # Prepare the base SQL query
        sql_query = """

@@ -3,14 +3,20 @@
import base64
import io
import logging
import os
import time

from numpy import ndarray
from PIL import Image
from playhouse.shortcuts import model_to_dict

from frigate.comms.inter_process import InterProcessRequestor
from frigate.config.semantic_search import SemanticSearchConfig
from frigate.const import UPDATE_MODEL_STATE
from frigate.const import (
    CONFIG_DIR,
    UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
    UPDATE_MODEL_STATE,
)
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event
from frigate.types import ModelStatusTypesEnum
@@ -63,12 +69,14 @@ class Embeddings:
        self.requestor = InterProcessRequestor()

        # Create tables if they don't exist
        self._create_tables()
        self.db.create_embeddings_tables()

        models = [
            "jinaai/jina-clip-v1-text_model_fp16.onnx",
            "jinaai/jina-clip-v1-tokenizer",
            "jinaai/jina-clip-v1-vision_model_fp16.onnx",
            "jinaai/jina-clip-v1-vision_model_fp16.onnx"
            if config.model_size == "large"
            else "jinaai/jina-clip-v1-vision_model_quantized.onnx",
            "jinaai/jina-clip-v1-preprocessor_config.json",
        ]

@@ -81,12 +89,6 @@ class Embeddings:
            },
        )

        def jina_text_embedding_function(outputs):
            return outputs[0]

        def jina_vision_embedding_function(outputs):
            return outputs[0]

        self.text_embedding = GenericONNXEmbedding(
            model_name="jinaai/jina-clip-v1",
            model_file="text_model_fp16.onnx",
@@ -94,49 +96,34 @@
            download_urls={
                "text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
            },
            embedding_function=jina_text_embedding_function,
            model_size=config.model_size,
            model_type="text",
            requestor=self.requestor,
            device="CPU",
        )

        self.vision_embedding = GenericONNXEmbedding(
            model_name="jinaai/jina-clip-v1",
            model_file="vision_model_fp16.onnx",
            download_urls={
                "vision_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/vision_model_fp16.onnx",
                "preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json",
            },
            embedding_function=jina_vision_embedding_function,
            model_type="vision",
            device=self.config.device,
        model_file = (
            "vision_model_fp16.onnx"
            if self.config.model_size == "large"
            else "vision_model_quantized.onnx"
        )

    def _create_tables(self):
        # Create vec0 virtual table for thumbnail embeddings
        self.db.execute_sql("""
            CREATE VIRTUAL TABLE IF NOT EXISTS vec_thumbnails USING vec0(
                id TEXT PRIMARY KEY,
                thumbnail_embedding FLOAT[768]
            );
        """)
        download_urls = {
            model_file: f"https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/{model_file}",
            "preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json",
        }

        # Create vec0 virtual table for description embeddings
        self.db.execute_sql("""
            CREATE VIRTUAL TABLE IF NOT EXISTS vec_descriptions USING vec0(
                id TEXT PRIMARY KEY,
                description_embedding FLOAT[768]
            );
        """)
        self.vision_embedding = GenericONNXEmbedding(
            model_name="jinaai/jina-clip-v1",
            model_file=model_file,
            download_urls=download_urls,
            model_size=config.model_size,
            model_type="vision",
            requestor=self.requestor,
            device="GPU" if config.model_size == "large" else "CPU",
        )

    def _drop_tables(self):
        self.db.execute_sql("""
            DROP TABLE vec_descriptions;
        """)
        self.db.execute_sql("""
            DROP TABLE vec_thumbnails;
        """)

    def upsert_thumbnail(self, event_id: str, thumbnail: bytes):
    def upsert_thumbnail(self, event_id: str, thumbnail: bytes) -> ndarray:
        # Convert thumbnail bytes to PIL Image
        image = Image.open(io.BytesIO(thumbnail)).convert("RGB")
        embedding = self.vision_embedding([image])[0]
@@ -151,9 +138,31 @@

        return embedding

    def upsert_description(self, event_id: str, description: str):
        embedding = self.text_embedding([description])[0]
    def batch_upsert_thumbnail(self, event_thumbs: dict[str, bytes]) -> list[ndarray]:
        images = [
            Image.open(io.BytesIO(thumb)).convert("RGB")
            for thumb in event_thumbs.values()
        ]
        ids = list(event_thumbs.keys())
        embeddings = self.vision_embedding(images)

        items = []

        for i in range(len(ids)):
            items.append(ids[i])
            items.append(serialize(embeddings[i]))

        self.db.execute_sql(
            """
            INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
            VALUES {}
            """.format(", ".join(["(?, ?)"] * len(ids))),
            items,
        )
        return embeddings
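
The batched insert above builds one multi-row statement by repeating `(?, ?)` and interleaving ids with serialized embeddings. A tiny sketch of that flattening, with made-up ids and plain strings standing in for the serialized blobs:

```python
ids = ["event1", "event2"]
blobs = ["blob1", "blob2"]

# interleave [id, blob, id, blob, ...] to match "(?, ?), (?, ?)"
items = []
for event_id, blob in zip(ids, blobs):
    items.extend([event_id, blob])

sql = "INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding) VALUES {}".format(
    ", ".join(["(?, ?)"] * len(ids))
)
print(sql)    # ... VALUES (?, ?), (?, ?)
print(items)  # ['event1', 'blob1', 'event2', 'blob2']
```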

    def upsert_description(self, event_id: str, description: str) -> ndarray:
        embedding = self.text_embedding([description])[0]
        self.db.execute_sql(
            """
            INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
@@ -164,20 +173,64 @@

        return embedding

    def reindex(self) -> None:
        logger.info("Indexing event embeddings...")
    def batch_upsert_description(self, event_descriptions: dict[str, str]) -> ndarray:
        embeddings = self.text_embedding(list(event_descriptions.values()))
        ids = list(event_descriptions.keys())

        self._drop_tables()
        self._create_tables()
        items = []

        for i in range(len(ids)):
            items.append(ids[i])
            items.append(serialize(embeddings[i]))

        self.db.execute_sql(
            """
            INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
            VALUES {}
            """.format(", ".join(["(?, ?)"] * len(ids))),
            items,
        )

        return embeddings

    def reindex(self) -> None:
        logger.info("Indexing tracked object embeddings...")

        self.db.drop_embeddings_tables()
        logger.debug("Dropped embeddings tables.")
        self.db.create_embeddings_tables()
        logger.debug("Created embeddings tables.")

        # Delete the saved stats file
        if os.path.exists(os.path.join(CONFIG_DIR, ".search_stats.json")):
            os.remove(os.path.join(CONFIG_DIR, ".search_stats.json"))

        st = time.time()

        # Get total count of events to process
        total_events = (
            Event.select()
            .where(
                (Event.has_clip == True | Event.has_snapshot == True)
                & Event.thumbnail.is_null(False)
            )
            .count()
        )

        batch_size = 32
        current_page = 1

        totals = {
            "thumb": 0,
            "desc": 0,
            "thumbnails": 0,
            "descriptions": 0,
            "processed_objects": total_events - 1 if total_events < batch_size else 0,
            "total_objects": total_events,
            "time_remaining": 0 if total_events < batch_size else -1,
            "status": "indexing",
        }

        batch_size = 100
        current_page = 1
        self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)

        events = (
            Event.select()
            .where(
@@ -190,14 +243,45 @@

        while len(events) > 0:
            event: Event
            batch_thumbs = {}
            batch_descs = {}
            for event in events:
                thumbnail = base64.b64decode(event.thumbnail)
                self.upsert_thumbnail(event.id, thumbnail)
                totals["thumb"] += 1
                if description := event.data.get("description", "").strip():
                    totals["desc"] += 1
                    self.upsert_description(event.id, description)
                batch_thumbs[event.id] = base64.b64decode(event.thumbnail)
                totals["thumbnails"] += 1

                if description := event.data.get("description", "").strip():
                    batch_descs[event.id] = description
                    totals["descriptions"] += 1

                totals["processed_objects"] += 1

            # run batch embedding
            self.batch_upsert_thumbnail(batch_thumbs)

            if batch_descs:
                self.batch_upsert_description(batch_descs)

            # report progress every batch so we don't spam the logs
            progress = (totals["processed_objects"] / total_events) * 100
            logger.debug(
                "Processed %d/%d events (%.2f%% complete) | Thumbnails: %d, Descriptions: %d",
                totals["processed_objects"],
                total_events,
                progress,
                totals["thumbnails"],
                totals["descriptions"],
            )

            # Calculate time remaining
            elapsed_time = time.time() - st
            avg_time_per_event = elapsed_time / totals["processed_objects"]
            remaining_events = total_events - totals["processed_objects"]
            time_remaining = avg_time_per_event * remaining_events
            totals["time_remaining"] = int(time_remaining)

            self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)

            # Move to the next page
            current_page += 1
            events = (
                Event.select()
@@ -211,7 +295,10 @@

        logger.info(
            "Embedded %d thumbnails and %d descriptions in %s seconds",
            totals["thumb"],
            totals["desc"],
            time.time() - st,
            totals["thumbnails"],
            totals["descriptions"],
            round(time.time() - st, 1),
        )
        totals["status"] = "completed"

        self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
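
The time-remaining estimate above is a simple running average over objects processed so far; a worked sketch of the arithmetic (numbers invented for illustration):

```python
# after 40 s, 400 of 1000 objects processed
elapsed_time = 40.0
processed = 400
total = 1000

avg_time_per_event = elapsed_time / processed      # 0.1 s per object
time_remaining = avg_time_per_event * (total - processed)
print(int(time_remaining))  # 60 -> published as totals["time_remaining"]
```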

@@ -2,10 +2,9 @@ import logging
import os
import warnings
from io import BytesIO
from typing import Callable, Dict, List, Optional, Union
from typing import Dict, List, Optional, Union

import numpy as np
import onnxruntime as ort
import requests
from PIL import Image

@@ -15,10 +14,11 @@ from PIL import Image
from transformers import AutoFeatureExtractor, AutoTokenizer
from transformers.utils.logging import disable_progress_bar

from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
from frigate.util.model import get_ort_providers
from frigate.util.model import ONNXModelRunner

warnings.filterwarnings(
    "ignore",
@@ -39,34 +39,49 @@ class GenericONNXEmbedding:
        model_name: str,
        model_file: str,
        download_urls: Dict[str, str],
        embedding_function: Callable[[List[np.ndarray]], np.ndarray],
        model_size: str,
        model_type: str,
        requestor: InterProcessRequestor,
        tokenizer_file: Optional[str] = None,
        device: str = "AUTO",
    ):
        self.model_name = model_name
        self.model_file = model_file
        self.tokenizer_file = tokenizer_file
        self.requestor = requestor
        self.download_urls = download_urls
        self.embedding_function = embedding_function
        self.model_type = model_type  # 'text' or 'vision'
        self.providers, self.provider_options = get_ort_providers(
            force_cpu=device == "CPU", requires_fp16=True, openvino_device=device
        )

        self.model_size = model_size
        self.device = device
        self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
        self.tokenizer = None
        self.feature_extractor = None
        self.session = None
        self.runner = None
        files_names = list(self.download_urls.keys()) + (
            [self.tokenizer_file] if self.tokenizer_file else []
        )

        if not all(
            os.path.exists(os.path.join(self.download_path, n)) for n in files_names
        ):
            logger.debug(f"starting model download for {self.model_name}")
            self.downloader = ModelDownloader(
                model_name=self.model_name,
                download_path=self.download_path,
                file_names=list(self.download_urls.keys())
                + ([self.tokenizer_file] if self.tokenizer_file else []),
                file_names=files_names,
                download_func=self._download_model,
            )
            self.downloader.ensure_model_files()
        else:
            self.downloader = None
            ModelDownloader.mark_files_state(
                self.requestor,
                self.model_name,
                files_names,
                ModelStatusTypesEnum.downloaded,
            )
            self._load_model_and_tokenizer()
            logger.debug(f"models are already downloaded for {self.model_name}")

    def _download_model(self, path: str):
        try:
@@ -101,14 +116,17 @@ class GenericONNXEmbedding:
            )

    def _load_model_and_tokenizer(self):
        if self.session is None:
        if self.runner is None:
            if self.downloader:
                self.downloader.wait_for_download()
            if self.model_type == "text":
                self.tokenizer = self._load_tokenizer()
            else:
                self.feature_extractor = self._load_feature_extractor()
            self.session = self._load_model(
                os.path.join(self.download_path, self.model_file)
            self.runner = ONNXModelRunner(
                os.path.join(self.download_path, self.model_file),
                self.device,
                self.model_size,
            )

    def _load_tokenizer(self):
@@ -125,15 +143,6 @@ class GenericONNXEmbedding:
            f"{MODEL_CACHE_DIR}/{self.model_name}",
        )

    def _load_model(self, path: str):
        if os.path.exists(path):
            return ort.InferenceSession(
                path, providers=self.providers, provider_options=self.provider_options
            )
        else:
            logger.warning(f"{self.model_name} model file {path} not found.")
            return None

    def _process_image(self, image):
        if isinstance(image, str):
            if image.startswith("http"):
@@ -146,8 +155,7 @@
        self, inputs: Union[List[str], List[Image.Image], List[str]]
    ) -> List[np.ndarray]:
        self._load_model_and_tokenizer()

        if self.session is None or (
        if self.runner is None or (
            self.tokenizer is None and self.feature_extractor is None
        ):
            logger.error(
@@ -156,23 +164,37 @@
            return []

        if self.model_type == "text":
            processed_inputs = self.tokenizer(
                inputs, padding=True, truncation=True, return_tensors="np"
            max_length = max(len(self.tokenizer.encode(text)) for text in inputs)
            processed_inputs = [
                self.tokenizer(
                    text,
                    padding="max_length",
                    truncation=True,
                    max_length=max_length,
                    return_tensors="np",
                )
                for text in inputs
            ]
        else:
            processed_images = [self._process_image(img) for img in inputs]
            processed_inputs = self.feature_extractor(
                images=processed_images, return_tensors="np"
            )
            processed_inputs = [
                self.feature_extractor(images=image, return_tensors="np")
                for image in processed_images
            ]

        input_names = [input.name for input in self.session.get_inputs()]
        onnx_inputs = {
            name: processed_inputs[name]
            for name in input_names
            if name in processed_inputs
        }
        input_names = self.runner.get_input_names()
        onnx_inputs = {name: [] for name in input_names}
        input: dict[str, any]
        for input in processed_inputs:
            for key, value in input.items():
                if key in input_names:
                    onnx_inputs[key].append(value[0])

        outputs = self.session.run(None, onnx_inputs)
        embeddings = self.embedding_function(outputs)
        for key in input_names:
            if onnx_inputs.get(key):
                onnx_inputs[key] = np.stack(onnx_inputs[key])
            else:
                logger.warning(f"Expected input '{key}' not found in onnx_inputs")

        embeddings = self.runner.run(onnx_inputs)[0]
        return [embedding for embedding in embeddings]
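
The batching change above pads every text in a batch to the longest tokenized length so the per-text arrays can be stacked into a single batch tensor. A hedged sketch of that shape logic in plain NumPy (token ids invented):

```python
import numpy as np

# per-text token id arrays of different lengths, as a tokenizer might return
token_ids = [np.array([101, 2023, 102]), np.array([101, 102])]
max_length = max(len(ids) for ids in token_ids)

# pad each array to max_length, then stack into a (batch, max_length) tensor
padded = [np.pad(ids, (0, max_length - len(ids))) for ids in token_ids]
batch = np.stack(padded)
print(batch.shape)  # (2, 3)
```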

@@ -41,10 +41,14 @@ class EmbeddingMaintainer(threading.Thread):
        config: FrigateConfig,
        stop_event: MpEvent,
    ) -> None:
        threading.Thread.__init__(self)
        self.name = "embeddings_maintainer"
        super().__init__(name="embeddings_maintainer")
        self.config = config
        self.embeddings = Embeddings(config.semantic_search, db)

        # Check if we need to re-index events
        if config.semantic_search.reindex:
            self.embeddings.reindex()

        self.event_subscriber = EventUpdateSubscriber()
        self.event_end_subscriber = EventEndSubscriber()
        self.event_metadata_subscriber = EventMetadataSubscriber(
@@ -76,10 +80,13 @@ class EmbeddingMaintainer(threading.Thread):
    def _process_requests(self) -> None:
        """Process embeddings requests"""

        def handle_request(topic: str, data: str) -> str:
        def _handle_request(topic: str, data: str) -> str:
            try:
                if topic == EmbeddingsRequestEnum.embed_description.value:
                    return serialize(
                        self.embeddings.upsert_description(data["id"], data["description"]),
                        self.embeddings.upsert_description(
                            data["id"], data["description"]
                        ),
                        pack=False,
                    )
                elif topic == EmbeddingsRequestEnum.embed_thumbnail.value:
@@ -89,13 +96,17 @@ class EmbeddingMaintainer(threading.Thread):
                        pack=False,
                    )
                elif topic == EmbeddingsRequestEnum.generate_search.value:
                    return serialize(self.embeddings.text_embedding([data])[0], pack=False)
                    return serialize(
                        self.embeddings.text_embedding([data])[0], pack=False
                    )
            except Exception as e:
                logger.error(f"Unable to handle embeddings request {e}")

        self.embeddings_responder.check_for_request(handle_request)
        self.embeddings_responder.check_for_request(_handle_request)

    def _process_updates(self) -> None:
        """Process event updates"""
        update = self.event_subscriber.check_for_update()
        update = self.event_subscriber.check_for_update(timeout=0.1)

        if update is None:
            return
@@ -124,7 +135,7 @@ class EmbeddingMaintainer(threading.Thread):
    def _process_finalized(self) -> None:
        """Process the end of an event."""
        while True:
            ended = self.event_end_subscriber.check_for_update()
            ended = self.event_end_subscriber.check_for_update(timeout=0.1)

            if ended == None:
                break
@@ -161,9 +172,6 @@ class EmbeddingMaintainer(threading.Thread):
                or set(event.zones) & set(camera_config.genai.required_zones)
            )
        ):
            logger.debug(
                f"Description generation for {event}, has_snapshot: {event.has_snapshot}"
            )
            if event.has_snapshot and camera_config.genai.use_snapshot:
                with open(
                    os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"),
@@ -217,7 +225,7 @@ class EmbeddingMaintainer(threading.Thread):
    def _process_event_metadata(self):
        # Check for regenerate description requests
        (topic, event_id, source) = self.event_metadata_subscriber.check_for_update(
            timeout=1
            timeout=0.1
        )

        if topic is None:
@@ -251,7 +259,7 @@ class EmbeddingMaintainer(threading.Thread):
        camera_config = self.config.cameras[event.camera]

        description = self.genai_client.generate_description(
            camera_config, thumbnails, event.label
            camera_config, thumbnails, event
        )

        if not description:

@@ -20,9 +20,10 @@ class ZScoreNormalization:

    @property
    def stddev(self):
        return math.sqrt(self.variance)
        return math.sqrt(self.variance) if self.variance > 0 else 0.0

    def normalize(self, distances: list[float]):
    def normalize(self, distances: list[float], save_stats: bool):
        if save_stats:
            self._update(distances)
        if self.stddev == 0:
            return distances
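
A compact sketch of the z-score normalization this hunk adjusts: distances are shifted by the running mean and scaled by the running stddev, statistics only update when `save_stats` is set, and a zero stddev falls back to the raw distances. The running-stat update here is a simple illustrative version (Welford's algorithm), not necessarily Frigate's exact `_update`:

```python
import math


class ZScore:
    def __init__(self):
        self.n = 0
        self.mean = 0.0
        self.m2 = 0.0  # sum of squared deviations (Welford)

    @property
    def stddev(self) -> float:
        variance = self.m2 / self.n if self.n > 1 else 0.0
        return math.sqrt(variance) if variance > 0 else 0.0

    def _update(self, distances: list[float]) -> None:
        for x in distances:
            self.n += 1
            delta = x - self.mean
            self.mean += delta / self.n
            self.m2 += delta * (x - self.mean)

    def normalize(self, distances: list[float], save_stats: bool) -> list[float]:
        if save_stats:
            self._update(distances)
        if self.stddev == 0:
            return distances
        return [(x - self.mean) / self.stddev for x in distances]


zs = ZScore()
print(zs.normalize([0.2, 0.4, 0.6], save_stats=True))  # approx [-1.22, 0.0, 1.22]
```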

@@ -8,11 +8,9 @@ from enum import Enum
from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path

from playhouse.sqliteq import SqliteQueueDatabase

from frigate.config import FrigateConfig
from frigate.const import CLIPS_DIR
from frigate.embeddings.embeddings import Embeddings
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event, Timeline

logger = logging.getLogger(__name__)
@@ -25,7 +23,7 @@ class EventCleanupType(str, Enum):

class EventCleanup(threading.Thread):
    def __init__(
        self, config: FrigateConfig, stop_event: MpEvent, db: SqliteQueueDatabase
        self, config: FrigateConfig, stop_event: MpEvent, db: SqliteVecQueueDatabase
    ):
        super().__init__(name="event_cleanup")
        self.config = config
@@ -35,9 +33,6 @@ class EventCleanup(threading.Thread):
        self.removed_camera_labels: list[str] = None
        self.camera_labels: dict[str, dict[str, any]] = {}

        if self.config.semantic_search.enabled:
            self.embeddings = Embeddings(self.config.semantic_search, self.db)

    def get_removed_camera_labels(self) -> list[Event]:
        """Get a list of distinct labels for removed cameras."""
        if self.removed_camera_labels is None:
@@ -234,8 +229,8 @@ class EventCleanup(threading.Thread):
            Event.delete().where(Event.id << chunk).execute()

            if self.config.semantic_search.enabled:
                self.embeddings.delete_description(chunk)
                self.embeddings.delete_thumbnail(chunk)
                self.db.delete_embeddings_description(event_ids=chunk)
                self.db.delete_embeddings_thumbnail(event_ids=chunk)
                logger.debug(f"Deleted {len(events_to_delete)} embeddings")

        logger.info("Exiting event cleanup...")

@@ -5,6 +5,7 @@ import os
from typing import Optional

from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum
from frigate.models import Event

PROVIDERS = {}

@@ -31,12 +32,12 @@ class GenAIClient:
        self,
        camera_config: CameraConfig,
        thumbnails: list[bytes],
        label: str,
        event: Event,
    ) -> Optional[str]:
        """Generate a description for the frame."""
        prompt = camera_config.genai.object_prompts.get(
            label, camera_config.genai.prompt
        ).format(label=label)
            event.label, camera_config.genai.prompt
        ).format(**event)
        return self._send(prompt, thumbnails)

    def _init_provider(self):

@@ -19,6 +19,13 @@ class FileLock:
        self.path = path
        self.lock_file = f"{path}.lock"

        # we have not acquired the lock yet so it should not exist
        if os.path.exists(self.lock_file):
            try:
                os.remove(self.lock_file)
            except Exception:
                pass

    def acquire(self):
        parent_dir = os.path.dirname(self.lock_file)
        os.makedirs(parent_dir, exist_ok=True)
@@ -56,13 +63,11 @@ class ModelDownloader:
        self.download_complete = threading.Event()

    def ensure_model_files(self):
        for file in self.file_names:
            self.requestor.send_data(
                UPDATE_MODEL_STATE,
                {
                    "model": f"{self.model_name}-{file}",
                    "state": ModelStatusTypesEnum.downloading,
                },
        self.mark_files_state(
            self.requestor,
            self.model_name,
            self.file_names,
            ModelStatusTypesEnum.downloading,
        )
        self.download_thread = threading.Thread(
            target=self._download_models,
@@ -92,6 +97,7 @@ class ModelDownloader:
            },
        )

        self.requestor.stop()
        self.download_complete.set()

    @staticmethod
@@ -119,5 +125,21 @@ class ModelDownloader:
        if not silent:
            logger.info(f"Downloading complete: {url}")

    @staticmethod
    def mark_files_state(
        requestor: InterProcessRequestor,
        model_name: str,
        files: list[str],
        state: ModelStatusTypesEnum,
    ) -> None:
        for file_name in files:
            requestor.send_data(
                UPDATE_MODEL_STATE,
                {
                    "model": f"{model_name}-{file_name}",
                    "state": state,
                },
            )

    def wait_for_download(self):
        self.download_complete.wait()

@@ -1,44 +1,116 @@
"""Model Utils"""

import os
from typing import Any

import onnxruntime as ort

try:
    import openvino as ov
except ImportError:
    # openvino is not included
    pass


def get_ort_providers(
    force_cpu: bool = False, openvino_device: str = "AUTO", requires_fp16: bool = False
) -> tuple[list[str], list[dict[str, any]]]:
    if force_cpu:
        return (["CPUExecutionProvider"], [{}])
        return (
            ["CPUExecutionProvider"],
            [
                {
                    "arena_extend_strategy": "kSameAsRequested",
                }
            ],
        )

    providers = ort.get_available_providers()
    providers = []
    options = []

    for provider in providers:
        if provider == "TensorrtExecutionProvider":
            os.makedirs("/config/model_cache/tensorrt/ort/trt-engines", exist_ok=True)

            if not requires_fp16 or os.environ.get("USE_FP_16", "True") != "False":
    for provider in ort.get_available_providers():
        if provider == "CUDAExecutionProvider":
            providers.append(provider)
            options.append(
                {
                    "trt_fp16_enable": requires_fp16,
                    "trt_timing_cache_enable": True,
                    "trt_engine_cache_enable": True,
                    "trt_timing_cache_path": "/config/model_cache/tensorrt/ort",
                    "trt_engine_cache_path": "/config/model_cache/tensorrt/ort/trt-engines",
                    "arena_extend_strategy": "kSameAsRequested",
                }
            )
            else:
                options.append({})
        elif provider == "TensorrtExecutionProvider":
            # TensorrtExecutionProvider uses too much memory without options to control it
            pass
        elif provider == "OpenVINOExecutionProvider":
            os.makedirs("/config/model_cache/openvino/ort", exist_ok=True)
            providers.append(provider)
            options.append(
                {
                    "arena_extend_strategy": "kSameAsRequested",
                    "cache_dir": "/config/model_cache/openvino/ort",
                    "device_type": openvino_device,
                }
            )
        elif provider == "CPUExecutionProvider":
            providers.append(provider)
            options.append(
                {
                    "arena_extend_strategy": "kSameAsRequested",
                }
            )
        else:
            providers.append(provider)
            options.append({})

    return (providers, options)


class ONNXModelRunner:
    """Run onnx models optimally based on available hardware."""

    def __init__(self, model_path: str, device: str, requires_fp16: bool = False):
        self.model_path = model_path
        self.ort: ort.InferenceSession = None
        self.ov: ov.Core = None
        providers, options = get_ort_providers(device == "CPU", device, requires_fp16)

        if "OpenVINOExecutionProvider" in providers:
            # use OpenVINO directly
            self.type = "ov"
            self.ov = ov.Core()
            self.ov.set_property(
                {ov.properties.cache_dir: "/config/model_cache/openvino"}
            )
            self.interpreter = self.ov.compile_model(
                model=model_path, device_name=device
            )
        else:
            # Use ONNXRuntime
            self.type = "ort"
            self.ort = ort.InferenceSession(
                model_path, providers=providers, provider_options=options
            )

    def get_input_names(self) -> list[str]:
        if self.type == "ov":
            input_names = []

            for input in self.interpreter.inputs:
                input_names.extend(input.names)

            return input_names
        elif self.type == "ort":
            return [input.name for input in self.ort.get_inputs()]

    def run(self, input: dict[str, Any]) -> Any:
        if self.type == "ov":
            infer_request = self.interpreter.create_infer_request()
            input_tensor = list(input.values())

            if len(input_tensor) == 1:
                input_tensor = ov.Tensor(array=input_tensor[0])
            else:
                input_tensor = ov.Tensor(array=input_tensor)

            infer_request.infer(input_tensor)
            return [infer_request.get_output_tensor().data]
        elif self.type == "ort":
            return self.ort.run(None, input)
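
A hedged usage sketch for the runner above (the model path and input shape are invented; real shapes depend on the model being loaded):

```python
import numpy as np

# Assumed: an ONNX model exists at this path; on a CPU-only machine the
# runner falls back to ONNXRuntime's CPUExecutionProvider.
runner = ONNXModelRunner("/config/model_cache/example/model.onnx", device="CPU")

batch = {
    name: np.zeros((1, 3, 224, 224), dtype=np.float32)
    for name in runner.get_input_names()
}
outputs = runner.run(batch)
print([o.shape for o in outputs])
```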

@@ -2,6 +2,7 @@ import { baseUrl } from "./baseUrl";
import { useCallback, useEffect, useState } from "react";
import useWebSocket, { ReadyState } from "react-use-websocket";
import {
  EmbeddingsReindexProgressType,
  FrigateCameraState,
  FrigateEvent,
  FrigateReview,
@@ -302,6 +303,42 @@ export function useModelState(
  return { payload: data ? data[model] : undefined };
}

export function useEmbeddingsReindexProgress(
  revalidateOnFocus: boolean = true,
): {
  payload: EmbeddingsReindexProgressType;
} {
  const {
    value: { payload },
    send: sendCommand,
  } = useWs("embeddings_reindex_progress", "embeddingsReindexProgress");

  const data = useDeepMemo(JSON.parse(payload as string));

  useEffect(() => {
    let listener = undefined;
    if (revalidateOnFocus) {
      sendCommand("embeddingsReindexProgress");
      listener = () => {
        if (document.visibilityState == "visible") {
          sendCommand("embeddingsReindexProgress");
        }
      };
      addEventListener("visibilitychange", listener);
    }

    return () => {
      if (listener) {
        removeEventListener("visibilitychange", listener);
      }
    };
    // we know that these deps are correct
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [revalidateOnFocus]);

  return { payload: data };
}

export function useMotionActivity(camera: string): { payload: string } {
  const {
    value: { payload },

@@ -1,3 +1,4 @@
import { useEmbeddingsReindexProgress } from "@/api/ws";
import {
  StatusBarMessagesContext,
  StatusMessage,
@@ -41,6 +42,23 @@ export default function Statusbar() {
    });
  }, [potentialProblems, addMessage, clearMessages]);

  const { payload: reindexState } = useEmbeddingsReindexProgress();

  useEffect(() => {
    if (reindexState) {
      if (reindexState.status == "indexing") {
        clearMessages("embeddings-reindex");
        addMessage(
          "embeddings-reindex",
          `Reindexing embeddings (${Math.floor((reindexState.processed_objects / reindexState.total_objects) * 100)}% complete)`,
        );
      }
      if (reindexState.status === "completed") {
        clearMessages("embeddings-reindex");
      }
    }
  }, [reindexState, addMessage, clearMessages]);

  return (
    <div className="absolute bottom-0 left-0 right-0 z-10 flex h-8 w-full items-center justify-between border-t border-secondary-highlight bg-background_alt px-4 dark:text-secondary-foreground">
      <div className="flex h-full items-center gap-2">

@ -241,6 +241,8 @@ export default function ReviewFilterGroup({
|
||||
mode="none"
|
||||
setMode={() => {}}
|
||||
setRange={() => {}}
|
||||
showExportPreview={false}
|
||||
setShowExportPreview={() => {}}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
|
||||
@ -7,6 +7,7 @@ import {
|
||||
LuChevronUp,
|
||||
LuTrash2,
|
||||
LuStar,
|
||||
LuSearch,
|
||||
} from "react-icons/lu";
|
||||
import {
|
||||
FilterType,
|
||||
@ -161,8 +162,12 @@ export default function InputWithTags({
|
||||
.map((word) => word.trim())
|
||||
.lastIndexOf(words.filter((word) => word.trim() !== "").pop() || "");
|
||||
const currentWord = words[lastNonEmptyWordIndex];
|
||||
if (words.at(-1) === "") {
|
||||
return current_suggestions;
|
||||
}
|
||||
|
||||
return current_suggestions.filter((suggestion) =>
|
||||
suggestion.toLowerCase().includes(currentWord.toLowerCase()),
|
||||
suggestion.toLowerCase().startsWith(currentWord),
|
||||
);
|
||||
},
|
||||
[inputValue, suggestions, currentFilterType],
|
||||
@ -636,7 +641,19 @@ export default function InputWithTags({
|
||||
inputFocused ? "visible" : "hidden",
|
||||
)}
|
||||
>
|
||||
{(Object.keys(filters).length > 0 || isSimilaritySearch) && (
|
||||
{!currentFilterType && inputValue && (
|
||||
<CommandGroup heading="Search">
|
||||
<CommandItem
|
||||
className="cursor-pointer"
|
||||
onSelect={() => handleSearch(inputValue)}
|
||||
>
|
||||
<LuSearch className="mr-2 h-4 w-4" />
|
||||
Search for "{inputValue}"
|
||||
</CommandItem>
|
||||
</CommandGroup>
|
||||
)}
|
||||
{(Object.keys(filters).filter((key) => key !== "query").length > 0 ||
|
||||
isSimilaritySearch) && (
|
||||
<CommandGroup heading="Active Filters">
|
||||
<div className="my-2 flex flex-wrap gap-2 px-2">
|
||||
{isSimilaritySearch && (
|
||||
|
||||
@@ -2,6 +2,7 @@ import { useCallback, useMemo, useState } from "react";
import {
  Dialog,
  DialogContent,
  DialogDescription,
  DialogFooter,
  DialogHeader,
  DialogTitle,
@@ -22,10 +23,13 @@ import { FrigateConfig } from "@/types/frigateConfig";
import { Popover, PopoverContent, PopoverTrigger } from "../ui/popover";
import { TimezoneAwareCalendar } from "./ReviewActivityCalendar";
import { SelectSeparator } from "../ui/select";
import { isDesktop, isIOS } from "react-device-detect";
import { isDesktop, isIOS, isMobile } from "react-device-detect";
import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
import SaveExportOverlay from "./SaveExportOverlay";
import { getUTCOffset } from "@/utils/dateUtil";
import { baseUrl } from "@/api/baseUrl";
import { cn } from "@/lib/utils";
import { GenericVideoPlayer } from "../player/GenericVideoPlayer";

const EXPORT_OPTIONS = [
  "1",
@@ -44,8 +48,10 @@ type ExportDialogProps = {
  currentTime: number;
  range?: TimeRange;
  mode: ExportMode;
  showPreview: boolean;
  setRange: (range: TimeRange | undefined) => void;
  setMode: (mode: ExportMode) => void;
  setShowPreview: (showPreview: boolean) => void;
};
export default function ExportDialog({
  camera,
@@ -53,10 +59,13 @@ export default function ExportDialog({
  currentTime,
  range,
  mode,
  showPreview,
  setRange,
  setMode,
  setShowPreview,
}: ExportDialogProps) {
  const [name, setName] = useState("");

  const onStartExport = useCallback(() => {
    if (!range) {
      toast.error("No valid time range selected", { position: "top-center" });
@@ -109,9 +118,16 @@ export default function ExportDialog({

  return (
    <>
      <ExportPreviewDialog
        camera={camera}
        range={range}
        showPreview={showPreview}
        setShowPreview={setShowPreview}
      />
      <SaveExportOverlay
        className="pointer-events-none absolute left-1/2 top-8 z-50 -translate-x-1/2"
        show={mode == "timeline"}
        onPreview={() => setShowPreview(true)}
        onSave={() => onStartExport()}
        onCancel={() => setMode("none")}
      />
@@ -525,3 +541,44 @@ function CustomTimeSelector({
    </div>
  );
}

type ExportPreviewDialogProps = {
  camera: string;
  range?: TimeRange;
  showPreview: boolean;
  setShowPreview: (showPreview: boolean) => void;
};

export function ExportPreviewDialog({
  camera,
  range,
  showPreview,
  setShowPreview,
}: ExportPreviewDialogProps) {
  if (!range) {
    return null;
  }

  const source = `${baseUrl}vod/${camera}/start/${range.after}/end/${range.before}/index.m3u8`;

  return (
    <Dialog open={showPreview} onOpenChange={setShowPreview}>
      <DialogContent
        className={cn(
          "scrollbar-container overflow-y-auto",
          isDesktop &&
            "max-h-[95dvh] sm:max-w-xl md:max-w-4xl lg:max-w-4xl xl:max-w-7xl",
          isMobile && "px-4",
        )}
      >
        <DialogHeader>
          <DialogTitle>Preview Export</DialogTitle>
          <DialogDescription className="sr-only">
            Preview Export
          </DialogDescription>
        </DialogHeader>
        <GenericVideoPlayer source={source} />
      </DialogContent>
    </Dialog>
  );
}
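`ExportPreviewDialog` builds its HLS source from Frigate's VOD endpoint using the export range's epoch-second timestamps. A small helper capturing that URL shape (a sketch; the helper name is hypothetical, but the `vod/.../index.m3u8` path is taken verbatim from the component above):

```ts
// Sketch: build the HLS playlist URL for a camera and epoch-second range,
// matching the template used by ExportPreviewDialog.
function vodPlaylistUrl(
  baseUrl: string, // e.g. "http://frigate.local:5000/" (trailing slash)
  camera: string,
  after: number, // range start, epoch seconds
  before: number, // range end, epoch seconds
): string {
  return `${baseUrl}vod/${camera}/start/${after}/end/${before}/index.m3u8`;
}

// vodPlaylistUrl("http://frigate.local:5000/", "front_door", 1700000000, 1700000600)
// -> "http://frigate.local:5000/vod/front_door/start/1700000000/end/1700000600/index.m3u8"
```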
@@ -3,7 +3,7 @@ import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
import { Button } from "../ui/button";
import { FaArrowDown, FaCalendarAlt, FaCog, FaFilter } from "react-icons/fa";
import { TimeRange } from "@/types/timeline";
import { ExportContent } from "./ExportDialog";
import { ExportContent, ExportPreviewDialog } from "./ExportDialog";
import { ExportMode } from "@/types/filter";
import ReviewActivityCalendar from "./ReviewActivityCalendar";
import { SelectSeparator } from "../ui/select";
@@ -34,12 +34,14 @@ type MobileReviewSettingsDrawerProps = {
  currentTime: number;
  range?: TimeRange;
  mode: ExportMode;
  showExportPreview: boolean;
  reviewSummary?: ReviewSummary;
  allLabels: string[];
  allZones: string[];
  onUpdateFilter: (filter: ReviewFilter) => void;
  setRange: (range: TimeRange | undefined) => void;
  setMode: (mode: ExportMode) => void;
  setShowExportPreview: (showPreview: boolean) => void;
};
export default function MobileReviewSettingsDrawer({
  features = DEFAULT_DRAWER_FEATURES,
@@ -50,12 +52,14 @@ export default function MobileReviewSettingsDrawer({
  currentTime,
  range,
  mode,
  showExportPreview,
  reviewSummary,
  allLabels,
  allZones,
  onUpdateFilter,
  setRange,
  setMode,
  setShowExportPreview,
}: MobileReviewSettingsDrawerProps) {
  const [drawerMode, setDrawerMode] = useState<DrawerMode>("none");

@@ -282,6 +286,13 @@ export default function MobileReviewSettingsDrawer({
        show={mode == "timeline"}
        onSave={() => onStartExport()}
        onCancel={() => setMode("none")}
        onPreview={() => setShowExportPreview(true)}
      />
      <ExportPreviewDialog
        camera={camera}
        range={range}
        showPreview={showExportPreview}
        setShowPreview={setShowExportPreview}
      />
      <Drawer
        modal={!(isIOS && drawerMode == "export")}
@@ -1,4 +1,4 @@
import { LuX } from "react-icons/lu";
import { LuVideo, LuX } from "react-icons/lu";
import { Button } from "../ui/button";
import { FaCompactDisc } from "react-icons/fa";
import { cn } from "@/lib/utils";
@@ -6,12 +6,14 @@ import { cn } from "@/lib/utils";
type SaveExportOverlayProps = {
  className: string;
  show: boolean;
  onPreview: () => void;
  onSave: () => void;
  onCancel: () => void;
};
export default function SaveExportOverlay({
  className,
  show,
  onPreview,
  onSave,
  onCancel,
}: SaveExportOverlayProps) {
@@ -24,6 +26,22 @@ export default function SaveExportOverlay({
          "mx-auto mt-5 text-center",
        )}
      >
        <Button
          className="flex items-center gap-1 text-primary"
          size="sm"
          onClick={onCancel}
        >
          <LuX />
          Cancel
        </Button>
        <Button
          className="flex items-center gap-1"
          size="sm"
          onClick={onPreview}
        >
          <LuVideo />
          Preview Export
        </Button>
        <Button
          className="flex items-center gap-1"
          variant="select"
@@ -33,14 +51,6 @@ export default function SaveExportOverlay({
          <FaCompactDisc />
          Save Export
        </Button>
        <Button
          className="flex items-center gap-1 text-primary"
          size="sm"
          onClick={onCancel}
        >
          <LuX />
          Cancel
        </Button>
      </div>
    </div>
  );
@@ -6,7 +6,7 @@ import { useFormattedTimestamp } from "@/hooks/use-date-utils";
import { getIconForLabel } from "@/utils/iconUtil";
import { useApiHost } from "@/api";
import { Button } from "../../ui/button";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useCallback, useEffect, useMemo, useState } from "react";
import axios from "axios";
import { toast } from "sonner";
import { Textarea } from "../../ui/textarea";
@@ -21,7 +21,6 @@ import {
  DialogTitle,
} from "@/components/ui/dialog";
import { Event } from "@/types/event";
import HlsVideoPlayer from "@/components/player/HlsVideoPlayer";
import { baseUrl } from "@/api/baseUrl";
import { cn } from "@/lib/utils";
import ActivityIndicator from "@/components/indicators/activity-indicator";
@@ -62,8 +61,7 @@ import { TransformComponent, TransformWrapper } from "react-zoom-pan-pinch";
import { Card, CardContent } from "@/components/ui/card";
import useImageLoaded from "@/hooks/use-image-loaded";
import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator";
import { useResizeObserver } from "@/hooks/resize-observer";
import { VideoResolutionType } from "@/types/live";
import { GenericVideoPlayer } from "@/components/player/GenericVideoPlayer";

const SEARCH_TABS = [
  "details",
@@ -398,6 +396,7 @@ function ObjectDetailsTab({
            draggable={false}
            src={`${apiHost}api/events/${search.id}/thumbnail.jpg`}
          />
          {config?.semantic_search.enabled && (
            <Button
              onClick={() => {
                setSearch(undefined);
@@ -409,6 +408,7 @@
            >
              Find Similar
            </Button>
          )}
        </div>
      </div>
      <div className="flex flex-col gap-1.5">
@@ -536,6 +536,7 @@ function ObjectSnapshotTab({
              />
            )}
          </TransformComponent>
          {search.plus_id !== "not_enabled" && search.end_time && (
            <Card className="p-1 text-sm md:p-2">
              <CardContent className="flex flex-col items-center justify-between gap-3 p-2 md:flex-row">
                <div className={cn("flex flex-col space-y-3")}>
@@ -587,6 +588,7 @@ function ObjectSnapshotTab({
                </div>
              </CardContent>
            </Card>
          )}
        </div>
      </TransformWrapper>
    </div>
@@ -597,71 +599,19 @@ function ObjectSnapshotTab({
type VideoTabProps = {
  search: SearchResult;
};
function VideoTab({ search }: VideoTabProps) {
  const [isLoading, setIsLoading] = useState(true);
  const videoRef = useRef<HTMLVideoElement | null>(null);

  const endTime = useMemo(() => search.end_time ?? Date.now() / 1000, [search]);

export function VideoTab({ search }: VideoTabProps) {
  const navigate = useNavigate();
  const { data: reviewItem } = useSWR<ReviewSegment>([
    `review/event/${search.id}`,
  ]);
  const endTime = useMemo(() => search.end_time ?? Date.now() / 1000, [search]);

  const containerRef = useRef<HTMLDivElement | null>(null);

  const [{ width: containerWidth, height: containerHeight }] =
    useResizeObserver(containerRef);
  const [videoResolution, setVideoResolution] = useState<VideoResolutionType>({
    width: 0,
    height: 0,
  });

  const videoAspectRatio = useMemo(() => {
    return videoResolution.width / videoResolution.height || 16 / 9;
  }, [videoResolution]);

  const containerAspectRatio = useMemo(() => {
    return containerWidth / containerHeight || 16 / 9;
  }, [containerWidth, containerHeight]);

  const videoDimensions = useMemo(() => {
    if (!containerWidth || !containerHeight)
      return { width: "100%", height: "100%" };

    if (containerAspectRatio > videoAspectRatio) {
      const height = containerHeight;
      const width = height * videoAspectRatio;
      return { width: `${width}px`, height: `${height}px` };
    } else {
      const width = containerWidth;
      const height = width / videoAspectRatio;
      return { width: `${width}px`, height: `${height}px` };
    }
  }, [containerWidth, containerHeight, videoAspectRatio, containerAspectRatio]);
  const source = `${baseUrl}vod/${search.camera}/start/${search.start_time}/end/${endTime}/index.m3u8`;

  return (
    <div ref={containerRef} className="relative flex h-full w-full flex-col">
      <div className="relative flex flex-grow items-center justify-center">
        {(isLoading || !reviewItem) && (
          <ActivityIndicator className="absolute left-1/2 top-1/2 z-10 -translate-x-1/2 -translate-y-1/2" />
        )}
        <div
          className="relative flex items-center justify-center"
          style={videoDimensions}
        >
          <HlsVideoPlayer
            videoRef={videoRef}
            currentSource={`${baseUrl}vod/${search.camera}/start/${search.start_time}/end/${endTime}/index.m3u8`}
            hotKeys
            visible
            frigateControls={false}
            fullscreen={false}
            supportsFullscreen={false}
            onPlaying={() => setIsLoading(false)}
            setFullResolution={setVideoResolution}
          />
          {!isLoading && reviewItem && (
    <GenericVideoPlayer source={source}>
      {reviewItem && (
        <div
          className={cn(
            "absolute top-2 z-10 flex items-center",
@@ -688,8 +638,6 @@ function VideoTab({ search }: VideoTabProps) {
          </Tooltip>
        </div>
      )}
        </div>
      </div>
    </div>
    </GenericVideoPlayer>
  );
}
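The rewritten `VideoTab` drops its hand-rolled sizing and loading state in favor of the new `GenericVideoPlayer`, and pulls the associated review item over SWR keyed by the event id. A minimal sketch of that fetch pattern (assuming the app's global SWR fetcher handles these keys, and assuming `ReviewSegment` lives at `@/types/review`; that import path is not shown in this diff):

```tsx
import useSWR from "swr";
import type { ReviewSegment } from "@/types/review"; // assumed path

// Sketch: fetch the review segment tied to a tracked object.
// Array keys let SWR cache each event's response independently.
function useReviewForEvent(eventId: string) {
  const { data: reviewItem } = useSWR<ReviewSegment>([
    `review/event/${eventId}`,
  ]);
  return reviewItem; // undefined while the request is in flight
}
```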
web/src/components/player/GenericVideoPlayer.tsx (new file, 52 lines)
@@ -0,0 +1,52 @@
import React, { useState, useRef } from "react";
import { useVideoDimensions } from "@/hooks/use-video-dimensions";
import HlsVideoPlayer from "./HlsVideoPlayer";
import ActivityIndicator from "../indicators/activity-indicator";

type GenericVideoPlayerProps = {
  source: string;
  onPlaying?: () => void;
  children?: React.ReactNode;
};

export function GenericVideoPlayer({
  source,
  onPlaying,
  children,
}: GenericVideoPlayerProps) {
  const [isLoading, setIsLoading] = useState(true);
  const videoRef = useRef<HTMLVideoElement | null>(null);
  const containerRef = useRef<HTMLDivElement | null>(null);
  const { videoDimensions, setVideoResolution } =
    useVideoDimensions(containerRef);

  return (
    <div ref={containerRef} className="relative flex h-full w-full flex-col">
      <div className="relative flex flex-grow items-center justify-center">
        {isLoading && (
          <ActivityIndicator className="absolute left-1/2 top-1/2 z-10 -translate-x-1/2 -translate-y-1/2" />
        )}
        <div
          className="relative flex items-center justify-center"
          style={videoDimensions}
        >
          <HlsVideoPlayer
            videoRef={videoRef}
            currentSource={source}
            hotKeys
            visible
            frigateControls={false}
            fullscreen={false}
            supportsFullscreen={false}
            onPlaying={() => {
              setIsLoading(false);
              onPlaying?.();
            }}
            setFullResolution={setVideoResolution}
          />
          {!isLoading && children}
        </div>
      </div>
    </div>
  );
}
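`GenericVideoPlayer` wraps `HlsVideoPlayer` with the shared loading indicator and aspect-ratio sizing, and renders `children` as overlays only after playback starts. A usage sketch (the source URL and caption are illustrative):

```tsx
import { GenericVideoPlayer } from "@/components/player/GenericVideoPlayer";

// Sketch: overlay a caption once the video is playing.
function ClipPreview() {
  return (
    <GenericVideoPlayer
      source="http://frigate.local:5000/vod/front_door/start/1700000000/end/1700000600/index.m3u8"
      onPlaying={() => console.log("playback started")}
    >
      {/* children are hidden while isLoading is true */}
      <div className="absolute left-2 top-2 z-10 text-white">front_door</div>
    </GenericVideoPlayer>
  );
}
```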
web/src/components/ui/circular-progress-bar.tsx (new file, 108 lines)
@@ -0,0 +1,108 @@
import { cn } from "@/lib/utils";

interface Props {
  max: number;
  value: number;
  min: number;
  gaugePrimaryColor: string;
  gaugeSecondaryColor: string;
  className?: string;
}

export default function AnimatedCircularProgressBar({
  max = 100,
  min = 0,
  value = 0,
  gaugePrimaryColor,
  gaugeSecondaryColor,
  className,
}: Props) {
  const circumference = 2 * Math.PI * 45;
  const percentPx = circumference / 100;
  const currentPercent = Math.floor(((value - min) / (max - min)) * 100);

  return (
    <div
      className={cn("relative size-40 text-2xl font-semibold", className)}
      style={
        {
          "--circle-size": "100px",
          "--circumference": circumference,
          "--percent-to-px": `${percentPx}px`,
          "--gap-percent": "5",
          "--offset-factor": "0",
          "--transition-length": "1s",
          "--transition-step": "200ms",
          "--delay": "0s",
          "--percent-to-deg": "3.6deg",
          transform: "translateZ(0)",
        } as React.CSSProperties
      }
    >
      <svg
        fill="none"
        className="size-full"
        strokeWidth="2"
        viewBox="0 0 100 100"
      >
        {currentPercent <= 90 && currentPercent >= 0 && (
          <circle
            cx="50"
            cy="50"
            r="45"
            strokeWidth="10"
            strokeDashoffset="0"
            strokeLinecap="round"
            strokeLinejoin="round"
            className="opacity-100"
            style={
              {
                stroke: gaugeSecondaryColor,
                "--stroke-percent": 90 - currentPercent,
                "--offset-factor-secondary": "calc(1 - var(--offset-factor))",
                strokeDasharray:
                  "calc(var(--stroke-percent) * var(--percent-to-px)) var(--circumference)",
                transform:
                  "rotate(calc(1turn - 90deg - (var(--gap-percent) * var(--percent-to-deg) * var(--offset-factor-secondary)))) scaleY(-1)",
                transition: "all var(--transition-length) ease var(--delay)",
                transformOrigin:
                  "calc(var(--circle-size) / 2) calc(var(--circle-size) / 2)",
              } as React.CSSProperties
            }
          />
        )}
        <circle
          cx="50"
          cy="50"
          r="45"
          strokeWidth="10"
          strokeDashoffset="0"
          strokeLinecap="round"
          strokeLinejoin="round"
          className="opacity-100"
          style={
            {
              stroke: gaugePrimaryColor,
              "--stroke-percent": currentPercent,
              strokeDasharray:
                "calc(var(--stroke-percent) * var(--percent-to-px)) var(--circumference)",
              transition:
                "var(--transition-length) ease var(--delay),stroke var(--transition-length) ease var(--delay)",
              transitionProperty: "stroke-dasharray,transform",
              transform:
                "rotate(calc(-90deg + var(--gap-percent) * var(--offset-factor) * var(--percent-to-deg)))",
              transformOrigin:
                "calc(var(--circle-size) / 2) calc(var(--circle-size) / 2)",
            } as React.CSSProperties
          }
        />
      </svg>
      <span
        data-current-value={currentPercent}
        className="duration-[var(--transition-length)] delay-[var(--delay)] absolute inset-0 m-auto size-fit ease-linear animate-in fade-in"
      >
        {currentPercent}%
      </span>
    </div>
  );
}
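The gauge maps a percentage onto SVG stroke lengths: with `r = 45`, the circumference is `2π · 45 ≈ 282.74` viewBox units, so each percent corresponds to roughly 2.83 units of `stroke-dasharray`. A quick check of the arithmetic, using the same formulas as above:

```ts
// Worked example of the gauge's stroke math.
const r = 45;
const circumference = 2 * Math.PI * r; // ≈ 282.74 viewBox units
const percentPx = circumference / 100; // ≈ 2.827 units per percent

// For value = 250, min = 0, max = 1000 (a reindex 25% complete):
const currentPercent = Math.floor(((250 - 0) / (1000 - 0)) * 100); // 25
const dashLength = currentPercent * percentPx; // ≈ 70.69 units of visible arc
```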
web/src/hooks/use-video-dimensions.ts (new file, 45 lines)
@@ -0,0 +1,45 @@
import { useState, useMemo } from "react";
import { useResizeObserver } from "./resize-observer";

export type VideoResolutionType = {
  width: number;
  height: number;
};

export function useVideoDimensions(
  containerRef: React.RefObject<HTMLDivElement>,
) {
  const [{ width: containerWidth, height: containerHeight }] =
    useResizeObserver(containerRef);
  const [videoResolution, setVideoResolution] = useState<VideoResolutionType>({
    width: 0,
    height: 0,
  });

  const videoAspectRatio = useMemo(() => {
    return videoResolution.width / videoResolution.height || 16 / 9;
  }, [videoResolution]);

  const containerAspectRatio = useMemo(() => {
    return containerWidth / containerHeight || 16 / 9;
  }, [containerWidth, containerHeight]);

  const videoDimensions = useMemo(() => {
    if (!containerWidth || !containerHeight)
      return { width: "100%", height: "100%" };
    if (containerAspectRatio > videoAspectRatio) {
      const height = containerHeight;
      const width = height * videoAspectRatio;
      return { width: `${width}px`, height: `${height}px` };
    } else {
      const width = containerWidth;
      const height = width / videoAspectRatio;
      return { width: `${width}px`, height: `${height}px` };
    }
  }, [containerWidth, containerHeight, videoAspectRatio, containerAspectRatio]);

  return {
    videoDimensions,
    setVideoResolution,
  };
}
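The hook implements classic letterbox fitting: when the container's aspect ratio exceeds the video's, height is the limiting dimension; otherwise width is. The `|| 16 / 9` fallbacks cover the state before the first resize and resolution events arrive. A worked example:

```ts
// Container 1600x900 (ratio ≈ 1.78) showing a 640x480 video (ratio ≈ 1.33).
// containerAspectRatio > videoAspectRatio, so fit to height:
const height = 900;
const width = height * (640 / 480); // 1200
// Rendered size: 1200x900, pillarboxed with 200px of space on each side.
```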
@@ -1,10 +1,16 @@
import { useEventUpdate, useModelState } from "@/api/ws";
import {
  useEmbeddingsReindexProgress,
  useEventUpdate,
  useModelState,
} from "@/api/ws";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import AnimatedCircularProgressBar from "@/components/ui/circular-progress-bar";
import { useApiFilterArgs } from "@/hooks/use-api-filter";
import { useTimezone } from "@/hooks/use-date-utils";
import { FrigateConfig } from "@/types/frigateConfig";
import { SearchFilter, SearchQuery, SearchResult } from "@/types/search";
import { ModelState } from "@/types/ws";
import { formatSecondsToDuration } from "@/utils/dateUtil";
import SearchView from "@/views/search/SearchView";
import { useCallback, useEffect, useMemo, useState } from "react";
import { LuCheck, LuExternalLink, LuX } from "react-icons/lu";
@@ -177,11 +183,30 @@ export default function Explore() {
  const eventUpdate = useEventUpdate();

  useEffect(() => {
    if (eventUpdate) {
      mutate();
    }
    // mutate / revalidate when event description updates come in
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [eventUpdate]);

  // embeddings reindex progress

  const { payload: reindexState } = useEmbeddingsReindexProgress();

  const embeddingsReindexing = useMemo(() => {
    if (reindexState) {
      switch (reindexState.status) {
        case "indexing":
          return true;
        case "completed":
          return false;
        default:
          return undefined;
      }
    }
  }, [reindexState]);

  // model states

  const { payload: textModelState } = useModelState(
|
||||
const { payload: textTokenizerState } = useModelState(
|
||||
"jinaai/jina-clip-v1-tokenizer",
|
||||
);
|
||||
const { payload: visionModelState } = useModelState(
|
||||
"jinaai/jina-clip-v1-vision_model_fp16.onnx",
|
||||
);
|
||||
const modelFile =
|
||||
config?.semantic_search.model_size === "large"
|
||||
? "jinaai/jina-clip-v1-vision_model_fp16.onnx"
|
||||
: "jinaai/jina-clip-v1-vision_model_quantized.onnx";
|
||||
|
||||
const { payload: visionModelState } = useModelState(modelFile);
|
||||
const { payload: visionFeatureExtractorState } = useModelState(
|
||||
"jinaai/jina-clip-v1-preprocessor_config.json",
|
||||
);
|
||||
@@ -226,7 +254,8 @@ export default function Explore() {

  if (
    config?.semantic_search.enabled &&
    (!textModelState ||
    (!reindexState ||
      !textModelState ||
      !textTokenizerState ||
      !visionModelState ||
      !visionFeatureExtractorState)
@@ -238,17 +267,70 @@ export default function Explore() {

  return (
    <>
      {config?.semantic_search.enabled && !allModelsLoaded ? (
      {config?.semantic_search.enabled &&
      (!allModelsLoaded || embeddingsReindexing) ? (
        <div className="absolute inset-0 left-1/2 top-1/2 flex h-96 w-96 -translate-x-1/2 -translate-y-1/2">
          <div className="flex flex-col items-center justify-center space-y-3 rounded-lg bg-background/50 p-5">
          <div className="flex max-w-96 flex-col items-center justify-center space-y-3 rounded-lg bg-background/50 p-5">
            <div className="my-5 flex flex-col items-center gap-2 text-xl">
              <TbExclamationCircle className="mb-3 size-10" />
              <div>Search Unavailable</div>
            </div>
            <div className="max-w-96 text-center">
              Frigate is downloading the necessary embeddings models to support
              semantic searching. This may take several minutes depending on the
              speed of your network connection.
            {embeddingsReindexing && (
              <>
                <div className="text-center text-primary-variant">
                  Search can be used after tracked object embeddings have
                  finished reindexing.
                </div>
                <div className="pt-5 text-center">
                  <AnimatedCircularProgressBar
                    min={0}
                    max={reindexState.total_objects}
                    value={reindexState.processed_objects}
                    gaugePrimaryColor="hsl(var(--selected))"
                    gaugeSecondaryColor="hsl(var(--secondary))"
                  />
                </div>
                <div className="flex w-96 flex-col gap-2 py-5">
                  {reindexState.time_remaining !== null && (
                    <div className="mb-3 flex flex-col items-center justify-center gap-1">
                      <div className="text-primary-variant">
                        {reindexState.time_remaining === -1
                          ? "Starting up..."
                          : "Estimated time remaining:"}
                      </div>
                      {reindexState.time_remaining >= 0 &&
                        (formatSecondsToDuration(reindexState.time_remaining) ||
                          "Finishing shortly")}
                    </div>
                  )}
                  <div className="flex flex-row items-center justify-center gap-3">
                    <span className="text-primary-variant">
                      Thumbnails embedded:
                    </span>
                    {reindexState.thumbnails}
                  </div>
                  <div className="flex flex-row items-center justify-center gap-3">
                    <span className="text-primary-variant">
                      Descriptions embedded:
                    </span>
                    {reindexState.descriptions}
                  </div>
                  <div className="flex flex-row items-center justify-center gap-3">
                    <span className="text-primary-variant">
                      Tracked objects processed:
                    </span>
                    {reindexState.processed_objects} /{" "}
                    {reindexState.total_objects}
                  </div>
                </div>
              </>
            )}
            {!allModelsLoaded && (
              <>
                <div className="text-center text-primary-variant">
                  Frigate is downloading the necessary embeddings models to
                  support semantic searching. This may take several minutes
                  depending on the speed of your network connection.
                </div>
                <div className="flex w-96 flex-col gap-2 py-5">
                  <div className="flex flex-row items-center justify-center gap-2">
@@ -276,11 +358,11 @@ export default function Explore() {
                    An error has occurred. Check Frigate logs.
                  </div>
                )}
                <div className="max-w-96 text-center">
                <div className="text-center text-primary-variant">
                  You may want to reindex the embeddings of your tracked objects
                  once the models are downloaded.
                </div>
                <div className="flex max-w-96 items-center text-primary-variant">
                <div className="flex items-center text-primary-variant">
                  <Link
                    to="https://docs.frigate.video/configuration/semantic_search"
                    target="_blank"
@@ -291,6 +373,8 @@ export default function Explore() {
                    <LuExternalLink className="ml-2 inline-flex size-3" />
                  </Link>
                </div>
              </>
            )}
          </div>
        </div>
      ) : (
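`time_remaining` doubles as a sentinel: `-1` means the reindex is still warming up and no estimate exists yet, `null` suppresses the ETA row entirely, and `0` (which formats to an empty string) falls through to "Finishing shortly". The same decode outside JSX (a sketch, assuming `formatSecondsToDuration` from `@/utils/dateUtil`, which is added later in this diff):

```ts
import { formatSecondsToDuration } from "@/utils/dateUtil";

// Sketch: interpret the time_remaining field from the reindex payload.
function describeEta(timeRemaining: number | null): string | undefined {
  if (timeRemaining === null) return undefined; // hide the ETA row
  if (timeRemaining === -1) return "Starting up...";
  return formatSecondsToDuration(timeRemaining) || "Finishing shortly";
}
```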
@@ -417,6 +417,7 @@ export interface FrigateConfig {

  semantic_search: {
    enabled: boolean;
    model_size: string;
  };

  snapshots: {
@@ -62,4 +62,13 @@ export type ModelState =
  | "downloaded"
  | "error";

export type EmbeddingsReindexProgressType = {
  thumbnails: number;
  descriptions: number;
  processed_objects: number;
  total_objects: number;
  time_remaining: number;
  status: string;
};

export type ToggleableSetting = "ON" | "OFF";
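For reference, a payload conforming to `EmbeddingsReindexProgressType` mid-reindex might look like this (values are illustrative; field meanings follow the Explore view above):

```ts
const example: EmbeddingsReindexProgressType = {
  thumbnails: 180, // thumbnail embeddings written so far
  descriptions: 42, // description embeddings written so far
  processed_objects: 200, // tracked objects handled
  total_objects: 1000, // tracked objects queued for reindexing
  time_remaining: 95, // seconds; -1 while the estimate warms up
  status: "indexing", // "indexing" or "completed" per the UI code above
};
```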
@@ -229,6 +229,23 @@ export const getDurationFromTimestamps = (
  return duration;
};

/**
 *
 * @param seconds - number of seconds to convert into hours, minutes and seconds
 * @returns string - formatted duration in hours, minutes and seconds
 */
export const formatSecondsToDuration = (seconds: number): string => {
  if (isNaN(seconds) || seconds < 0) {
    return "Invalid duration";
  }

  const duration = intervalToDuration({ start: 0, end: seconds * 1000 });
  return formatDuration(duration, {
    format: ["hours", "minutes", "seconds"],
    delimiter: ", ",
  });
};

/**
 * Adapted from https://stackoverflow.com/a/29268535 this takes a timezone string and
 * returns the offset of that timezone from UTC in minutes.
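`formatSecondsToDuration` leans on date-fns' `intervalToDuration` and `formatDuration`, restricted to hours/minutes/seconds with a comma delimiter. For example:

```ts
formatSecondsToDuration(3750); // "1 hour, 2 minutes, 30 seconds"
formatSecondsToDuration(59); // "59 seconds"
formatSecondsToDuration(-5); // "Invalid duration"
// Note: formatSecondsToDuration(0) returns "" because date-fns omits
// zero-valued units, which is why the caller above falls back to
// "Finishing shortly".
```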
@@ -7,16 +7,31 @@ import {
  FaCarSide,
  FaCat,
  FaCheckCircle,
  FaDhl,
  FaDog,
  FaFedex,
  FaFire,
  FaFootballBall,
  FaHockeyPuck,
  FaHorse,
  FaMotorcycle,
  FaMouse,
  FaRegTrashAlt,
  FaUmbrella,
  FaUps,
  FaUsps,
} from "react-icons/fa";
import { GiDeer, GiHummingbird, GiPolarBear, GiSailboat } from "react-icons/gi";
import {
  GiDeer,
  GiFox,
  GiGoat,
  GiHummingbird,
  GiPolarBear,
  GiPostStamp,
  GiRabbit,
  GiRaccoonHead,
  GiSailboat,
} from "react-icons/gi";
import { LuBox, LuLassoSelect } from "react-icons/lu";
import * as LuIcons from "react-icons/lu";
import { MdRecordVoiceOver } from "react-icons/md";
@@ -53,8 +68,12 @@ export function getIconForLabel(label: string, className?: string) {
    case "bark":
    case "dog":
      return <FaDog key={label} className={className} />;
    case "fire_alarm":
      return <FaFire key={label} className={className} />;
    case "fox":
      return <GiFox key={label} className={className} />;
    case "goat":
      return <GiGoat key={label} className={className} />;
    case "horse":
      return <FaHorse key={label} className={className} />;
    case "motorcycle":
      return <FaMotorcycle key={label} className={className} />;
    case "mouse":
@@ -63,8 +82,20 @@ export function getIconForLabel(label: string, className?: string) {
      return <LuBox key={label} className={className} />;
    case "person":
      return <BsPersonWalking key={label} className={className} />;
    case "rabbit":
      return <GiRabbit key={label} className={className} />;
    case "raccoon":
      return <GiRaccoonHead key={label} className={className} />;
    case "robot_lawnmower":
      return <FaHockeyPuck key={label} className={className} />;
    case "sports_ball":
      return <FaFootballBall key={label} className={className} />;
    case "squirrel":
      return <LuIcons.LuSquirrel key={label} className={className} />;
    case "umbrella":
      return <FaUmbrella key={label} className={className} />;
    case "waste_bin":
      return <FaRegTrashAlt key={label} className={className} />;
    // audio
    case "crying":
    case "laughter":
@@ -72,9 +103,21 @@ export function getIconForLabel(label: string, className?: string) {
    case "speech":
    case "yell":
      return <MdRecordVoiceOver key={label} className={className} />;
    case "fire_alarm":
      return <FaFire key={label} className={className} />;
    // sub labels
    case "amazon":
      return <FaAmazon key={label} className={className} />;
    case "an_post":
    case "dpd":
    case "gls":
    case "nzpost":
    case "postnl":
    case "postnord":
    case "purolator":
      return <GiPostStamp key={label} className={className} />;
    case "dhl":
      return <FaDhl key={label} className={className} />;
    case "fedex":
      return <FaFedex key={label} className={className} />;
    case "ups":
@@ -531,9 +531,37 @@ function PtzControlPanel({
  );

  useKeyboardListener(
    ["ArrowLeft", "ArrowRight", "ArrowUp", "ArrowDown", "+", "-"],
    [
      "ArrowLeft",
      "ArrowRight",
      "ArrowUp",
      "ArrowDown",
      "+",
      "-",
      "1",
      "2",
      "3",
      "4",
      "5",
      "6",
      "7",
      "8",
      "9",
    ],
    (key, modifiers) => {
      if (modifiers.repeat) {
      if (modifiers.repeat || !key) {
        return;
      }

      if (["1", "2", "3", "4", "5", "6", "7", "8", "9"].includes(key)) {
        const presetNumber = parseInt(key);
        if (
          ptz &&
          (ptz.presets?.length ?? 0) > 0 &&
          presetNumber <= ptz.presets.length
        ) {
          sendPtz(`preset_${ptz.presets[presetNumber - 1]}`);
        }
        return;
      }
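Digit keys map 1-based onto the camera's preset list, and the bounds check keeps an out-of-range digit from indexing past the array. Stripped of the React wiring, the mapping is (a sketch; the function name and preset data are illustrative):

```ts
// Sketch: key "2" selects the second preset, if one exists.
function presetForKey(key: string, presets: string[]): string | undefined {
  const presetNumber = parseInt(key); // "1".."9" -> 1..9
  if (presets.length > 0 && presetNumber <= presets.length) {
    return `preset_${presets[presetNumber - 1]}`; // 1-based key, 0-based array
  }
  return undefined; // digit exceeds the number of configured presets
}

// presetForKey("2", ["door", "yard"]) -> "preset_yard"
// presetForKey("5", ["door", "yard"]) -> undefined
```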
@@ -140,6 +140,7 @@ export function RecordingView({

  const [exportMode, setExportMode] = useState<ExportMode>("none");
  const [exportRange, setExportRange] = useState<TimeRange>();
  const [showExportPreview, setShowExportPreview] = useState(false);

  // move to next clip

@@ -412,6 +413,7 @@ export function RecordingView({
          latestTime={timeRange.before}
          mode={exportMode}
          range={exportRange}
          showPreview={showExportPreview}
          setRange={(range) => {
            setExportRange(range);
@@ -420,6 +422,7 @@ export function RecordingView({
            }
          }}
          setMode={setExportMode}
          setShowPreview={setShowExportPreview}
        />
      )}
      {isDesktop && (
@@ -473,11 +476,13 @@ export function RecordingView({
          latestTime={timeRange.before}
          mode={exportMode}
          range={exportRange}
          showExportPreview={showExportPreview}
          allLabels={reviewFilterList.labels}
          allZones={reviewFilterList.zones}
          onUpdateFilter={updateFilter}
          setRange={setExportRange}
          setMode={setExportMode}
          setShowExportPreview={setShowExportPreview}
        />
      </div>
    </div>
@@ -187,13 +187,19 @@ export default function SearchView({
    }
  }, [searchResults, searchDetail]);

  // confidence score - probably needs tweaking
  // confidence score

  const zScoreToConfidence = (score: number) => {
    // Sigmoid function: 1 / (1 + e^x)
    const confidence = 1 / (1 + Math.exp(score));
    // Normalizing is not needed for similarity searches
    // Sigmoid function for normalized: 1 / (1 + e^x)
    // Cosine for similarity
    if (searchFilter) {
      const notNormalized = searchFilter?.search_type?.includes("similarity");

      const confidence = notNormalized ? 1 - score : 1 / (1 + Math.exp(score));

      return Math.round(confidence * 100);
    }
  };

  const hasExistingSearch = useMemo(
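The two branches decode two different score scales. Text searches return a z-score-like distance that is squashed into (0, 1) with the logistic curve `1 / (1 + e^x)`, so lower (more negative) scores yield higher confidence; similarity searches return a cosine distance, where confidence is simply `1 - distance`. Numerically:

```ts
// Worked examples for both branches of zScoreToConfidence.
const squash = (score: number) => 1 / (1 + Math.exp(score));
squash(0); // 0.5  -> 50% at the mean
squash(-2); // ≈ 0.88 -> strong match (low z-score)
squash(2); // ≈ 0.12 -> weak match

// Similarity branch: cosine distance 0.15 -> 1 - 0.15 = 0.85 -> 85%
```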
@@ -387,7 +393,11 @@ export default function SearchView({
            >
              <SearchThumbnail
                searchResult={value}
                findSimilar={() => setSimilaritySearch(value)}
                findSimilar={() => {
                  if (config?.semantic_search.enabled) {
                    setSimilaritySearch(value);
                  }
                }}
                onClick={() => onSelectSearch(value, index)}
              />
              {(searchTerm ||
@@ -11,11 +11,17 @@ import { usePersistence } from "@/hooks/use-persistence";
import { Skeleton } from "@/components/ui/skeleton";
import { useCameraActivity } from "@/hooks/use-camera-activity";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import {
  Popover,
  PopoverContent,
  PopoverTrigger,
} from "@/components/ui/popover";
import { ObjectType } from "@/types/ws";
import useDeepMemo from "@/hooks/use-deep-memo";
import { Card } from "@/components/ui/card";
import { getIconForLabel } from "@/utils/iconUtil";
import { capitalizeFirstLetter } from "@/utils/stringUtil";
import { LuInfo } from "react-icons/lu";

type ObjectSettingsViewProps = {
  selectedCamera?: string;
@@ -35,6 +41,30 @@ export default function ObjectSettingsView({
    param: "bbox",
    title: "Bounding boxes",
    description: "Show bounding boxes around tracked objects",
    info: (
      <>
        <p className="mb-2">
          <strong>Object Bounding Box Colors</strong>
        </p>
        <ul className="list-disc space-y-1 pl-5">
          <li>
            At startup, different colors will be assigned to each object label
          </li>
          <li>
            A dark blue thin line indicates that object is not detected at
            this current point in time
          </li>
          <li>
            A gray thin line indicates that object is detected as being
            stationary
          </li>
          <li>
            A thick line indicates that object is the subject of autotracking
            (when enabled)
          </li>
        </ul>
      </>
    ),
  },
  {
    param: "timestamp",
@@ -55,12 +85,34 @@ export default function ObjectSettingsView({
    param: "motion",
    title: "Motion boxes",
    description: "Show boxes around areas where motion is detected",
    info: (
      <>
        <p className="mb-2">
          <strong>Motion Boxes</strong>
        </p>
        <p>
          Red boxes will be overlaid on areas of the frame where motion is
          currently being detected
        </p>
      </>
    ),
  },
  {
    param: "regions",
    title: "Regions",
    description:
      "Show a box of the region of interest sent to the object detector",
    info: (
      <>
        <p className="mb-2">
          <strong>Region Boxes</strong>
        </p>
        <p>
          Bright green boxes will be overlaid on areas of interest in the
          frame that are being sent to the object detector.
        </p>
      </>
    ),
  },
];

@@ -145,19 +197,34 @@ export default function ObjectSettingsView({
        <div className="flex w-full flex-col space-y-6">
          <div className="mt-2 space-y-6">
            <div className="my-2.5 flex flex-col gap-2.5">
              {DEBUG_OPTIONS.map(({ param, title, description }) => (
              {DEBUG_OPTIONS.map(({ param, title, description, info }) => (
                <div
                  key={param}
                  className="flex w-full flex-row items-center justify-between"
                >
                  <div className="mb-2 flex flex-col">
                    <div className="flex items-center gap-2">
                      <Label
                        className="mb-2 w-full cursor-pointer capitalize text-primary"
                        className="mb-0 cursor-pointer capitalize text-primary"
                        htmlFor={param}
                      >
                        {title}
                      </Label>
                      <div className="text-xs text-muted-foreground">
                      {info && (
                        <Popover>
                          <PopoverTrigger asChild>
                            <div className="cursor-pointer p-0">
                              <LuInfo className="size-4" />
                              <span className="sr-only">Info</span>
                            </div>
                          </PopoverTrigger>
                          <PopoverContent className="w-80">
                            {info}
                          </PopoverContent>
                        </Popover>
                      )}
                    </div>
                    <div className="mt-1 text-xs text-muted-foreground">
                      {description}
                    </div>
                  </div>
@@ -240,7 +307,7 @@ function ObjectList(objects?: ObjectType[]) {
          {getIconForLabel(obj.label, "size-5 text-white")}
        </div>
        <div className="ml-3 text-lg">
          {capitalizeFirstLetter(obj.label)}
          {capitalizeFirstLetter(obj.label.replaceAll("_", " "))}
        </div>
      </div>
      <div className="flex w-8/12 flex-row items-end justify-end">
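The final hunk prettifies snake_case labels before display, for example:

```ts
// "robot_lawnmower" -> "robot lawnmower" -> "Robot lawnmower"
capitalizeFirstLetter("robot_lawnmower".replaceAll("_", " "));
```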