From f67ec241d4851d37d49ea819d764f2d4ea34adf9 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Thu, 10 Oct 2024 14:28:43 -0500
Subject: [PATCH 01/27] Add embeddings reindex progress to the UI (#14268)
* refactor dispatcher
* add reindex to dictionary
* add circular progress bar component
* Add progress to UI when embeddings are reindexing
* Re-add comments to dispatcher for clarity
* Only report progress every 10 events so we don't spam the logs and websocket
* clean up
---
.cspell/frigate-dictionary.txt | 1 +
frigate/comms/dispatcher.py | 135 ++++++++++-----
frigate/const.py | 1 +
frigate/embeddings/embeddings.py | 52 +++++-
web/src/api/ws.tsx | 37 +++++
.../components/ui/circular-progress-bar.tsx | 108 ++++++++++++
web/src/pages/Explore.tsx | 155 ++++++++++++------
web/src/types/ws.ts | 7 +
8 files changed, 397 insertions(+), 99 deletions(-)
create mode 100644 web/src/components/ui/circular-progress-bar.tsx
diff --git a/.cspell/frigate-dictionary.txt b/.cspell/frigate-dictionary.txt
index 6c0e8022f..0cbcc4beb 100644
--- a/.cspell/frigate-dictionary.txt
+++ b/.cspell/frigate-dictionary.txt
@@ -212,6 +212,7 @@ rcond
RDONLY
rebranded
referer
+reindex
Reolink
restream
restreamed
diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py
index 1605d645a..c1a9f7e86 100644
--- a/frigate/comms/dispatcher.py
+++ b/frigate/comms/dispatcher.py
@@ -15,6 +15,7 @@ from frigate.const import (
INSERT_PREVIEW,
REQUEST_REGION_GRID,
UPDATE_CAMERA_ACTIVITY,
+ UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
UPDATE_EVENT_DESCRIPTION,
UPDATE_MODEL_STATE,
UPSERT_REVIEW_SEGMENT,
@@ -86,35 +87,27 @@ class Dispatcher:
self.camera_activity = {}
self.model_state = {}
+ self.embeddings_reindex = {}
def _receive(self, topic: str, payload: str) -> Optional[Any]:
"""Handle receiving of payload from communicators."""
- if topic.endswith("set"):
+
+ def handle_camera_command(command_type, camera_name, payload):
try:
- # example /cam_name/detect/set payload=ON|OFF
- if topic.count("/") == 2:
- camera_name = topic.split("/")[-3]
- command = topic.split("/")[-2]
- self._camera_settings_handlers[command](camera_name, payload)
- elif topic.count("/") == 1:
- command = topic.split("/")[-2]
- self._global_settings_handlers[command](payload)
- except IndexError:
- logger.error(f"Received invalid set command: {topic}")
- return
- elif topic.endswith("ptz"):
- try:
- # example /cam_name/ptz payload=MOVE_UP|MOVE_DOWN|STOP...
- camera_name = topic.split("/")[-2]
- self._on_ptz_command(camera_name, payload)
- except IndexError:
- logger.error(f"Received invalid ptz command: {topic}")
- return
- elif topic == "restart":
+ if command_type == "set":
+ self._camera_settings_handlers[camera_name](camera_name, payload)
+ elif command_type == "ptz":
+ self._on_ptz_command(camera_name, payload)
+ except KeyError:
+ logger.error(f"Invalid command type: {command_type}")
+
+ def handle_restart():
restart_frigate()
- elif topic == INSERT_MANY_RECORDINGS:
+
+ def handle_insert_many_recordings():
Recordings.insert_many(payload).execute()
- elif topic == REQUEST_REGION_GRID:
+
+ def handle_request_region_grid():
camera = payload
grid = get_camera_regions_grid(
camera,
@@ -122,24 +115,25 @@ class Dispatcher:
max(self.config.model.width, self.config.model.height),
)
return grid
- elif topic == INSERT_PREVIEW:
+
+ def handle_insert_preview():
Previews.insert(payload).execute()
- elif topic == UPSERT_REVIEW_SEGMENT:
- (
- ReviewSegment.insert(payload)
- .on_conflict(
- conflict_target=[ReviewSegment.id],
- update=payload,
- )
- .execute()
- )
- elif topic == CLEAR_ONGOING_REVIEW_SEGMENTS:
- ReviewSegment.update(end_time=datetime.datetime.now().timestamp()).where(
- ReviewSegment.end_time == None
+
+ def handle_upsert_review_segment():
+ ReviewSegment.insert(payload).on_conflict(
+ conflict_target=[ReviewSegment.id],
+ update=payload,
).execute()
- elif topic == UPDATE_CAMERA_ACTIVITY:
+
+ def handle_clear_ongoing_review_segments():
+ ReviewSegment.update(end_time=datetime.datetime.now().timestamp()).where(
+ ReviewSegment.end_time.is_null(True)
+ ).execute()
+
+ def handle_update_camera_activity():
self.camera_activity = payload
- elif topic == UPDATE_EVENT_DESCRIPTION:
+
+ def handle_update_event_description():
event: Event = Event.get(Event.id == payload["id"])
event.data["description"] = payload["description"]
event.save()
@@ -147,15 +141,30 @@ class Dispatcher:
"event_update",
json.dumps({"id": event.id, "description": event.data["description"]}),
)
- elif topic == UPDATE_MODEL_STATE:
+
+ def handle_update_model_state():
model = payload["model"]
state = payload["state"]
self.model_state[model] = ModelStatusTypesEnum[state]
self.publish("model_state", json.dumps(self.model_state))
- elif topic == "modelState":
- model_state = self.model_state.copy()
- self.publish("model_state", json.dumps(model_state))
- elif topic == "onConnect":
+
+ def handle_model_state():
+ self.publish("model_state", json.dumps(self.model_state.copy()))
+
+ def handle_update_embeddings_reindex_progress():
+ self.embeddings_reindex = payload
+ self.publish(
+ "embeddings_reindex_progress",
+ json.dumps(payload),
+ )
+
+ def handle_embeddings_reindex_progress():
+ self.publish(
+ "embeddings_reindex_progress",
+ json.dumps(self.embeddings_reindex.copy()),
+ )
+
+ def handle_on_connect():
camera_status = self.camera_activity.copy()
for camera in camera_status.keys():
@@ -170,6 +179,46 @@ class Dispatcher:
}
self.publish("camera_activity", json.dumps(camera_status))
+
+ # Dictionary mapping topic to handlers
+ topic_handlers = {
+ INSERT_MANY_RECORDINGS: handle_insert_many_recordings,
+ REQUEST_REGION_GRID: handle_request_region_grid,
+ INSERT_PREVIEW: handle_insert_preview,
+ UPSERT_REVIEW_SEGMENT: handle_upsert_review_segment,
+ CLEAR_ONGOING_REVIEW_SEGMENTS: handle_clear_ongoing_review_segments,
+ UPDATE_CAMERA_ACTIVITY: handle_update_camera_activity,
+ UPDATE_EVENT_DESCRIPTION: handle_update_event_description,
+ UPDATE_MODEL_STATE: handle_update_model_state,
+ UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress,
+ "restart": handle_restart,
+ "embeddingsReindexProgress": handle_embeddings_reindex_progress,
+ "modelState": handle_model_state,
+ "onConnect": handle_on_connect,
+ }
+
+ if topic.endswith("set") or topic.endswith("ptz"):
+ try:
+ parts = topic.split("/")
+ if len(parts) == 3 and topic.endswith("set"):
+ # example /cam_name/detect/set payload=ON|OFF
+ camera_name = parts[-3]
+ command = parts[-2]
+ handle_camera_command("set", camera_name, payload)
+ elif len(parts) == 2 and topic.endswith("set"):
+ command = parts[-2]
+ self._global_settings_handlers[command](payload)
+ elif len(parts) == 2 and topic.endswith("ptz"):
+ # example /cam_name/ptz payload=MOVE_UP|MOVE_DOWN|STOP...
+ camera_name = parts[-2]
+ handle_camera_command("ptz", camera_name, payload)
+ except IndexError:
+ logger.error(
+ f"Received invalid {topic.split('/')[-1]} command: {topic}"
+ )
+ return
+ elif topic in topic_handlers:
+ return topic_handlers[topic]()
else:
self.publish(topic, payload, retain=False)
diff --git a/frigate/const.py b/frigate/const.py
index e8e841f4f..ad1aacd0f 100644
--- a/frigate/const.py
+++ b/frigate/const.py
@@ -85,6 +85,7 @@ CLEAR_ONGOING_REVIEW_SEGMENTS = "clear_ongoing_review_segments"
UPDATE_CAMERA_ACTIVITY = "update_camera_activity"
UPDATE_EVENT_DESCRIPTION = "update_event_description"
UPDATE_MODEL_STATE = "update_model_state"
+UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress"
# Stats Values
diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py
index 9bcf2e6c0..dda4d95fd 100644
--- a/frigate/embeddings/embeddings.py
+++ b/frigate/embeddings/embeddings.py
@@ -10,7 +10,7 @@ from playhouse.shortcuts import model_to_dict
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config.semantic_search import SemanticSearchConfig
-from frigate.const import UPDATE_MODEL_STATE
+from frigate.const import UPDATE_EMBEDDINGS_REINDEX_PROGRESS, UPDATE_MODEL_STATE
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event
from frigate.types import ModelStatusTypesEnum
@@ -165,19 +165,36 @@ class Embeddings:
return embedding
def reindex(self) -> None:
- logger.info("Indexing event embeddings...")
+ logger.info("Indexing tracked object embeddings...")
self._drop_tables()
self._create_tables()
st = time.time()
totals = {
- "thumb": 0,
- "desc": 0,
+ "thumbnails": 0,
+ "descriptions": 0,
+ "processed_objects": 0,
+ "total_objects": 0,
}
+ self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
+
+ # Get total count of events to process
+ total_events = (
+ Event.select()
+ .where(
+ (Event.has_clip == True | Event.has_snapshot == True)
+ & Event.thumbnail.is_null(False)
+ )
+ .count()
+ )
+ totals["total_objects"] = total_events
+
batch_size = 100
current_page = 1
+ processed_events = 0
+
events = (
Event.select()
.where(
@@ -193,11 +210,29 @@ class Embeddings:
for event in events:
thumbnail = base64.b64decode(event.thumbnail)
self.upsert_thumbnail(event.id, thumbnail)
- totals["thumb"] += 1
+ totals["thumbnails"] += 1
+
if description := event.data.get("description", "").strip():
- totals["desc"] += 1
+ totals["descriptions"] += 1
self.upsert_description(event.id, description)
+ totals["processed_objects"] += 1
+
+ # report progress every 10 events so we don't spam the logs
+ if (totals["processed_objects"] % 10) == 0:
+ progress = (processed_events / total_events) * 100
+ logger.debug(
+ "Processed %d/%d events (%.2f%% complete) | Thumbnails: %d, Descriptions: %d",
+ processed_events,
+ total_events,
+ progress,
+ totals["thumbnails"],
+ totals["descriptions"],
+ )
+
+ self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
+
+ # Move to the next page
current_page += 1
events = (
Event.select()
@@ -211,7 +246,8 @@ class Embeddings:
logger.info(
"Embedded %d thumbnails and %d descriptions in %s seconds",
- totals["thumb"],
- totals["desc"],
+ totals["thumbnails"],
+ totals["descriptions"],
time.time() - st,
)
+ self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx
index a78722b66..2e083cf83 100644
--- a/web/src/api/ws.tsx
+++ b/web/src/api/ws.tsx
@@ -2,6 +2,7 @@ import { baseUrl } from "./baseUrl";
import { useCallback, useEffect, useState } from "react";
import useWebSocket, { ReadyState } from "react-use-websocket";
import {
+ EmbeddingsReindexProgressType,
FrigateCameraState,
FrigateEvent,
FrigateReview,
@@ -302,6 +303,42 @@ export function useModelState(
return { payload: data ? data[model] : undefined };
}
+export function useEmbeddingsReindexProgress(
+ revalidateOnFocus: boolean = true,
+): {
+ payload: EmbeddingsReindexProgressType;
+} {
+ const {
+ value: { payload },
+ send: sendCommand,
+ } = useWs("embeddings_reindex_progress", "embeddingsReindexProgress");
+
+ const data = useDeepMemo(JSON.parse(payload as string));
+
+ useEffect(() => {
+ let listener = undefined;
+ if (revalidateOnFocus) {
+ sendCommand("embeddingsReindexProgress");
+ listener = () => {
+ if (document.visibilityState == "visible") {
+ sendCommand("embeddingsReindexProgress");
+ }
+ };
+ addEventListener("visibilitychange", listener);
+ }
+
+ return () => {
+ if (listener) {
+ removeEventListener("visibilitychange", listener);
+ }
+ };
+ // we know that these deps are correct
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [revalidateOnFocus]);
+
+ return { payload: data };
+}
+
export function useMotionActivity(camera: string): { payload: string } {
const {
value: { payload },
diff --git a/web/src/components/ui/circular-progress-bar.tsx b/web/src/components/ui/circular-progress-bar.tsx
new file mode 100644
index 000000000..c1714829e
--- /dev/null
+++ b/web/src/components/ui/circular-progress-bar.tsx
@@ -0,0 +1,108 @@
+import { cn } from "@/lib/utils";
+
+interface Props {
+ max: number;
+ value: number;
+ min: number;
+ gaugePrimaryColor: string;
+ gaugeSecondaryColor: string;
+ className?: string;
+}
+
+export default function AnimatedCircularProgressBar({
+ max = 100,
+ min = 0,
+ value = 0,
+ gaugePrimaryColor,
+ gaugeSecondaryColor,
+ className,
+}: Props) {
+ const circumference = 2 * Math.PI * 45;
+ const percentPx = circumference / 100;
+ const currentPercent = Math.floor(((value - min) / (max - min)) * 100);
+
+ return (
+
+
+ {currentPercent <= 90 && currentPercent >= 0 && (
+
+ )}
+
+
+
+ {currentPercent}%
+
+
+ );
+}
diff --git a/web/src/pages/Explore.tsx b/web/src/pages/Explore.tsx
index 5a1ed6145..8607c8760 100644
--- a/web/src/pages/Explore.tsx
+++ b/web/src/pages/Explore.tsx
@@ -1,5 +1,10 @@
-import { useEventUpdate, useModelState } from "@/api/ws";
+import {
+ useEmbeddingsReindexProgress,
+ useEventUpdate,
+ useModelState,
+} from "@/api/ws";
import ActivityIndicator from "@/components/indicators/activity-indicator";
+import AnimatedCircularProgressBar from "@/components/ui/circular-progress-bar";
import { useApiFilterArgs } from "@/hooks/use-api-filter";
import { useTimezone } from "@/hooks/use-date-utils";
import { FrigateConfig } from "@/types/frigateConfig";
@@ -182,6 +187,18 @@ export default function Explore() {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [eventUpdate]);
+ // embeddings reindex progress
+
+ const { payload: reindexProgress } = useEmbeddingsReindexProgress();
+
+ const embeddingsReindexing = useMemo(
+ () =>
+ reindexProgress
+ ? reindexProgress.total_objects - reindexProgress.processed_objects > 0
+ : undefined,
+ [reindexProgress],
+ );
+
// model states
const { payload: textModelState } = useModelState(
@@ -238,59 +255,101 @@ export default function Explore() {
return (
<>
- {config?.semantic_search.enabled && !allModelsLoaded ? (
+ {config?.semantic_search.enabled &&
+ (!allModelsLoaded || embeddingsReindexing) ? (
-
+
-
- Frigate is downloading the necessary embeddings models to support
- semantic searching. This may take several minutes depending on the
- speed of your network connection.
-
-
-
- {renderModelStateIcon(visionModelState)}
- Vision model
-
-
- {renderModelStateIcon(visionFeatureExtractorState)}
- Vision model feature extractor
-
-
- {renderModelStateIcon(textModelState)}
- Text model
-
-
- {renderModelStateIcon(textTokenizerState)}
- Text tokenizer
-
-
- {(textModelState === "error" ||
- textTokenizerState === "error" ||
- visionModelState === "error" ||
- visionFeatureExtractorState === "error") && (
-
- An error has occurred. Check Frigate logs.
-
+ {embeddingsReindexing && (
+ <>
+
+ Search can be used after tracked object embeddings have
+ finished reindexing.
+
+
+
+
+
+ Thumbnails embedded:
+
+ {reindexProgress.thumbnails}
+
+
+
+ Descriptions embedded:
+
+ {reindexProgress.descriptions}
+
+
+
+ Tracked objects processed:
+
+ {reindexProgress.processed_objects}
+
+
+ >
+ )}
+ {!allModelsLoaded && (
+ <>
+
+ Frigate is downloading the necessary embeddings models to
+ support semantic searching. This may take several minutes
+ depending on the speed of your network connection.
+
+
+
+ {renderModelStateIcon(visionModelState)}
+ Vision model
+
+
+ {renderModelStateIcon(visionFeatureExtractorState)}
+ Vision model feature extractor
+
+
+ {renderModelStateIcon(textModelState)}
+ Text model
+
+
+ {renderModelStateIcon(textTokenizerState)}
+ Text tokenizer
+
+
+ {(textModelState === "error" ||
+ textTokenizerState === "error" ||
+ visionModelState === "error" ||
+ visionFeatureExtractorState === "error") && (
+
+ An error has occurred. Check Frigate logs.
+
+ )}
+
+ You may want to reindex the embeddings of your tracked objects
+ once the models are downloaded.
+
+
+
+ Read the documentation{" "}
+
+
+
+ >
)}
-
- You may want to reindex the embeddings of your tracked objects
- once the models are downloaded.
-
-
-
- Read the documentation{" "}
-
-
-
) : (
diff --git a/web/src/types/ws.ts b/web/src/types/ws.ts
index a8211d269..cab759074 100644
--- a/web/src/types/ws.ts
+++ b/web/src/types/ws.ts
@@ -62,4 +62,11 @@ export type ModelState =
| "downloaded"
| "error";
+export type EmbeddingsReindexProgressType = {
+ thumbnails: number;
+ descriptions: number;
+ processed_objects: number;
+ total_objects: number;
+};
+
export type ToggleableSetting = "ON" | "OFF";
From dd6276e706cbdb6bf841f610b1827054b07a89c6 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Thu, 10 Oct 2024 15:37:43 -0600
Subject: [PATCH 02/27] Embeddings fixes (#14269)
* Add debugging logs for more info
* Improve timeout handling
* Fix event cleanup
* Handle zmq error and empty data
* Don't run download
* Remove unneeded embeddings creations
* Update timeouts
* Init models immediately
* Fix order of init
* Cleanup
---
frigate/app.py | 4 +-
frigate/comms/dispatcher.py | 7 ++--
frigate/comms/embeddings_updater.py | 9 +++--
frigate/comms/event_metadata_updater.py | 2 +-
frigate/db/sqlitevecq.py | 23 +++++++++++
frigate/embeddings/__init__.py | 42 ++++++++++---------
frigate/embeddings/embeddings.py | 36 ++++-------------
frigate/embeddings/functions/onnx.py | 42 ++++++++++++++-----
frigate/embeddings/maintainer.py | 54 ++++++++++++++-----------
frigate/events/cleanup.py | 13 ++----
frigate/util/downloader.py | 33 ++++++++++-----
11 files changed, 154 insertions(+), 111 deletions(-)
diff --git a/frigate/app.py b/frigate/app.py
index 1fcf91551..0cf76699c 100644
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -581,12 +581,12 @@ class FrigateApp:
self.init_recording_manager()
self.init_review_segment_manager()
self.init_go2rtc()
+ self.start_detectors()
+ self.init_embeddings_manager()
self.bind_database()
self.check_db_data_migrations()
self.init_inter_process_communicator()
self.init_dispatcher()
- self.start_detectors()
- self.init_embeddings_manager()
self.init_embeddings_client()
self.start_video_output_processor()
self.start_ptz_autotracker()
diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py
index c1a9f7e86..12dfe2731 100644
--- a/frigate/comms/dispatcher.py
+++ b/frigate/comms/dispatcher.py
@@ -64,6 +64,9 @@ class Dispatcher:
self.onvif = onvif
self.ptz_metrics = ptz_metrics
self.comms = communicators
+ self.camera_activity = {}
+ self.model_state = {}
+ self.embeddings_reindex = {}
self._camera_settings_handlers: dict[str, Callable] = {
"audio": self._on_audio_command,
@@ -85,10 +88,6 @@ class Dispatcher:
for comm in self.comms:
comm.subscribe(self._receive)
- self.camera_activity = {}
- self.model_state = {}
- self.embeddings_reindex = {}
-
def _receive(self, topic: str, payload: str) -> Optional[Any]:
"""Handle receiving of payload from communicators."""
diff --git a/frigate/comms/embeddings_updater.py b/frigate/comms/embeddings_updater.py
index 8a7617630..9a13525f8 100644
--- a/frigate/comms/embeddings_updater.py
+++ b/frigate/comms/embeddings_updater.py
@@ -22,7 +22,7 @@ class EmbeddingsResponder:
def check_for_request(self, process: Callable) -> None:
while True: # load all messages that are queued
- has_message, _, _ = zmq.select([self.socket], [], [], 1)
+ has_message, _, _ = zmq.select([self.socket], [], [], 0.1)
if not has_message:
break
@@ -54,8 +54,11 @@ class EmbeddingsRequestor:
def send_data(self, topic: str, data: any) -> str:
"""Sends data and then waits for reply."""
- self.socket.send_json((topic, data))
- return self.socket.recv_json()
+ try:
+ self.socket.send_json((topic, data))
+ return self.socket.recv_json()
+ except zmq.ZMQError:
+ return ""
def stop(self) -> None:
self.socket.close()
diff --git a/frigate/comms/event_metadata_updater.py b/frigate/comms/event_metadata_updater.py
index aeede6d8e..87e1889ce 100644
--- a/frigate/comms/event_metadata_updater.py
+++ b/frigate/comms/event_metadata_updater.py
@@ -39,7 +39,7 @@ class EventMetadataSubscriber(Subscriber):
super().__init__(topic)
def check_for_update(
- self, timeout: float = None
+ self, timeout: float = 1
) -> Optional[tuple[EventMetadataTypeEnum, str, RegenerateDescriptionEnum]]:
return super().check_for_update(timeout)
diff --git a/frigate/db/sqlitevecq.py b/frigate/db/sqlitevecq.py
index 858070c38..398adbd2d 100644
--- a/frigate/db/sqlitevecq.py
+++ b/frigate/db/sqlitevecq.py
@@ -28,3 +28,26 @@ class SqliteVecQueueDatabase(SqliteQueueDatabase):
def delete_embeddings_description(self, event_ids: list[str]) -> None:
ids = ",".join(["?" for _ in event_ids])
self.execute_sql(f"DELETE FROM vec_descriptions WHERE id IN ({ids})", event_ids)
+
+ def drop_embeddings_tables(self) -> None:
+ self.execute_sql("""
+ DROP TABLE vec_descriptions;
+ """)
+ self.execute_sql("""
+ DROP TABLE vec_thumbnails;
+ """)
+
+ def create_embeddings_tables(self) -> None:
+ """Create vec0 virtual table for embeddings"""
+ self.execute_sql("""
+ CREATE VIRTUAL TABLE IF NOT EXISTS vec_thumbnails USING vec0(
+ id TEXT PRIMARY KEY,
+ thumbnail_embedding FLOAT[768]
+ );
+ """)
+ self.execute_sql("""
+ CREATE VIRTUAL TABLE IF NOT EXISTS vec_descriptions USING vec0(
+ id TEXT PRIMARY KEY,
+ description_embedding FLOAT[768]
+ );
+ """)
diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py
index e7dcf1053..7f2e1a10c 100644
--- a/frigate/embeddings/__init__.py
+++ b/frigate/embeddings/__init__.py
@@ -19,7 +19,6 @@ from frigate.models import Event
from frigate.util.builtin import serialize
from frigate.util.services import listen
-from .embeddings import Embeddings
from .maintainer import EmbeddingMaintainer
from .util import ZScoreNormalization
@@ -57,12 +56,6 @@ def manage_embeddings(config: FrigateConfig) -> None:
models = [Event]
db.bind(models)
- embeddings = Embeddings(config.semantic_search, db)
-
- # Check if we need to re-index events
- if config.semantic_search.reindex:
- embeddings.reindex()
-
maintainer = EmbeddingMaintainer(
db,
config,
@@ -114,19 +107,25 @@ class EmbeddingsContext:
query_embedding = row[0]
else:
# If no embedding found, generate it and return it
- query_embedding = serialize(
- self.requestor.send_data(
- EmbeddingsRequestEnum.embed_thumbnail.value,
- {"id": query.id, "thumbnail": query.thumbnail},
- )
+ data = self.requestor.send_data(
+ EmbeddingsRequestEnum.embed_thumbnail.value,
+ {"id": str(query.id), "thumbnail": str(query.thumbnail)},
)
+
+ if not data:
+ return []
+
+ query_embedding = serialize(data)
else:
- query_embedding = serialize(
- self.requestor.send_data(
- EmbeddingsRequestEnum.generate_search.value, query
- )
+ data = self.requestor.send_data(
+ EmbeddingsRequestEnum.generate_search.value, query
)
+ if not data:
+ return []
+
+ query_embedding = serialize(data)
+
sql_query = """
SELECT
id,
@@ -155,12 +154,15 @@ class EmbeddingsContext:
def search_description(
self, query_text: str, event_ids: list[str] = None
) -> list[tuple[str, float]]:
- query_embedding = serialize(
- self.requestor.send_data(
- EmbeddingsRequestEnum.generate_search.value, query_text
- )
+ data = self.requestor.send_data(
+ EmbeddingsRequestEnum.generate_search.value, query_text
)
+ if not data:
+ return []
+
+ query_embedding = serialize(data)
+
# Prepare the base SQL query
sql_query = """
SELECT
diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py
index dda4d95fd..e9d8ab833 100644
--- a/frigate/embeddings/embeddings.py
+++ b/frigate/embeddings/embeddings.py
@@ -63,7 +63,7 @@ class Embeddings:
self.requestor = InterProcessRequestor()
# Create tables if they don't exist
- self._create_tables()
+ self.db.create_embeddings_tables()
models = [
"jinaai/jina-clip-v1-text_model_fp16.onnx",
@@ -96,6 +96,7 @@ class Embeddings:
},
embedding_function=jina_text_embedding_function,
model_type="text",
+ requestor=self.requestor,
device="CPU",
)
@@ -108,34 +109,10 @@ class Embeddings:
},
embedding_function=jina_vision_embedding_function,
model_type="vision",
+ requestor=self.requestor,
device=self.config.device,
)
- def _create_tables(self):
- # Create vec0 virtual table for thumbnail embeddings
- self.db.execute_sql("""
- CREATE VIRTUAL TABLE IF NOT EXISTS vec_thumbnails USING vec0(
- id TEXT PRIMARY KEY,
- thumbnail_embedding FLOAT[768]
- );
- """)
-
- # Create vec0 virtual table for description embeddings
- self.db.execute_sql("""
- CREATE VIRTUAL TABLE IF NOT EXISTS vec_descriptions USING vec0(
- id TEXT PRIMARY KEY,
- description_embedding FLOAT[768]
- );
- """)
-
- def _drop_tables(self):
- self.db.execute_sql("""
- DROP TABLE vec_descriptions;
- """)
- self.db.execute_sql("""
- DROP TABLE vec_thumbnails;
- """)
-
def upsert_thumbnail(self, event_id: str, thumbnail: bytes):
# Convert thumbnail bytes to PIL Image
image = Image.open(io.BytesIO(thumbnail)).convert("RGB")
@@ -153,7 +130,6 @@ class Embeddings:
def upsert_description(self, event_id: str, description: str):
embedding = self.text_embedding([description])[0]
-
self.db.execute_sql(
"""
INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
@@ -167,8 +143,10 @@ class Embeddings:
def reindex(self) -> None:
logger.info("Indexing tracked object embeddings...")
- self._drop_tables()
- self._create_tables()
+ self.db.drop_embeddings_tables()
+ logger.debug("Dropped embeddings tables.")
+ self.db.create_embeddings_tables()
+ logger.debug("Created embeddings tables.")
st = time.time()
totals = {
diff --git a/frigate/embeddings/functions/onnx.py b/frigate/embeddings/functions/onnx.py
index 08901b6a2..34a81528a 100644
--- a/frigate/embeddings/functions/onnx.py
+++ b/frigate/embeddings/functions/onnx.py
@@ -15,6 +15,7 @@ from PIL import Image
from transformers import AutoFeatureExtractor, AutoTokenizer
from transformers.utils.logging import disable_progress_bar
+from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
@@ -41,12 +42,14 @@ class GenericONNXEmbedding:
download_urls: Dict[str, str],
embedding_function: Callable[[List[np.ndarray]], np.ndarray],
model_type: str,
+ requestor: InterProcessRequestor,
tokenizer_file: Optional[str] = None,
device: str = "AUTO",
):
self.model_name = model_name
self.model_file = model_file
self.tokenizer_file = tokenizer_file
+ self.requestor = requestor
self.download_urls = download_urls
self.embedding_function = embedding_function
self.model_type = model_type # 'text' or 'vision'
@@ -58,15 +61,32 @@ class GenericONNXEmbedding:
self.tokenizer = None
self.feature_extractor = None
self.session = None
-
- self.downloader = ModelDownloader(
- model_name=self.model_name,
- download_path=self.download_path,
- file_names=list(self.download_urls.keys())
- + ([self.tokenizer_file] if self.tokenizer_file else []),
- download_func=self._download_model,
+ files_names = list(self.download_urls.keys()) + (
+ [self.tokenizer_file] if self.tokenizer_file else []
)
- self.downloader.ensure_model_files()
+
+ if not all(
+ os.path.exists(os.path.join(self.download_path, n)) for n in files_names
+ ):
+ logger.debug(f"starting model download for {self.model_name}")
+ self.downloader = ModelDownloader(
+ model_name=self.model_name,
+ download_path=self.download_path,
+ file_names=files_names,
+ requestor=self.requestor,
+ download_func=self._download_model,
+ )
+ self.downloader.ensure_model_files()
+ else:
+ self.downloader = None
+ ModelDownloader.mark_files_state(
+ self.requestor,
+ self.model_name,
+ files_names,
+ ModelStatusTypesEnum.downloaded,
+ )
+ self._load_model_and_tokenizer()
+ logger.debug(f"models are already downloaded for {self.model_name}")
def _download_model(self, path: str):
try:
@@ -102,7 +122,8 @@ class GenericONNXEmbedding:
def _load_model_and_tokenizer(self):
if self.session is None:
- self.downloader.wait_for_download()
+ if self.downloader:
+ self.downloader.wait_for_download()
if self.model_type == "text":
self.tokenizer = self._load_tokenizer()
else:
@@ -125,13 +146,12 @@ class GenericONNXEmbedding:
f"{MODEL_CACHE_DIR}/{self.model_name}",
)
- def _load_model(self, path: str):
+ def _load_model(self, path: str) -> Optional[ort.InferenceSession]:
if os.path.exists(path):
return ort.InferenceSession(
path, providers=self.providers, provider_options=self.provider_options
)
else:
- logger.warning(f"{self.model_name} model file {path} not found.")
return None
def _process_image(self, image):
diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py
index 68c3e3686..238efcfdf 100644
--- a/frigate/embeddings/maintainer.py
+++ b/frigate/embeddings/maintainer.py
@@ -41,10 +41,14 @@ class EmbeddingMaintainer(threading.Thread):
config: FrigateConfig,
stop_event: MpEvent,
) -> None:
- threading.Thread.__init__(self)
- self.name = "embeddings_maintainer"
+ super().__init__(name="embeddings_maintainer")
self.config = config
self.embeddings = Embeddings(config.semantic_search, db)
+
+ # Check if we need to re-index events
+ if config.semantic_search.reindex:
+ self.embeddings.reindex()
+
self.event_subscriber = EventUpdateSubscriber()
self.event_end_subscriber = EventEndSubscriber()
self.event_metadata_subscriber = EventMetadataSubscriber(
@@ -76,26 +80,33 @@ class EmbeddingMaintainer(threading.Thread):
def _process_requests(self) -> None:
"""Process embeddings requests"""
- def handle_request(topic: str, data: str) -> str:
- if topic == EmbeddingsRequestEnum.embed_description.value:
- return serialize(
- self.embeddings.upsert_description(data["id"], data["description"]),
- pack=False,
- )
- elif topic == EmbeddingsRequestEnum.embed_thumbnail.value:
- thumbnail = base64.b64decode(data["thumbnail"])
- return serialize(
- self.embeddings.upsert_thumbnail(data["id"], thumbnail),
- pack=False,
- )
- elif topic == EmbeddingsRequestEnum.generate_search.value:
- return serialize(self.embeddings.text_embedding([data])[0], pack=False)
+ def _handle_request(topic: str, data: str) -> str:
+ try:
+ if topic == EmbeddingsRequestEnum.embed_description.value:
+ return serialize(
+ self.embeddings.upsert_description(
+ data["id"], data["description"]
+ ),
+ pack=False,
+ )
+ elif topic == EmbeddingsRequestEnum.embed_thumbnail.value:
+ thumbnail = base64.b64decode(data["thumbnail"])
+ return serialize(
+ self.embeddings.upsert_thumbnail(data["id"], thumbnail),
+ pack=False,
+ )
+ elif topic == EmbeddingsRequestEnum.generate_search.value:
+ return serialize(
+ self.embeddings.text_embedding([data])[0], pack=False
+ )
+ except Exception as e:
+ logger.error(f"Unable to handle embeddings request {e}")
- self.embeddings_responder.check_for_request(handle_request)
+ self.embeddings_responder.check_for_request(_handle_request)
def _process_updates(self) -> None:
"""Process event updates"""
- update = self.event_subscriber.check_for_update()
+ update = self.event_subscriber.check_for_update(timeout=0.1)
if update is None:
return
@@ -124,7 +135,7 @@ class EmbeddingMaintainer(threading.Thread):
def _process_finalized(self) -> None:
"""Process the end of an event."""
while True:
- ended = self.event_end_subscriber.check_for_update()
+ ended = self.event_end_subscriber.check_for_update(timeout=0.1)
if ended == None:
break
@@ -161,9 +172,6 @@ class EmbeddingMaintainer(threading.Thread):
or set(event.zones) & set(camera_config.genai.required_zones)
)
):
- logger.debug(
- f"Description generation for {event}, has_snapshot: {event.has_snapshot}"
- )
if event.has_snapshot and camera_config.genai.use_snapshot:
with open(
os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"),
@@ -217,7 +225,7 @@ class EmbeddingMaintainer(threading.Thread):
def _process_event_metadata(self):
# Check for regenerate description requests
(topic, event_id, source) = self.event_metadata_subscriber.check_for_update(
- timeout=1
+ timeout=0.1
)
if topic is None:
diff --git a/frigate/events/cleanup.py b/frigate/events/cleanup.py
index 828b295b4..8fabf2b21 100644
--- a/frigate/events/cleanup.py
+++ b/frigate/events/cleanup.py
@@ -8,11 +8,9 @@ from enum import Enum
from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path
-from playhouse.sqliteq import SqliteQueueDatabase
-
from frigate.config import FrigateConfig
from frigate.const import CLIPS_DIR
-from frigate.embeddings.embeddings import Embeddings
+from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event, Timeline
logger = logging.getLogger(__name__)
@@ -25,7 +23,7 @@ class EventCleanupType(str, Enum):
class EventCleanup(threading.Thread):
def __init__(
- self, config: FrigateConfig, stop_event: MpEvent, db: SqliteQueueDatabase
+ self, config: FrigateConfig, stop_event: MpEvent, db: SqliteVecQueueDatabase
):
super().__init__(name="event_cleanup")
self.config = config
@@ -35,9 +33,6 @@ class EventCleanup(threading.Thread):
self.removed_camera_labels: list[str] = None
self.camera_labels: dict[str, dict[str, any]] = {}
- if self.config.semantic_search.enabled:
- self.embeddings = Embeddings(self.config.semantic_search, self.db)
-
def get_removed_camera_labels(self) -> list[Event]:
"""Get a list of distinct labels for removed cameras."""
if self.removed_camera_labels is None:
@@ -234,8 +229,8 @@ class EventCleanup(threading.Thread):
Event.delete().where(Event.id << chunk).execute()
if self.config.semantic_search.enabled:
- self.embeddings.delete_description(chunk)
- self.embeddings.delete_thumbnail(chunk)
+ self.db.delete_embeddings_description(chunk)
+ self.db.delete_embeddings_thumbnail(chunk)
logger.debug(f"Deleted {len(events_to_delete)} embeddings")
logger.info("Exiting event cleanup...")
diff --git a/frigate/util/downloader.py b/frigate/util/downloader.py
index 642dc7c8f..ce5030566 100644
--- a/frigate/util/downloader.py
+++ b/frigate/util/downloader.py
@@ -44,6 +44,7 @@ class ModelDownloader:
download_path: str,
file_names: List[str],
download_func: Callable[[str], None],
+ requestor: InterProcessRequestor,
silent: bool = False,
):
self.model_name = model_name
@@ -51,19 +52,17 @@ class ModelDownloader:
self.file_names = file_names
self.download_func = download_func
self.silent = silent
- self.requestor = InterProcessRequestor()
+ self.requestor = requestor
self.download_thread = None
self.download_complete = threading.Event()
def ensure_model_files(self):
- for file in self.file_names:
- self.requestor.send_data(
- UPDATE_MODEL_STATE,
- {
- "model": f"{self.model_name}-{file}",
- "state": ModelStatusTypesEnum.downloading,
- },
- )
+ self.mark_files_state(
+ self.requestor,
+ self.model_name,
+ self.file_names,
+ ModelStatusTypesEnum.downloading,
+ )
self.download_thread = threading.Thread(
target=self._download_models,
name=f"_download_model_{self.model_name}",
@@ -119,5 +118,21 @@ class ModelDownloader:
if not silent:
logger.info(f"Downloading complete: {url}")
+ @staticmethod
+ def mark_files_state(
+ requestor: InterProcessRequestor,
+ model_name: str,
+ files: list[str],
+ state: ModelStatusTypesEnum,
+ ) -> None:
+ for file_name in files:
+ requestor.send_data(
+ UPDATE_MODEL_STATE,
+ {
+ "model": f"{model_name}-{file_name}",
+ "state": state,
+ },
+ )
+
def wait_for_download(self):
self.download_complete.wait()
From 54eb03d2a1bc77846582444a0a364201034925c3 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Thu, 10 Oct 2024 17:46:21 -0500
Subject: [PATCH 03/27] Add config option to select fp16 or quantized jina
vision model (#14270)
* Add config option to select fp16 or quantized jina vision model
* requires_fp16 for text and large models only
* fix model type check
* fix cpu
* pass model size
---
docs/docs/configuration/reference.md | 2 ++
docs/docs/configuration/semantic_search.md | 10 +++++++++
frigate/config/semantic_search.py | 3 +++
frigate/embeddings/embeddings.py | 24 ++++++++++++++++------
frigate/embeddings/functions/onnx.py | 5 ++++-
web/src/pages/Explore.tsx | 9 +++++---
web/src/types/frigateConfig.ts | 1 +
7 files changed, 44 insertions(+), 10 deletions(-)
diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md
index 66e49fb7f..234478714 100644
--- a/docs/docs/configuration/reference.md
+++ b/docs/docs/configuration/reference.md
@@ -520,6 +520,8 @@ semantic_search:
reindex: False
# Optional: Set device used to run embeddings, options are AUTO, CPU, GPU. (default: shown below)
device: "AUTO"
+ # Optional: Set the model size used for embeddings. (default: shown below)
+ model_size: "small"
# Optional: Configuration for AI generated tracked object descriptions
# NOTE: Semantic Search must be enabled for this to do anything.
diff --git a/docs/docs/configuration/semantic_search.md b/docs/docs/configuration/semantic_search.md
index 7cb8ca769..87ccbf802 100644
--- a/docs/docs/configuration/semantic_search.md
+++ b/docs/docs/configuration/semantic_search.md
@@ -39,6 +39,16 @@ The vision model is able to embed both images and text into the same vector spac
The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Search page when clicking on the gray tracked object chip at the top left of each review item. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.
+Differently weighted CLIP models are available and can be selected by setting the `model_size` config option:
+
+```yaml
+semantic_search:
+ enabled: True
+ model_size: small
+```
+
+Using `large` as the model size setting employs the full Jina model, which is appropriate for high-performance systems running a GPU. The `small` size uses a quantized version of the model that uses much less RAM and runs faster on CPU with a very negligible difference in embedding quality. Most users will not need to change this setting from the default of `small`.
+
## Usage
1. Semantic search is used in conjunction with the other filters available on the Search page. Use a combination of traditional filtering and semantic search for the best results.
diff --git a/frigate/config/semantic_search.py b/frigate/config/semantic_search.py
index ecdcd12d1..fdaf0fff4 100644
--- a/frigate/config/semantic_search.py
+++ b/frigate/config/semantic_search.py
@@ -13,3 +13,6 @@ class SemanticSearchConfig(FrigateBaseModel):
default=False, title="Reindex all detections on startup."
)
device: str = Field(default="AUTO", title="Device Type")
+ model_size: str = Field(
+ default="small", title="The size of the embeddings model used."
+ )
diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py
index e9d8ab833..5fe0566f2 100644
--- a/frigate/embeddings/embeddings.py
+++ b/frigate/embeddings/embeddings.py
@@ -68,7 +68,9 @@ class Embeddings:
models = [
"jinaai/jina-clip-v1-text_model_fp16.onnx",
"jinaai/jina-clip-v1-tokenizer",
- "jinaai/jina-clip-v1-vision_model_fp16.onnx",
+ "jinaai/jina-clip-v1-vision_model_fp16.onnx"
+ if config.model_size == "large"
+ else "jinaai/jina-clip-v1-vision_model_quantized.onnx",
"jinaai/jina-clip-v1-preprocessor_config.json",
]
@@ -95,19 +97,29 @@ class Embeddings:
"text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
},
embedding_function=jina_text_embedding_function,
+ model_size=config.model_size,
model_type="text",
requestor=self.requestor,
device="CPU",
)
+ model_file = (
+ "vision_model_fp16.onnx"
+ if self.config.model_size == "large"
+ else "vision_model_quantized.onnx"
+ )
+
+ download_urls = {
+ model_file: f"https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/{model_file}",
+ "preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json",
+ }
+
self.vision_embedding = GenericONNXEmbedding(
model_name="jinaai/jina-clip-v1",
- model_file="vision_model_fp16.onnx",
- download_urls={
- "vision_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/vision_model_fp16.onnx",
- "preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json",
- },
+ model_file=model_file,
+ download_urls=download_urls,
embedding_function=jina_vision_embedding_function,
+ model_size=config.model_size,
model_type="vision",
requestor=self.requestor,
device=self.config.device,
diff --git a/frigate/embeddings/functions/onnx.py b/frigate/embeddings/functions/onnx.py
index 34a81528a..ae9fe33bc 100644
--- a/frigate/embeddings/functions/onnx.py
+++ b/frigate/embeddings/functions/onnx.py
@@ -41,6 +41,7 @@ class GenericONNXEmbedding:
model_file: str,
download_urls: Dict[str, str],
embedding_function: Callable[[List[np.ndarray]], np.ndarray],
+ model_size: str,
model_type: str,
requestor: InterProcessRequestor,
tokenizer_file: Optional[str] = None,
@@ -54,7 +55,9 @@ class GenericONNXEmbedding:
self.embedding_function = embedding_function
self.model_type = model_type # 'text' or 'vision'
self.providers, self.provider_options = get_ort_providers(
- force_cpu=device == "CPU", requires_fp16=True, openvino_device=device
+ force_cpu=device == "CPU",
+ requires_fp16=model_size == "large" or self.model_type == "text",
+ openvino_device=device,
)
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
diff --git a/web/src/pages/Explore.tsx b/web/src/pages/Explore.tsx
index 8607c8760..59c3fd895 100644
--- a/web/src/pages/Explore.tsx
+++ b/web/src/pages/Explore.tsx
@@ -207,9 +207,12 @@ export default function Explore() {
const { payload: textTokenizerState } = useModelState(
"jinaai/jina-clip-v1-tokenizer",
);
- const { payload: visionModelState } = useModelState(
- "jinaai/jina-clip-v1-vision_model_fp16.onnx",
- );
+ const modelFile =
+ config?.semantic_search.model_size === "large"
+ ? "jinaai/jina-clip-v1-vision_model_fp16.onnx"
+ : "jinaai/jina-clip-v1-vision_model_quantized.onnx";
+
+ const { payload: visionModelState } = useModelState(modelFile);
const { payload: visionFeatureExtractorState } = useModelState(
"jinaai/jina-clip-v1-preprocessor_config.json",
);
diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts
index 68003f0e0..fe889ed9d 100644
--- a/web/src/types/frigateConfig.ts
+++ b/web/src/types/frigateConfig.ts
@@ -417,6 +417,7 @@ export interface FrigateConfig {
semantic_search: {
enabled: boolean;
+ model_size: string;
};
snapshots: {
From 8d753f821da4ea9348bc89ba223a5909a65a8e33 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Thu, 10 Oct 2024 18:12:05 -0500
Subject: [PATCH 04/27] Allow empty description for tracked objects (#14271)
* Allow tracked object description to be saved as an empty string
* ensure event_ids is passed as list
---
frigate/api/defs/events_body.py | 4 +---
frigate/api/event.py | 26 +++++++++-----------------
frigate/events/cleanup.py | 4 ++--
3 files changed, 12 insertions(+), 22 deletions(-)
diff --git a/frigate/api/defs/events_body.py b/frigate/api/defs/events_body.py
index 7aef87433..ca1256598 100644
--- a/frigate/api/defs/events_body.py
+++ b/frigate/api/defs/events_body.py
@@ -11,9 +11,7 @@ class EventsSubLabelBody(BaseModel):
class EventsDescriptionBody(BaseModel):
- description: Union[str, None] = Field(
- title="The description of the event", min_length=1
- )
+ description: Union[str, None] = Field(title="The description of the event")
class EventsCreateBody(BaseModel):
diff --git a/frigate/api/event.py b/frigate/api/event.py
index 3be37539d..3a8d003ad 100644
--- a/frigate/api/event.py
+++ b/frigate/api/event.py
@@ -927,27 +927,19 @@ def set_description(
new_description = body.description
- if new_description is None or len(new_description) == 0:
- return JSONResponse(
- content=(
- {
- "success": False,
- "message": "description cannot be empty",
- }
- ),
- status_code=400,
- )
-
event.data["description"] = new_description
event.save()
# If semantic search is enabled, update the index
if request.app.frigate_config.semantic_search.enabled:
context: EmbeddingsContext = request.app.embeddings
- context.update_description(
- event_id,
- new_description,
- )
+ if len(new_description) > 0:
+ context.update_description(
+ event_id,
+ new_description,
+ )
+ else:
+ context.db.delete_embeddings_description(event_ids=[event_id])
response_message = (
f"Event {event_id} description is now blank"
@@ -1033,8 +1025,8 @@ def delete_event(request: Request, event_id: str):
# If semantic search is enabled, update the index
if request.app.frigate_config.semantic_search.enabled:
context: EmbeddingsContext = request.app.embeddings
- context.db.delete_embeddings_thumbnail(id=[event_id])
- context.db.delete_embeddings_description(id=[event_id])
+ context.db.delete_embeddings_thumbnail(event_ids=[event_id])
+ context.db.delete_embeddings_description(event_ids=[event_id])
return JSONResponse(
content=({"success": True, "message": "Event " + event_id + " deleted"}),
status_code=200,
diff --git a/frigate/events/cleanup.py b/frigate/events/cleanup.py
index 8fabf2b21..9442eabe9 100644
--- a/frigate/events/cleanup.py
+++ b/frigate/events/cleanup.py
@@ -229,8 +229,8 @@ class EventCleanup(threading.Thread):
Event.delete().where(Event.id << chunk).execute()
if self.config.semantic_search.enabled:
- self.db.delete_embeddings_description(chunk)
- self.db.delete_embeddings_thumbnail(chunk)
+ self.db.delete_embeddings_description(event_ids=[chunk])
+ self.db.delete_embeddings_thumbnail(event_ids=[chunk])
logger.debug(f"Deleted {len(events_to_delete)} embeddings")
logger.info("Exiting event cleanup...")
From 30b5faebaea4bee6f5c2b5366b8e4472d5687907 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Thu, 10 Oct 2024 18:53:11 -0500
Subject: [PATCH 05/27] chunk is already a list (#14272)
---
frigate/events/cleanup.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/frigate/events/cleanup.py b/frigate/events/cleanup.py
index 9442eabe9..7d3e7c456 100644
--- a/frigate/events/cleanup.py
+++ b/frigate/events/cleanup.py
@@ -229,8 +229,8 @@ class EventCleanup(threading.Thread):
Event.delete().where(Event.id << chunk).execute()
if self.config.semantic_search.enabled:
- self.db.delete_embeddings_description(event_ids=[chunk])
- self.db.delete_embeddings_thumbnail(event_ids=[chunk])
+ self.db.delete_embeddings_description(event_ids=chunk)
+ self.db.delete_embeddings_thumbnail(event_ids=chunk)
logger.debug(f"Deleted {len(events_to_delete)} embeddings")
logger.info("Exiting event cleanup...")
From ee8091ba91f33b8ddf883a1ff9182af8692073f4 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Thu, 10 Oct 2024 19:48:56 -0500
Subject: [PATCH 06/27] Correctly handle camera command in dispatcher (#14273)
---
frigate/comms/dispatcher.py | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py
index 12dfe2731..8b310a4bb 100644
--- a/frigate/comms/dispatcher.py
+++ b/frigate/comms/dispatcher.py
@@ -91,14 +91,14 @@ class Dispatcher:
def _receive(self, topic: str, payload: str) -> Optional[Any]:
"""Handle receiving of payload from communicators."""
- def handle_camera_command(command_type, camera_name, payload):
+ def handle_camera_command(command_type, camera_name, command, payload):
try:
if command_type == "set":
- self._camera_settings_handlers[camera_name](camera_name, payload)
+ self._camera_settings_handlers[command](camera_name, payload)
elif command_type == "ptz":
self._on_ptz_command(camera_name, payload)
except KeyError:
- logger.error(f"Invalid command type: {command_type}")
+ logger.error(f"Invalid command type or handler: {command_type}")
def handle_restart():
restart_frigate()
@@ -203,14 +203,14 @@ class Dispatcher:
# example /cam_name/detect/set payload=ON|OFF
camera_name = parts[-3]
command = parts[-2]
- handle_camera_command("set", camera_name, payload)
+ handle_camera_command("set", camera_name, command, payload)
elif len(parts) == 2 and topic.endswith("set"):
command = parts[-2]
self._global_settings_handlers[command](payload)
elif len(parts) == 2 and topic.endswith("ptz"):
# example /cam_name/ptz payload=MOVE_UP|MOVE_DOWN|STOP...
camera_name = parts[-2]
- handle_camera_command("ptz", camera_name, payload)
+ handle_camera_command("ptz", camera_name, "", payload)
except IndexError:
logger.error(
f"Received invalid {topic.split('/')[-1]} command: {topic}"
From 2897afce41100a903f8f29409ea6d3de22e94ed3 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Fri, 11 Oct 2024 07:59:29 -0500
Subject: [PATCH 07/27] Reset saved search stats on reindex (#14280)
---
frigate/embeddings/embeddings.py | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py
index 5fe0566f2..85c65b33e 100644
--- a/frigate/embeddings/embeddings.py
+++ b/frigate/embeddings/embeddings.py
@@ -3,6 +3,7 @@
import base64
import io
import logging
+import os
import time
from PIL import Image
@@ -10,7 +11,11 @@ from playhouse.shortcuts import model_to_dict
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config.semantic_search import SemanticSearchConfig
-from frigate.const import UPDATE_EMBEDDINGS_REINDEX_PROGRESS, UPDATE_MODEL_STATE
+from frigate.const import (
+ CONFIG_DIR,
+ UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
+ UPDATE_MODEL_STATE,
+)
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event
from frigate.types import ModelStatusTypesEnum
@@ -160,6 +165,10 @@ class Embeddings:
self.db.create_embeddings_tables()
logger.debug("Created embeddings tables.")
+ # Delete the saved stats file
+ if os.path.exists(os.path.join(CONFIG_DIR, ".search_stats.json")):
+ os.remove(os.path.join(CONFIG_DIR, ".search_stats.json"))
+
st = time.time()
totals = {
"thumbnails": 0,
From ae91fa6a396a0ef91e64c119122fa11df2d0d0b0 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Fri, 11 Oct 2024 08:04:25 -0500
Subject: [PATCH 08/27] Add time remaining to embedding reindex pane (#14279)
* Add function to convert seconds to human readable duration
* Add estimated time remaining to reindexing pane
---
frigate/embeddings/embeddings.py | 8 ++++++++
web/src/pages/Explore.tsx | 11 +++++++++++
web/src/types/ws.ts | 1 +
web/src/utils/dateUtil.ts | 17 +++++++++++++++++
4 files changed, 37 insertions(+)
diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py
index 85c65b33e..4883e0914 100644
--- a/frigate/embeddings/embeddings.py
+++ b/frigate/embeddings/embeddings.py
@@ -175,6 +175,7 @@ class Embeddings:
"descriptions": 0,
"processed_objects": 0,
"total_objects": 0,
+ "time_remaining": 0,
}
self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
@@ -229,6 +230,13 @@ class Embeddings:
totals["descriptions"],
)
+ # Calculate time remaining
+ elapsed_time = time.time() - st
+ avg_time_per_event = elapsed_time / totals["processed_objects"]
+ remaining_events = total_events - totals["processed_objects"]
+ time_remaining = avg_time_per_event * remaining_events
+ totals["time_remaining"] = int(time_remaining)
+
self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
# Move to the next page
diff --git a/web/src/pages/Explore.tsx b/web/src/pages/Explore.tsx
index 59c3fd895..a3d7d3085 100644
--- a/web/src/pages/Explore.tsx
+++ b/web/src/pages/Explore.tsx
@@ -10,6 +10,7 @@ import { useTimezone } from "@/hooks/use-date-utils";
import { FrigateConfig } from "@/types/frigateConfig";
import { SearchFilter, SearchQuery, SearchResult } from "@/types/search";
import { ModelState } from "@/types/ws";
+import { formatSecondsToDuration } from "@/utils/dateUtil";
import SearchView from "@/views/search/SearchView";
import { useCallback, useEffect, useMemo, useState } from "react";
import { LuCheck, LuExternalLink, LuX } from "react-icons/lu";
@@ -282,6 +283,16 @@ export default function Explore() {
/>
+ {reindexProgress.time_remaining >= 0 && (
+
+
+ Estimated time remaining:
+
+ {formatSecondsToDuration(
+ reindexProgress.time_remaining,
+ ) || "Finishing shortly"}
+
+ )}
Thumbnails embedded:
diff --git a/web/src/types/ws.ts b/web/src/types/ws.ts
index cab759074..238ef4a8a 100644
--- a/web/src/types/ws.ts
+++ b/web/src/types/ws.ts
@@ -67,6 +67,7 @@ export type EmbeddingsReindexProgressType = {
descriptions: number;
processed_objects: number;
total_objects: number;
+ time_remaining: number;
};
export type ToggleableSetting = "ON" | "OFF";
diff --git a/web/src/utils/dateUtil.ts b/web/src/utils/dateUtil.ts
index dc5589884..8509155e1 100644
--- a/web/src/utils/dateUtil.ts
+++ b/web/src/utils/dateUtil.ts
@@ -229,6 +229,23 @@ export const getDurationFromTimestamps = (
return duration;
};
+/**
+ *
+ * @param seconds - number of seconds to convert into hours, minutes and seconds
+ * @returns string - formatted duration in hours, minutes and seconds
+ */
+export const formatSecondsToDuration = (seconds: number): string => {
+ if (isNaN(seconds) || seconds < 0) {
+ return "Invalid duration";
+ }
+
+ const duration = intervalToDuration({ start: 0, end: seconds * 1000 });
+ return formatDuration(duration, {
+ format: ["hours", "minutes", "seconds"],
+ delimiter: ", ",
+ });
+};
+
/**
* Adapted from https://stackoverflow.com/a/29268535 this takes a timezone string and
* returns the offset of that timezone from UTC in minutes.
From 748087483cba9bcd28029e0083147674a9ca85b2 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Fri, 11 Oct 2024 08:05:28 -0500
Subject: [PATCH 09/27] Use number keys on keyboard to move ptz camera to
presets (#14278)
* Use number keys on keyboard to move ptz camera to presets
* clean up
---
web/src/views/live/LiveCameraView.tsx | 32 +++++++++++++++++++++++++--
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/web/src/views/live/LiveCameraView.tsx b/web/src/views/live/LiveCameraView.tsx
index 8a8146e14..2af08e497 100644
--- a/web/src/views/live/LiveCameraView.tsx
+++ b/web/src/views/live/LiveCameraView.tsx
@@ -531,9 +531,37 @@ function PtzControlPanel({
);
useKeyboardListener(
- ["ArrowLeft", "ArrowRight", "ArrowUp", "ArrowDown", "+", "-"],
+ [
+ "ArrowLeft",
+ "ArrowRight",
+ "ArrowUp",
+ "ArrowDown",
+ "+",
+ "-",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5",
+ "6",
+ "7",
+ "8",
+ "9",
+ ],
(key, modifiers) => {
- if (modifiers.repeat) {
+ if (modifiers.repeat || !key) {
+ return;
+ }
+
+ if (["1", "2", "3", "4", "5", "6", "7", "8", "9"].includes(key)) {
+ const presetNumber = parseInt(key);
+ if (
+ ptz &&
+ (ptz.presets?.length ?? 0) > 0 &&
+ presetNumber <= ptz.presets.length
+ ) {
+ sendPtz(`preset_${ptz.presets[presetNumber - 1]}`);
+ }
return;
}
From 6df541e1fd989d74ef70b7ffb776ba7822b833e1 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Fri, 11 Oct 2024 10:47:23 -0600
Subject: [PATCH 10/27] Openvino models (#14283)
* Enable model conversion cache for openvino
* Use openvino directly for onnx embeddings if available
* Don't fail if zmq is busy
---
frigate/comms/dispatcher.py | 9 ++--
frigate/comms/inter_process.py | 7 +++-
frigate/detectors/plugins/openvino.py | 3 ++
frigate/embeddings/functions/onnx.py | 35 ++++++----------
frigate/util/model.py | 60 +++++++++++++++++++++++++++
5 files changed, 85 insertions(+), 29 deletions(-)
diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py
index 8b310a4bb..4a3862eaf 100644
--- a/frigate/comms/dispatcher.py
+++ b/frigate/comms/dispatcher.py
@@ -142,10 +142,11 @@ class Dispatcher:
)
def handle_update_model_state():
- model = payload["model"]
- state = payload["state"]
- self.model_state[model] = ModelStatusTypesEnum[state]
- self.publish("model_state", json.dumps(self.model_state))
+ if payload:
+ model = payload["model"]
+ state = payload["state"]
+ self.model_state[model] = ModelStatusTypesEnum[state]
+ self.publish("model_state", json.dumps(self.model_state))
def handle_model_state():
self.publish("model_state", json.dumps(self.model_state.copy()))
diff --git a/frigate/comms/inter_process.py b/frigate/comms/inter_process.py
index 32cec49e4..850e2435c 100644
--- a/frigate/comms/inter_process.py
+++ b/frigate/comms/inter_process.py
@@ -65,8 +65,11 @@ class InterProcessRequestor:
def send_data(self, topic: str, data: any) -> any:
"""Sends data and then waits for reply."""
- self.socket.send_json((topic, data))
- return self.socket.recv_json()
+ try:
+ self.socket.send_json((topic, data))
+ return self.socket.recv_json()
+ except zmq.ZMQError:
+ return ""
def stop(self) -> None:
self.socket.close()
diff --git a/frigate/detectors/plugins/openvino.py b/frigate/detectors/plugins/openvino.py
index 5dc998487..51e48530b 100644
--- a/frigate/detectors/plugins/openvino.py
+++ b/frigate/detectors/plugins/openvino.py
@@ -3,6 +3,7 @@ import os
import numpy as np
import openvino as ov
+import openvino.properties as props
from pydantic import Field
from typing_extensions import Literal
@@ -34,6 +35,8 @@ class OvDetector(DetectionApi):
logger.error(f"OpenVino model file {detector_config.model.path} not found.")
raise FileNotFoundError
+ os.makedirs("/config/model_cache/openvino", exist_ok=True)
+ self.ov_core.set_property({props.cache_dir: "/config/model_cache/openvino"})
self.interpreter = self.ov_core.compile_model(
model=detector_config.model.path, device_name=detector_config.device
)
diff --git a/frigate/embeddings/functions/onnx.py b/frigate/embeddings/functions/onnx.py
index ae9fe33bc..1e50e07b1 100644
--- a/frigate/embeddings/functions/onnx.py
+++ b/frigate/embeddings/functions/onnx.py
@@ -5,7 +5,6 @@ from io import BytesIO
from typing import Callable, Dict, List, Optional, Union
import numpy as np
-import onnxruntime as ort
import requests
from PIL import Image
@@ -19,7 +18,7 @@ from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
-from frigate.util.model import get_ort_providers
+from frigate.util.model import ONNXModelRunner
warnings.filterwarnings(
"ignore",
@@ -54,16 +53,12 @@ class GenericONNXEmbedding:
self.download_urls = download_urls
self.embedding_function = embedding_function
self.model_type = model_type # 'text' or 'vision'
- self.providers, self.provider_options = get_ort_providers(
- force_cpu=device == "CPU",
- requires_fp16=model_size == "large" or self.model_type == "text",
- openvino_device=device,
- )
-
+ self.model_size = model_size
+ self.device = device
self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
self.tokenizer = None
self.feature_extractor = None
- self.session = None
+ self.runner = None
files_names = list(self.download_urls.keys()) + (
[self.tokenizer_file] if self.tokenizer_file else []
)
@@ -124,15 +119,17 @@ class GenericONNXEmbedding:
)
def _load_model_and_tokenizer(self):
- if self.session is None:
+ if self.runner is None:
if self.downloader:
self.downloader.wait_for_download()
if self.model_type == "text":
self.tokenizer = self._load_tokenizer()
else:
self.feature_extractor = self._load_feature_extractor()
- self.session = self._load_model(
- os.path.join(self.download_path, self.model_file)
+ self.runner = ONNXModelRunner(
+ os.path.join(self.download_path, self.model_file),
+ self.device,
+ self.model_size,
)
def _load_tokenizer(self):
@@ -149,14 +146,6 @@ class GenericONNXEmbedding:
f"{MODEL_CACHE_DIR}/{self.model_name}",
)
- def _load_model(self, path: str) -> Optional[ort.InferenceSession]:
- if os.path.exists(path):
- return ort.InferenceSession(
- path, providers=self.providers, provider_options=self.provider_options
- )
- else:
- return None
-
def _process_image(self, image):
if isinstance(image, str):
if image.startswith("http"):
@@ -170,7 +159,7 @@ class GenericONNXEmbedding:
) -> List[np.ndarray]:
self._load_model_and_tokenizer()
- if self.session is None or (
+ if self.runner is None or (
self.tokenizer is None and self.feature_extractor is None
):
logger.error(
@@ -188,14 +177,14 @@ class GenericONNXEmbedding:
images=processed_images, return_tensors="np"
)
- input_names = [input.name for input in self.session.get_inputs()]
+ input_names = self.runner.get_input_names()
onnx_inputs = {
name: processed_inputs[name]
for name in input_names
if name in processed_inputs
}
- outputs = self.session.run(None, onnx_inputs)
+ outputs = self.runner.run(onnx_inputs)
embeddings = self.embedding_function(outputs)
return [embedding for embedding in embeddings]
diff --git a/frigate/util/model.py b/frigate/util/model.py
index fabade387..951e61370 100644
--- a/frigate/util/model.py
+++ b/frigate/util/model.py
@@ -1,9 +1,16 @@
"""Model Utils"""
import os
+from typing import Any
import onnxruntime as ort
+try:
+ import openvino as ov
+except ImportError:
+ # openvino is not included
+ pass
+
def get_ort_providers(
force_cpu: bool = False, openvino_device: str = "AUTO", requires_fp16: bool = False
@@ -42,3 +49,56 @@ def get_ort_providers(
options.append({})
return (providers, options)
+
+
+class ONNXModelRunner:
+ """Run onnx models optimally based on available hardware."""
+
+ def __init__(self, model_path: str, device: str, requires_fp16: bool = False):
+ self.model_path = model_path
+ self.ort: ort.InferenceSession = None
+ self.ov: ov.Core = None
+ providers, options = get_ort_providers(device == "CPU", device, requires_fp16)
+
+ if "OpenVINOExecutionProvider" in providers:
+ # use OpenVINO directly
+ self.type = "ov"
+ self.ov = ov.Core()
+ self.ov.set_property(
+ {ov.properties.cache_dir: "/config/model_cache/openvino"}
+ )
+ self.interpreter = self.ov.compile_model(
+ model=model_path, device_name=device
+ )
+ else:
+ # Use ONNXRuntime
+ self.type = "ort"
+ self.ort = ort.InferenceSession(
+ model_path, providers=providers, provider_options=options
+ )
+
+ def get_input_names(self) -> list[str]:
+ if self.type == "ov":
+ input_names = []
+
+ for input in self.interpreter.inputs:
+ input_names.extend(input.names)
+
+ return input_names
+ elif self.type == "ort":
+ return [input.name for input in self.ort.get_inputs()]
+
+ def run(self, input: dict[str, Any]) -> Any:
+ if self.type == "ov":
+ infer_request = self.interpreter.create_infer_request()
+ input_tensor = list(input.values())
+
+ if len(input_tensor) == 1:
+ input_tensor = ov.Tensor(array=input_tensor[0])
+ else:
+ input_tensor = ov.Tensor(array=input_tensor)
+
+ infer_request.infer(input_tensor)
+ return [infer_request.get_output_tensor().data]
+ elif self.type == "ort":
+ return self.ort.run(None, input)
From d4b9b5a7dd439ed04fe2a6a60d0007fb0c0c3029 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Fri, 11 Oct 2024 12:03:47 -0600
Subject: [PATCH 11/27] Reduce onnx memory usage (#14285)
---
frigate/util/model.py | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/frigate/util/model.py b/frigate/util/model.py
index 951e61370..008f5169a 100644
--- a/frigate/util/model.py
+++ b/frigate/util/model.py
@@ -16,7 +16,14 @@ def get_ort_providers(
force_cpu: bool = False, openvino_device: str = "AUTO", requires_fp16: bool = False
) -> tuple[list[str], list[dict[str, any]]]:
if force_cpu:
- return (["CPUExecutionProvider"], [{}])
+ return (
+ ["CPUExecutionProvider"],
+ [
+ {
+ "arena_extend_strategy": "kSameAsRequested",
+ }
+ ],
+ )
providers = ort.get_available_providers()
options = []
@@ -28,6 +35,7 @@ def get_ort_providers(
if not requires_fp16 or os.environ.get("USE_FP_16", "True") != "False":
options.append(
{
+ "arena_extend_strategy": "kSameAsRequested",
"trt_fp16_enable": requires_fp16,
"trt_timing_cache_enable": True,
"trt_engine_cache_enable": True,
@@ -41,10 +49,17 @@ def get_ort_providers(
os.makedirs("/config/model_cache/openvino/ort", exist_ok=True)
options.append(
{
+ "arena_extend_strategy": "kSameAsRequested",
"cache_dir": "/config/model_cache/openvino/ort",
"device_type": openvino_device,
}
)
+ elif provider == "CPUExecutionProvider":
+ options.append(
+ {
+ "arena_extend_strategy": "kSameAsRequested",
+ }
+ )
else:
options.append({})
From 8a8a0c7decfb0aa70cdc87d8a7de3263d4bec265 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Fri, 11 Oct 2024 13:11:11 -0500
Subject: [PATCH 12/27] Embeddings normalization fixes (#14284)
* Use cosine distance metric for vec tables
* Only apply normalization to multi-modal searches
* Catch possible edge case in stddev calc
* Use sigmoid function for normalization for multi-modal searches only
* Ensure we get model state on initial page load
* Only save stats for multi-modal searches and only use cosine similarity for image -> image search
---
frigate/api/event.py | 33 ++++++++++++++---------------
frigate/db/sqlitevecq.py | 4 ++--
frigate/embeddings/util.py | 7 +++---
web/src/pages/Explore.tsx | 9 ++++++++
web/src/views/search/SearchView.tsx | 14 ++++++++----
5 files changed, 41 insertions(+), 26 deletions(-)
diff --git a/frigate/api/event.py b/frigate/api/event.py
index 3a8d003ad..c716bba13 100644
--- a/frigate/api/event.py
+++ b/frigate/api/event.py
@@ -473,12 +473,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
)
thumb_result = context.search_thumbnail(search_event)
- thumb_ids = dict(
- zip(
- [result[0] for result in thumb_result],
- context.thumb_stats.normalize([result[1] for result in thumb_result]),
- )
- )
+ thumb_ids = {result[0]: result[1] for result in thumb_result}
search_results = {
event_id: {"distance": distance, "source": "thumbnail"}
for event_id, distance in thumb_ids.items()
@@ -486,15 +481,18 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
else:
search_types = search_type.split(",")
+ # only save stats for multi-modal searches
+ save_stats = "thumbnail" in search_types and "description" in search_types
+
if "thumbnail" in search_types:
thumb_result = context.search_thumbnail(query)
+
+ thumb_distances = context.thumb_stats.normalize(
+ [result[1] for result in thumb_result], save_stats
+ )
+
thumb_ids = dict(
- zip(
- [result[0] for result in thumb_result],
- context.thumb_stats.normalize(
- [result[1] for result in thumb_result]
- ),
- )
+ zip([result[0] for result in thumb_result], thumb_distances)
)
search_results.update(
{
@@ -505,12 +503,13 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
if "description" in search_types:
desc_result = context.search_description(query)
- desc_ids = dict(
- zip(
- [result[0] for result in desc_result],
- context.desc_stats.normalize([result[1] for result in desc_result]),
- )
+
+ desc_distances = context.desc_stats.normalize(
+ [result[1] for result in desc_result], save_stats
)
+
+ desc_ids = dict(zip([result[0] for result in desc_result], desc_distances))
+
for event_id, distance in desc_ids.items():
if (
event_id not in search_results
diff --git a/frigate/db/sqlitevecq.py b/frigate/db/sqlitevecq.py
index 398adbd2d..ccb75ae54 100644
--- a/frigate/db/sqlitevecq.py
+++ b/frigate/db/sqlitevecq.py
@@ -42,12 +42,12 @@ class SqliteVecQueueDatabase(SqliteQueueDatabase):
self.execute_sql("""
CREATE VIRTUAL TABLE IF NOT EXISTS vec_thumbnails USING vec0(
id TEXT PRIMARY KEY,
- thumbnail_embedding FLOAT[768]
+ thumbnail_embedding FLOAT[768] distance_metric=cosine
);
""")
self.execute_sql("""
CREATE VIRTUAL TABLE IF NOT EXISTS vec_descriptions USING vec0(
id TEXT PRIMARY KEY,
- description_embedding FLOAT[768]
+ description_embedding FLOAT[768] distance_metric=cosine
);
""")
diff --git a/frigate/embeddings/util.py b/frigate/embeddings/util.py
index 0b2acd4d6..bc1a952ec 100644
--- a/frigate/embeddings/util.py
+++ b/frigate/embeddings/util.py
@@ -20,10 +20,11 @@ class ZScoreNormalization:
@property
def stddev(self):
- return math.sqrt(self.variance)
+ return math.sqrt(self.variance) if self.variance > 0 else 0.0
- def normalize(self, distances: list[float]):
- self._update(distances)
+ def normalize(self, distances: list[float], save_stats: bool):
+ if save_stats:
+ self._update(distances)
if self.stddev == 0:
return distances
return [
diff --git a/web/src/pages/Explore.tsx b/web/src/pages/Explore.tsx
index a3d7d3085..d3c5f7d9b 100644
--- a/web/src/pages/Explore.tsx
+++ b/web/src/pages/Explore.tsx
@@ -2,6 +2,7 @@ import {
useEmbeddingsReindexProgress,
useEventUpdate,
useModelState,
+ useWs,
} from "@/api/ws";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import AnimatedCircularProgressBar from "@/components/ui/circular-progress-bar";
@@ -202,6 +203,14 @@ export default function Explore() {
// model states
+ const { send: sendCommand } = useWs("model_state", "modelState");
+
+ useEffect(() => {
+ sendCommand("modelState");
+ // only run on mount
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, []);
+
const { payload: textModelState } = useModelState(
"jinaai/jina-clip-v1-text_model_fp16.onnx",
);
diff --git a/web/src/views/search/SearchView.tsx b/web/src/views/search/SearchView.tsx
index 203942083..4c33f7dc8 100644
--- a/web/src/views/search/SearchView.tsx
+++ b/web/src/views/search/SearchView.tsx
@@ -187,13 +187,19 @@ export default function SearchView({
}
}, [searchResults, searchDetail]);
- // confidence score - probably needs tweaking
+ // confidence score
const zScoreToConfidence = (score: number) => {
- // Sigmoid function: 1 / (1 + e^x)
- const confidence = 1 / (1 + Math.exp(score));
+ // Normalizing is not needed for similarity searches
+ // Sigmoid function for normalized: 1 / (1 + e^x)
+ // Cosine for similarity
+ if (searchFilter) {
+ const notNormalized = searchFilter?.search_type?.includes("similarity");
- return Math.round(confidence * 100);
+ const confidence = notNormalized ? 1 - score : 1 / (1 + Math.exp(score));
+
+ return Math.round(confidence * 100);
+ }
};
const hasExistingSearch = useMemo(
From 6e332bbdf8600dc22a297d5098281d6e3d141f69 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Fri, 11 Oct 2024 16:08:14 -0600
Subject: [PATCH 13/27] Remove device config and use model size to configure
device used (#14290)
* Remove device config and use model size to configure device used
* Don't show Frigate+ submission when in progress
* Add docs link for bounding box colors
---
docs/docs/configuration/reference.md | 3 +--
docs/docs/configuration/semantic_search.md | 15 ++++++++-------
frigate/config/semantic_search.py | 1 -
frigate/embeddings/embeddings.py | 2 +-
.../overlay/detail/SearchDetailDialog.tsx | 2 +-
web/src/views/settings/ObjectSettingsView.tsx | 13 +++++++++++++
6 files changed, 24 insertions(+), 12 deletions(-)
diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md
index 234478714..604791621 100644
--- a/docs/docs/configuration/reference.md
+++ b/docs/docs/configuration/reference.md
@@ -518,9 +518,8 @@ semantic_search:
enabled: False
# Optional: Re-index embeddings database from historical tracked objects (default: shown below)
reindex: False
- # Optional: Set device used to run embeddings, options are AUTO, CPU, GPU. (default: shown below)
- device: "AUTO"
# Optional: Set the model size used for embeddings. (default: shown below)
+ # NOTE: small model runs on CPU and large model runs on GPU
model_size: "small"
# Optional: Configuration for AI generated tracked object descriptions
diff --git a/docs/docs/configuration/semantic_search.md b/docs/docs/configuration/semantic_search.md
index 87ccbf802..a569e8f1a 100644
--- a/docs/docs/configuration/semantic_search.md
+++ b/docs/docs/configuration/semantic_search.md
@@ -29,25 +29,26 @@ If you are enabling the Search feature for the first time, be advised that Friga
### Jina AI CLIP
-:::tip
-
-The CLIP models are downloaded in ONNX format, which means they will be accelerated using GPU hardware when available. This depends on the Docker build that is used. See [the object detector docs](../configuration/object_detectors.md) for more information.
-
-:::
-
The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.
The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Search page when clicking on the gray tracked object chip at the top left of each review item. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.
Differently weighted CLIP models are available and can be selected by setting the `model_size` config option:
+:::tip
+
+The CLIP models are downloaded in ONNX format, which means they will be accelerated using GPU hardware when available. This depends on the Docker build that is used. See [the object detector docs](../configuration/object_detectors.md) for more information.
+
+:::
+
```yaml
semantic_search:
enabled: True
model_size: small
```
-Using `large` as the model size setting employs the full Jina model appropriate for high performance systems running a GPU. The `small` size uses a quantized version of the model that uses much less RAM and runs faster on CPU with a very negligible difference in embedding quality. Most users will not need to change this setting from the default of `small`.
+- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
+- Configuring the `small` model employs a quantized version of the model that uses much less RAM and runs faster on CPU with a very negligible difference in embedding quality.
## Usage
diff --git a/frigate/config/semantic_search.py b/frigate/config/semantic_search.py
index fdaf0fff4..2891050a1 100644
--- a/frigate/config/semantic_search.py
+++ b/frigate/config/semantic_search.py
@@ -12,7 +12,6 @@ class SemanticSearchConfig(FrigateBaseModel):
reindex: Optional[bool] = Field(
default=False, title="Reindex all detections on startup."
)
- device: str = Field(default="AUTO", title="Device Type")
model_size: str = Field(
default="small", title="The size of the embeddings model used."
)
diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py
index 4883e0914..9c8d4abbd 100644
--- a/frigate/embeddings/embeddings.py
+++ b/frigate/embeddings/embeddings.py
@@ -127,7 +127,7 @@ class Embeddings:
model_size=config.model_size,
model_type="vision",
requestor=self.requestor,
- device=self.config.device,
+ device="GPU" if config.model_size == "large" else "CPU",
)
def upsert_thumbnail(self, event_id: str, thumbnail: bytes):
diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx
index 843f2de59..4063f3a59 100644
--- a/web/src/components/overlay/detail/SearchDetailDialog.tsx
+++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx
@@ -554,7 +554,7 @@ function ObjectSnapshotTab({
- {state == "reviewing" && (
+ {state == "reviewing" && search.end_time && (
<>
+
+
+ Read the meaning of bounding box colors
+
+
+
From de86c3768778d824116366457db8fb1ae53ceceb Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sat, 12 Oct 2024 07:11:22 -0500
Subject: [PATCH 14/27] Prevent single letter words from matching filter
suggestions (#14297)
---
web/src/components/input/InputWithTags.tsx | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/web/src/components/input/InputWithTags.tsx b/web/src/components/input/InputWithTags.tsx
index 6c06e67e7..5d0786346 100644
--- a/web/src/components/input/InputWithTags.tsx
+++ b/web/src/components/input/InputWithTags.tsx
@@ -7,6 +7,7 @@ import {
LuChevronUp,
LuTrash2,
LuStar,
+ LuSearch,
} from "react-icons/lu";
import {
FilterType,
@@ -161,8 +162,12 @@ export default function InputWithTags({
.map((word) => word.trim())
.lastIndexOf(words.filter((word) => word.trim() !== "").pop() || "");
const currentWord = words[lastNonEmptyWordIndex];
+ if (words.at(-1) === "") {
+ return current_suggestions;
+ }
+
return current_suggestions.filter((suggestion) =>
- suggestion.toLowerCase().includes(currentWord.toLowerCase()),
+ suggestion.toLowerCase().startsWith(currentWord),
);
},
[inputValue, suggestions, currentFilterType],
@@ -636,7 +641,19 @@ export default function InputWithTags({
inputFocused ? "visible" : "hidden",
)}
>
- {(Object.keys(filters).length > 0 || isSimilaritySearch) && (
+ {!currentFilterType && inputValue && (
+
+ handleSearch(inputValue)}
+ >
+
+ Search for "{inputValue}"
+
+
+ )}
+ {(Object.keys(filters).filter((key) => key !== "query").length > 0 ||
+ isSimilaritySearch) && (
{isSimilaritySearch && (
From 1e1610671e44448ccf12e991192cdabf842dfe86 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sat, 12 Oct 2024 07:12:02 -0500
Subject: [PATCH 15/27] Add info icons for popovers in debug view (#14296)
---
web/src/views/settings/ObjectSettingsView.tsx | 96 +++++++++++++++----
1 file changed, 75 insertions(+), 21 deletions(-)
diff --git a/web/src/views/settings/ObjectSettingsView.tsx b/web/src/views/settings/ObjectSettingsView.tsx
index 8b6c99f4f..927d08ee0 100644
--- a/web/src/views/settings/ObjectSettingsView.tsx
+++ b/web/src/views/settings/ObjectSettingsView.tsx
@@ -11,13 +11,17 @@ import { usePersistence } from "@/hooks/use-persistence";
import { Skeleton } from "@/components/ui/skeleton";
import { useCameraActivity } from "@/hooks/use-camera-activity";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
+import {
+ Popover,
+ PopoverContent,
+ PopoverTrigger,
+} from "@/components/ui/popover";
import { ObjectType } from "@/types/ws";
import useDeepMemo from "@/hooks/use-deep-memo";
import { Card } from "@/components/ui/card";
import { getIconForLabel } from "@/utils/iconUtil";
import { capitalizeFirstLetter } from "@/utils/stringUtil";
-import { Link } from "react-router-dom";
-import { LuExternalLink } from "react-icons/lu";
+import { LuInfo } from "react-icons/lu";
type ObjectSettingsViewProps = {
selectedCamera?: string;
@@ -37,6 +41,30 @@ export default function ObjectSettingsView({
param: "bbox",
title: "Bounding boxes",
description: "Show bounding boxes around tracked objects",
+ info: (
+ <>
+
+ Object Bounding Box Colors
+
+
+
+ At startup, different colors will be assigned to each object label
+
+
+ A dark blue thin line indicates that object is not detected at
+ this current point in time
+
+
+ A gray thin line indicates that object is detected as being
+ stationary
+
+
+ A thick line indicates that object is the subject of autotracking
+ (when enabled)
+
+
+ >
+ ),
},
{
param: "timestamp",
@@ -57,12 +85,34 @@ export default function ObjectSettingsView({
param: "motion",
title: "Motion boxes",
description: "Show boxes around areas where motion is detected",
+ info: (
+ <>
+
+ Motion Boxes
+
+
+ Red boxes will be overlaid on areas of the frame where motion is
+ currently being detected
+
+ >
+ ),
},
{
param: "regions",
title: "Regions",
description:
"Show a box of the region of interest sent to the object detector",
+ info: (
+ <>
+
+ Region Boxes
+
+
+ Bright green boxes will be overlaid on areas of interest in the
+ frame that are being sent to the object detector.
+
+ >
+ ),
},
];
@@ -136,17 +186,6 @@ export default function ObjectSettingsView({
statistics. The object list shows a time-delayed summary of detected
objects.
-
-
- Read the meaning of bounding box colors
-
-
-
@@ -158,19 +197,34 @@ export default function ObjectSettingsView({
- {DEBUG_OPTIONS.map(({ param, title, description }) => (
+ {DEBUG_OPTIONS.map(({ param, title, description, info }) => (
-
- {title}
-
-
+
+
+ {title}
+
+ {info && (
+
+
+
+
+ Info
+
+
+
+ {info}
+
+
+ )}
+
+
{description}
From 48c60621b6fcca746a11366d481ee6b1c9c2522d Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sat, 12 Oct 2024 07:19:24 -0500
Subject: [PATCH 16/27] Fix substitution on genai prompts (#14298)
---
frigate/embeddings/maintainer.py | 2 +-
frigate/genai/__init__.py | 7 ++++---
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py
index 238efcfdf..c7060b9a6 100644
--- a/frigate/embeddings/maintainer.py
+++ b/frigate/embeddings/maintainer.py
@@ -259,7 +259,7 @@ class EmbeddingMaintainer(threading.Thread):
camera_config = self.config.cameras[event.camera]
description = self.genai_client.generate_description(
- camera_config, thumbnails, event.label
+ camera_config, thumbnails, event
)
if not description:
diff --git a/frigate/genai/__init__.py b/frigate/genai/__init__.py
index caf13082d..dccb74c1d 100644
--- a/frigate/genai/__init__.py
+++ b/frigate/genai/__init__.py
@@ -5,6 +5,7 @@ import os
from typing import Optional
from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum
+from frigate.models import Event
PROVIDERS = {}
@@ -31,12 +32,12 @@ class GenAIClient:
self,
camera_config: CameraConfig,
thumbnails: list[bytes],
- label: str,
+ event: Event,
) -> Optional[str]:
"""Generate a description for the frame."""
prompt = camera_config.genai.object_prompts.get(
- label, camera_config.genai.prompt
- ).format(label=label)
+ event.label, camera_config.genai.prompt
+ ).format(**event)
return self._send(prompt, thumbnails)
def _init_provider(self):
From 40bb4765d4682d07fe2816aabf2b50464f333d34 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Sat, 12 Oct 2024 07:37:22 -0600
Subject: [PATCH 17/27] Add support for more icons (#14299)
---
web/src/utils/iconUtil.tsx | 49 +++++++++++++++++++++++++++++++++++---
1 file changed, 46 insertions(+), 3 deletions(-)
diff --git a/web/src/utils/iconUtil.tsx b/web/src/utils/iconUtil.tsx
index f5ab70c9e..e3c1f5508 100644
--- a/web/src/utils/iconUtil.tsx
+++ b/web/src/utils/iconUtil.tsx
@@ -7,16 +7,31 @@ import {
FaCarSide,
FaCat,
FaCheckCircle,
+ FaDhl,
FaDog,
FaFedex,
FaFire,
FaFootballBall,
+ FaHockeyPuck,
+ FaHorse,
FaMotorcycle,
FaMouse,
+ FaRegTrashAlt,
+ FaUmbrella,
FaUps,
FaUsps,
} from "react-icons/fa";
-import { GiDeer, GiHummingbird, GiPolarBear, GiSailboat } from "react-icons/gi";
+import {
+ GiDeer,
+ GiFox,
+ GiGoat,
+ GiHummingbird,
+ GiPolarBear,
+ GiPostStamp,
+ GiRabbit,
+ GiRaccoonHead,
+ GiSailboat,
+} from "react-icons/gi";
import { LuBox, LuLassoSelect } from "react-icons/lu";
import * as LuIcons from "react-icons/lu";
import { MdRecordVoiceOver } from "react-icons/md";
@@ -53,8 +68,12 @@ export function getIconForLabel(label: string, className?: string) {
case "bark":
case "dog":
return ;
- case "fire_alarm":
- return ;
+ case "fox":
+ return ;
+ case "goat":
+ return ;
+ case "horse":
+ return ;
case "motorcycle":
return ;
case "mouse":
@@ -63,8 +82,20 @@ export function getIconForLabel(label: string, className?: string) {
return ;
case "person":
return ;
+ case "rabbit":
+ return ;
+ case "raccoon":
+ return ;
+ case "robot_lawnmower":
+ return ;
case "sports_ball":
return ;
+ case "squirrel":
+ return ;
+ case "umbrella":
+ return ;
+ case "waste_bin":
+ return ;
// audio
case "crying":
case "laughter":
@@ -72,9 +103,21 @@ export function getIconForLabel(label: string, className?: string) {
case "speech":
case "yell":
return ;
+ case "fire_alarm":
+ return ;
// sub labels
case "amazon":
return ;
+ case "an_post":
+ case "dpd":
+ case "gls":
+ case "nzpost":
+ case "postnl":
+ case "postnord":
+ case "purolator":
+ return ;
+ case "dhl":
+ return ;
case "fedex":
return ;
case "ups":
From acccc6fd939ec2d5c4c69d6edffcfd0c0b5e7d2c Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sat, 12 Oct 2024 09:32:11 -0500
Subject: [PATCH 18/27] Only revalidate if event update is valid (#14302)
---
web/src/pages/Explore.tsx | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/web/src/pages/Explore.tsx b/web/src/pages/Explore.tsx
index d3c5f7d9b..e81889fb0 100644
--- a/web/src/pages/Explore.tsx
+++ b/web/src/pages/Explore.tsx
@@ -184,7 +184,9 @@ export default function Explore() {
const eventUpdate = useEventUpdate();
useEffect(() => {
- mutate();
+ if (eventUpdate) {
+ mutate();
+ }
// mutate / revalidate when event description updates come in
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [eventUpdate]);
From 3a403392e7a2006aa8b54538411cf21b7469c7ba Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Sat, 12 Oct 2024 12:36:10 -0600
Subject: [PATCH 19/27] Fixes for model downloading (#14305)
* Use different requestor for downloaders
* Handle case where lock is left over from failed partial download
* Close requestor
* Formatting
---
frigate/embeddings/functions/onnx.py | 1 -
frigate/util/downloader.py | 11 +++++++++--
2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/frigate/embeddings/functions/onnx.py b/frigate/embeddings/functions/onnx.py
index 1e50e07b1..e836ba960 100644
--- a/frigate/embeddings/functions/onnx.py
+++ b/frigate/embeddings/functions/onnx.py
@@ -71,7 +71,6 @@ class GenericONNXEmbedding:
model_name=self.model_name,
download_path=self.download_path,
file_names=files_names,
- requestor=self.requestor,
download_func=self._download_model,
)
self.downloader.ensure_model_files()
diff --git a/frigate/util/downloader.py b/frigate/util/downloader.py
index ce5030566..6685b0bb8 100644
--- a/frigate/util/downloader.py
+++ b/frigate/util/downloader.py
@@ -19,6 +19,13 @@ class FileLock:
self.path = path
self.lock_file = f"{path}.lock"
+ # we have not acquired the lock yet so it should not exist
+ if os.path.exists(self.lock_file):
+ try:
+ os.remove(self.lock_file)
+ except Exception:
+ pass
+
def acquire(self):
parent_dir = os.path.dirname(self.lock_file)
os.makedirs(parent_dir, exist_ok=True)
@@ -44,7 +51,6 @@ class ModelDownloader:
download_path: str,
file_names: List[str],
download_func: Callable[[str], None],
- requestor: InterProcessRequestor,
silent: bool = False,
):
self.model_name = model_name
@@ -52,7 +58,7 @@ class ModelDownloader:
self.file_names = file_names
self.download_func = download_func
self.silent = silent
- self.requestor = requestor
+ self.requestor = InterProcessRequestor()
self.download_thread = None
self.download_complete = threading.Event()
@@ -91,6 +97,7 @@ class ModelDownloader:
},
)
+ self.requestor.stop()
self.download_complete.set()
@staticmethod
From 0fc799978004950711b9fbff0b2be64a29482ff3 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sat, 12 Oct 2024 14:44:01 -0500
Subject: [PATCH 20/27] Improve reindex completion flag (#14308)
---
frigate/embeddings/embeddings.py | 5 ++++-
web/src/pages/Explore.tsx | 22 ++++++++++++++--------
web/src/types/ws.ts | 1 +
3 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py
index 9c8d4abbd..b5b166b00 100644
--- a/frigate/embeddings/embeddings.py
+++ b/frigate/embeddings/embeddings.py
@@ -176,6 +176,7 @@ class Embeddings:
"processed_objects": 0,
"total_objects": 0,
"time_remaining": 0,
+ "status": "indexing",
}
self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
@@ -255,6 +256,8 @@ class Embeddings:
"Embedded %d thumbnails and %d descriptions in %s seconds",
totals["thumbnails"],
totals["descriptions"],
- time.time() - st,
+ round(time.time() - st, 1),
)
+ totals["status"] = "completed"
+
self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
diff --git a/web/src/pages/Explore.tsx b/web/src/pages/Explore.tsx
index e81889fb0..4aebaefd1 100644
--- a/web/src/pages/Explore.tsx
+++ b/web/src/pages/Explore.tsx
@@ -195,13 +195,18 @@ export default function Explore() {
const { payload: reindexProgress } = useEmbeddingsReindexProgress();
- const embeddingsReindexing = useMemo(
- () =>
- reindexProgress
- ? reindexProgress.total_objects - reindexProgress.processed_objects > 0
- : undefined,
- [reindexProgress],
- );
+ const embeddingsReindexing = useMemo(() => {
+ if (reindexProgress) {
+ switch (reindexProgress.status) {
+ case "indexing":
+ return true;
+ case "completed":
+ return false;
+ default:
+ return undefined;
+ }
+ }
+ }, [reindexProgress]);
// model states
@@ -320,7 +325,8 @@ export default function Explore() {
Tracked objects processed:
- {reindexProgress.processed_objects}
+ {reindexProgress.processed_objects} /{" "}
+ {reindexProgress.total_objects}
>
diff --git a/web/src/types/ws.ts b/web/src/types/ws.ts
index 238ef4a8a..397b213f6 100644
--- a/web/src/types/ws.ts
+++ b/web/src/types/ws.ts
@@ -68,6 +68,7 @@ export type EmbeddingsReindexProgressType = {
processed_objects: number;
total_objects: number;
time_remaining: number;
+ status: string;
};
export type ToggleableSetting = "ON" | "OFF";
From e8b2fde753f50421dde2ef725b51ac6f5999ee5f Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Sun, 13 Oct 2024 11:33:27 -0600
Subject: [PATCH 21/27] Support batch embeddings when reindexing (#14320)
* Refactor onnx embeddings to handle multiple inputs by default
* Process items in batches when reindexing
---
frigate/embeddings/embeddings.py | 97 ++++++++++++++++++----------
frigate/embeddings/functions/onnx.py | 35 +++++-----
2 files changed, 82 insertions(+), 50 deletions(-)
diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py
index b5b166b00..8d12feb32 100644
--- a/frigate/embeddings/embeddings.py
+++ b/frigate/embeddings/embeddings.py
@@ -6,6 +6,7 @@ import logging
import os
import time
+from numpy import ndarray
from PIL import Image
from playhouse.shortcuts import model_to_dict
@@ -88,12 +89,6 @@ class Embeddings:
},
)
- def jina_text_embedding_function(outputs):
- return outputs[0]
-
- def jina_vision_embedding_function(outputs):
- return outputs[0]
-
self.text_embedding = GenericONNXEmbedding(
model_name="jinaai/jina-clip-v1",
model_file="text_model_fp16.onnx",
@@ -101,7 +96,6 @@ class Embeddings:
download_urls={
"text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
},
- embedding_function=jina_text_embedding_function,
model_size=config.model_size,
model_type="text",
requestor=self.requestor,
@@ -123,14 +117,13 @@ class Embeddings:
model_name="jinaai/jina-clip-v1",
model_file=model_file,
download_urls=download_urls,
- embedding_function=jina_vision_embedding_function,
model_size=config.model_size,
model_type="vision",
requestor=self.requestor,
device="GPU" if config.model_size == "large" else "CPU",
)
- def upsert_thumbnail(self, event_id: str, thumbnail: bytes):
+ def upsert_thumbnail(self, event_id: str, thumbnail: bytes) -> ndarray:
# Convert thumbnail bytes to PIL Image
image = Image.open(io.BytesIO(thumbnail)).convert("RGB")
embedding = self.vision_embedding([image])[0]
@@ -145,7 +138,25 @@ class Embeddings:
return embedding
- def upsert_description(self, event_id: str, description: str):
+ def batch_upsert_thumbnail(self, event_thumbs: dict[str, bytes]) -> list[ndarray]:
+ images = [
+ Image.open(io.BytesIO(thumb)).convert("RGB")
+ for thumb in event_thumbs.values()
+ ]
+ ids = list(event_thumbs.keys())
+ embeddings = self.vision_embedding(images)
+ items = [(ids[i], serialize(embeddings[i])) for i in range(len(ids))]
+
+ self.db.execute_sql(
+ """
+ INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
+ VALUES {}
+ """.format(", ".join(["(?, ?)"] * len(items))),
+ items,
+ )
+ return embeddings
+
+ def upsert_description(self, event_id: str, description: str) -> ndarray:
embedding = self.text_embedding([description])[0]
self.db.execute_sql(
"""
@@ -157,6 +168,21 @@ class Embeddings:
return embedding
+ def batch_upsert_description(self, event_descriptions: dict[str, str]) -> ndarray:
+ embeddings = self.text_embedding(list(event_descriptions.values()))
+ ids = list(event_descriptions.keys())
+ items = [(ids[i], serialize(embeddings[i])) for i in range(len(ids))]
+
+ self.db.execute_sql(
+ """
+ INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
+ VALUES {}
+ """.format(", ".join(["(?, ?)"] * len(items))),
+ items,
+ )
+
+ return embeddings
+
def reindex(self) -> None:
logger.info("Indexing tracked object embeddings...")
@@ -192,9 +218,8 @@ class Embeddings:
)
totals["total_objects"] = total_events
- batch_size = 100
+ batch_size = 32
current_page = 1
- processed_events = 0
events = (
Event.select()
@@ -208,37 +233,43 @@ class Embeddings:
while len(events) > 0:
event: Event
+ batch_thumbs = {}
+ batch_descs = {}
for event in events:
- thumbnail = base64.b64decode(event.thumbnail)
- self.upsert_thumbnail(event.id, thumbnail)
+ batch_thumbs[event.id] = base64.b64decode(event.thumbnail)
totals["thumbnails"] += 1
if description := event.data.get("description", "").strip():
+ batch_descs[event.id] = description
totals["descriptions"] += 1
- self.upsert_description(event.id, description)
totals["processed_objects"] += 1
- # report progress every 10 events so we don't spam the logs
- if (totals["processed_objects"] % 10) == 0:
- progress = (processed_events / total_events) * 100
- logger.debug(
- "Processed %d/%d events (%.2f%% complete) | Thumbnails: %d, Descriptions: %d",
- processed_events,
- total_events,
- progress,
- totals["thumbnails"],
- totals["descriptions"],
- )
+ # run batch embedding
+ self.batch_upsert_thumbnail(batch_thumbs)
- # Calculate time remaining
- elapsed_time = time.time() - st
- avg_time_per_event = elapsed_time / totals["processed_objects"]
- remaining_events = total_events - totals["processed_objects"]
- time_remaining = avg_time_per_event * remaining_events
- totals["time_remaining"] = int(time_remaining)
+ if batch_descs:
+ self.batch_upsert_description(batch_descs)
- self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
+ # report progress every batch so we don't spam the logs
+ progress = (totals["processed_objects"] / total_events) * 100
+ logger.debug(
+ "Processed %d/%d events (%.2f%% complete) | Thumbnails: %d, Descriptions: %d",
+ totals["processed_objects"],
+ total_events,
+ progress,
+ totals["thumbnails"],
+ totals["descriptions"],
+ )
+
+ # Calculate time remaining
+ elapsed_time = time.time() - st
+ avg_time_per_event = elapsed_time / totals["processed_objects"]
+ remaining_events = total_events - totals["processed_objects"]
+ time_remaining = avg_time_per_event * remaining_events
+ totals["time_remaining"] = int(time_remaining)
+
+ self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
# Move to the next page
current_page += 1
diff --git a/frigate/embeddings/functions/onnx.py b/frigate/embeddings/functions/onnx.py
index e836ba960..765a7e88c 100644
--- a/frigate/embeddings/functions/onnx.py
+++ b/frigate/embeddings/functions/onnx.py
@@ -2,7 +2,7 @@ import logging
import os
import warnings
from io import BytesIO
-from typing import Callable, Dict, List, Optional, Union
+from typing import Dict, List, Optional, Union
import numpy as np
import requests
@@ -39,7 +39,6 @@ class GenericONNXEmbedding:
model_name: str,
model_file: str,
download_urls: Dict[str, str],
- embedding_function: Callable[[List[np.ndarray]], np.ndarray],
model_size: str,
model_type: str,
requestor: InterProcessRequestor,
@@ -51,7 +50,6 @@ class GenericONNXEmbedding:
self.tokenizer_file = tokenizer_file
self.requestor = requestor
self.download_urls = download_urls
- self.embedding_function = embedding_function
self.model_type = model_type # 'text' or 'vision'
self.model_size = model_size
self.device = device
@@ -157,7 +155,6 @@ class GenericONNXEmbedding:
self, inputs: Union[List[str], List[Image.Image], List[str]]
) -> List[np.ndarray]:
self._load_model_and_tokenizer()
-
if self.runner is None or (
self.tokenizer is None and self.feature_extractor is None
):
@@ -167,23 +164,27 @@ class GenericONNXEmbedding:
return []
if self.model_type == "text":
- processed_inputs = self.tokenizer(
- inputs, padding=True, truncation=True, return_tensors="np"
- )
+ processed_inputs = [
+ self.tokenizer(text, padding=True, truncation=True, return_tensors="np")
+ for text in inputs
+ ]
else:
processed_images = [self._process_image(img) for img in inputs]
- processed_inputs = self.feature_extractor(
- images=processed_images, return_tensors="np"
- )
+ processed_inputs = [
+ self.feature_extractor(images=image, return_tensors="np")
+ for image in processed_images
+ ]
input_names = self.runner.get_input_names()
- onnx_inputs = {
- name: processed_inputs[name]
- for name in input_names
- if name in processed_inputs
- }
+ onnx_inputs = {name: [] for name in input_names}
+ input: dict[str, any]
+ for input in processed_inputs:
+ for key, value in input.items():
+ if key in input_names:
+ onnx_inputs[key].append(value[0])
- outputs = self.runner.run(onnx_inputs)
- embeddings = self.embedding_function(outputs)
+ for key in onnx_inputs.keys():
+ onnx_inputs[key] = np.array(onnx_inputs[key])
+ embeddings = self.runner.run(onnx_inputs)[0]
return [embedding for embedding in embeddings]
From 92ac025e43f230d3cd16d2dba3d67c8de2b2f8b7 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sun, 13 Oct 2024 12:34:39 -0500
Subject: [PATCH 22/27] Don't show submit to frigate plus card if plus is
disabled (#14319)
---
.../overlay/detail/SearchDetailDialog.tsx | 102 +++++++++---------
1 file changed, 52 insertions(+), 50 deletions(-)
diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx
index 4063f3a59..a04d5b8c1 100644
--- a/web/src/components/overlay/detail/SearchDetailDialog.tsx
+++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx
@@ -536,57 +536,59 @@ function ObjectSnapshotTab({
/>
)}
-
-
-
-
- Submit To Frigate+
-
-
- Objects in locations you want to avoid are not false
- positives. Submitting them as false positives will confuse
- the model.
-
-
-
-
- {state == "reviewing" && search.end_time && (
- <>
-
{
- setState("uploading");
- onSubmitToPlus(false);
- }}
- >
- This is a {search?.label}
-
-
{
- setState("uploading");
- onSubmitToPlus(true);
- }}
- >
- This is not a {search?.label}
-
- >
- )}
- {state == "uploading" &&
}
- {state == "submitted" && (
-
-
- Submitted
+ {search.plus_id !== "not_enabled" && (
+
+
+
+
+ Submit To Frigate+
- )}
-
-
-
+
+ Objects in locations you want to avoid are not false
+ positives. Submitting them as false positives will confuse
+ the model.
+
+
+
+
+ {state == "reviewing" && search.end_time && (
+ <>
+
{
+ setState("uploading");
+ onSubmitToPlus(false);
+ }}
+ >
+ This is a {search?.label}
+
+
{
+ setState("uploading");
+ onSubmitToPlus(true);
+ }}
+ >
+ This is not a {search?.label}
+
+ >
+ )}
+ {state == "uploading" &&
}
+ {state == "submitted" && (
+
+
+ Submitted
+
+ )}
+
+
+
+ )}
From 66d0ad58031bcd1fc8811503313038c3e4683552 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sun, 13 Oct 2024 12:46:40 -0500
Subject: [PATCH 23/27] See a preview when using the timeline to export footage
(#14321)
* custom hook and generic video player component
* add export preview dialog
* export preview dialog when using timeline export
* refactor search detail dialog to use new generic video player component
* clean up
---
.../components/filter/ReviewFilterGroup.tsx | 2 +
web/src/components/overlay/ExportDialog.tsx | 59 ++++++++-
.../overlay/MobileReviewSettingsDrawer.tsx | 13 +-
.../components/overlay/SaveExportOverlay.tsx | 28 +++--
.../overlay/detail/SearchDetailDialog.tsx | 118 +++++-------------
.../components/player/GenericVideoPlayer.tsx | 52 ++++++++
web/src/hooks/use-video-dimensions.ts | 45 +++++++
web/src/views/recording/RecordingView.tsx | 5 +
8 files changed, 224 insertions(+), 98 deletions(-)
create mode 100644 web/src/components/player/GenericVideoPlayer.tsx
create mode 100644 web/src/hooks/use-video-dimensions.ts
diff --git a/web/src/components/filter/ReviewFilterGroup.tsx b/web/src/components/filter/ReviewFilterGroup.tsx
index 6d3ee010a..a52755e6c 100644
--- a/web/src/components/filter/ReviewFilterGroup.tsx
+++ b/web/src/components/filter/ReviewFilterGroup.tsx
@@ -241,6 +241,8 @@ export default function ReviewFilterGroup({
mode="none"
setMode={() => {}}
setRange={() => {}}
+ showExportPreview={false}
+ setShowExportPreview={() => {}}
/>
)}
diff --git a/web/src/components/overlay/ExportDialog.tsx b/web/src/components/overlay/ExportDialog.tsx
index c9018c579..577415420 100644
--- a/web/src/components/overlay/ExportDialog.tsx
+++ b/web/src/components/overlay/ExportDialog.tsx
@@ -2,6 +2,7 @@ import { useCallback, useMemo, useState } from "react";
import {
Dialog,
DialogContent,
+ DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
@@ -22,10 +23,13 @@ import { FrigateConfig } from "@/types/frigateConfig";
import { Popover, PopoverContent, PopoverTrigger } from "../ui/popover";
import { TimezoneAwareCalendar } from "./ReviewActivityCalendar";
import { SelectSeparator } from "../ui/select";
-import { isDesktop, isIOS } from "react-device-detect";
+import { isDesktop, isIOS, isMobile } from "react-device-detect";
import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
import SaveExportOverlay from "./SaveExportOverlay";
import { getUTCOffset } from "@/utils/dateUtil";
+import { baseUrl } from "@/api/baseUrl";
+import { cn } from "@/lib/utils";
+import { GenericVideoPlayer } from "../player/GenericVideoPlayer";
const EXPORT_OPTIONS = [
"1",
@@ -44,8 +48,10 @@ type ExportDialogProps = {
currentTime: number;
range?: TimeRange;
mode: ExportMode;
+ showPreview: boolean;
setRange: (range: TimeRange | undefined) => void;
setMode: (mode: ExportMode) => void;
+ setShowPreview: (showPreview: boolean) => void;
};
export default function ExportDialog({
camera,
@@ -53,10 +59,13 @@ export default function ExportDialog({
currentTime,
range,
mode,
+ showPreview,
setRange,
setMode,
+ setShowPreview,
}: ExportDialogProps) {
const [name, setName] = useState("");
+
const onStartExport = useCallback(() => {
if (!range) {
toast.error("No valid time range selected", { position: "top-center" });
@@ -109,9 +118,16 @@ export default function ExportDialog({
return (
<>
+
setShowPreview(true)}
onSave={() => onStartExport()}
onCancel={() => setMode("none")}
/>
@@ -525,3 +541,44 @@ function CustomTimeSelector({
);
}
+
+type ExportPreviewDialogProps = {
+ camera: string;
+ range?: TimeRange;
+ showPreview: boolean;
+ setShowPreview: (showPreview: boolean) => void;
+};
+
+export function ExportPreviewDialog({
+ camera,
+ range,
+ showPreview,
+ setShowPreview,
+}: ExportPreviewDialogProps) {
+ if (!range) {
+ return null;
+ }
+
+ const source = `${baseUrl}vod/${camera}/start/${range.after}/end/${range.before}/index.m3u8`;
+
+ return (
+
+
+
+ Preview Export
+
+ Preview Export
+
+
+
+
+
+ );
+}
diff --git a/web/src/components/overlay/MobileReviewSettingsDrawer.tsx b/web/src/components/overlay/MobileReviewSettingsDrawer.tsx
index c9879b8cb..fe0e13c11 100644
--- a/web/src/components/overlay/MobileReviewSettingsDrawer.tsx
+++ b/web/src/components/overlay/MobileReviewSettingsDrawer.tsx
@@ -3,7 +3,7 @@ import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
import { Button } from "../ui/button";
import { FaArrowDown, FaCalendarAlt, FaCog, FaFilter } from "react-icons/fa";
import { TimeRange } from "@/types/timeline";
-import { ExportContent } from "./ExportDialog";
+import { ExportContent, ExportPreviewDialog } from "./ExportDialog";
import { ExportMode } from "@/types/filter";
import ReviewActivityCalendar from "./ReviewActivityCalendar";
import { SelectSeparator } from "../ui/select";
@@ -34,12 +34,14 @@ type MobileReviewSettingsDrawerProps = {
currentTime: number;
range?: TimeRange;
mode: ExportMode;
+ showExportPreview: boolean;
reviewSummary?: ReviewSummary;
allLabels: string[];
allZones: string[];
onUpdateFilter: (filter: ReviewFilter) => void;
setRange: (range: TimeRange | undefined) => void;
setMode: (mode: ExportMode) => void;
+ setShowExportPreview: (showPreview: boolean) => void;
};
export default function MobileReviewSettingsDrawer({
features = DEFAULT_DRAWER_FEATURES,
@@ -50,12 +52,14 @@ export default function MobileReviewSettingsDrawer({
currentTime,
range,
mode,
+ showExportPreview,
reviewSummary,
allLabels,
allZones,
onUpdateFilter,
setRange,
setMode,
+ setShowExportPreview,
}: MobileReviewSettingsDrawerProps) {
const [drawerMode, setDrawerMode] = useState("none");
@@ -282,6 +286,13 @@ export default function MobileReviewSettingsDrawer({
show={mode == "timeline"}
onSave={() => onStartExport()}
onCancel={() => setMode("none")}
+ onPreview={() => setShowExportPreview(true)}
+ />
+
void;
onSave: () => void;
onCancel: () => void;
};
export default function SaveExportOverlay({
className,
show,
+ onPreview,
onSave,
onCancel,
}: SaveExportOverlayProps) {
@@ -24,6 +26,22 @@ export default function SaveExportOverlay({
"mx-auto mt-5 text-center",
)}
>
+
+
+ Cancel
+
+
+
+ Preview Export
+
Save Export
-
-
- Cancel
-
);
diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx
index a04d5b8c1..45c0659d8 100644
--- a/web/src/components/overlay/detail/SearchDetailDialog.tsx
+++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx
@@ -6,7 +6,7 @@ import { useFormattedTimestamp } from "@/hooks/use-date-utils";
import { getIconForLabel } from "@/utils/iconUtil";
import { useApiHost } from "@/api";
import { Button } from "../../ui/button";
-import { useCallback, useEffect, useMemo, useRef, useState } from "react";
+import { useCallback, useEffect, useMemo, useState } from "react";
import axios from "axios";
import { toast } from "sonner";
import { Textarea } from "../../ui/textarea";
@@ -21,7 +21,6 @@ import {
DialogTitle,
} from "@/components/ui/dialog";
import { Event } from "@/types/event";
-import HlsVideoPlayer from "@/components/player/HlsVideoPlayer";
import { baseUrl } from "@/api/baseUrl";
import { cn } from "@/lib/utils";
import ActivityIndicator from "@/components/indicators/activity-indicator";
@@ -62,8 +61,7 @@ import { TransformComponent, TransformWrapper } from "react-zoom-pan-pinch";
import { Card, CardContent } from "@/components/ui/card";
import useImageLoaded from "@/hooks/use-image-loaded";
import ImageLoadingIndicator from "@/components/indicators/ImageLoadingIndicator";
-import { useResizeObserver } from "@/hooks/resize-observer";
-import { VideoResolutionType } from "@/types/live";
+import { GenericVideoPlayer } from "@/components/player/GenericVideoPlayer";
const SEARCH_TABS = [
"details",
@@ -599,99 +597,45 @@ function ObjectSnapshotTab({
type VideoTabProps = {
search: SearchResult;
};
-function VideoTab({ search }: VideoTabProps) {
- const [isLoading, setIsLoading] = useState(true);
- const videoRef = useRef(null);
-
- const endTime = useMemo(() => search.end_time ?? Date.now() / 1000, [search]);
+export function VideoTab({ search }: VideoTabProps) {
const navigate = useNavigate();
const { data: reviewItem } = useSWR([
`review/event/${search.id}`,
]);
+ const endTime = useMemo(() => search.end_time ?? Date.now() / 1000, [search]);
- const containerRef = useRef(null);
-
- const [{ width: containerWidth, height: containerHeight }] =
- useResizeObserver(containerRef);
- const [videoResolution, setVideoResolution] = useState({
- width: 0,
- height: 0,
- });
-
- const videoAspectRatio = useMemo(() => {
- return videoResolution.width / videoResolution.height || 16 / 9;
- }, [videoResolution]);
-
- const containerAspectRatio = useMemo(() => {
- return containerWidth / containerHeight || 16 / 9;
- }, [containerWidth, containerHeight]);
-
- const videoDimensions = useMemo(() => {
- if (!containerWidth || !containerHeight)
- return { width: "100%", height: "100%" };
-
- if (containerAspectRatio > videoAspectRatio) {
- const height = containerHeight;
- const width = height * videoAspectRatio;
- return { width: `${width}px`, height: `${height}px` };
- } else {
- const width = containerWidth;
- const height = width / videoAspectRatio;
- return { width: `${width}px`, height: `${height}px` };
- }
- }, [containerWidth, containerHeight, videoAspectRatio, containerAspectRatio]);
+ const source = `${baseUrl}vod/${search.camera}/start/${search.start_time}/end/${endTime}/index.m3u8`;
return (
-
-
- {(isLoading || !reviewItem) && (
-
- )}
+
+ {reviewItem && (
-
setIsLoading(false)}
- setFullResolution={setVideoResolution}
- />
- {!isLoading && reviewItem && (
-
-
-
- {
- if (reviewItem?.id) {
- const params = new URLSearchParams({
- id: reviewItem.id,
- }).toString();
- navigate(`/review?${params}`);
- }
- }}
- >
-
-
-
- View in History
-
-
+ className={cn(
+ "absolute top-2 z-10 flex items-center",
+ isIOS ? "right-8" : "right-2",
)}
+ >
+
+
+ {
+ if (reviewItem?.id) {
+ const params = new URLSearchParams({
+ id: reviewItem.id,
+ }).toString();
+ navigate(`/review?${params}`);
+ }
+ }}
+ >
+
+
+
+ View in History
+
-
-
+ )}
+
);
}
diff --git a/web/src/components/player/GenericVideoPlayer.tsx b/web/src/components/player/GenericVideoPlayer.tsx
new file mode 100644
index 000000000..75f56e96f
--- /dev/null
+++ b/web/src/components/player/GenericVideoPlayer.tsx
@@ -0,0 +1,52 @@
+import React, { useState, useRef } from "react";
+import { useVideoDimensions } from "@/hooks/use-video-dimensions";
+import HlsVideoPlayer from "./HlsVideoPlayer";
+import ActivityIndicator from "../indicators/activity-indicator";
+
+type GenericVideoPlayerProps = {
+ source: string;
+ onPlaying?: () => void;
+ children?: React.ReactNode;
+};
+
+export function GenericVideoPlayer({
+ source,
+ onPlaying,
+ children,
+}: GenericVideoPlayerProps) {
+ const [isLoading, setIsLoading] = useState(true);
+ const videoRef = useRef(null);
+ const containerRef = useRef(null);
+ const { videoDimensions, setVideoResolution } =
+ useVideoDimensions(containerRef);
+
+ return (
+
+
+ {isLoading && (
+
+ )}
+
+ {
+ setIsLoading(false);
+ onPlaying?.();
+ }}
+ setFullResolution={setVideoResolution}
+ />
+ {!isLoading && children}
+
+
+
+ );
+}
diff --git a/web/src/hooks/use-video-dimensions.ts b/web/src/hooks/use-video-dimensions.ts
new file mode 100644
index 000000000..448dd5078
--- /dev/null
+++ b/web/src/hooks/use-video-dimensions.ts
@@ -0,0 +1,45 @@
+import { useState, useMemo } from "react";
+import { useResizeObserver } from "./resize-observer";
+
+export type VideoResolutionType = {
+ width: number;
+ height: number;
+};
+
+export function useVideoDimensions(
+ containerRef: React.RefObject,
+) {
+ const [{ width: containerWidth, height: containerHeight }] =
+ useResizeObserver(containerRef);
+ const [videoResolution, setVideoResolution] = useState({
+ width: 0,
+ height: 0,
+ });
+
+ const videoAspectRatio = useMemo(() => {
+ return videoResolution.width / videoResolution.height || 16 / 9;
+ }, [videoResolution]);
+
+ const containerAspectRatio = useMemo(() => {
+ return containerWidth / containerHeight || 16 / 9;
+ }, [containerWidth, containerHeight]);
+
+ const videoDimensions = useMemo(() => {
+ if (!containerWidth || !containerHeight)
+ return { width: "100%", height: "100%" };
+ if (containerAspectRatio > videoAspectRatio) {
+ const height = containerHeight;
+ const width = height * videoAspectRatio;
+ return { width: `${width}px`, height: `${height}px` };
+ } else {
+ const width = containerWidth;
+ const height = width / videoAspectRatio;
+ return { width: `${width}px`, height: `${height}px` };
+ }
+ }, [containerWidth, containerHeight, videoAspectRatio, containerAspectRatio]);
+
+ return {
+ videoDimensions,
+ setVideoResolution,
+ };
+}
diff --git a/web/src/views/recording/RecordingView.tsx b/web/src/views/recording/RecordingView.tsx
index 0c59cef38..535c412d4 100644
--- a/web/src/views/recording/RecordingView.tsx
+++ b/web/src/views/recording/RecordingView.tsx
@@ -140,6 +140,7 @@ export function RecordingView({
const [exportMode, setExportMode] = useState("none");
const [exportRange, setExportRange] = useState();
+ const [showExportPreview, setShowExportPreview] = useState(false);
// move to next clip
@@ -412,6 +413,7 @@ export function RecordingView({
latestTime={timeRange.before}
mode={exportMode}
range={exportRange}
+ showPreview={showExportPreview}
setRange={(range) => {
setExportRange(range);
@@ -420,6 +422,7 @@ export function RecordingView({
}
}}
setMode={setExportMode}
+ setShowPreview={setShowExportPreview}
/>
)}
{isDesktop && (
@@ -473,11 +476,13 @@ export function RecordingView({
latestTime={timeRange.before}
mode={exportMode}
range={exportRange}
+ showExportPreview={showExportPreview}
allLabels={reviewFilterList.labels}
allZones={reviewFilterList.zones}
onUpdateFilter={updateFilter}
setRange={setExportRange}
setMode={setExportMode}
+ setShowExportPreview={setShowExportPreview}
/>
From 1ec459ea3a0cf123e04514e059e9a5af0c51aac3 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sun, 13 Oct 2024 16:25:13 -0500
Subject: [PATCH 24/27] Batch embeddings fixes (#14325)
* fixes
* more readable loops
* more robust key check and warning message
* ensure we get reindex progress on mount
* use correct var for length
---
frigate/embeddings/embeddings.py | 40 +++++++++++++++++-----------
frigate/embeddings/functions/onnx.py | 16 ++++++++---
web/src/pages/Explore.tsx | 29 +++++++++++++++-----
3 files changed, 60 insertions(+), 25 deletions(-)
diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py
index 8d12feb32..cb0626f7b 100644
--- a/frigate/embeddings/embeddings.py
+++ b/frigate/embeddings/embeddings.py
@@ -145,13 +145,18 @@ class Embeddings:
]
ids = list(event_thumbs.keys())
embeddings = self.vision_embedding(images)
- items = [(ids[i], serialize(embeddings[i])) for i in range(len(ids))]
+
+ items = []
+
+ for i in range(len(ids)):
+ items.append(ids[i])
+ items.append(serialize(embeddings[i]))
self.db.execute_sql(
"""
INSERT OR REPLACE INTO vec_thumbnails(id, thumbnail_embedding)
VALUES {}
- """.format(", ".join(["(?, ?)"] * len(items))),
+ """.format(", ".join(["(?, ?)"] * len(ids))),
items,
)
return embeddings
@@ -171,13 +176,18 @@ class Embeddings:
def batch_upsert_description(self, event_descriptions: dict[str, str]) -> ndarray:
embeddings = self.text_embedding(list(event_descriptions.values()))
ids = list(event_descriptions.keys())
- items = [(ids[i], serialize(embeddings[i])) for i in range(len(ids))]
+
+ items = []
+
+ for i in range(len(ids)):
+ items.append(ids[i])
+ items.append(serialize(embeddings[i]))
self.db.execute_sql(
"""
INSERT OR REPLACE INTO vec_descriptions(id, description_embedding)
VALUES {}
- """.format(", ".join(["(?, ?)"] * len(items))),
+ """.format(", ".join(["(?, ?)"] * len(ids))),
items,
)
@@ -196,16 +206,6 @@ class Embeddings:
os.remove(os.path.join(CONFIG_DIR, ".search_stats.json"))
st = time.time()
- totals = {
- "thumbnails": 0,
- "descriptions": 0,
- "processed_objects": 0,
- "total_objects": 0,
- "time_remaining": 0,
- "status": "indexing",
- }
-
- self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
# Get total count of events to process
total_events = (
@@ -216,11 +216,21 @@ class Embeddings:
)
.count()
)
- totals["total_objects"] = total_events
batch_size = 32
current_page = 1
+ totals = {
+ "thumbnails": 0,
+ "descriptions": 0,
+ "processed_objects": total_events - 1 if total_events < batch_size else 0,
+ "total_objects": total_events,
+ "time_remaining": 0 if total_events < batch_size else -1,
+ "status": "indexing",
+ }
+
+ self.requestor.send_data(UPDATE_EMBEDDINGS_REINDEX_PROGRESS, totals)
+
events = (
Event.select()
.where(
diff --git a/frigate/embeddings/functions/onnx.py b/frigate/embeddings/functions/onnx.py
index 765a7e88c..574822d59 100644
--- a/frigate/embeddings/functions/onnx.py
+++ b/frigate/embeddings/functions/onnx.py
@@ -164,8 +164,15 @@ class GenericONNXEmbedding:
return []
if self.model_type == "text":
+ max_length = max(len(self.tokenizer.encode(text)) for text in inputs)
processed_inputs = [
- self.tokenizer(text, padding=True, truncation=True, return_tensors="np")
+ self.tokenizer(
+ text,
+ padding="max_length",
+ truncation=True,
+ max_length=max_length,
+ return_tensors="np",
+ )
for text in inputs
]
else:
@@ -183,8 +190,11 @@ class GenericONNXEmbedding:
if key in input_names:
onnx_inputs[key].append(value[0])
- for key in onnx_inputs.keys():
- onnx_inputs[key] = np.array(onnx_inputs[key])
+ for key in input_names:
+ if onnx_inputs.get(key):
+ onnx_inputs[key] = np.stack(onnx_inputs[key])
+ else:
+ logger.warning(f"Expected input '{key}' not found in onnx_inputs")
embeddings = self.runner.run(onnx_inputs)[0]
return [embedding for embedding in embeddings]
diff --git a/web/src/pages/Explore.tsx b/web/src/pages/Explore.tsx
index 4aebaefd1..e4bb49521 100644
--- a/web/src/pages/Explore.tsx
+++ b/web/src/pages/Explore.tsx
@@ -193,6 +193,17 @@ export default function Explore() {
// embeddings reindex progress
+ const { send: sendReindexCommand } = useWs(
+ "embeddings_reindex_progress",
+ "embeddingsReindexProgress",
+ );
+
+ useEffect(() => {
+ sendReindexCommand("embeddingsReindexProgress");
+ // only run on mount
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, []);
+
const { payload: reindexProgress } = useEmbeddingsReindexProgress();
const embeddingsReindexing = useMemo(() => {
@@ -210,10 +221,10 @@ export default function Explore() {
// model states
- const { send: sendCommand } = useWs("model_state", "modelState");
+ const { send: sendModelCommand } = useWs("model_state", "modelState");
useEffect(() => {
- sendCommand("modelState");
+ sendModelCommand("modelState");
// only run on mount
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
@@ -299,14 +310,18 @@ export default function Explore() {
/>
- {reindexProgress.time_remaining >= 0 && (
+ {reindexProgress.time_remaining !== null && (
- Estimated time remaining:
+ {reindexProgress.time_remaining === -1
+ ? "Starting up..."
+ : "Estimated time remaining:"}
- {formatSecondsToDuration(
- reindexProgress.time_remaining,
- ) || "Finishing shortly"}
+ {reindexProgress.time_remaining >= 0 &&
+ (formatSecondsToDuration(
+ reindexProgress.time_remaining,
+ ) ||
+ "Finishing shortly")}
)}
From 833768172d2c60d5af5316d8b3e72c06b1f4777f Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sun, 13 Oct 2024 16:48:54 -0500
Subject: [PATCH 25/27] UI tweaks (#14326)
* small tweaks for frigate+ submission and debug object list
* exclude attributes from labels colormap
---
frigate/detectors/detector_config.py | 12 ++++++++++--
.../components/overlay/detail/SearchDetailDialog.tsx | 4 ++--
web/src/views/settings/ObjectSettingsView.tsx | 2 +-
3 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py
index bc0a0ff11..11f08a86c 100644
--- a/frigate/detectors/detector_config.py
+++ b/frigate/detectors/detector_config.py
@@ -157,8 +157,16 @@ class ModelConfig(BaseModel):
self._model_hash = file_hash.hexdigest()
def create_colormap(self, enabled_labels: set[str]) -> None:
- """Get a list of colors for enabled labels."""
- colors = generate_color_palette(len(enabled_labels))
+ """Get a list of colors for enabled labels that aren't attributes."""
+ colors = generate_color_palette(
+ len(
+ list(
+ filter(
+ lambda label: label not in self._all_attributes, enabled_labels
+ )
+ )
+ )
+ )
self._colormap = {label: color for label, color in zip(enabled_labels, colors)}
diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx
index 45c0659d8..94d28c7c8 100644
--- a/web/src/components/overlay/detail/SearchDetailDialog.tsx
+++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx
@@ -534,7 +534,7 @@ function ObjectSnapshotTab({
/>
)}
- {search.plus_id !== "not_enabled" && (
+ {search.plus_id !== "not_enabled" && search.end_time && (
@@ -553,7 +553,7 @@ function ObjectSnapshotTab({
- {state == "reviewing" && search.end_time && (
+ {state == "reviewing" && (
<>
- {capitalizeFirstLetter(obj.label)}
+ {capitalizeFirstLetter(obj.label.replaceAll("_", " "))}
From 4ca267ea17b161fb0d127a450e6e7a567f256b0f Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Sun, 13 Oct 2024 20:36:49 -0500
Subject: [PATCH 26/27] Search UI tweaks and bugfixes (#14328)
* Publish model state and embeddings reindex in dispatcher onConnect
* remove unneeded from explore
* add embeddings reindex progress to statusbar
* don't allow right click or show similar button if semantic search is disabled
* fix status bar
---
frigate/comms/dispatcher.py | 5 ++
web/src/components/Statusbar.tsx | 18 +++++++
.../overlay/detail/SearchDetailDialog.tsx | 22 ++++----
web/src/pages/Explore.tsx | 53 ++++++-------------
web/src/views/search/SearchView.tsx | 6 ++-
5 files changed, 56 insertions(+), 48 deletions(-)
diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py
index 4a3862eaf..1f480fa9c 100644
--- a/frigate/comms/dispatcher.py
+++ b/frigate/comms/dispatcher.py
@@ -179,6 +179,11 @@ class Dispatcher:
}
self.publish("camera_activity", json.dumps(camera_status))
+ self.publish("model_state", json.dumps(self.model_state.copy()))
+ self.publish(
+ "embeddings_reindex_progress",
+ json.dumps(self.embeddings_reindex.copy()),
+ )
# Dictionary mapping topic to handlers
topic_handlers = {
diff --git a/web/src/components/Statusbar.tsx b/web/src/components/Statusbar.tsx
index 41bd9372f..1b20b26f6 100644
--- a/web/src/components/Statusbar.tsx
+++ b/web/src/components/Statusbar.tsx
@@ -1,3 +1,4 @@
+import { useEmbeddingsReindexProgress } from "@/api/ws";
import {
StatusBarMessagesContext,
StatusMessage,
@@ -41,6 +42,23 @@ export default function Statusbar() {
});
}, [potentialProblems, addMessage, clearMessages]);
+ const { payload: reindexState } = useEmbeddingsReindexProgress();
+
+ useEffect(() => {
+ if (reindexState) {
+ if (reindexState.status == "indexing") {
+ clearMessages("embeddings-reindex");
+ addMessage(
+ "embeddings-reindex",
+ `Reindexing embeddings (${Math.floor((reindexState.processed_objects / reindexState.total_objects) * 100)}% complete)`,
+ );
+ }
+ if (reindexState.status === "completed") {
+ clearMessages("embeddings-reindex");
+ }
+ }
+ }, [reindexState, addMessage, clearMessages]);
+
return (
diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx
index 94d28c7c8..1cee70aaa 100644
--- a/web/src/components/overlay/detail/SearchDetailDialog.tsx
+++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx
@@ -396,17 +396,19 @@ function ObjectDetailsTab({
draggable={false}
src={`${apiHost}api/events/${search.id}/thumbnail.jpg`}
/>
- {
- setSearch(undefined);
+ {config?.semantic_search.enabled && (
+ {
+ setSearch(undefined);
- if (setSimilarity) {
- setSimilarity();
- }
- }}
- >
- Find Similar
-
+ if (setSimilarity) {
+ setSimilarity();
+ }
+ }}
+ >
+ Find Similar
+
+ )}
diff --git a/web/src/pages/Explore.tsx b/web/src/pages/Explore.tsx
index e4bb49521..03a60a8d0 100644
--- a/web/src/pages/Explore.tsx
+++ b/web/src/pages/Explore.tsx
@@ -2,7 +2,6 @@ import {
useEmbeddingsReindexProgress,
useEventUpdate,
useModelState,
- useWs,
} from "@/api/ws";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import AnimatedCircularProgressBar from "@/components/ui/circular-progress-bar";
@@ -193,22 +192,11 @@ export default function Explore() {
// embeddings reindex progress
- const { send: sendReindexCommand } = useWs(
- "embeddings_reindex_progress",
- "embeddingsReindexProgress",
- );
-
- useEffect(() => {
- sendReindexCommand("embeddingsReindexProgress");
- // only run on mount
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, []);
-
- const { payload: reindexProgress } = useEmbeddingsReindexProgress();
+ const { payload: reindexState } = useEmbeddingsReindexProgress();
const embeddingsReindexing = useMemo(() => {
- if (reindexProgress) {
- switch (reindexProgress.status) {
+ if (reindexState) {
+ switch (reindexState.status) {
case "indexing":
return true;
case "completed":
@@ -217,18 +205,10 @@ export default function Explore() {
return undefined;
}
}
- }, [reindexProgress]);
+ }, [reindexState]);
// model states
- const { send: sendModelCommand } = useWs("model_state", "modelState");
-
- useEffect(() => {
- sendModelCommand("modelState");
- // only run on mount
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, []);
-
const { payload: textModelState } = useModelState(
"jinaai/jina-clip-v1-text_model_fp16.onnx",
);
@@ -274,7 +254,8 @@ export default function Explore() {
if (
config?.semantic_search.enabled &&
- (!textModelState ||
+ (!reindexState ||
+ !textModelState ||
!textTokenizerState ||
!visionModelState ||
!visionFeatureExtractorState)
@@ -303,24 +284,22 @@ export default function Explore() {
- {reindexProgress.time_remaining !== null && (
+ {reindexState.time_remaining !== null && (
- {reindexProgress.time_remaining === -1
+ {reindexState.time_remaining === -1
? "Starting up..."
: "Estimated time remaining:"}
- {reindexProgress.time_remaining >= 0 &&
- (formatSecondsToDuration(
- reindexProgress.time_remaining,
- ) ||
+ {reindexState.time_remaining >= 0 &&
+ (formatSecondsToDuration(reindexState.time_remaining) ||
"Finishing shortly")}
)}
@@ -328,20 +307,20 @@ export default function Explore() {
Thumbnails embedded:
- {reindexProgress.thumbnails}
+ {reindexState.thumbnails}
Descriptions embedded:
- {reindexProgress.descriptions}
+ {reindexState.descriptions}
Tracked objects processed:
- {reindexProgress.processed_objects} /{" "}
- {reindexProgress.total_objects}
+ {reindexState.processed_objects} /{" "}
+ {reindexState.total_objects}
>
diff --git a/web/src/views/search/SearchView.tsx b/web/src/views/search/SearchView.tsx
index 4c33f7dc8..e64affa36 100644
--- a/web/src/views/search/SearchView.tsx
+++ b/web/src/views/search/SearchView.tsx
@@ -393,7 +393,11 @@ export default function SearchView({
>
setSimilaritySearch(value)}
+ findSimilar={() => {
+ if (config?.semantic_search.enabled) {
+ setSimilaritySearch(value);
+ }
+ }}
onClick={() => onSelectSearch(value, index)}
/>
{(searchTerm ||
From 9adffa1ef5b2b8e18ce791faf6c245e86c0d5785 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Sun, 13 Oct 2024 20:34:51 -0600
Subject: [PATCH 27/27] Detection adjustments (#14329)
---
frigate/detectors/detector_config.py | 16 +++++---------
frigate/util/model.py | 33 +++++++++++++---------------
2 files changed, 21 insertions(+), 28 deletions(-)
diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py
index 11f08a86c..90937d8f4 100644
--- a/frigate/detectors/detector_config.py
+++ b/frigate/detectors/detector_config.py
@@ -158,17 +158,13 @@ class ModelConfig(BaseModel):
def create_colormap(self, enabled_labels: set[str]) -> None:
"""Get a list of colors for enabled labels that aren't attributes."""
- colors = generate_color_palette(
- len(
- list(
- filter(
- lambda label: label not in self._all_attributes, enabled_labels
- )
- )
- )
+ enabled_trackable_labels = list(
+ filter(lambda label: label not in self._all_attributes, enabled_labels)
)
-
- self._colormap = {label: color for label, color in zip(enabled_labels, colors)}
+ colors = generate_color_palette(len(enabled_trackable_labels))
+ self._colormap = {
+ label: color for label, color in zip(enabled_trackable_labels, colors)
+ }
model_config = ConfigDict(extra="forbid", protected_namespaces=())
diff --git a/frigate/util/model.py b/frigate/util/model.py
index 008f5169a..685cd34ec 100644
--- a/frigate/util/model.py
+++ b/frigate/util/model.py
@@ -25,28 +25,23 @@ def get_ort_providers(
],
)
- providers = ort.get_available_providers()
+ providers = []
options = []
- for provider in providers:
- if provider == "TensorrtExecutionProvider":
- os.makedirs("/config/model_cache/tensorrt/ort/trt-engines", exist_ok=True)
-
- if not requires_fp16 or os.environ.get("USE_FP_16", "True") != "False":
- options.append(
- {
- "arena_extend_strategy": "kSameAsRequested",
- "trt_fp16_enable": requires_fp16,
- "trt_timing_cache_enable": True,
- "trt_engine_cache_enable": True,
- "trt_timing_cache_path": "/config/model_cache/tensorrt/ort",
- "trt_engine_cache_path": "/config/model_cache/tensorrt/ort/trt-engines",
- }
- )
- else:
- options.append({})
+ for provider in ort.get_available_providers():
+ if provider == "CUDAExecutionProvider":
+ providers.append(provider)
+ options.append(
+ {
+ "arena_extend_strategy": "kSameAsRequested",
+ }
+ )
+ elif provider == "TensorrtExecutionProvider":
+ # TensorrtExecutionProvider uses too much memory without options to control it
+ pass
elif provider == "OpenVINOExecutionProvider":
os.makedirs("/config/model_cache/openvino/ort", exist_ok=True)
+ providers.append(provider)
options.append(
{
"arena_extend_strategy": "kSameAsRequested",
@@ -55,12 +50,14 @@ def get_ort_providers(
}
)
elif provider == "CPUExecutionProvider":
+ providers.append(provider)
options.append(
{
"arena_extend_strategy": "kSameAsRequested",
}
)
else:
+ providers.append(provider)
options.append({})
return (providers, options)