2024-10-07 23:30:45 +03:00
|
|
|
"""SQLite-vec embeddings database."""
|
2024-06-22 00:30:19 +03:00
|
|
|
|
2024-10-23 01:05:48 +03:00
|
|
|
import base64
|
2024-06-23 16:13:02 +03:00
|
|
|
import json
|
2024-06-22 00:30:19 +03:00
|
|
|
import logging
|
2024-10-07 23:30:45 +03:00
|
|
|
import os
|
2024-06-22 00:30:19 +03:00
|
|
|
import threading
|
2025-07-29 21:38:13 +03:00
|
|
|
from json.decoder import JSONDecodeError
|
2025-06-24 20:41:11 +03:00
|
|
|
from multiprocessing.synchronize import Event as MpEvent
|
Use Fork-Server As Spawn Method (#18682)
* Set runtime
* Use count correctly
* Don't assume camera sizes
* Use separate zmq proxy for object detection
* Correct order
* Use forkserver
* Only store PID instead of entire process reference
* Cleanup
* Catch correct errors
* Fix typing
* Remove before_run from process util
The before_run never actually ran because:
You're right to suspect an issue with before_run not being called and a potential deadlock. The way you've implemented the run_wrapper using __getattribute__ for the run method of BaseProcess is a common pitfall in Python's multiprocessing, especially when combined with how multiprocessing.Process works internally.
Here's a breakdown of why before_run isn't being called and why you might be experiencing a deadlock:
The Problem: __getattribute__ and Process Serialization
When you create a multiprocessing.Process object and call start(), the multiprocessing module needs to serialize the process object (or at least enough of it to re-create the process in the new interpreter). It then pickles this serialized object and sends it to the newly spawned process.
The issue with your __getattribute__ implementation for run is that:
run is retrieved during serialization: When multiprocessing tries to pickle your Process object to send to the new process, it will likely access the run attribute. This triggers your __getattribute__ wrapper, which then tries to bind run_wrapper to self.
run_wrapper is bound to the parent process's self: The run_wrapper closure, when created in the parent process, captures the self (the Process instance) from the parent's memory space.
Deserialization creates a new object: In the child process, a new Process object is created by deserializing the pickled data. However, the run_wrapper method that was pickled still holds a reference to the self from the parent process. This is a subtle but critical distinction.
The child's run is not your wrapped run: When the child process starts, it internally calls its own run method. Because of the serialization and deserialization process, the run method that's ultimately executed in the child process is the original multiprocessing.Process.run or the Process.run if you had directly overridden it. Your __getattribute__ magic, which wraps run, isn't correctly applied to the Process object within the child's context.
* Cleanup
* Logging bugfix (#18465)
* use mp Manager to handle logging queues
A Python bug (https://github.com/python/cpython/issues/91555) was preventing logs from the embeddings maintainer process from printing. The bug is fixed in Python 3.14, but a viable workaround is to use the multiprocessing Manager, which better manages mp queues and causes the logging to work correctly.
* consolidate
* fix typing
* Fix typing
* Use global log queue
* Move to using process for logging
* Convert camera tracking to process
* Add more processes
* Finalize process
* Cleanup
* Cleanup typing
* Formatting
* Remove daemon
---------
Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-06-12 21:12:34 +03:00
|
|
|
from typing import Any, Union
|
2024-06-22 00:30:19 +03:00
|
|
|
|
2025-05-18 01:11:19 +03:00
|
|
|
import regex
|
2025-04-24 02:27:46 +03:00
|
|
|
from pathvalidate import ValidationError, sanitize_filename
|
2024-06-22 00:30:19 +03:00
|
|
|
|
2024-10-10 18:42:24 +03:00
|
|
|
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor
|
2024-06-22 00:30:19 +03:00
|
|
|
from frigate.config import FrigateConfig
|
2025-07-18 20:23:06 +03:00
|
|
|
from frigate.const import CONFIG_DIR, FACE_DIR, PROCESS_PRIORITY_HIGH
|
2025-01-10 22:44:30 +03:00
|
|
|
from frigate.data_processing.types import DataProcessorMetrics
|
2024-10-07 23:30:45 +03:00
|
|
|
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
|
Use Fork-Server As Spawn Method (#18682)
* Set runtime
* Use count correctly
* Don't assume camera sizes
* Use separate zmq proxy for object detection
* Correct order
* Use forkserver
* Only store PID instead of entire process reference
* Cleanup
* Catch correct errors
* Fix typing
* Remove before_run from process util
The before_run never actually ran because:
You're right to suspect an issue with before_run not being called and a potential deadlock. The way you've implemented the run_wrapper using __getattribute__ for the run method of BaseProcess is a common pitfall in Python's multiprocessing, especially when combined with how multiprocessing.Process works internally.
Here's a breakdown of why before_run isn't being called and why you might be experiencing a deadlock:
The Problem: __getattribute__ and Process Serialization
When you create a multiprocessing.Process object and call start(), the multiprocessing module needs to serialize the process object (or at least enough of it to re-create the process in the new interpreter). It then pickles this serialized object and sends it to the newly spawned process.
The issue with your __getattribute__ implementation for run is that:
run is retrieved during serialization: When multiprocessing tries to pickle your Process object to send to the new process, it will likely access the run attribute. This triggers your __getattribute__ wrapper, which then tries to bind run_wrapper to self.
run_wrapper is bound to the parent process's self: The run_wrapper closure, when created in the parent process, captures the self (the Process instance) from the parent's memory space.
Deserialization creates a new object: In the child process, a new Process object is created by deserializing the pickled data. However, the run_wrapper method that was pickled still holds a reference to the self from the parent process. This is a subtle but critical distinction.
The child's run is not your wrapped run: When the child process starts, it internally calls its own run method. Because of the serialization and deserialization process, the run method that's ultimately executed in the child process is the original multiprocessing.Process.run or the Process.run if you had directly overridden it. Your __getattribute__ magic, which wraps run, isn't correctly applied to the Process object within the child's context.
* Cleanup
* Logging bugfix (#18465)
* use mp Manager to handle logging queues
A Python bug (https://github.com/python/cpython/issues/91555) was preventing logs from the embeddings maintainer process from printing. The bug is fixed in Python 3.14, but a viable workaround is to use the multiprocessing Manager, which better manages mp queues and causes the logging to work correctly.
* consolidate
* fix typing
* Fix typing
* Use global log queue
* Move to using process for logging
* Convert camera tracking to process
* Add more processes
* Finalize process
* Cleanup
* Cleanup typing
* Formatting
* Remove daemon
---------
Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-06-12 21:12:34 +03:00
|
|
|
from frigate.models import Event
|
2024-10-10 18:42:24 +03:00
|
|
|
from frigate.util.builtin import serialize
|
2025-06-09 17:25:33 +03:00
|
|
|
from frigate.util.classification import kickoff_model_training
|
2025-06-13 20:09:51 +03:00
|
|
|
from frigate.util.process import FrigateProcess
|
2024-06-22 00:30:19 +03:00
|
|
|
|
2024-06-23 16:13:02 +03:00
|
|
|
from .maintainer import EmbeddingMaintainer
|
|
|
|
|
from .util import ZScoreNormalization
|
|
|
|
|
|
2024-06-22 00:30:19 +03:00
|
|
|
# Module-level logger shared by the definitions in this module.
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
|
|
Use Fork-Server As Spawn Method (#18682)
* Set runtime
* Use count correctly
* Don't assume camera sizes
* Use separate zmq proxy for object detection
* Correct order
* Use forkserver
* Only store PID instead of entire process reference
* Cleanup
* Catch correct errors
* Fix typing
* Remove before_run from process util
The before_run never actually ran because:
You're right to suspect an issue with before_run not being called and a potential deadlock. The way you've implemented the run_wrapper using __getattribute__ for the run method of BaseProcess is a common pitfall in Python's multiprocessing, especially when combined with how multiprocessing.Process works internally.
Here's a breakdown of why before_run isn't being called and why you might be experiencing a deadlock:
The Problem: __getattribute__ and Process Serialization
When you create a multiprocessing.Process object and call start(), the multiprocessing module needs to serialize the process object (or at least enough of it to re-create the process in the new interpreter). It then pickles this serialized object and sends it to the newly spawned process.
The issue with your __getattribute__ implementation for run is that:
run is retrieved during serialization: When multiprocessing tries to pickle your Process object to send to the new process, it will likely access the run attribute. This triggers your __getattribute__ wrapper, which then tries to bind run_wrapper to self.
run_wrapper is bound to the parent process's self: The run_wrapper closure, when created in the parent process, captures the self (the Process instance) from the parent's memory space.
Deserialization creates a new object: In the child process, a new Process object is created by deserializing the pickled data. However, the run_wrapper method that was pickled still holds a reference to the self from the parent process. This is a subtle but critical distinction.
The child's run is not your wrapped run: When the child process starts, it internally calls its own run method. Because of the serialization and deserialization process, the run method that's ultimately executed in the child process is the original multiprocessing.Process.run or the Process.run if you had directly overridden it. Your __getattribute__ magic, which wraps run, isn't correctly applied to the Process object within the child's context.
* Cleanup
* Logging bugfix (#18465)
* use mp Manager to handle logging queues
A Python bug (https://github.com/python/cpython/issues/91555) was preventing logs from the embeddings maintainer process from printing. The bug is fixed in Python 3.14, but a viable workaround is to use the multiprocessing Manager, which better manages mp queues and causes the logging to work correctly.
* consolidate
* fix typing
* Fix typing
* Use global log queue
* Move to using process for logging
* Convert camera tracking to process
* Add more processes
* Finalize process
* Cleanup
* Cleanup typing
* Formatting
* Remove daemon
---------
Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-06-12 21:12:34 +03:00
|
|
|
class EmbeddingProcess(FrigateProcess):
    """High-priority Frigate process that hosts the embeddings maintainer."""

    def __init__(
        self,
        config: FrigateConfig,
        metrics: DataProcessorMetrics | None,
        stop_event: MpEvent,
    ) -> None:
        super().__init__(
            stop_event,
            PROCESS_PRIORITY_HIGH,
            name="frigate.embeddings_manager",
            daemon=True,
        )
        # Kept on the instance so they are available in the child process.
        self.config = config
        self.metrics = metrics

    def run(self) -> None:
        """Child-process entry point: configure the process, then run the maintainer."""
        self.pre_run_setup(self.config.logger)
        EmbeddingMaintainer(self.config, self.metrics, self.stop_event).start()
|
2024-06-23 16:13:02 +03:00
|
|
|
|
|
|
|
|
|
|
|
|
|
class EmbeddingsContext:
    """Client-side interface to the embeddings process and the sqlite-vec database.

    Most operations are forwarded to the embeddings maintainer process via
    ``EmbeddingsRequestor``; vector similarity searches run directly against
    the local sqlite-vec tables.
    """

    def __init__(self, db: SqliteVecQueueDatabase):
        self.db = db
        self.thumb_stats = ZScoreNormalization()
        self.desc_stats = ZScoreNormalization()
        self.requestor = EmbeddingsRequestor()

        # load stats from disk
        stats_file = os.path.join(CONFIG_DIR, ".search_stats.json")
        try:
            with open(stats_file, "r") as f:
                data = json.loads(f.read())
                self.thumb_stats.from_dict(data["thumb_stats"])
                self.desc_stats.from_dict(data["desc_stats"])
        except FileNotFoundError:
            # First run: no persisted stats yet.
            pass
        except JSONDecodeError:
            # Corrupt stats file: warn and truncate so the next run starts clean.
            logger.warning("Failed to decode semantic search stats, clearing file")
            try:
                with open(stats_file, "w") as f:
                    f.write("")
            except OSError as e:
                logger.error(f"Failed to clear corrupted stats file: {e}")

    def stop(self):
        """Write the stats to disk as JSON on exit."""
        contents = {
            "thumb_stats": self.thumb_stats.to_dict(),
            "desc_stats": self.desc_stats.to_dict(),
        }
        with open(os.path.join(CONFIG_DIR, ".search_stats.json"), "w") as f:
            json.dump(contents, f)
        self.requestor.stop()

    def _vec_search(
        self,
        table: str,
        embedding_column: str,
        query_embedding,
        event_ids: list[str],
    ) -> list[tuple[str, float]]:
        """Run a KNN query (k=100) against a sqlite-vec virtual table.

        Returns (id, distance) tuples ordered by ascending distance.
        """
        sql_query = f"""
            SELECT
                id,
                distance
            FROM {table}
            WHERE {embedding_column} MATCH ?
                AND k = 100
        """

        # Add the IN clause if event_ids is provided and not empty
        # this is the only filter supported by sqlite-vec as of 0.1.3
        # but it seems to be broken in this version
        if event_ids:
            sql_query += " AND id IN ({})".format(",".join("?" * len(event_ids)))

        # order by distance DESC is not implemented in this version of sqlite-vec
        # when it's implemented, we can use cosine similarity
        sql_query += " ORDER BY distance"

        parameters = [query_embedding] + event_ids if event_ids else [query_embedding]

        return self.db.execute_sql(sql_query, parameters).fetchall()

    def search_thumbnail(
        self, query: Union[Event, str], event_ids: list[str] = None
    ) -> list[tuple[str, float]]:
        """Find events whose thumbnail embedding is nearest to *query*.

        *query* may be an Event (use/generate its stored thumbnail embedding)
        or a text string (embed via the generate_search request).
        """
        if query.__class__ == Event:
            cursor = self.db.execute_sql(
                """
                SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
                """,
                [query.id],
            )

            row = cursor.fetchone() if cursor else None

            if row:
                query_embedding = row[0]
            else:
                # If no embedding found, generate it and return it
                data = self.requestor.send_data(
                    EmbeddingsRequestEnum.embed_thumbnail.value,
                    {"id": str(query.id), "thumbnail": str(query.thumbnail)},
                )

                if not data:
                    return []

                query_embedding = serialize(data)
        else:
            data = self.requestor.send_data(
                EmbeddingsRequestEnum.generate_search.value, query
            )

            if not data:
                return []

            query_embedding = serialize(data)

        return self._vec_search(
            "vec_thumbnails", "thumbnail_embedding", query_embedding, event_ids
        )

    def search_description(
        self, query_text: str, event_ids: list[str] = None
    ) -> list[tuple[str, float]]:
        """Find events whose description embedding is nearest to *query_text*."""
        data = self.requestor.send_data(
            EmbeddingsRequestEnum.generate_search.value, query_text
        )

        if not data:
            return []

        query_embedding = serialize(data)

        return self._vec_search(
            "vec_descriptions", "description_embedding", query_embedding, event_ids
        )

    def register_face(self, face_name: str, image_data: bytes) -> dict[str, Any]:
        """Register *image_data* as an example of the face *face_name*."""
        return self.requestor.send_data(
            EmbeddingsRequestEnum.register_face.value,
            {
                "face_name": face_name,
                "image": base64.b64encode(image_data).decode("ASCII"),
            },
        )

    def recognize_face(self, image_data: bytes) -> dict[str, Any]:
        """Run face recognition on a raw image."""
        return self.requestor.send_data(
            EmbeddingsRequestEnum.recognize_face.value,
            {
                "image": base64.b64encode(image_data).decode("ASCII"),
            },
        )

    def get_face_ids(self, name: str) -> list[str]:
        """Return description ids whose id contains *name*.

        Uses a parameterized LIKE so a crafted name cannot inject SQL.
        """
        sql_query = """
            SELECT
                id
            FROM vec_descriptions
            WHERE id LIKE ?
        """

        return self.db.execute_sql(sql_query, [f"%{name}%"]).fetchall()

    def reprocess_face(self, face_file: str) -> dict[str, Any]:
        """Re-run recognition on a previously saved face image file."""
        return self.requestor.send_data(
            EmbeddingsRequestEnum.reprocess_face.value, {"image_file": face_file}
        )

    def clear_face_classifier(self) -> None:
        """Ask the embeddings process to drop its trained face classifier."""
        self.requestor.send_data(
            EmbeddingsRequestEnum.clear_face_classifier.value, None
        )

    def delete_face_ids(self, face: str, ids: list[str]) -> None:
        """Delete the given image files for *face* and invalidate the classifier."""
        folder = os.path.join(FACE_DIR, face)
        for id in ids:
            file_path = os.path.join(folder, id)

            if os.path.isfile(file_path):
                os.unlink(file_path)

        # remove the folder when it is empty, except the shared "train" folder
        if face != "train" and len(os.listdir(folder)) == 0:
            os.rmdir(folder)

        self.requestor.send_data(
            EmbeddingsRequestEnum.clear_face_classifier.value, None
        )

    def rename_face(self, old_name: str, new_name: str) -> None:
        """Rename a face's folder, validating both names against traversal/injection.

        Raises ValueError on any invalid or unsafe name, or if *old_name* does
        not exist.
        """
        # letters, digits, spaces, apostrophes, underscores, hyphens; 1-50 chars
        valid_name_pattern = r"^[\p{L}\p{N}\s'_-]{1,50}$"

        try:
            sanitized_old_name = sanitize_filename(old_name, replacement_text="_")
            sanitized_new_name = sanitize_filename(new_name, replacement_text="_")
        except ValidationError as e:
            raise ValueError(f"Invalid face name: {str(e)}")

        if not regex.match(valid_name_pattern, old_name):
            raise ValueError(f"Invalid old face name: {old_name}")
        if not regex.match(valid_name_pattern, new_name):
            raise ValueError(f"Invalid new face name: {new_name}")

        if sanitized_old_name != old_name:
            raise ValueError(f"Old face name contains invalid characters: {old_name}")
        if sanitized_new_name != new_name:
            raise ValueError(f"New face name contains invalid characters: {new_name}")

        old_path = os.path.normpath(os.path.join(FACE_DIR, old_name))
        new_path = os.path.normpath(os.path.join(FACE_DIR, new_name))

        # Prevent path traversal
        if not old_path.startswith(
            os.path.normpath(FACE_DIR)
        ) or not new_path.startswith(os.path.normpath(FACE_DIR)):
            raise ValueError("Invalid path detected")

        if not os.path.exists(old_path):
            raise ValueError(f"Face {old_name} not found.")

        os.rename(old_path, new_path)

        self.requestor.send_data(
            EmbeddingsRequestEnum.clear_face_classifier.value, None
        )

    def update_description(self, event_id: str, description: str) -> None:
        """Embed and upsert a new description for *event_id*."""
        self.requestor.send_data(
            EmbeddingsRequestEnum.embed_description.value,
            {"id": event_id, "description": description},
        )

    def reprocess_plate(self, event: dict[str, Any]) -> dict[str, Any]:
        """Re-run license plate processing for an event."""
        return self.requestor.send_data(
            EmbeddingsRequestEnum.reprocess_plate.value, {"event": event}
        )

    def reindex_embeddings(self) -> dict[str, Any]:
        """Request a full reindex of all embeddings."""
        return self.requestor.send_data(EmbeddingsRequestEnum.reindex.value, {})

    def start_classification_training(self, model_name: str) -> dict[str, Any]:
        """Kick off training of *model_name* in a background thread and return immediately."""
        threading.Thread(
            target=kickoff_model_training,
            args=(self.requestor, model_name),
            daemon=True,
        ).start()
        return {"success": True, "message": f"Began training {model_name} model."}

    def transcribe_audio(self, event: dict[str, Any]) -> dict[str, Any]:
        """Request an audio transcription for an event."""
        return self.requestor.send_data(
            EmbeddingsRequestEnum.transcribe_audio.value, {"event": event}
        )

    def generate_description_embedding(self, text: str) -> None:
        """Embed *text* without upserting it into the database."""
        return self.requestor.send_data(
            EmbeddingsRequestEnum.embed_description.value,
            {"id": None, "description": text, "upsert": False},
        )

    def generate_image_embedding(self, event_id: str, thumbnail: bytes) -> None:
        """Embed a thumbnail without upserting it into the database."""
        return self.requestor.send_data(
            EmbeddingsRequestEnum.embed_thumbnail.value,
            {"id": str(event_id), "thumbnail": str(thumbnail), "upsert": False},
        )

    def generate_review_summary(self, start_ts: float, end_ts: float) -> str | None:
        """Request a summary of review items between the two timestamps."""
        return self.requestor.send_data(
            EmbeddingsRequestEnum.summarize_review.value,
            {"start_ts": start_ts, "end_ts": end_ts},
        )
|