Compare commits

Comparing `1daef45532...75d655df23` (124 commits, newest first):

```
75d655df23  9d4aac2b8e  aa09132dfd  24766ce427  97b29d177a  1a75251ffb
048475e750  1b57fb15a7  cd606ad240  de2144f158  e79ff9a079  fe47620153
8520ade5c4  1c7ed45f21  130c7c9eec  26e630aa8c  a478da45a3  694f72d577
1e42cedf9e  a35a0fc8ba  10b7ffe3d1  42c6cfc9a2  e8bf570d21  cdbd9038b8
1e05abb0ea  70d1c2e041  f4d128b3ee  dd64ffca6c  fce1f78bdc  69ca63d608
111b83e8e3  198733b729  03d9fd6f19  f90a54f1d9  bbec4c4a60  9fe16d7b17
dc886b11f3  3bbe24f5f8  2a9c028f55  aa8b423b68  2d8b6c8301  84c3f98a09
c87f89fcc1  815303922d  224cbdc2d6  3f9b153758  8e8346099e  b0527df3c7
301e0a1a3a  213a1fbd00  fbf4388b37  097673b845  d56cf59b9a  de066d0062
f1a05d0f9b  a623150811  e4eac4ac81  c371fc0c87  99a363c047  a374a60756
d41ee4ff88  c99ada8f6a  01452e4c51  ef19332fe5  530b69b877  a15399fed5
88a2f6c991  dca04cbe9c  f0de8e7643  ef4e13089c  ee9a734ebd  0c12677f7b
72ede19bee  3d3e43da96  b3140f666c  eb6187b5fc  6fef71ef46  18377ed716
165e1e4e64  a4790586ad  21623113b5  1d8915b0cd  7e5c117cd6  229b7ead78
bd9ad3c50a  6f66f681d1  77773d133d  1b3edf8798  99e81eba95  44c91adcee
81932cd399  4b1054ee05  945317b44e  32f1d85a6f  35ce275071  8048168814
a510ea9036  e1bc7360ad  4638c22c16  81faa8899d  043bd9e6ee  9f0b6004f2
b751228476  3b2d136665  e7394d0dc1  2e288109f4  256817d5c2  84409eab7e
9e83888133  85f7138361  fc1cad2872  5529432856  59963fc47e  31fa87ce73
740c618240  4f76b34f44  d44340eca6  aff82f809c  1e50d83d06  36fb27ef56
9937a7cc3d  7aac6b4f21  338b681ed0  7af9209e1e
```
**`.github/workflows/ci.yml`** (2 changed lines)

````diff
@@ -15,7 +15,7 @@ concurrency:
   cancel-in-progress: true

 env:
-  PYTHON_VERSION: 3.9
+  PYTHON_VERSION: 3.11

 jobs:
   amd64_build:
````
**`.gitignore`** (1 changed line)

````diff
@@ -15,6 +15,7 @@ frigate/version.py
 web/build
 web/node_modules
 web/coverage
+web/.env
 core
 !/web/**/*.ts
 .idea/*
````
**`LICENSE`** (4 changed lines)

````diff
@@ -1,6 +1,6 @@
 The MIT License

-Copyright (c) 2020 Blake Blackshear
+Copyright (c) 2025 Frigate LLC (Frigate™)

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+SOFTWARE.
````
**`Makefile`** (1 changed line)

````diff
@@ -14,6 +14,7 @@ push-boards: $(BOARDS:%=push-%)

 version:
	echo 'VERSION = "$(VERSION)-$(COMMIT_HASH)"' > frigate/version.py
+	echo 'VITE_GIT_COMMIT_HASH=$(COMMIT_HASH)' > web/.env

 local: version
	docker buildx build --target=frigate --file docker/main/Dockerfile . \
````
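The `version` target now stamps the commit hash into both the Python backend and the web frontend. As an illustration only (the version and hash values here are hypothetical, not taken from this comparison), the two generated files would contain:

```python
# frigate/version.py — generated by `make version` and importable from Python:
VERSION = "0.17.0-7af9209"

# web/.env is dotenv syntax rather than Python, so it is shown as a comment:
#   VITE_GIT_COMMIT_HASH=7af9209
```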
**`README.md`** (21 changed lines)

````diff
@@ -1,8 +1,10 @@
 <p align="center">
-  <img align="center" alt="logo" src="docs/static/img/frigate.png">
+  <img align="center" alt="logo" src="docs/static/img/branding/frigate.png">
 </p>

-# Frigate - NVR With Realtime Object Detection for IP Cameras
+# Frigate NVR™ - Realtime Object Detection for IP Cameras

+[](https://opensource.org/licenses/MIT)
+
 <a href="https://hosted.weblate.org/engage/frigate-nvr/">
   <img src="https://hosted.weblate.org/widget/frigate-nvr/language-badge.svg" alt="Translation status" />
@@ -12,7 +14,7 @@
 A complete and local NVR designed for [Home Assistant](https://www.home-assistant.io) with AI object detection. Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras.

-Use of a GPU or AI accelerator such as a [Google Coral](https://coral.ai/products/) or [Hailo](https://hailo.ai/) is highly recommended. AI accelerators will outperform even the best CPUs with very little overhead.
+Use of a GPU or AI accelerator is highly recommended. AI accelerators will outperform even the best CPUs with very little overhead. See Frigate's supported [object detectors](https://docs.frigate.video/configuration/object_detectors/).

 - Tight integration with Home Assistant via a [custom component](https://github.com/blakeblackshear/frigate-hass-integration)
 - Designed to minimize resource use and maximize performance by only looking for objects when and where it is necessary
@@ -33,6 +35,15 @@ View the documentation at https://docs.frigate.video

 If you would like to make a donation to support development, please use [Github Sponsors](https://github.com/sponsors/blakeblackshear).

+## License
+
+This project is licensed under the **MIT License**.
+
+- **Code:** The source code, configuration files, and documentation in this repository are available under the [MIT License](LICENSE). You are free to use, modify, and distribute the code as long as you include the original copyright notice.
+- **Trademarks:** The "Frigate" name, the "Frigate NVR" brand, and the Frigate logo are **trademarks of Frigate LLC** and are **not** covered by the MIT License.
+
+Please see our [Trademark Policy](TRADEMARK.md) for details on acceptable use of our brand assets.
+
 ## Screenshots

 ### Live dashboard
@@ -66,3 +77,7 @@ We use [Weblate](https://hosted.weblate.org/projects/frigate-nvr/) to support la
 <a href="https://hosted.weblate.org/engage/frigate-nvr/">
   <img src="https://hosted.weblate.org/widget/frigate-nvr/multi-auto.svg" alt="Translation status" />
 </a>
+
+---
+
+**Copyright © 2025 Frigate LLC.**
````
**`README_CN.md`** (53 changed lines)

````diff
@@ -1,28 +1,31 @@
 <p align="center">
-  <img align="center" alt="logo" src="docs/static/img/frigate.png">
+  <img align="center" alt="logo" src="docs/static/img/branding/frigate.png">
 </p>

-# Frigate - 一个具有实时目标检测的本地NVR
+# Frigate NVR™ - 一个具有实时目标检测的本地 NVR

-[English](https://github.com/blakeblackshear/frigate) | \[简体中文\]
+[English](https://github.com/blakeblackshear/frigate) | \[简体中文\]
+
+[](https://opensource.org/licenses/MIT)

 <a href="https://hosted.weblate.org/engage/frigate-nvr/-/zh_Hans/">
   <img src="https://hosted.weblate.org/widget/frigate-nvr/-/zh_Hans/svg-badge.svg" alt="翻译状态" />
 </a>

-一个完整的本地网络视频录像机(NVR),专为[Home Assistant](https://www.home-assistant.io)设计,具备AI物体检测功能。使用OpenCV和TensorFlow在本地为IP摄像头执行实时物体检测。
+一个完整的本地网络视频录像机(NVR),专为[Home Assistant](https://www.home-assistant.io)设计,具备 AI 目标/物体检测功能。使用 OpenCV 和 TensorFlow 在本地为 IP 摄像头执行实时物体检测。

-强烈推荐使用GPU或者AI加速器(例如[Google Coral加速器](https://coral.ai/products/) 或者 [Hailo](https://hailo.ai/))。它们的性能甚至超过目前的顶级CPU,并且可以以极低的耗电实现更优的性能。
-- 通过[自定义组件](https://github.com/blakeblackshear/frigate-hass-integration)与Home Assistant紧密集成
-- 设计上通过仅在必要时和必要地点寻找物体,最大限度地减少资源使用并最大化性能
+强烈推荐使用 GPU 或者 AI 加速器(例如[Google Coral 加速器](https://coral.ai/products/) 或者 [Hailo](https://hailo.ai/)等)。它们的运行效率远远高于现在的顶级 CPU,并且功耗也极低。
+
+- 通过[自定义组件](https://github.com/blakeblackshear/frigate-hass-integration)与 Home Assistant 紧密集成
+- 设计上通过仅在必要时和必要地点寻找目标,最大限度地减少资源使用并最大化性能
 - 大量利用多进程处理,强调实时性而非处理每一帧
-- 使用非常低开销的运动检测来确定运行物体检测的位置
-- 使用TensorFlow进行物体检测,运行在单独的进程中以达到最大FPS
-- 通过MQTT进行通信,便于集成到其他系统中
+- 使用非常低开销的画面变动检测(也叫运动检测)来确定运行目标检测的位置
+- 使用 TensorFlow 进行目标检测,并运行在单独的进程中以达到最大 FPS
+- 通过 MQTT 进行通信,便于集成到其他系统中
 - 根据检测到的物体设置保留时间进行视频录制
-- 24/7全天候录制
-- 通过RTSP重新流传输以减少摄像头的连接数
-- 支持WebRTC和MSE,实现低延迟的实时观看
+- 24/7 全天候录制
+- 通过 RTSP 重新流传输以减少摄像头的连接数
+- 支持 WebRTC 和 MSE,实现低延迟的实时观看

 ## 社区中文翻译文档
@@ -32,39 +35,55 @@

 如果您想通过捐赠支持开发,请使用 [Github Sponsors](https://github.com/sponsors/blakeblackshear)。

+## 协议
+
+本项目采用 **MIT 许可证**授权。
+
+**代码部分**:本代码库中的源代码、配置文件和文档均遵循 [MIT 许可证](LICENSE)。您可以自由使用、修改和分发这些代码,但必须保留原始版权声明。
+
+**商标部分**:“Frigate”名称、“Frigate NVR”品牌以及 Frigate 的 Logo 为 **Frigate LLC 的商标**,**不在** MIT 许可证覆盖范围内。
+
+有关品牌资产的规范使用详情,请参阅我们的[《商标政策》](TRADEMARK.md)。
+
 ## 截图

 ### 实时监控面板

 <div>
 <img width="800" alt="实时监控面板" src="https://github.com/blakeblackshear/frigate/assets/569905/5e713cb9-9db5-41dc-947a-6937c3bc376e">
 </div>

 ### 简单的核查工作流程

 <div>
 <img width="800" alt="简单的审查工作流程" src="https://github.com/blakeblackshear/frigate/assets/569905/6fed96e8-3b18-40e5-9ddc-31e6f3c9f2ff">
 </div>

 ### 多摄像头可按时间轴查看

 <div>
 <img width="800" alt="多摄像头可按时间轴查看" src="https://github.com/blakeblackshear/frigate/assets/569905/d6788a15-0eeb-4427-a8d4-80b93cae3d74">
 </div>

 ### 内置遮罩和区域编辑器

 <div>
 <img width="800" alt="内置遮罩和区域编辑器" src="https://github.com/blakeblackshear/frigate/assets/569905/d7885fc3-bfe6-452f-b7d0-d957cb3e31f5">
 </div>

 ## 翻译

 我们使用 [Weblate](https://hosted.weblate.org/projects/frigate-nvr/) 平台提供翻译支持,欢迎参与进来一起完善。

 ## 非官方中文讨论社区

-欢迎加入中文讨论QQ群:[1043861059](https://qm.qq.com/q/7vQKsTmSz)
+欢迎加入中文讨论 QQ 群:[1043861059](https://qm.qq.com/q/7vQKsTmSz)
+
+Bilibili:https://space.bilibili.com/3546894915602564
+
+## 中文社区赞助商
+
+[](https://edgeone.ai/zh?from=github)
+本项目 CDN 加速及安全防护由 Tencent EdgeOne 赞助
+
+---
+
+**Copyright © 2025 Frigate LLC.**
````
**`TRADEMARK.md`** (new file, 58 lines)

````markdown
# Trademark Policy

**Last Updated:** November 2025

This document outlines the policy regarding the use of the trademarks associated with the Frigate NVR project.

## 1. Our Trademarks

The following terms and visual assets are trademarks (the "Marks") of **Frigate LLC**:

- **Frigate™**
- **Frigate NVR™**
- **Frigate+™**
- **The Frigate Logo**

**Note on Common Law Rights:**
Frigate LLC asserts all common law rights in these Marks. The absence of a federal registration symbol (®) does not constitute a waiver of our intellectual property rights.

## 2. Interaction with the MIT License

The software in this repository is licensed under the [MIT License](LICENSE).

**Crucial Distinction:**

- The **Code** is free to use, modify, and distribute under the MIT terms.
- The **Brand (Trademarks)** is **NOT** licensed under MIT.

You may not use the Marks in any way that is not explicitly permitted by this policy or by written agreement with Frigate LLC.

## 3. Acceptable Use

You may use the Marks without prior written permission in the following specific contexts:

- **Referential Use:** To truthfully refer to the software (e.g., _"I use Frigate NVR for my home security"_).
- **Compatibility:** To indicate that your product or project works with the software (e.g., _"MyPlugin for Frigate NVR"_ or _"Compatible with Frigate"_).
- **Commentary:** In news articles, blog posts, or tutorials discussing the software.

## 4. Prohibited Use

You may **NOT** use the Marks in the following ways:

- **Commercial Products:** You may not use "Frigate" in the name of a commercial product, service, or app (e.g., selling an app named _"Frigate Viewer"_ is prohibited).
- **Implying Affiliation:** You may not use the Marks in a way that suggests your project is official, sponsored by, or endorsed by Frigate LLC.
- **Confusing Forks:** If you fork this repository to create a derivative work, you **must** remove the Frigate logo and rename your project to avoid user confusion. You cannot distribute a modified version of the software under the name "Frigate".
- **Domain Names:** You may not register domain names containing "Frigate" that are likely to confuse users (e.g., `frigate-official-support.com`).

## 5. The Logo

The Frigate logo (the bird icon) is a visual trademark.

- You generally **cannot** use the logo on your own website or product packaging without permission.
- If you are building a dashboard or integration that interfaces with Frigate, you may use the logo only to represent the Frigate node/service, provided it does not imply you _are_ Frigate.

## 6. Questions & Permissions

If you are unsure if your intended use violates this policy, or if you wish to request a specific license to use the Marks (e.g., for a partnership), please contact us at:

**help@frigate.video**
````
````diff
@@ -5,21 +5,27 @@ set -euxo pipefail
 SQLITE3_VERSION="3.46.1"
 PYSQLITE3_VERSION="0.5.3"

+# Install libsqlite3-dev if not present (needed for some base images like NVIDIA TensorRT)
+if ! dpkg -l | grep -q libsqlite3-dev; then
+    echo "Installing libsqlite3-dev for compilation..."
+    apt-get update && apt-get install -y libsqlite3-dev && rm -rf /var/lib/apt/lists/*
+fi
+
 # Fetch the pre-built sqlite amalgamation instead of building from source
 if [[ ! -d "sqlite" ]]; then
     mkdir sqlite
     cd sqlite

     # Download the pre-built amalgamation from sqlite.org
     # For SQLite 3.46.1, the amalgamation version is 3460100
     SQLITE_AMALGAMATION_VERSION="3460100"

     wget https://www.sqlite.org/2024/sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}.zip -O sqlite-amalgamation.zip
     unzip sqlite-amalgamation.zip
     mv sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}/* .
     rmdir sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}
     rm sqlite-amalgamation.zip

     cd ../
 fi
````
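As the script's comment notes, the amalgamation archive name encodes the release version with two digits per component plus a trailing `00`. A quick sketch of that mapping (an illustration, not part of the build script):

```python
def amalgamation_version(release: str) -> str:
    """Encode an SQLite release string (X.Y.Z) as sqlite.org's amalgamation number."""
    major, minor, patch = (int(part) for part in release.split("."))
    # X, then YY and ZZ zero-padded, then a literal "00" suffix.
    return f"{major}{minor:02d}{patch:02d}00"

assert amalgamation_version("3.46.1") == "3460100"  # matches SQLITE_AMALGAMATION_VERSION
```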
````diff
@@ -145,6 +145,6 @@ rm -rf /var/lib/apt/lists/*

 # Install yq, for frigate-prepare and go2rtc echo source
 curl -fsSL \
-    "https://github.com/mikefarah/yq/releases/download/v4.33.3/yq_linux_$(dpkg --print-architecture)" \
+    "https://github.com/mikefarah/yq/releases/download/v4.48.2/yq_linux_$(dpkg --print-architecture)" \
     --output /usr/local/bin/yq
 chmod +x /usr/local/bin/yq
````
````diff
@@ -2,9 +2,9 @@
 set -e

 # Download the MxAccl for Frigate github release
-wget https://github.com/memryx/mx_accl_frigate/archive/refs/heads/main.zip -O /tmp/mxaccl.zip
+wget https://github.com/memryx/mx_accl_frigate/archive/refs/tags/v2.1.0.zip -O /tmp/mxaccl.zip
 unzip /tmp/mxaccl.zip -d /tmp
-mv /tmp/mx_accl_frigate-main /opt/mx_accl_frigate
+mv /tmp/mx_accl_frigate-2.1.0 /opt/mx_accl_frigate
 rm /tmp/mxaccl.zip

 # Install Python dependencies
````
````diff
@@ -56,7 +56,7 @@ pywebpush == 2.0.*
 # alpr
 pyclipper == 1.3.*
 shapely == 2.0.*
-Levenshtein==0.26.*
+rapidfuzz==3.12.*
 # HailoRT Wheels
 appdirs==1.4.*
 argcomplete==2.0.*
@@ -81,3 +81,5 @@ librosa==0.11.*
 soundfile==0.13.*
 # DeGirum detector
 degirum == 0.16.*
+# Memory profiling
+memray == 1.15.*
````
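Since this swaps the Levenshtein dependency for rapidfuzz, note that rapidfuzz ships an equivalent edit-distance API; a quick illustrative sketch (not part of the diff):

```python
from rapidfuzz.distance import Levenshtein

# Same edit-distance semantics the Levenshtein package provided:
assert Levenshtein.distance("kitten", "sitting") == 3

# rapidfuzz also offers a normalized similarity in [0, 1], handy for
# fuzzy-matching OCR'd license plate strings:
print(Levenshtein.normalized_similarity("AB123CD", "AB123CO"))
```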
````diff
@@ -320,6 +320,12 @@ http {
             add_header Cache-Control "public";
         }

+        location /fonts/ {
+            access_log off;
+            expires 1y;
+            add_header Cache-Control "public";
+        }
+
         location /locales/ {
             access_log off;
             add_header Cache-Control "public";
````
````diff
@@ -24,10 +24,13 @@ echo "Adding MemryX GPG key and repository..."
 wget -qO- https://developer.memryx.com/deb/memryx.asc | sudo tee /etc/apt/trusted.gpg.d/memryx.asc >/dev/null
 echo 'deb https://developer.memryx.com/deb stable main' | sudo tee /etc/apt/sources.list.d/memryx.list >/dev/null

-# Update and install memx-drivers
-echo "Installing memx-drivers..."
+# Update and install specific SDK 2.1 packages
+echo "Installing MemryX SDK 2.1 packages..."
 sudo apt update
-sudo apt install -y memx-drivers
+sudo apt install -y memx-drivers=2.1.* memx-accl=2.1.* mxa-manager=2.1.*
+
+# Hold packages to prevent automatic upgrades
+sudo apt-mark hold memx-drivers memx-accl mxa-manager

 # ARM-specific board setup
 if [[ "$arch" == "aarch64" || "$arch" == "arm64" ]]; then
@@ -37,11 +40,5 @@ fi

 echo -e "\n\n\033[1;31mYOU MUST RESTART YOUR COMPUTER NOW\033[0m\n\n"

-# Install other runtime packages
-packages=("memx-accl" "mxa-manager")
-for pkg in "${packages[@]}"; do
-    echo "Installing $pkg..."
-    sudo apt install -y "$pkg"
-done
-
-echo "MemryX installation complete!"
+echo "MemryX SDK 2.1 installation complete!"
````
````diff
@@ -15,7 +15,7 @@ ARG AMDGPU

 RUN apt update -qq && \
     apt install -y wget gpg && \
-    wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.0.2/ubuntu/jammy/amdgpu-install_7.0.2.70002-1_all.deb && \
+    wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.1.1/ubuntu/jammy/amdgpu-install_7.1.1.70101-1_all.deb && \
     apt install -y ./rocm.deb && \
     apt update && \
     apt install -qq -y rocm
````
````diff
@@ -1 +1 @@
-onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.0.2/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl
+onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.1.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl
````
````diff
@@ -2,7 +2,7 @@ variable "AMDGPU" {
   default = "gfx900"
 }
 variable "ROCM" {
-  default = "7.0.2"
+  default = "7.1.1"
 }
 variable "HSA_OVERRIDE_GFX_VERSION" {
   default = ""
````
````diff
@@ -21,7 +21,7 @@ FROM deps AS frigate-tensorrt
 ARG PIP_BREAK_SYSTEM_PACKAGES

 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
-    pip3 uninstall -y onnxruntime tensorflow-cpu \
+    pip3 uninstall -y onnxruntime \
     && pip3 install -U /deps/trt-wheels/*.whl

 COPY --from=rootfs / /
````
````diff
@@ -112,7 +112,7 @@ RUN apt-get update \
     && apt-get install -y protobuf-compiler libprotobuf-dev \
     && rm -rf /var/lib/apt/lists/*
 RUN --mount=type=bind,source=docker/tensorrt/requirements-models-arm64.txt,target=/requirements-tensorrt-models.txt \
-    pip3 wheel --wheel-dir=/trt-model-wheels -r /requirements-tensorrt-models.txt
+    pip3 wheel --wheel-dir=/trt-model-wheels --no-deps -r /requirements-tensorrt-models.txt

 FROM wget AS jetson-ffmpeg
 ARG DEBIAN_FRONTEND
@@ -145,7 +145,8 @@ COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     --mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \
     pip3 uninstall -y onnxruntime \
-    && pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \
+    && pip3 install -U /deps/trt-wheels/*.whl \
+    && pip3 install -U /deps/trt-model-wheels/*.whl \
     && ldconfig

 WORKDIR /opt/frigate/
````
````diff
@@ -13,7 +13,6 @@ nvidia_cusolver_cu12==11.6.3.*; platform_machine == 'x86_64'
 nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64'
 nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64'
 nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64'
-tensorflow==2.19.*; platform_machine == 'x86_64'
 onnx==1.16.*; platform_machine == 'x86_64'
 onnxruntime-gpu==1.22.*; platform_machine == 'x86_64'
 protobuf==3.20.3; platform_machine == 'x86_64'
````

````diff
@@ -1 +1,2 @@
 cuda-python == 12.6.*; platform_machine == 'aarch64'
+numpy == 1.26.*; platform_machine == 'aarch64'
````
````diff
@@ -25,7 +25,7 @@ Examples of available modules are:

 - `frigate.app`
 - `frigate.mqtt`
-- `frigate.object_detection`
+- `frigate.object_detection.base`
 - `detector.<detector_name>`
 - `watchdog.<camera_name>`
 - `ffmpeg.<camera_name>.<sorted_roles>` NOTE: All FFmpeg logs are sent as `error` level.
````
````diff
@@ -53,6 +53,17 @@ environment_vars:
   VARIABLE_NAME: variable_value
 ```

+#### TensorFlow Thread Configuration
+
+If you encounter thread creation errors during classification model training, you can limit TensorFlow's thread usage:
+
+```yaml
+environment_vars:
+  TF_INTRA_OP_PARALLELISM_THREADS: "2" # Threads within operations (0 = use default)
+  TF_INTER_OP_PARALLELISM_THREADS: "2" # Threads between operations (0 = use default)
+  TF_DATASET_THREAD_POOL_SIZE: "2" # Data pipeline threads (0 = use default)
+```
+
 ### `database`

 Tracked object and recording information is managed in a sqlite database at `/config/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant.
````
````diff
@@ -247,7 +258,7 @@ curl -X POST http://frigate_host:5000/api/config/save -d @config.json

 if you'd like you can use your yaml config directly by using [`yq`](https://github.com/mikefarah/yq) to convert it to json:

 ```bash
-yq r -j config.yml | curl -X POST http://frigate_host:5000/api/config/save -d @-
+yq -o=json '.' config.yaml | curl -X POST 'http://frigate_host:5000/api/config/save?save_option=saveonly' --data-binary @-
 ```

 ### Via Command Line
````
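If you prefer to avoid yq entirely, the same convert-and-POST round trip can be sketched in Python (assuming the `pyyaml` and `requests` packages; the endpoint and `save_option=saveonly` parameter are taken from the curl example above):

```python
import requests
import yaml

# Parse the YAML config so it can be re-serialized as JSON for the API.
with open("config.yaml") as f:
    config = yaml.safe_load(f)

# save_option=saveonly saves the config without applying it immediately,
# mirroring the curl example above.
response = requests.post(
    "http://frigate_host:5000/api/config/save",
    params={"save_option": "saveonly"},
    json=config,
)
response.raise_for_status()
print(response.text)
```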
````diff
@@ -75,7 +75,13 @@ audio:

 ### Audio Transcription

-Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI’s open-source Whisper models via `faster-whisper`. To enable transcription, enable it in your config. Note that audio detection must also be enabled as described above in order to use audio transcription features.
+Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI’s open-source Whisper models via `faster-whisper`. The goal of this feature is to support Semantic Search for `speech` audio events. Frigate is not intended to act as a continuous, fully-automatic speech transcription service — automatically transcribing all speech (or queuing many audio events for transcription) requires substantial CPU (or GPU) resources and is impractical on most systems. For this reason, transcriptions for events are initiated manually from the UI or the API rather than being run continuously in the background.
+
+Transcription accuracy also depends heavily on the quality of your camera's microphone and recording conditions. Many cameras use inexpensive microphones, and distance to the speaker, low audio bitrate, or background noise can significantly reduce transcription quality. If you need higher accuracy, more robust long-running queues, or large-scale automatic transcription, consider using the HTTP API in combination with an automation platform and a cloud transcription service.
+
+#### Configuration
+
+To enable transcription, enable it in your config. Note that audio detection must also be enabled as described above in order to use audio transcription features.

 ```yaml
 audio_transcription:
@@ -144,4 +150,26 @@ In order to use transcription and translation for past events, you must enable a

 The transcribed/translated speech will appear in the description box in the Tracked Object Details pane. If Semantic Search is enabled, embeddings are generated for the transcription text and are fully searchable using the description search type.

-Recorded `speech` events will always use a `whisper` model, regardless of the `model_size` config setting. Without a GPU, generating transcriptions for longer `speech` events may take a fair amount of time, so be patient.
+:::note
+
+Only one `speech` event may be transcribed at a time. Frigate does not automatically transcribe `speech` events or implement a queue for long-running transcription model inference.
+
+:::
+
+Recorded `speech` events will always use a `whisper` model, regardless of the `model_size` config setting. Without a supported Nvidia GPU, generating transcriptions for longer `speech` events may take a fair amount of time, so be patient.
+
+#### FAQ
+
+1. Why doesn't Frigate automatically transcribe all `speech` events?
+
+   Frigate does not implement a queue mechanism for speech transcription, and adding one is not trivial. A proper queue would need backpressure, prioritization, memory/disk buffering, retry logic, crash recovery, and safeguards to prevent unbounded growth when events outpace processing. That’s a significant amount of complexity for a feature that, in most real-world environments, would mostly just churn through low-value noise.
+
+   Because transcription is **serialized (one event at a time)** and speech events can be generated far faster than they can be processed, an auto-transcribe toggle would very quickly create an ever-growing backlog and degrade core functionality. For the amount of engineering and risk involved, it adds **very little practical value** for the majority of deployments, which are often on low-powered, edge hardware.
+
+   If you hear speech that’s actually important and worth saving/indexing for the future, **just press the transcribe button in Explore** on that specific `speech` event - that keeps things explicit, reliable, and under your control.
+
+2. Why don't you save live transcription text and use that for `speech` events?
+
+   There’s no guarantee that a `speech` event is even created from the exact audio that went through the transcription model. Live transcription and `speech` event creation are **separate, asynchronous processes**. Even when both are correctly configured, trying to align the **precise start and end time of a speech event** with whatever audio the model happened to be processing at that moment is unreliable.
+
+   Automatically persisting that data would often result in **misaligned, partial, or irrelevant transcripts**, while still incurring all of the CPU, storage, and privacy costs of transcription. That’s why Frigate treats transcription as an **explicit, user-initiated action** rather than an automatic side-effect of every `speech` event.
````
````diff
@@ -10,7 +10,6 @@ Object classification allows you to train a custom MobileNetV2 classification mo
 Object classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.

 Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.
-When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.

 ## Classes

@@ -36,6 +35,15 @@ For object classification:
 - Ideal when multiple attributes can coexist independently.
 - Example: Detecting if a `person` in a construction yard is wearing a helmet or not.

+## Assignment Requirements
+
+Sub labels and attributes are only assigned when both conditions are met:
+
+1. **Threshold**: Each classification attempt must have a confidence score that meets or exceeds the configured `threshold` (default: `0.8`).
+2. **Class Consensus**: After at least 3 classification attempts, 60% of attempts must agree on the same class label. If the consensus class is `none`, no assignment is made.
+
+This two-step verification prevents false positives by requiring consistent predictions across multiple frames before assigning a sub label or attribute.
+
 ## Example use cases

 ### Sub label
@@ -67,14 +75,18 @@ classification:

 ## Training the model

-Creating and training the model is done within the Frigate UI using the `Classification` page.
+Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of two steps:

-### Getting Started
+### Step 1: Name and Define

+Enter a name for your model, select the object label to classify (e.g., `person`, `dog`, `car`), choose the classification type (sub label or attribute), and define your classes. Include a `none` class for objects that don't fit any specific category.
+
+### Step 2: Assign Training Examples
+
+The system will automatically generate example images from detected objects matching your selected label. You'll be guided through each class one at a time to select which images represent that class. Any images not assigned to a specific class will automatically be assigned to `none` when you complete the last class. Once all images are processed, training will begin automatically.
+
 When choosing which objects to classify, start with a small number of visually distinct classes and ensure your training samples match camera viewpoints and distances typical for those objects.

-// TODO add this section once UI is implemented. Explain process of selecting objects and curating training examples.
-
 ### Improving the Model

 - **Problem framing**: Keep classes visually distinct and relevant to the chosen object types.
````
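The threshold-plus-consensus rule in the new "Assignment Requirements" section above can be summarized in a few lines. This is a minimal sketch of the described behavior (whether the threshold filters attempts before the consensus count is an assumption here; it is not Frigate's actual implementation):

```python
from collections import Counter

def assign_class(attempts: list[tuple[str, float]],
                 threshold: float = 0.8,   # default `threshold` from the docs
                 min_attempts: int = 3,    # consensus needs at least 3 attempts
                 consensus: float = 0.6) -> str | None:
    """attempts: (class_label, confidence) pairs, one per classification attempt."""
    qualified = [label for label, score in attempts if score >= threshold]
    if len(qualified) < min_attempts:
        return None  # not enough confident attempts yet
    label, count = Counter(qualified).most_common(1)[0]
    # 60% of attempts must agree, and a `none` consensus assigns nothing.
    if count / len(qualified) >= consensus and label != "none":
        return label
    return None

# Example: 4 of 5 confident attempts agree on "helmet", so it is assigned.
print(assign_class([("helmet", 0.9), ("helmet", 0.85), ("none", 0.95),
                    ("helmet", 0.88), ("helmet", 0.92)]))
```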
````diff
@@ -10,7 +10,6 @@ State classification allows you to train a custom MobileNetV2 classification mod
 State classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.

 Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.
-When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.

 ## Classes

@@ -49,13 +48,23 @@ classification:

 ## Training the model

-Creating and training the model is done within the Frigate UI using the `Classification` page.
+Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of three steps:

-### Getting Started
+### Step 1: Name and Define

-When choosing a portion of the camera frame for state classification, it is important to make the crop tight around the area of interest to avoid extra signals unrelated to what is being classified.
+Enter a name for your model and define at least 2 classes (states) that represent mutually exclusive states. For example, `open` and `closed` for a door, or `on` and `off` for lights.

-// TODO add this section once UI is implemented. Explain process of selecting a crop.
+### Step 2: Select the Crop Area
+
+Choose one or more cameras and draw a rectangle over the area of interest for each camera. The crop should be tight around the region you want to classify to avoid extra signals unrelated to what is being classified. You can drag and resize the rectangle to adjust the crop area.
+
+### Step 3: Assign Training Examples
+
+The system will automatically generate example images from your camera feeds. You'll be guided through each class one at a time to select which images represent that state.
+
+**Important**: All images must be assigned to a state before training can begin. This includes images that may not be optimal, such as when people temporarily block the view, sun glare is present, or other distractions occur. Assign these images to the state that is actually present (based on what you know the state to be), not based on the distraction. This training helps the model correctly identify the state even when such conditions occur during inference.
+
+Once all images are assigned, training will begin automatically.

 ### Improving the Model
````
````diff
@@ -70,7 +70,7 @@ You should have at least 8 GB of RAM available (or VRAM if running on GPU) to ru
 genai:
   provider: ollama
   base_url: http://localhost:11434
-  model: llava:7b
+  model: qwen3-vl:4b
 ```

 ## Google Gemini
````
````diff
@@ -35,19 +35,18 @@ Each model is available in multiple parameter sizes (3b, 4b, 8b, etc.). Larger s

 :::tip

-If you are trying to use a single model for Frigate and HomeAssistant, it will need to support vision and tools calling. https://github.com/skye-harris/ollama-modelfiles contains optimized model configs for this task.
+If you are trying to use a single model for Frigate and HomeAssistant, it will need to support vision and tools calling. qwen3-VL supports vision and tools simultaneously in Ollama.

 :::

 The following models are recommended:

-| Model         | Notes                                                        |
-| ------------- | ------------------------------------------------------------ |
-| `qwen3-vl`    | Strong visual and situational understanding                  |
-| `Intern3.5VL` | Relatively fast with good vision comprehension               |
-| `gemma3`      | Strong frame-to-frame understanding, slower inference times  |
-| `qwen2.5-vl`  | Fast but capable model with good vision comprehension        |
-| `llava-phi3`  | Lightweight and fast model with vision comprehension         |
+| Model         | Notes                                                                 |
+| ------------- | --------------------------------------------------------------------- |
+| `qwen3-vl`    | Strong visual and situational understanding, higher vram requirement  |
+| `Intern3.5VL` | Relatively fast with good vision comprehension                         |
+| `gemma3`      | Strong frame-to-frame understanding, slower inference times            |
+| `qwen2.5-vl`  | Fast but capable model with good vision comprehension                  |

 :::note
````
````diff
@@ -5,7 +5,7 @@ title: Enrichments

 # Enrichments

-Some of Frigate's enrichments can use a discrete GPU / NPU for accelerated processing.
+Some of Frigate's enrichments can use a discrete GPU or integrated GPU for accelerated processing.

 ## Requirements

@@ -18,8 +18,10 @@ Object detection and enrichments (like Semantic Search, Face Recognition, and Li
 - **Intel**

   - OpenVINO will automatically be detected and used for enrichments in the default Frigate image.
+  - **Note:** Intel NPUs have limited model support for enrichments. GPU is recommended for enrichments when available.

 - **Nvidia**

   - Nvidia GPUs will automatically be detected and used for enrichments in the `-tensorrt` Frigate image.
+  - Jetson devices will automatically be detected and used for enrichments in the `-tensorrt-jp6` Frigate image.
````
````diff
@@ -3,18 +3,18 @@ id: license_plate_recognition
 title: License Plate Recognition (LPR)
 ---

-Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a known name as a `sub_label` to tracked objects of type `car` or `motorcycle`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
+Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a [known](#matching) name as a `sub_label` to tracked objects of type `car` or `motorcycle`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.

 LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. When a vehicle becomes stationary, LPR continues to run for a short time after to attempt recognition.

 When a plate is recognized, the details are:

-- Added as a `sub_label` (if known) or the `recognized_license_plate` field (if unknown) to a tracked object.
-- Viewable in the Review Item Details pane in Review (sub labels).
+- Added as a `sub_label` (if [known](#matching)) or the `recognized_license_plate` field (if unknown) to a tracked object.
+- Viewable in the Details pane in Review/History.
 - Viewable in the Tracked Object Details pane in Explore (sub labels and recognized license plates).
 - Filterable through the More Filters menu in Explore.
-- Published via the `frigate/events` MQTT topic as a `sub_label` (known) or `recognized_license_plate` (unknown) for the `car` or `motorcycle` tracked object.
-- Published via the `frigate/tracked_object_update` MQTT topic with `name` (if known) and `plate`.
+- Published via the `frigate/events` MQTT topic as a `sub_label` ([known](#matching)) or `recognized_license_plate` (unknown) for the `car` or `motorcycle` tracked object.
+- Published via the `frigate/tracked_object_update` MQTT topic with `name` (if [known](#matching)) and `plate`.

 ## Model Requirements

@@ -31,6 +31,7 @@ In the default mode, Frigate's LPR needs to first detect a `car` or `motorcycle`
 ## Minimum System Requirements

 License plate recognition works by running AI models locally on your system. The YOLOv9 plate detector model and the OCR models ([PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)) are relatively lightweight and can run on your CPU or GPU, depending on your configuration. At least 4GB of RAM is required.

 ## Configuration

 License plate recognition is disabled by default. Enable it in your config file:
@@ -73,8 +74,8 @@ Fine-tune the LPR feature using these optional parameters at the global level of
 - Default: `small`
 - This can be `small` or `large`.
 - The `small` model is fast and identifies groups of Latin and Chinese characters.
-- The `large` model identifies Latin characters only, but uses an enhanced text detector and is more capable at finding characters on multi-line plates. It is significantly slower than the `small` model. Note that using the `large` model does not improve _text recognition_, but it may improve _text detection_.
-- For most users, the `small` model is recommended.
+- The `large` model identifies Latin characters only, and uses an enhanced text detector to find characters on multi-line plates. It is significantly slower than the `small` model.
+- If your country or region does not use multi-line plates, you should use the `small` model as performance is much better for single-line plates.

 ### Recognition

@@ -177,7 +178,7 @@ lpr:

 :::note

-If you want to detect cars on cameras but don't want to use resources to run LPR on those cars, you should disable LPR for those specific cameras.
+If a camera is configured to detect `car` or `motorcycle` but you don't want Frigate to run LPR for that camera, disable LPR at the camera level:

 ```yaml
 cameras:
@@ -305,7 +306,7 @@ With this setup:
 - Review items will always be classified as a `detection`.
 - Snapshots will always be saved.
 - Zones and object masks are **not** used.
-- The `frigate/events` MQTT topic will **not** publish tracked object updates with the license plate bounding box and score, though `frigate/reviews` will publish if recordings are enabled. If a plate is recognized as a known plate, publishing will occur with an updated `sub_label` field. If characters are recognized, publishing will occur with an updated `recognized_license_plate` field.
+- The `frigate/events` MQTT topic will **not** publish tracked object updates with the license plate bounding box and score, though `frigate/reviews` will publish if recordings are enabled. If a plate is recognized as a [known](#matching) plate, publishing will occur with an updated `sub_label` field. If characters are recognized, publishing will occur with an updated `recognized_license_plate` field.
 - License plate snapshots are saved at the highest-scoring moment and appear in Explore.
 - Debug view will not show `license_plate` bounding boxes.
````
````diff
@@ -214,6 +214,42 @@ For restreamed cameras, go2rtc remains active but does not use system resources

 Note that disabling a camera through the config file (`enabled: False`) removes all related UI elements, including historical footage access. To retain access while disabling the camera, keep it enabled in the config and use the UI or MQTT to disable it temporarily.

+### Live player error messages
+
+When your browser runs into problems playing back your camera streams, it will log short error messages to the browser console. They indicate playback, codec, or network issues on the client/browser side, not something server side with Frigate itself. Below are the common messages you may see and simple actions you can take to try to resolve them.
+
+- **startup**
+
+  - What it means: The player failed to initialize or connect to the live stream (network or startup error).
+  - What to try: Reload the Live view or click _Reset_. Verify `go2rtc` is running and the camera stream is reachable. Try switching to a different stream from the Live UI dropdown (if available) or use a different browser.
+
+  - Possible console messages from the player code:
+
+    - `Error opening MediaSource.`
+    - `Browser reported a network error.`
+    - `Max error count ${errorCount} exceeded.` (the numeric value will vary)
+
+- **mse-decode**
+
+  - What it means: The browser reported a decoding error while trying to play the stream, which usually is a result of a codec incompatibility or corrupted frames.
+  - What to try: Check the browser console for the supported and negotiated codecs. Ensure your camera/restream is using H.264 video and AAC audio (these are the most compatible). If your camera uses a non-standard audio codec, configure `go2rtc` to transcode the stream to AAC. Try another browser (some browsers have stricter MSE/codec support) and, for iPhone, ensure you're on iOS 17.1 or newer.
+
+  - Possible console messages from the player code:
+
+    - `Safari cannot open MediaSource.`
+    - `Safari reported InvalidStateError.`
+    - `Safari reported decoding errors.`
+
+- **stalled**
+
+  - What it means: Playback has stalled because the player has fallen too far behind live (extended buffering or no data arriving).
+  - What to try: This is usually indicative of the browser struggling to decode too many high-resolution streams at once. Try selecting a lower-bandwidth stream (substream), reduce the number of live streams open, improve the network connection, or lower the camera resolution. Also check your camera's keyframe (I-frame) interval — shorter intervals make playback start and recover faster. You can also try increasing the timeout value in the UI pane of Frigate's settings.
+
+  - Possible console messages from the player code:
+
+    - `Buffer time (10 seconds) exceeded, browser may not be playing media correctly.`
+    - `Media playback has stalled after <n> seconds due to insufficient buffering or a network interruption.` (the seconds value will vary)
+
 ## Live view FAQ

 1. **Why don't I have audio in my Live view?**
@@ -277,3 +313,38 @@ Note that disabling a camera through the config file (`enabled: False`) removes
 7. **My camera streams have lots of visual artifacts / distortion.**

    Some cameras don't include the hardware to support multiple connections to the high resolution stream, and this can cause unexpected behavior. In this case it is recommended to [restream](./restream.md) the high resolution stream so that it can be used for live view and recordings.
+
+8. **Why does my camera stream switch aspect ratios on the Live dashboard?**
+
+   Your camera may change aspect ratios on the dashboard because Frigate uses different streams for different purposes. With go2rtc and Smart Streaming, Frigate shows a static image from the `detect` stream when no activity is present, and switches to the live stream when motion is detected. The camera image will change size if your streams use different aspect ratios.
+
+   To prevent this, make the `detect` stream match the go2rtc live stream's aspect ratio (resolution does not need to match, just the aspect ratio). You can either adjust the camera's output resolution or set the `width` and `height` values in your config's `detect` section to a resolution with an aspect ratio that matches.
+
+   Example: Resolutions from two streams
+
+   - Mismatched (may cause aspect ratio switching on the dashboard):
+
+     - Live/go2rtc stream: 1920x1080 (16:9)
+     - Detect stream: 640x352 (~1.82:1, not 16:9)
+
+   - Matched (prevents switching):
+     - Live/go2rtc stream: 1920x1080 (16:9)
+     - Detect stream: 640x360 (16:9)
+
+   You can update the detect settings in your camera config to match the aspect ratio of your go2rtc live stream. For example:
+
+   ```yaml
+   cameras:
+     front_door:
+       detect:
+         width: 640
+         height: 360 # set this to 360 instead of 352
+       ffmpeg:
+         inputs:
+           - path: rtsp://127.0.0.1:8554/front_door # main stream 1920x1080
+             roles:
+               - record
+           - path: rtsp://127.0.0.1:8554/front_door_sub # sub stream 640x352
+             roles:
+               - detect
+   ```
````
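A quick standalone check for whether two stream resolutions share an aspect ratio, using the numbers from the example above (illustrative only, not part of Frigate):

```python
from math import isclose

def same_aspect(w1: int, h1: int, w2: int, h2: int, tol: float = 0.01) -> bool:
    """True when the two resolutions have approximately the same aspect ratio."""
    return isclose(w1 / h1, w2 / h2, rel_tol=tol)

print(same_aspect(1920, 1080, 640, 352))  # False: ~1.82 vs 16:9, dashboard may switch
print(same_aspect(1920, 1080, 640, 360))  # True: both 16:9
```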
````diff
@@ -3,6 +3,8 @@ id: object_detectors
 title: Object Detectors
 ---

+import CommunityBadge from '@site/src/components/CommunityBadge';
+
 # Supported Hardware

 :::info
@@ -13,8 +15,8 @@ Frigate supports multiple different detectors that work on different types of ha

 - [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
 - [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.
-- [MemryX](#memryx-mx3): The MX3 Acceleration module is available in m.2 format, offering broad compatibility across various platforms.
-- [DeGirum](#degirum): Service for using hardware devices in the cloud or locally. Hardware and models provided on the cloud on [their website](https://hub.degirum.com).
+- <CommunityBadge /> [MemryX](#memryx-mx3): The MX3 Acceleration module is available in m.2 format, offering broad compatibility across various platforms.
+- <CommunityBadge /> [DeGirum](#degirum): Service for using hardware devices in the cloud or locally. Hardware and models provided on the cloud on [their website](https://hub.degirum.com).

 **AMD**

@@ -34,16 +36,16 @@ Frigate supports multiple different detectors that work on different types of ha

 - [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.

-**Nvidia Jetson**
+**Nvidia Jetson** <CommunityBadge />

 - [TensortRT](#nvidia-tensorrt-detector): TensorRT can run on Jetson devices, using one of many default models.
 - [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt-jp6` Frigate image when a supported ONNX model is configured.

-**Rockchip**
+**Rockchip** <CommunityBadge />

 - [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.

-**Synaptics**
+**Synaptics** <CommunityBadge />

 - [Synaptics](#synaptics): synap models can run on Synaptics devices(e.g astra machina) with included NPUs.

@@ -261,6 +263,8 @@ OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. It will al

 :::tip

+**NPU + GPU Systems:** If you have both NPU and GPU available (Intel Core Ultra processors), use NPU for object detection and GPU for enrichments (semantic search, face recognition, etc.) for best performance and compatibility.
+
 When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be:

 ```yaml
@@ -283,7 +287,7 @@ detectors:
 | [RF-DETR](#rf-detr)                   | ✅ | ✅ | Requires XE iGPU or Arc                                      |
 | [YOLO-NAS](#yolo-nas)                 | ✅ | ✅ |                                                              |
 | [MobileNet v2](#ssdlite-mobilenet-v2) | ✅ | ✅ | Fast and lightweight model, less accurate than larger models |
-| [YOLOX](#yolox)                       | ✅ | ?  |                                                              |
+| [YOLOX](#yolox)                       | ✅ | ?  |                                                              |
 | [D-FINE](#d-fine)                     | ❌ | ❌ |                                                              |

 #### SSDLite MobileNet v2
@@ -960,7 +964,6 @@ model:
 #      path: /config/yolov9.zip
 # The .zip file must contain:
 # ├── yolov9.dfp (a file ending with .dfp)
-# └── yolov9_post.onnx (optional; only if the model includes a cropped post-processing network)
 ```

 #### YOLOX
````
````diff
@@ -246,7 +246,7 @@ birdseye:
 # Optional: ffmpeg configuration
 # More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets
 ffmpeg:
-  # Optional: ffmpeg binry path (default: shown below)
+  # Optional: ffmpeg binary path (default: shown below)
   # can also be set to `7.0` or `5.0` to specify one of the included versions
   # or can be set to any path that holds `bin/ffmpeg` & `bin/ffprobe`
   path: "default"
````
````diff
@@ -700,11 +700,11 @@ genai:
 # Optional: Configuration for audio transcription
 # NOTE: only the enabled option can be overridden at the camera level
 audio_transcription:
-  # Optional: Enable license plate recognition (default: shown below)
+  # Optional: Enable live and speech event audio transcription (default: shown below)
   enabled: False
-  # Optional: The device to run the models on (default: shown below)
+  # Optional: The device to run the models on for live transcription. (default: shown below)
   device: CPU
-  # Optional: Set the model size used for transcription. (default: shown below)
+  # Optional: Set the model size used for live transcription. (default: shown below)
   model_size: small
   # Optional: Set the language used for transcription translation. (default: shown below)
   # List of language codes: https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10
@@ -810,6 +810,8 @@ cameras:
     # NOTE: This must be different than any camera names, but can match with another zone on another
     # camera.
     front_steps:
+      # Optional: A friendly name or descriptive text for the zones
+      friendly_name: ""
       # Required: List of x,y coordinates to define the polygon of the zone.
       # NOTE: Presence in a zone is evaluated only based on the bottom center of the objects bounding box.
       coordinates: 0.033,0.306,0.324,0.138,0.439,0.185,0.042,0.428
````
````diff
@@ -78,7 +78,7 @@ Switching between V1 and V2 requires reindexing your embeddings. The embeddings

 ### GPU Acceleration

-The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU / NPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation.
+The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation.

 ```yaml
 semantic_search:
@@ -90,7 +90,7 @@ semantic_search:

 :::info

-If the correct build is used for your GPU / NPU and the `large` model is configured, then the GPU / NPU will be detected and used automatically.
+If the correct build is used for your GPU / NPU and the `large` model is configured, then the GPU will be detected and used automatically.
 Specify the `device` option to target a specific GPU in a multi-GPU system (see [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/)).
 If you do not specify a device, the first available GPU will be used.

@@ -141,7 +141,7 @@ Triggers are best configured through the Frigate UI.
    Check the `Add Attribute` box to add the trigger's internal ID (e.g., "red_car_alert") to a data attribute on the tracked object that can be processed via the API or MQTT.
 5. Save the trigger to update the configuration and store the embedding in the database.

-When a trigger fires, the UI highlights the trigger with a blue dot for 3 seconds for easy identification.
+When a trigger fires, the UI highlights the trigger with a blue dot for 3 seconds for easy identification. Additionally, the UI will show the last date/time and tracked object ID that activated your trigger. The last triggered timestamp is not saved to the database or persisted through restarts of Frigate.

 ### Usage and Best Practices
````
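As a rough sketch of consuming that attribute over MQTT: the topic name comes from these docs, but the paho-mqtt dependency (2.0 or newer), the `after.attributes` payload location, and the `red_car_alert` trigger ID are assumptions to verify against your own broker traffic.

```python
import json
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    event = json.loads(msg.payload)
    after = event.get("after", {})
    # Hypothetical check: look for the trigger's internal ID among the
    # tracked object's attributes (exact payload shape may differ by version).
    if "red_car_alert" in (after.get("attributes") or {}):
        print(f"Trigger fired for tracked object {after.get('id')}")

client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2)
client.on_message = on_message
client.connect("frigate_host", 1883)
client.subscribe("frigate/events")
client.loop_forever()
```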
````diff
@@ -27,6 +27,7 @@ cameras:
       - entire_yard
     zones:
       entire_yard:
+        friendly_name: Entire yard # You can use characters from any language text
         coordinates: ...
 ```

@@ -44,8 +45,10 @@ cameras:
       - edge_yard
     zones:
       edge_yard:
+        friendly_name: Edge yard # You can use characters from any language text
         coordinates: ...
       inner_yard:
+        friendly_name: Inner yard # You can use characters from any language text
         coordinates: ...
 ```

@@ -59,6 +62,7 @@ cameras:
       - entire_yard
     zones:
       entire_yard:
+        friendly_name: Entire yard
         coordinates: ...
 ```

@@ -82,6 +86,7 @@ cameras:

 Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. Objects will be tracked for any `person` that enter anywhere in the yard, and for cars only if they enter the street.

+
 ### Zone Loitering

 Sometimes objects are expected to be passing through a zone, but an object loitering in an area is unexpected. Zones can be configured to have a minimum loitering time after which the object will be considered in the zone.
````
@ -3,6 +3,8 @@ id: hardware
title: Recommended hardware
---

import CommunityBadge from '@site/src/components/CommunityBadge';

## Cameras

Cameras that output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. It is also helpful if your camera supports multiple substreams to allow different resolutions to be used for detection, streaming, and recordings without re-encoding.
@ -59,7 +61,7 @@ Frigate supports multiple different detectors that work on different types of ha

- [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#edge-tpu-detector)

- [MemryX](#memryx-mx3): The MX3 accelerator module is available in M.2 format, allowing for a wide range of compatibility with devices.
- <CommunityBadge /> [MemryX](#memryx-mx3): The MX3 accelerator module is available in M.2 format, allowing for a wide range of compatibility with devices.
- [Supports many model architectures](../../configuration/object_detectors#memryx-mx3)
- Runs best with tiny, small, or medium-size models

@ -84,32 +86,26 @@ Frigate supports multiple different detectors that work on different types of ha

**Nvidia**

- [TensorRT](#tensorrt---nvidia-gpu): TensorRT can run on Nvidia GPUs and Jetson devices.
- [TensorRT](#tensorrt---nvidia-gpu): TensorRT can run on Nvidia GPUs to provide efficient object detection.

- [Supports the majority of model architectures via ONNX](../../configuration/object_detectors#onnx-supported-models)
- Runs well with models of any size, including large

**Rockchip**
- <CommunityBadge /> [Jetson](#nvidia-jetson): Jetson devices are supported via the TensorRT or ONNX detectors when running Jetpack 6.

**Rockchip** <CommunityBadge />

- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs to provide efficient object detection.
- [Supports limited model architectures](../../configuration/object_detectors#choosing-a-model)
- Runs best with tiny or small size models
- Runs efficiently on low power hardware

**Synaptics**
**Synaptics** <CommunityBadge />

- [Synaptics](#synaptics): SyNAP models can run on Synaptics devices (e.g., Astra Machina) with included NPUs to provide efficient object detection.

:::

### Synaptics

- **Synaptics**: The default model is **mobilenet**

| Name          | Synaptics SL1680 Inference Time |
| ------------- | ------------------------------- |
| ssd mobilenet | ~ 25 ms                         |
| yolov5m       | ~ 118 ms                        |

### Hailo-8

Frigate supports both the Hailo-8 and Hailo-8L AI Acceleration Modules on compatible hardware platforms, including the Raspberry Pi 5 with the PCIe hat from the AI kit. The Hailo detector integration in Frigate automatically identifies your hardware type and selects the appropriate default model when a custom model isn't provided.
@ -163,7 +159,7 @@ Inference speeds vary greatly depending on the CPU or GPU used, some known examp
| Intel HD 530  | 15 - 35 ms | | | | Can only run one detector instance |
| Intel HD 620  | 15 - 25 ms | | 320: ~ 35 ms | | |
| Intel HD 630  | ~ 15 ms    | | 320: ~ 30 ms | | |
| Intel UHD 730 | ~ 10 ms    | | 320: ~ 19 ms 640: ~ 54 ms | | |
| Intel UHD 730 | ~ 10 ms    | t-320: 14 ms s-320: 24 ms t-640: 34 ms s-640: 65 ms | 320: ~ 19 ms 640: ~ 54 ms | | |
| Intel UHD 770 | ~ 15 ms    | t-320: ~ 16 ms s-320: ~ 20 ms s-640: ~ 40 ms | 320: ~ 20 ms 640: ~ 46 ms | | |
| Intel N100    | ~ 15 ms    | s-320: 30 ms | 320: ~ 25 ms | | Can only run one detector instance |
| Intel N150    | ~ 15 ms    | t-320: 16 ms s-320: 24 ms | | | |
@ -261,7 +257,7 @@ Inference speeds may vary depending on the host platform. The above data was mea

### Nvidia Jetson

Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
Jetson devices are supported via the TensorRT or ONNX detectors when running Jetpack 6. Frigate will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).

Inference speed will vary depending on the YOLO model, Jetson platform, and Jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time.

@ -282,6 +278,15 @@ Frigate supports hardware video processing on all Rockchip boards. However, hard

The inference time of an RK3588 with all 3 cores enabled is typically 25-30 ms for yolo-nas s.

### Synaptics

- **Synaptics**: The default model is **mobilenet**

| Name          | Synaptics SL1680 Inference Time |
| ------------- | ------------------------------- |
| ssd mobilenet | ~ 25 ms                         |
| yolov5m       | ~ 118 ms                        |

## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)

This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.

@ -56,7 +56,7 @@ services:
    volumes:
      - /path/to/your/config:/config
      - /path/to/your/storage:/media/frigate
      - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
      - type: tmpfs # Recommended: 1GB of memory
        target: /tmp/cache
        tmpfs:
          size: 1000000000
@ -310,7 +310,7 @@ services:
      - /etc/localtime:/etc/localtime:ro
      - /path/to/your/config:/config
      - /path/to/your/storage:/media/frigate
      - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
      - type: tmpfs # Recommended: 1GB of memory
        target: /tmp/cache
        tmpfs:
          size: 1000000000

@ -159,11 +159,44 @@ Message published for updates to tracked object metadata, for example:
}
```

#### Object Classification Update

Message published when [object classification](/configuration/custom_classification/object_classification) reaches consensus on a classification result.

**Sub label type:**

```json
{
  "type": "classification",
  "id": "1607123955.475377-mxklsc",
  "camera": "front_door_cam",
  "timestamp": 1607123958.748393,
  "model": "person_classifier",
  "sub_label": "delivery_person",
  "score": 0.87
}
```

**Attribute type:**

```json
{
  "type": "classification",
  "id": "1607123955.475377-mxklsc",
  "camera": "front_door_cam",
  "timestamp": 1607123958.748393,
  "model": "helmet_detector",
  "attribute": "yes",
  "score": 0.92
}
```
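
A small MQTT subscriber can branch on whether a payload carries a `sub_label` or an `attribute` key. Here is a minimal sketch (paho-mqtt 1.x style), assuming these payloads arrive on the tracked-object update topic documented above; the broker address is a placeholder:

```python
import json

import paho.mqtt.client as mqtt


def on_message(client, userdata, msg):
    data = json.loads(msg.payload)
    if data.get("type") != "classification":
        return
    if "sub_label" in data:
        print(f"{data['camera']}: {data['model']} -> {data['sub_label']} ({data['score']:.2f})")
    elif "attribute" in data:
        print(f"{data['camera']}: {data['model']} attribute={data['attribute']} ({data['score']:.2f})")


client = mqtt.Client()  # paho-mqtt 1.x style constructor
client.on_message = on_message
client.connect("localhost", 1883)  # placeholder broker address
client.subscribe("frigate/tracked_object_update")
client.loop_forever()
```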

### `frigate/reviews`

Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated.

An `update` with the same ID will be published when:

- The severity changes from `detection` to `alert`
- Additional objects are detected
- An object is recognized via face, lpr, etc.
@ -308,6 +341,11 @@ Publishes transcribed text for audio detected on this camera.

**NOTE:** Requires audio detection and transcription to be enabled.

### `frigate/<camera_name>/classification/<model_name>`

Publishes the current state detected by a state classification model for the camera. The topic name includes the model name as configured in your classification settings.
The published value is the detected state class name (e.g., `open`, `closed`, `on`, `off`). The state is only published when it changes, helping to reduce unnecessary MQTT traffic.
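
To keep a live view of these states, subscribing with per-level wildcards is enough; a minimal sketch (paho-mqtt 1.x style, placeholder broker address):

```python
import paho.mqtt.client as mqtt

states: dict[str, str] = {}  # latest state per camera/model topic


def on_message(client, userdata, msg):
    # Payload is the plain class name, e.g. b"open" or b"closed"
    states[msg.topic] = msg.payload.decode()
    print(states)


client = mqtt.Client()  # paho-mqtt 1.x style constructor
client.on_message = on_message
client.connect("localhost", 1883)  # placeholder broker address
client.subscribe("frigate/+/classification/+")  # all cameras, all models
client.loop_forever()
```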

### `frigate/<camera_name>/enabled/set`

Topic to turn Frigate's processing of a camera on and off. Expected values are `ON` and `OFF`.
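
For example, publishing from Python (broker address is a placeholder):

```python
import paho.mqtt.publish as publish

# Turn off Frigate's processing of the "front_door" camera
publish.single("frigate/front_door/enabled/set", "OFF", hostname="localhost")
```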

docs/docs/troubleshooting/memory.md (new file, 129 lines)
@ -0,0 +1,129 @@

---
id: memory
title: Memory Troubleshooting
---

Frigate includes built-in memory profiling using [memray](https://bloomberg.github.io/memray/) to help diagnose memory issues. This feature allows you to profile specific Frigate modules to identify memory leaks, excessive allocations, or other memory-related problems.

## Enabling Memory Profiling

Memory profiling is controlled via the `FRIGATE_MEMRAY_MODULES` environment variable. Set it to a comma-separated list of module names you want to profile:

```bash
export FRIGATE_MEMRAY_MODULES="frigate.review_segment_manager,frigate.capture"
```

### Module Names

Frigate processes are named using a module-based naming scheme. Common module names include:

- `frigate.review_segment_manager` - Review segment processing
- `frigate.recording_manager` - Recording management
- `frigate.capture` - Camera capture processes (all cameras with this module name)
- `frigate.process` - Camera processing/tracking (all cameras with this module name)
- `frigate.output` - Output processing
- `frigate.audio_manager` - Audio processing
- `frigate.embeddings` - Embeddings processing

You can also specify the full process name (including camera-specific identifiers) if you want to profile a specific camera:

```bash
export FRIGATE_MEMRAY_MODULES="frigate.capture:front_door"
```

When you specify a module name (e.g., `frigate.capture`), all processes with that module prefix will be profiled. For example, `frigate.capture` will profile all camera capture processes.
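
As an illustration of the selection rules just described (a sketch only, not Frigate's actual implementation), the matching could look like this:

```python
import os


def should_profile(process_name: str) -> bool:
    """Return True if a process name matches FRIGATE_MEMRAY_MODULES.

    Matches either the exact process name ("frigate.capture:front_door")
    or an entry that is the process's module prefix ("frigate.capture").
    """
    raw = os.environ.get("FRIGATE_MEMRAY_MODULES", "")
    entries = {e.strip() for e in raw.split(",") if e.strip()}
    module = process_name.split(":", 1)[0]
    return process_name in entries or module in entries


os.environ["FRIGATE_MEMRAY_MODULES"] = "frigate.capture"
print(should_profile("frigate.capture:front_door"))  # True
print(should_profile("frigate.output"))              # False
```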

## How It Works

1. **Binary File Creation**: When profiling is enabled, memray creates a binary file (`.bin`) in `/config/memray_reports/` that is updated continuously in real-time as the process runs.

2. **Automatic HTML Generation**: On normal process exit, Frigate automatically:

   - Stops memray tracking
   - Generates an HTML flamegraph report
   - Saves it to `/config/memray_reports/<module_name>.html`

3. **Crash Recovery**: If a process crashes (SIGKILL, segfault, etc.), the binary file is preserved with all data up to the crash point. You can manually generate the HTML report from the binary file.

## Viewing Reports

### Automatic Reports

After a process exits normally, you'll find HTML reports in `/config/memray_reports/`. Open these files in a web browser to view interactive flamegraphs showing memory usage patterns.

### Manual Report Generation

If a process crashes or you want to generate a report from an existing binary file, you can manually create the HTML report:

```bash
memray flamegraph /config/memray_reports/<module_name>.bin
```

This will generate an HTML file that you can open in your browser.
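
For reference, the same kind of `.bin` capture file can be produced from any Python code with memray's `Tracker` API; a minimal sketch:

```python
import memray

# Track all allocations made inside the block and write them to a binary
# capture file, which `memray flamegraph` can turn into an HTML report.
# Note: memray refuses to overwrite an existing capture file.
with memray.Tracker("example.bin"):
    data = [bytes(1024) for _ in range(10_000)]  # allocate ~10 MB

# Afterwards, run: memray flamegraph example.bin
```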

## Understanding the Reports

Memray flamegraphs show:

- **Memory allocations over time**: See where memory is being allocated in your code
- **Call stacks**: Understand the full call chain leading to allocations
- **Memory hotspots**: Identify functions or code paths that allocate the most memory
- **Memory leaks**: Spot patterns where memory is allocated but not freed

The interactive HTML reports allow you to:

- Zoom into specific time ranges
- Filter by function names
- View detailed allocation information
- Export data for further analysis

## Best Practices

1. **Profile During Issues**: Enable profiling when you're experiencing memory issues, not all the time, as it adds some overhead.

2. **Profile Specific Modules**: Instead of profiling everything, focus on the modules you suspect are causing issues.

3. **Let Processes Run**: Allow processes to run for a meaningful duration to capture representative memory usage patterns.

4. **Check Binary Files**: If HTML reports aren't generated automatically (e.g., after a crash), check for `.bin` files in `/config/memray_reports/` and generate reports manually.

5. **Compare Reports**: Generate reports at different times to compare memory usage patterns and identify trends.

## Troubleshooting

### No Reports Generated

- Check that the environment variable is set correctly
- Verify the module name matches exactly (case-sensitive)
- Check logs for memray-related errors
- Ensure the `/config/memray_reports/` directory exists and is writable

### Process Crashed Before Report Generation

- Look for `.bin` files in `/config/memray_reports/`
- Manually generate HTML reports using: `memray flamegraph <file>.bin`
- The binary file contains all data up to the crash point

### Reports Show No Data

- Ensure the process ran long enough to generate meaningful data
- Check that memray is properly installed (included by default in Frigate)
- Verify the process actually started and ran (check process logs)

## Example Usage

```bash
# Enable profiling for the review and capture modules
export FRIGATE_MEMRAY_MODULES="frigate.review_segment_manager,frigate.capture"

# Start Frigate
# ... let it run for a while ...

# Check for reports
ls -lh /config/memray_reports/

# If a process crashed, manually generate the report
memray flamegraph /config/memray_reports/frigate_capture_front_door.bin
```

For more information about memray and interpreting reports, see the [official memray documentation](https://bloomberg.github.io/memray/).

@ -10,7 +10,7 @@ const config: Config = {
  baseUrl: "/",
  onBrokenLinks: "throw",
  onBrokenMarkdownLinks: "warn",
  favicon: "img/favicon.ico",
  favicon: "img/branding/favicon.ico",
  organizationName: "blakeblackshear",
  projectName: "frigate",
  themes: [
@ -116,8 +116,8 @@ const config: Config = {
      title: "Frigate",
      logo: {
        alt: "Frigate",
        src: "img/logo.svg",
        srcDark: "img/logo-dark.svg",
        src: "img/branding/logo.svg",
        srcDark: "img/branding/logo-dark.svg",
      },
      items: [
        {
@ -170,7 +170,7 @@ const config: Config = {
        ],
      },
    ],
    copyright: `Copyright © ${new Date().getFullYear()} Blake Blackshear`,
    copyright: `Copyright © ${new Date().getFullYear()} Frigate LLC`,
  },
},
plugins: [

@ -131,6 +131,7 @@ const sidebars: SidebarsConfig = {
      "troubleshooting/recordings",
      "troubleshooting/gpu",
      "troubleshooting/edgetpu",
      "troubleshooting/memory",
    ],
    Development: [
      "development/contributing",

docs/src/components/CommunityBadge/index.jsx (new file, 23 lines)
@ -0,0 +1,23 @@

import React from "react";

export default function CommunityBadge() {
  return (
    <span
      title="This detector is maintained by community members who provide code, maintenance, and support. See the contributing boards documentation for more information."
      style={{
        display: "inline-block",
        backgroundColor: "#f1f3f5",
        color: "#24292f",
        fontSize: "11px",
        fontWeight: 600,
        padding: "2px 6px",
        borderRadius: "3px",
        border: "1px solid #d1d9e0",
        marginLeft: "4px",
        cursor: "help",
      }}
    >
      Community Supported
    </span>
  );
}

@ -1,13 +1,18 @@
.alert {
  padding: 12px;
  background: #fff8e6;
  border-bottom: 1px solid #ffd166;
  text-align: center;
  font-size: 15px;
}

.alert a {
  color: #1890ff;
  font-weight: 500;
  margin-left: 6px;
}
  padding: 12px;
  background: #fff8e6;
  border-bottom: 1px solid #ffd166;
  text-align: center;
  font-size: 15px;
}

[data-theme="dark"] .alert {
  background: #3b2f0b;
  border-bottom: 1px solid #665c22;
}

.alert a {
  color: #1890ff;
  font-weight: 500;
  margin-left: 6px;
}

docs/static/img/branding/LICENSE.md (new file, vendored, 30 lines)
@ -0,0 +1,30 @@

# COPYRIGHT AND TRADEMARK NOTICE

The images, logos, and icons contained in this directory (the "Brand Assets") are
proprietary to Frigate LLC and are NOT covered by the MIT License governing the
rest of this repository.

1. TRADEMARK STATUS
   The "Frigate" name and the accompanying logo are common law trademarks™ of
   Frigate LLC. Frigate LLC reserves all rights to these marks.

2. LIMITED PERMISSION FOR USE
   Permission is hereby granted to display these Brand Assets strictly for the
   following purposes:
   a. To execute the software interface on a local machine.
   b. To identify the software in documentation or reviews (nominative use).

3. RESTRICTIONS
   You may NOT:
   a. Use these Brand Assets to represent a derivative work (fork) as an official
      product of Frigate LLC.
   b. Use these Brand Assets in a way that implies endorsement, sponsorship, or
      commercial affiliation with Frigate LLC.
   c. Modify or alter the Brand Assets.

If you fork this repository with the intent to distribute a modified or competing
version of the software, you must replace these Brand Assets with your own
original content.

ALL RIGHTS RESERVED.
Copyright (c) 2025 Frigate LLC.

[Binary image diffs: four images with unchanged dimensions and sizes (15 KiB, 12 KiB, 936 B, 933 B).]
@ -23,7 +23,7 @@ from markupsafe import escape
from peewee import SQL, fn, operator
from pydantic import ValidationError

from frigate.api.auth import require_role
from frigate.api.auth import allow_any_authenticated, allow_public, require_role
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.tags import Tags
@ -37,7 +37,6 @@ from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.util.builtin import (
    clean_camera_user_pass,
    flatten_config_data,
    get_tz_modifiers,
    process_config_query_string,
    update_yaml_file_bulk,
)
@ -48,6 +47,7 @@ from frigate.util.services import (
    restart_frigate,
    vainfo_hwaccel,
)
from frigate.util.time import get_tz_modifiers
from frigate.version import VERSION

logger = logging.getLogger(__name__)
@ -56,29 +56,33 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.app])


@router.get("/", response_class=PlainTextResponse)
@router.get(
    "/", response_class=PlainTextResponse, dependencies=[Depends(allow_public())]
)
def is_healthy():
    return "Frigate is running. Alive and healthy!"


@router.get("/config/schema.json")
@router.get("/config/schema.json", dependencies=[Depends(allow_public())])
def config_schema(request: Request):
    return Response(
        content=request.app.frigate_config.schema_json(), media_type="application/json"
    )


@router.get("/version", response_class=PlainTextResponse)
@router.get(
    "/version", response_class=PlainTextResponse, dependencies=[Depends(allow_public())]
)
def version():
    return VERSION


@router.get("/stats")
@router.get("/stats", dependencies=[Depends(allow_any_authenticated())])
def stats(request: Request):
    return JSONResponse(content=request.app.stats_emitter.get_latest_stats())


@router.get("/stats/history")
@router.get("/stats/history", dependencies=[Depends(allow_any_authenticated())])
def stats_history(request: Request, keys: str = None):
    if keys:
        keys = keys.split(",")
@ -86,7 +90,7 @@ def stats_history(request: Request, keys: str = None):
    return JSONResponse(content=request.app.stats_emitter.get_stats_history(keys))


@router.get("/metrics")
@router.get("/metrics", dependencies=[Depends(allow_any_authenticated())])
def metrics(request: Request):
    """Expose Prometheus metrics endpoint and update metrics with latest stats"""
    # Retrieve the latest statistics and update the Prometheus metrics
@ -103,7 +107,7 @@ def metrics(request: Request):
    return Response(content=content, media_type=content_type)


@router.get("/config")
@router.get("/config", dependencies=[Depends(allow_any_authenticated())])
def config(request: Request):
    config_obj: FrigateConfig = request.app.frigate_config
    config: dict[str, dict[str, Any]] = config_obj.model_dump(
@ -179,7 +183,37 @@ def config(request: Request):
    return JSONResponse(content=config)


@router.get("/config/raw")
@router.get("/config/raw_paths", dependencies=[Depends(require_role(["admin"]))])
def config_raw_paths(request: Request):
    """Admin-only endpoint that returns camera paths and go2rtc streams without credential masking."""
    config_obj: FrigateConfig = request.app.frigate_config

    raw_paths = {"cameras": {}, "go2rtc": {"streams": {}}}

    # Extract raw camera ffmpeg input paths
    for camera_name, camera in config_obj.cameras.items():
        raw_paths["cameras"][camera_name] = {
            "ffmpeg": {
                "inputs": [
                    {"path": input.path, "roles": input.roles}
                    for input in camera.ffmpeg.inputs
                ]
            }
        }

    # Extract raw go2rtc stream URLs
    go2rtc_config = config_obj.go2rtc.model_dump(
        mode="json", warnings="none", exclude_none=True
    )
    for stream_name, stream in go2rtc_config.get("streams", {}).items():
        if stream is None:
            continue
        raw_paths["go2rtc"]["streams"][stream_name] = stream

    return JSONResponse(content=raw_paths)
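
A hedged usage sketch for this new endpoint follows; the `/api` prefix, port, and cookie name are assumptions about a typical deployment, not confirmed by this diff:

```python
import requests

resp = requests.get(
    "http://frigate.local:8971/api/config/raw_paths",
    cookies={"frigate_token": "<admin JWT>"},  # placeholder auth cookie
)
resp.raise_for_status()
for camera, cfg in resp.json()["cameras"].items():
    for inp in cfg["ffmpeg"]["inputs"]:
        # Paths are returned unmasked, so treat this output as sensitive.
        print(camera, inp["roles"], inp["path"])
```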

@router.get("/config/raw", dependencies=[Depends(allow_any_authenticated())])
def config_raw():
    config_file = find_config_file()

@ -403,12 +437,13 @@ def config_set(request: Request, body: AppConfigSetBody):
                settings,
            )
        else:
            # Handle nested config updates (e.g., config/classification/custom/{name})
            # Generic handling for global config updates
            settings = config.get_nested_object(body.update_topic)
            if settings:
                request.app.config_publisher.publisher.publish(
                    body.update_topic, settings
                )

            # Publish None for removal, actual config for add/update
            request.app.config_publisher.publisher.publish(
                body.update_topic, settings
            )

    return JSONResponse(
        content=(
@ -421,7 +456,7 @@ def config_set(request: Request, body: AppConfigSetBody):
    )


@router.get("/vainfo")
@router.get("/vainfo", dependencies=[Depends(allow_any_authenticated())])
def vainfo():
    vainfo = vainfo_hwaccel()
    return JSONResponse(
@ -441,12 +476,16 @@ def vainfo():
    )


@router.get("/nvinfo")
@router.get("/nvinfo", dependencies=[Depends(allow_any_authenticated())])
def nvinfo():
    return JSONResponse(content=get_nvidia_driver_info())


@router.get("/logs/{service}", tags=[Tags.logs])
@router.get(
    "/logs/{service}",
    tags=[Tags.logs],
    dependencies=[Depends(allow_any_authenticated())],
)
async def logs(
    service: str = Path(enum=["frigate", "nginx", "go2rtc"]),
    download: Optional[str] = None,
@ -554,7 +593,7 @@ def restart():
    )


@router.get("/labels")
@router.get("/labels", dependencies=[Depends(allow_any_authenticated())])
def get_labels(camera: str = ""):
    try:
        if camera:
@ -572,7 +611,7 @@ def get_labels(camera: str = ""):
    return JSONResponse(content=labels)


@router.get("/sub_labels")
@router.get("/sub_labels", dependencies=[Depends(allow_any_authenticated())])
def get_sub_labels(split_joined: Optional[int] = None):
    try:
        events = Event.select(Event.sub_label).distinct()
@ -603,7 +642,7 @@ def get_sub_labels(split_joined: Optional[int] = None):
    return JSONResponse(content=sub_labels)


@router.get("/plus/models")
@router.get("/plus/models", dependencies=[Depends(allow_any_authenticated())])
def plusModels(request: Request, filterByCurrentModelDetector: bool = False):
    if not request.app.frigate_config.plus_api.is_active():
        return JSONResponse(
@ -645,7 +684,9 @@ def plusModels(request: Request, filterByCurrentModelDetector: bool = False):
    return JSONResponse(content=validModels)


@router.get("/recognized_license_plates")
@router.get(
    "/recognized_license_plates", dependencies=[Depends(allow_any_authenticated())]
)
def get_recognized_license_plates(split_joined: Optional[int] = None):
    try:
        query = (
@ -679,7 +720,7 @@ def get_recognized_license_plates(split_joined: Optional[int] = None):
    return JSONResponse(content=recognized_license_plates)


@router.get("/timeline")
@router.get("/timeline", dependencies=[Depends(allow_any_authenticated())])
def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = None):
    clauses = []

@ -716,7 +757,7 @@ def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = N
    return JSONResponse(content=[t for t in timeline])


@router.get("/timeline/hourly")
@router.get("/timeline/hourly", dependencies=[Depends(allow_any_authenticated())])
def hourly_timeline(params: AppTimelineHourlyQueryParameters = Depends()):
    """Get hourly summary for timeline."""
    cameras = params.cameras

@ -32,10 +32,178 @@ from frigate.models import User
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def require_admin_by_default():
|
||||
"""
|
||||
Global admin requirement dependency for all endpoints by default.
|
||||
|
||||
This is set as the default dependency on the FastAPI app to ensure all
|
||||
endpoints require admin access unless explicitly overridden with
|
||||
allow_public(), allow_any_authenticated(), or require_role().
|
||||
|
||||
Port 5000 (internal) always has admin role set by the /auth endpoint,
|
||||
so this check passes automatically for internal requests.
|
||||
|
||||
Certain paths are exempted from the global admin check because they must
|
||||
be accessible before authentication (login, auth) or they have their own
|
||||
route-level authorization dependencies that handle access control.
|
||||
"""
|
||||
# Paths that have route-level auth dependencies and should bypass global admin check
|
||||
# These paths still have authorization - it's handled by their route-level dependencies
|
||||
EXEMPT_PATHS = {
|
||||
# Public auth endpoints (allow_public)
|
||||
"/auth",
|
||||
"/auth/first_time_login",
|
||||
"/login",
|
||||
# Authenticated user endpoints (allow_any_authenticated)
|
||||
"/logout",
|
||||
"/profile",
|
||||
# Public info endpoints (allow_public)
|
||||
"/",
|
||||
"/version",
|
||||
"/config/schema.json",
|
||||
# Authenticated user endpoints (allow_any_authenticated)
|
||||
"/metrics",
|
||||
"/stats",
|
||||
"/stats/history",
|
||||
"/config",
|
||||
"/config/raw",
|
||||
"/vainfo",
|
||||
"/nvinfo",
|
||||
"/labels",
|
||||
"/sub_labels",
|
||||
"/plus/models",
|
||||
"/recognized_license_plates",
|
||||
"/timeline",
|
||||
"/timeline/hourly",
|
||||
"/recordings/storage",
|
||||
"/recordings/summary",
|
||||
"/recordings/unavailable",
|
||||
"/go2rtc/streams",
|
||||
"/event_ids",
|
||||
"/events",
|
||||
"/exports",
|
||||
}
|
||||
|
||||
# Path prefixes that should be exempt (for paths with parameters)
|
||||
EXEMPT_PREFIXES = (
|
||||
"/logs/", # /logs/{service}
|
||||
"/review", # /review, /review/{id}, /review/summary, /review_ids, etc.
|
||||
"/reviews/", # /reviews/viewed, /reviews/delete
|
||||
"/events/", # /events/{id}/thumbnail, /events/summary, etc. (camera-scoped)
|
||||
"/export/", # /export/{camera}/start/..., /export/{id}/rename, /export/{id}
|
||||
"/go2rtc/streams/", # /go2rtc/streams/{camera}
|
||||
"/users/", # /users/{username}/password (has own auth)
|
||||
"/preview/", # /preview/{file}/thumbnail.jpg
|
||||
"/exports/", # /exports/{export_id}
|
||||
"/vod/", # /vod/{camera_name}/...
|
||||
"/notifications/", # /notifications/pubkey, /notifications/register
|
||||
)
|
||||
|
||||
async def admin_checker(request: Request):
|
||||
path = request.url.path
|
||||
|
||||
# Check exact path matches
|
||||
if path in EXEMPT_PATHS:
|
||||
return
|
||||
|
||||
# Check prefix matches for parameterized paths
|
||||
if path.startswith(EXEMPT_PREFIXES):
|
||||
return
|
||||
|
||||
# Dynamic camera path exemption:
|
||||
# Any path whose first segment matches a configured camera name should
|
||||
# bypass the global admin requirement. These endpoints enforce access
|
||||
# via route-level dependencies (e.g. require_camera_access) to ensure
|
||||
# per-camera authorization. This allows non-admin authenticated users
|
||||
# (e.g. viewer role) to access camera-specific resources without
|
||||
# needing admin privileges.
|
||||
try:
|
||||
if path.startswith("/"):
|
||||
first_segment = path.split("/", 2)[1]
|
||||
if (
|
||||
first_segment
|
||||
and first_segment in request.app.frigate_config.cameras
|
||||
):
|
||||
return
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# For all other paths, require admin role
|
||||
# Port 5000 (internal) requests have admin role set automatically
|
||||
role = request.headers.get("remote-role")
|
||||
if role == "admin":
|
||||
return
|
||||
|
||||
raise HTTPException(
|
||||
status_code=403,
|
||||
detail="Access denied. A user with the admin role is required.",
|
||||
)
|
||||
|
||||
return admin_checker
|
||||
|
||||
|
||||
def _is_authenticated(request: Request) -> bool:
|
||||
"""
|
||||
Helper to determine if a request is from an authenticated user.
|
||||
|
||||
Returns True if the request has a valid authenticated user (not anonymous).
|
||||
Port 5000 internal requests are considered anonymous despite having admin role.
|
||||
"""
|
||||
username = request.headers.get("remote-user")
|
||||
return username is not None and username != "anonymous"
|
||||
|
||||
|
||||
def allow_public():
|
||||
"""
|
||||
Override dependency to allow unauthenticated access to an endpoint.
|
||||
|
||||
Use this for endpoints that should be publicly accessible without
|
||||
authentication, such as login page, health checks, or pre-auth info.
|
||||
|
||||
Example:
|
||||
@router.get("/public-endpoint", dependencies=[Depends(allow_public())])
|
||||
"""
|
||||
|
||||
async def public_checker(request: Request):
|
||||
return # Always allow
|
||||
|
||||
return public_checker
|
||||
|
||||
|
||||
def allow_any_authenticated():
|
||||
"""
|
||||
Override dependency to allow any authenticated user (bypass admin requirement).
|
||||
|
||||
Allows:
|
||||
- Port 5000 internal requests (have admin role despite anonymous user)
|
||||
- Any authenticated user with a real username (not "anonymous")
|
||||
|
||||
Rejects:
|
||||
- Port 8971 requests with anonymous user (auth disabled, no proxy auth)
|
||||
|
||||
Example:
|
||||
@router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())])
|
||||
"""
|
||||
|
||||
async def auth_checker(request: Request):
|
||||
# Port 5000 requests have admin role and should be allowed
|
||||
role = request.headers.get("remote-role")
|
||||
if role == "admin":
|
||||
return
|
||||
|
||||
# Otherwise require a real authenticated user (not anonymous)
|
||||
if not _is_authenticated(request):
|
||||
raise HTTPException(status_code=401, detail="Authentication required")
|
||||
return
|
||||
|
||||
return auth_checker
|
||||
|
||||
|
||||
router = APIRouter(tags=[Tags.auth])
|
||||
|
||||
|
||||
@router.get("/auth/first_time_login")
|
||||
@router.get("/auth/first_time_login", dependencies=[Depends(allow_public())])
|
||||
def first_time_login(request: Request):
|
||||
"""Return whether the admin first-time login help flag is set in config.
|
||||
|
||||
@ -352,7 +520,7 @@ def resolve_role(
|
||||
|
||||
|
||||
# Endpoints
|
||||
@router.get("/auth")
|
||||
@router.get("/auth", dependencies=[Depends(allow_public())])
|
||||
def auth(request: Request):
|
||||
auth_config: AuthConfig = request.app.frigate_config.auth
|
||||
proxy_config: ProxyConfig = request.app.frigate_config.proxy
|
||||
@ -478,7 +646,7 @@ def auth(request: Request):
|
||||
return fail_response
|
||||
|
||||
|
||||
@router.get("/profile")
|
||||
@router.get("/profile", dependencies=[Depends(allow_any_authenticated())])
|
||||
def profile(request: Request):
|
||||
username = request.headers.get("remote-user", "anonymous")
|
||||
role = request.headers.get("remote-role", "viewer")
|
||||
@ -492,7 +660,7 @@ def profile(request: Request):
|
||||
)
|
||||
|
||||
|
||||
@router.get("/logout")
|
||||
@router.get("/logout", dependencies=[Depends(allow_any_authenticated())])
|
||||
def logout(request: Request):
|
||||
auth_config: AuthConfig = request.app.frigate_config.auth
|
||||
response = RedirectResponse("/login", status_code=303)
|
||||
@ -503,7 +671,7 @@ def logout(request: Request):
|
||||
limiter = Limiter(key_func=get_remote_addr)
|
||||
|
||||
|
||||
@router.post("/login")
|
||||
@router.post("/login", dependencies=[Depends(allow_public())])
|
||||
@limiter.limit(limit_value=rateLimiter.get_limit)
|
||||
def login(request: Request, body: AppPostLoginBody):
|
||||
JWT_COOKIE_NAME = request.app.frigate_config.auth.cookie_name
|
||||
@ -578,13 +746,21 @@ def create_user(
|
||||
return JSONResponse(content={"username": body.username})
|
||||
|
||||
|
||||
@router.delete("/users/{username}")
|
||||
def delete_user(username: str):
|
||||
@router.delete("/users/{username}", dependencies=[Depends(require_role(["admin"]))])
|
||||
def delete_user(request: Request, username: str):
|
||||
# Prevent deletion of the built-in admin user
|
||||
if username == "admin":
|
||||
return JSONResponse(
|
||||
content={"message": "Cannot delete admin user"}, status_code=403
|
||||
)
|
||||
|
||||
User.delete_by_id(username)
|
||||
return JSONResponse(content={"success": True})
|
||||
|
||||
|
||||
@router.put("/users/{username}/password")
|
||||
@router.put(
|
||||
"/users/{username}/password", dependencies=[Depends(allow_any_authenticated())]
|
||||
)
|
||||
async def update_password(
|
||||
request: Request,
|
||||
username: str,
|
||||
|
||||
@ -3,13 +3,23 @@
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from importlib.util import find_spec
|
||||
from pathlib import Path
|
||||
from urllib.parse import quote_plus
|
||||
|
||||
import httpx
|
||||
import requests
|
||||
from fastapi import APIRouter, Depends, Request, Response
|
||||
from fastapi import APIRouter, Depends, Query, Request, Response
|
||||
from fastapi.responses import JSONResponse
|
||||
from onvif import ONVIFCamera, ONVIFError
|
||||
from zeep.exceptions import Fault, TransportError
|
||||
from zeep.transports import AsyncTransport
|
||||
|
||||
from frigate.api.auth import require_role
|
||||
from frigate.api.auth import (
|
||||
allow_any_authenticated,
|
||||
require_camera_access,
|
||||
require_role,
|
||||
)
|
||||
from frigate.api.defs.tags import Tags
|
||||
from frigate.config.config import FrigateConfig
|
||||
from frigate.util.builtin import clean_camera_user_pass
|
||||
@ -44,7 +54,7 @@ def _is_valid_host(host: str) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
@router.get("/go2rtc/streams")
|
||||
@router.get("/go2rtc/streams", dependencies=[Depends(allow_any_authenticated())])
|
||||
def go2rtc_streams():
|
||||
r = requests.get("http://127.0.0.1:1984/api/streams")
|
||||
if not r.ok:
|
||||
@ -60,7 +70,9 @@ def go2rtc_streams():
|
||||
return JSONResponse(content=stream_data)
|
||||
|
||||
|
||||
@router.get("/go2rtc/streams/{camera_name}")
|
||||
@router.get(
|
||||
"/go2rtc/streams/{camera_name}", dependencies=[Depends(require_camera_access)]
|
||||
)
|
||||
def go2rtc_camera_stream(request: Request, camera_name: str):
|
||||
r = requests.get(
|
||||
f"http://127.0.0.1:1984/api/streams?src={camera_name}&video=all&audio=allµphone"
|
||||
@ -155,7 +167,7 @@ def go2rtc_delete_stream(stream_name: str):
|
||||
)
|
||||
|
||||
|
||||
@router.get("/ffprobe")
|
||||
@router.get("/ffprobe", dependencies=[Depends(require_role(["admin"]))])
|
||||
def ffprobe(request: Request, paths: str = "", detailed: bool = False):
|
||||
path_param = paths
|
||||
|
||||
@ -452,3 +464,537 @@ def _extract_fps(r_frame_rate: str) -> float | None:
|
||||
return round(float(num) / float(den), 2)
|
||||
except (ValueError, ZeroDivisionError):
|
||||
return None
|
||||
|
||||
|
||||
@router.get(
|
||||
"/onvif/probe",
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Probe ONVIF device",
|
||||
description=(
|
||||
"Probe an ONVIF device to determine capabilities and optionally test available stream URIs. "
|
||||
"Query params: host (required), port (default 80), username, password, test (boolean), "
|
||||
"auth_type (basic or digest, default basic)."
|
||||
),
|
||||
)
|
||||
async def onvif_probe(
|
||||
request: Request,
|
||||
host: str = Query(None),
|
||||
port: int = Query(80),
|
||||
username: str = Query(""),
|
||||
password: str = Query(""),
|
||||
test: bool = Query(False),
|
||||
auth_type: str = Query("basic"), # Add auth_type parameter
|
||||
):
|
||||
"""
|
||||
Probe a single ONVIF device to determine capabilities.
|
||||
|
||||
Connects to an ONVIF device and queries for:
|
||||
- Device information (manufacturer, model)
|
||||
- Media profiles count
|
||||
- PTZ support
|
||||
- Available presets
|
||||
- Autotracking support
|
||||
|
||||
Query Parameters:
|
||||
host: Device host/IP address (required)
|
||||
port: Device port (default 80)
|
||||
username: ONVIF username (optional)
|
||||
password: ONVIF password (optional)
|
||||
test: run ffprobe on the stream (optional)
|
||||
auth_type: Authentication type - "basic" or "digest" (default "basic")
|
||||
|
||||
Returns:
|
||||
JSON with device capabilities information
|
||||
"""
|
||||
if not host:
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "host parameter is required"},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
# Validate host format
|
||||
if not _is_valid_host(host):
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Invalid host format"},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
# Validate auth_type
|
||||
if auth_type not in ["basic", "digest"]:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "auth_type must be 'basic' or 'digest'",
|
||||
},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
onvif_camera = None
|
||||
|
||||
try:
|
||||
logger.debug(f"Probing ONVIF device at {host}:{port} with {auth_type} auth")
|
||||
|
||||
try:
|
||||
wsdl_base = None
|
||||
spec = find_spec("onvif")
|
||||
if spec and getattr(spec, "origin", None):
|
||||
wsdl_base = str(Path(spec.origin).parent / "wsdl")
|
||||
except Exception:
|
||||
wsdl_base = None
|
||||
|
||||
onvif_camera = ONVIFCamera(
|
||||
host, port, username or "", password or "", wsdl_dir=wsdl_base
|
||||
)
|
||||
|
||||
# Configure digest authentication if requested
|
||||
if auth_type == "digest" and username and password:
|
||||
# Create httpx client with digest auth
|
||||
auth = httpx.DigestAuth(username, password)
|
||||
client = httpx.AsyncClient(auth=auth, timeout=10.0)
|
||||
|
||||
# Replace the transport in the zeep client
|
||||
transport = AsyncTransport(client=client)
|
||||
|
||||
# Update the xaddr before setting transport
|
||||
await onvif_camera.update_xaddrs()
|
||||
|
||||
# Replace transport in all services
|
||||
if hasattr(onvif_camera, "devicemgmt"):
|
||||
onvif_camera.devicemgmt.zeep_client.transport = transport
|
||||
if hasattr(onvif_camera, "media"):
|
||||
onvif_camera.media.zeep_client.transport = transport
|
||||
if hasattr(onvif_camera, "ptz"):
|
||||
onvif_camera.ptz.zeep_client.transport = transport
|
||||
|
||||
logger.debug("Configured digest authentication")
|
||||
else:
|
||||
await onvif_camera.update_xaddrs()
|
||||
|
||||
# Get device information
|
||||
device_info = {
|
||||
"manufacturer": "Unknown",
|
||||
"model": "Unknown",
|
||||
"firmware_version": "Unknown",
|
||||
}
|
||||
try:
|
||||
device_service = await onvif_camera.create_devicemgmt_service()
|
||||
|
||||
# Update transport for device service if digest auth
|
||||
if auth_type == "digest" and username and password:
|
||||
auth = httpx.DigestAuth(username, password)
|
||||
client = httpx.AsyncClient(auth=auth, timeout=10.0)
|
||||
transport = AsyncTransport(client=client)
|
||||
device_service.zeep_client.transport = transport
|
||||
|
||||
device_info_resp = await device_service.GetDeviceInformation()
|
||||
manufacturer = getattr(device_info_resp, "Manufacturer", None) or (
|
||||
device_info_resp.get("Manufacturer")
|
||||
if isinstance(device_info_resp, dict)
|
||||
else None
|
||||
)
|
||||
model = getattr(device_info_resp, "Model", None) or (
|
||||
device_info_resp.get("Model")
|
||||
if isinstance(device_info_resp, dict)
|
||||
else None
|
||||
)
|
||||
firmware = getattr(device_info_resp, "FirmwareVersion", None) or (
|
||||
device_info_resp.get("FirmwareVersion")
|
||||
if isinstance(device_info_resp, dict)
|
||||
else None
|
||||
)
|
||||
device_info.update(
|
||||
{
|
||||
"manufacturer": manufacturer or "Unknown",
|
||||
"model": model or "Unknown",
|
||||
"firmware_version": firmware or "Unknown",
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to get device info: {e}")
|
||||
|
||||
# Get media profiles
|
||||
profiles = []
|
||||
profiles_count = 0
|
||||
first_profile_token = None
|
||||
ptz_config_token = None
|
||||
try:
|
||||
media_service = await onvif_camera.create_media_service()
|
||||
|
||||
# Update transport for media service if digest auth
|
||||
if auth_type == "digest" and username and password:
|
||||
auth = httpx.DigestAuth(username, password)
|
||||
client = httpx.AsyncClient(auth=auth, timeout=10.0)
|
||||
transport = AsyncTransport(client=client)
|
||||
media_service.zeep_client.transport = transport
|
||||
|
||||
profiles = await media_service.GetProfiles()
|
||||
profiles_count = len(profiles) if profiles else 0
|
||||
if profiles and len(profiles) > 0:
|
||||
p = profiles[0]
|
||||
first_profile_token = getattr(p, "token", None) or (
|
||||
p.get("token") if isinstance(p, dict) else None
|
||||
)
|
||||
# Get PTZ configuration token from the profile
|
||||
ptz_configuration = getattr(p, "PTZConfiguration", None) or (
|
||||
p.get("PTZConfiguration") if isinstance(p, dict) else None
|
||||
)
|
||||
if ptz_configuration:
|
||||
ptz_config_token = getattr(ptz_configuration, "token", None) or (
|
||||
ptz_configuration.get("token")
|
||||
if isinstance(ptz_configuration, dict)
|
||||
else None
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to get media profiles: {e}")
|
||||
|
||||
# Check PTZ support and capabilities
|
||||
ptz_supported = False
|
||||
presets_count = 0
|
||||
autotrack_supported = False
|
||||
|
||||
try:
|
||||
ptz_service = await onvif_camera.create_ptz_service()
|
||||
|
||||
# Update transport for PTZ service if digest auth
|
||||
if auth_type == "digest" and username and password:
|
||||
auth = httpx.DigestAuth(username, password)
|
||||
client = httpx.AsyncClient(auth=auth, timeout=10.0)
|
||||
transport = AsyncTransport(client=client)
|
||||
ptz_service.zeep_client.transport = transport
|
||||
|
||||
# Check if PTZ service is available
|
||||
try:
|
||||
await ptz_service.GetServiceCapabilities()
|
||||
ptz_supported = True
|
||||
logger.debug("PTZ service is available")
|
||||
except Exception as e:
|
||||
logger.debug(f"PTZ service not available: {e}")
|
||||
ptz_supported = False
|
||||
|
||||
# Try to get presets if PTZ is supported and we have a profile
|
||||
if ptz_supported and first_profile_token:
|
||||
try:
|
||||
presets_resp = await ptz_service.GetPresets(
|
||||
{"ProfileToken": first_profile_token}
|
||||
)
|
||||
presets_count = len(presets_resp) if presets_resp else 0
|
||||
logger.debug(f"Found {presets_count} presets")
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to get presets: {e}")
|
||||
presets_count = 0
|
||||
|
||||
# Check for autotracking support - requires both FOV relative movement and MoveStatus
|
||||
if ptz_supported and first_profile_token and ptz_config_token:
|
||||
# First check for FOV relative movement support
|
||||
pt_r_fov_supported = False
|
||||
try:
|
||||
config_request = ptz_service.create_type("GetConfigurationOptions")
|
||||
config_request.ConfigurationToken = ptz_config_token
|
||||
ptz_config = await ptz_service.GetConfigurationOptions(
|
||||
config_request
|
||||
)
|
||||
|
||||
if ptz_config:
|
||||
# Check for pt-r-fov support
|
||||
spaces = getattr(ptz_config, "Spaces", None) or (
|
||||
ptz_config.get("Spaces")
|
||||
if isinstance(ptz_config, dict)
|
||||
else None
|
||||
)
|
||||
|
||||
if spaces:
|
||||
rel_pan_tilt_space = getattr(
|
||||
spaces, "RelativePanTiltTranslationSpace", None
|
||||
) or (
|
||||
spaces.get("RelativePanTiltTranslationSpace")
|
||||
if isinstance(spaces, dict)
|
||||
else None
|
||||
)
|
||||
|
||||
if rel_pan_tilt_space:
|
||||
# Look for FOV space
|
||||
for i, space in enumerate(rel_pan_tilt_space):
|
||||
uri = None
|
||||
if isinstance(space, dict):
|
||||
uri = space.get("URI")
|
||||
else:
|
||||
uri = getattr(space, "URI", None)
|
||||
|
||||
if uri and "TranslationSpaceFov" in uri:
|
||||
pt_r_fov_supported = True
|
||||
logger.debug(
|
||||
"FOV relative movement (pt-r-fov) supported"
|
||||
)
|
||||
break
|
||||
|
||||
logger.debug(f"PTZ config spaces: {ptz_config}")
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to check FOV relative movement: {e}")
|
||||
pt_r_fov_supported = False
|
||||
|
||||
# Now check for MoveStatus support via GetServiceCapabilities
|
||||
if pt_r_fov_supported:
|
||||
try:
|
||||
service_capabilities_request = ptz_service.create_type(
|
||||
"GetServiceCapabilities"
|
||||
)
|
||||
service_capabilities = await ptz_service.GetServiceCapabilities(
|
||||
service_capabilities_request
|
||||
)
|
||||
|
||||
# Look for MoveStatus in the capabilities
|
||||
move_status_capable = False
|
||||
if service_capabilities:
|
||||
# Try to find MoveStatus key recursively
|
||||
def find_move_status(obj, key="MoveStatus"):
|
||||
if isinstance(obj, dict):
|
||||
if key in obj:
|
||||
return obj[key]
|
||||
for v in obj.values():
|
||||
result = find_move_status(v, key)
|
||||
if result is not None:
|
||||
return result
|
||||
elif hasattr(obj, key):
|
||||
return getattr(obj, key)
|
||||
elif hasattr(obj, "__dict__"):
|
||||
for v in vars(obj).values():
|
||||
result = find_move_status(v, key)
|
||||
if result is not None:
|
||||
return result
|
||||
return None
|
||||
|
||||
move_status_value = find_move_status(service_capabilities)
|
||||
|
||||
# MoveStatus should return "true" if supported
|
||||
if isinstance(move_status_value, bool):
|
||||
move_status_capable = move_status_value
|
||||
elif isinstance(move_status_value, str):
|
||||
move_status_capable = (
|
||||
move_status_value.lower() == "true"
|
||||
)
|
||||
|
||||
logger.debug(f"MoveStatus capability: {move_status_value}")
|
||||
|
||||
# Autotracking is supported if both conditions are met
|
||||
autotrack_supported = pt_r_fov_supported and move_status_capable
|
||||
|
||||
if autotrack_supported:
|
||||
logger.debug(
|
||||
"Autotracking fully supported (pt-r-fov + MoveStatus)"
|
||||
)
|
||||
else:
|
||||
logger.debug(
|
||||
f"Autotracking not fully supported - pt-r-fov: {pt_r_fov_supported}, MoveStatus: {move_status_capable}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to check MoveStatus support: {e}")
|
||||
autotrack_supported = False
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to probe PTZ service: {e}")
|
||||
|
||||
result = {
|
||||
"success": True,
|
||||
"host": host,
|
||||
"port": port,
|
||||
"manufacturer": device_info["manufacturer"],
|
||||
"model": device_info["model"],
|
||||
"firmware_version": device_info["firmware_version"],
|
||||
"profiles_count": profiles_count,
|
||||
"ptz_supported": ptz_supported,
|
||||
"presets_count": presets_count,
|
||||
"autotrack_supported": autotrack_supported,
|
||||
}
|
||||
|
||||
# Gather RTSP candidates
|
||||
rtsp_candidates: list[dict] = []
|
||||
try:
|
||||
media_service = await onvif_camera.create_media_service()
|
||||
|
||||
# Update transport for media service if digest auth
|
||||
if auth_type == "digest" and username and password:
|
||||
auth = httpx.DigestAuth(username, password)
|
||||
client = httpx.AsyncClient(auth=auth, timeout=10.0)
|
||||
transport = AsyncTransport(client=client)
|
||||
media_service.zeep_client.transport = transport
|
||||
|
||||
if profiles_count and media_service:
|
||||
for p in profiles or []:
|
||||
token = getattr(p, "token", None) or (
|
||||
p.get("token") if isinstance(p, dict) else None
|
||||
)
|
||||
if not token:
|
||||
continue
|
||||
try:
|
||||
stream_setup = {
|
||||
"Stream": "RTP-Unicast",
|
||||
"Transport": {"Protocol": "RTSP"},
|
||||
}
|
||||
stream_req = {
|
||||
"ProfileToken": token,
|
||||
"StreamSetup": stream_setup,
|
||||
}
|
||||
stream_uri_resp = await media_service.GetStreamUri(stream_req)
|
||||
uri = (
|
||||
stream_uri_resp.get("Uri")
|
||||
if isinstance(stream_uri_resp, dict)
|
||||
else getattr(stream_uri_resp, "Uri", None)
|
||||
)
|
||||
if uri:
|
||||
logger.debug(
|
||||
f"GetStreamUri returned for token {token}: {uri}"
|
||||
)
|
||||
# If credentials were provided, do NOT add the unauthenticated URI.
|
||||
try:
|
||||
if isinstance(uri, str) and uri.startswith("rtsp://"):
|
||||
if username and password and "@" not in uri:
|
||||
# Inject URL-encoded credentials and add only the
|
||||
# authenticated version.
|
||||
cred = f"{quote_plus(username)}:{quote_plus(password)}@"
|
||||
injected = uri.replace(
|
||||
"rtsp://", f"rtsp://{cred}", 1
|
||||
)
|
||||
rtsp_candidates.append(
|
||||
{
|
||||
"source": "GetStreamUri",
|
||||
"profile_token": token,
|
||||
"uri": injected,
|
||||
}
|
||||
)
|
||||
else:
|
||||
# No credentials provided or URI already contains
|
||||
# credentials — add the URI as returned.
|
||||
rtsp_candidates.append(
|
||||
{
|
||||
"source": "GetStreamUri",
|
||||
"profile_token": token,
|
||||
"uri": uri,
|
||||
}
|
||||
)
|
||||
else:
|
||||
# Non-RTSP URIs (e.g., http-flv) — add as returned.
|
||||
rtsp_candidates.append(
|
||||
{
|
||||
"source": "GetStreamUri",
|
||||
"profile_token": token,
|
||||
"uri": uri,
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
f"Skipping stream URI for token {token} due to processing error: {e}"
|
||||
)
|
||||
continue
|
||||
except Exception:
|
||||
logger.debug(
|
||||
f"GetStreamUri failed for token {token}", exc_info=True
|
||||
)
|
||||
continue
|
||||
|
||||
# Add common RTSP patterns as fallback
|
||||
if not rtsp_candidates:
|
||||
common_paths = [
|
||||
"/h264",
|
||||
"/live.sdp",
|
||||
"/media.amp",
|
||||
"/Streaming/Channels/101",
|
||||
"/Streaming/Channels/1",
|
||||
"/stream1",
|
||||
"/cam/realmonitor?channel=1&subtype=0",
|
||||
"/11",
|
||||
]
|
||||
# Use URL-encoded credentials for pattern fallback URIs when provided
|
||||
auth_str = (
|
||||
f"{quote_plus(username)}:{quote_plus(password)}@"
|
||||
if username and password
|
||||
else ""
|
||||
)
|
||||
rtsp_port = 554
|
||||
for path in common_paths:
|
||||
uri = f"rtsp://{auth_str}{host}:{rtsp_port}{path}"
|
||||
rtsp_candidates.append({"source": "pattern", "uri": uri})
|
||||
except Exception:
|
||||
logger.debug("Failed to collect RTSP candidates")
|
||||
|
||||
# Optionally test RTSP candidates using ffprobe_stream
|
||||
tested_candidates = []
|
||||
if test and rtsp_candidates:
|
||||
for c in rtsp_candidates:
|
||||
uri = c["uri"]
|
||||
to_test = [uri]
|
||||
try:
|
||||
if (
|
||||
username
|
||||
and password
|
||||
and isinstance(uri, str)
|
||||
and uri.startswith("rtsp://")
|
||||
and "@" not in uri
|
||||
):
|
||||
cred = f"{quote_plus(username)}:{quote_plus(password)}@"
|
||||
cred_uri = uri.replace("rtsp://", f"rtsp://{cred}", 1)
|
||||
if cred_uri not in to_test:
|
||||
to_test.append(cred_uri)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
for test_uri in to_test:
|
||||
try:
|
||||
probe = ffprobe_stream(
|
||||
request.app.frigate_config.ffmpeg, test_uri, detailed=False
|
||||
)
|
||||
logger.debug(probe)
|
||||
ok = probe is not None and getattr(probe, "returncode", 1) == 0
|
||||
tested_candidates.append(
|
||||
{
|
||||
"uri": test_uri,
|
||||
"source": c.get("source"),
|
||||
"ok": ok,
|
||||
"profile_token": c.get("profile_token"),
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(f"Unable to probe stream: {e}")
|
||||
tested_candidates.append(
|
||||
{
|
||||
"uri": test_uri,
|
||||
"source": c.get("source"),
|
||||
"ok": False,
|
||||
"profile_token": c.get("profile_token"),
|
||||
}
|
||||
)
|
||||
|
||||
result["rtsp_candidates"] = rtsp_candidates
|
||||
if test:
|
||||
result["rtsp_tested"] = tested_candidates
|
||||
|
||||
logger.debug(f"ONVIF probe successful: {result}")
|
||||
return JSONResponse(content=result)
|
||||
|
||||
except ONVIFError as e:
|
||||
logger.warning(f"ONVIF error probing {host}:{port}: {e}")
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "ONVIF error"},
|
||||
status_code=400,
|
||||
)
|
||||
except (Fault, TransportError) as e:
|
||||
logger.warning(f"Connection error probing {host}:{port}: {e}")
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Connection error"},
|
||||
status_code=503,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"Error probing ONVIF device at {host}:{port}, {e}")
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Probe failed"},
|
||||
status_code=500,
|
||||
)
|
||||
|
||||
finally:
|
||||
# Best-effort cleanup of ONVIF camera client session
|
||||
if onvif_camera is not None:
|
||||
try:
|
||||
# Check if the camera has a close method and call it
|
||||
if hasattr(onvif_camera, "close"):
|
||||
await onvif_camera.close()
|
||||
except Exception as e:
|
||||
logger.debug(f"Error closing ONVIF camera session: {e}")
|
||||
|
||||
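A note on the credential handling above: special characters in camera passwords (for example "@" or ":") would break a bare rtsp:// URI, which is why both the fallback paths and the re-test step run the username and password through quote_plus before splicing them in. A minimal standalone sketch of the same idea (host, path, and credentials below are placeholder values, not taken from the diff):

    from urllib.parse import quote_plus

    def build_rtsp_uri(host: str, path: str, username: str = "", password: str = "", port: int = 554) -> str:
        # URL-encode credentials so characters like '@' and ':' cannot be
        # confused with the URI's own delimiters.
        auth = f"{quote_plus(username)}:{quote_plus(password)}@" if username and password else ""
        return f"rtsp://{auth}{host}:{port}{path}"

    # build_rtsp_uri("192.168.1.10", "/h264", "admin", "p@ss:word")
    # -> "rtsp://admin:p%40ss%3Aword@192.168.1.10:554/h264"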
@@ -31,14 +31,16 @@ from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera import DetectConfig
from frigate.const import CLIPS_DIR, FACE_DIR
from frigate.const import CLIPS_DIR, FACE_DIR, MODEL_CACHE_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event
from frigate.util.classification import (
    collect_object_classification_examples,
    collect_state_classification_examples,
    get_dataset_image_count,
    read_training_metadata,
)
from frigate.util.path import get_event_snapshot
from frigate.util.file import get_event_snapshot

logger = logging.getLogger(__name__)

@@ -112,9 +114,18 @@ def reclassify_face(request: Request, body: dict = None):
    context: EmbeddingsContext = request.app.embeddings
    response = context.reprocess_face(training_file)

    if not isinstance(response, dict):
        return JSONResponse(
            status_code=500,
            content={
                "success": False,
                "message": "Could not process request.",
            },
        )

    return JSONResponse(
        status_code=200 if response.get("success", True) else 400,
        content=response,
        status_code=200,
    )
@@ -531,6 +542,7 @@ def transcribe_audio(request: Request, body: AudioTranscriptionBody):
            status_code=409,  # 409 Conflict
        )
    else:
        logger.debug(f"Failed to transcribe audio, response: {response}")
        return JSONResponse(
            content={
                "success": False,
@@ -555,23 +567,59 @@ def get_classification_dataset(name: str):
    dataset_dir = os.path.join(CLIPS_DIR, sanitize_filename(name), "dataset")

    if not os.path.exists(dataset_dir):
        return JSONResponse(status_code=200, content={})
        return JSONResponse(
            status_code=200, content={"categories": {}, "training_metadata": None}
        )

    for name in os.listdir(dataset_dir):
        category_dir = os.path.join(dataset_dir, name)
    for category_name in os.listdir(dataset_dir):
        category_dir = os.path.join(dataset_dir, category_name)

        if not os.path.isdir(category_dir):
            continue

        dataset_dict[name] = []
        dataset_dict[category_name] = []

        for file in filter(
            lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))),
            os.listdir(category_dir),
        ):
            dataset_dict[name].append(file)
            dataset_dict[category_name].append(file)

    return JSONResponse(status_code=200, content=dataset_dict)
    # Get training metadata
    metadata = read_training_metadata(sanitize_filename(name))
    current_image_count = get_dataset_image_count(sanitize_filename(name))

    if metadata is None:
        training_metadata = {
            "has_trained": False,
            "last_training_date": None,
            "last_training_image_count": 0,
            "current_image_count": current_image_count,
            "new_images_count": current_image_count,
            "dataset_changed": current_image_count > 0,
        }
    else:
        last_training_count = metadata.get("last_training_image_count", 0)
        # Dataset has changed if count is different (either added or deleted images)
        dataset_changed = current_image_count != last_training_count
        # Only show positive count for new images (ignore deletions in the count display)
        new_images_count = max(0, current_image_count - last_training_count)
        training_metadata = {
            "has_trained": True,
            "last_training_date": metadata.get("last_training_date"),
            "last_training_image_count": last_training_count,
            "current_image_count": current_image_count,
            "new_images_count": new_images_count,
            "dataset_changed": dataset_changed,
        }

    return JSONResponse(
        status_code=200,
        content={
            "categories": dataset_dict,
            "training_metadata": training_metadata,
        },
    )
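The metadata block above drives the "needs retraining" hint in the UI: the dataset counts as changed whenever the image count differs from the count recorded at the last training run, while the displayed new-image count is clamped to zero so deletions never show up as a negative number. A condensed sketch of that rule (the dict shape mirrors the handler above; the function name is illustrative):

    def summarize_training_state(metadata: dict | None, current_image_count: int) -> dict:
        if metadata is None:
            # never trained: everything present counts as new
            return {
                "has_trained": False,
                "new_images_count": current_image_count,
                "dataset_changed": current_image_count > 0,
            }
        last = metadata.get("last_training_image_count", 0)
        return {
            "has_trained": True,
            # additions or deletions both mark the dataset as changed
            "dataset_changed": current_image_count != last,
            # but only additions are surfaced as a count
            "new_images_count": max(0, current_image_count - last),
        }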
@router.get(
@@ -662,12 +710,106 @@ def delete_classification_dataset_images(
        if os.path.isfile(file_path):
            os.unlink(file_path)

    if os.path.exists(folder) and not os.listdir(folder):
        os.rmdir(folder)

    return JSONResponse(
        content=({"success": True, "message": "Successfully deleted faces."}),
        content=({"success": True, "message": "Successfully deleted images."}),
        status_code=200,
    )


@router.put(
    "/classification/{name}/dataset/{old_category}/rename",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Rename a classification category",
    description="""Renames a classification category for a given classification model.
    The old category must exist and the new name must be valid. Returns a success message or an error if the name is invalid.""",
)
def rename_classification_category(
    request: Request, name: str, old_category: str, body: dict = None
):
    config: FrigateConfig = request.app.frigate_config

    if name not in config.classification.custom:
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": f"{name} is not a known classification model.",
                }
            ),
            status_code=404,
        )

    json: dict[str, Any] = body or {}
    new_category = sanitize_filename(json.get("new_category", ""))

    if not new_category:
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": "New category name is required.",
                }
            ),
            status_code=400,
        )

    old_folder = os.path.join(
        CLIPS_DIR, sanitize_filename(name), "dataset", sanitize_filename(old_category)
    )
    new_folder = os.path.join(
        CLIPS_DIR, sanitize_filename(name), "dataset", new_category
    )

    if not os.path.exists(old_folder):
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": f"Category {old_category} does not exist.",
                }
            ),
            status_code=404,
        )

    if os.path.exists(new_folder):
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": f"Category {new_category} already exists.",
                }
            ),
            status_code=400,
        )

    try:
        os.rename(old_folder, new_folder)
        return JSONResponse(
            content=(
                {
                    "success": True,
                    "message": f"Successfully renamed category to {new_category}.",
                }
            ),
            status_code=200,
        )
    except Exception as e:
        logger.error(f"Error renaming category: {e}")
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": "Failed to rename category",
                }
            ),
            status_code=500,
        )
@router.post(
    "/classification/{name}/dataset/categorize",
    response_model=GenericResponse,
@@ -723,7 +865,47 @@ def categorize_classification_image(request: Request, name: str, body: dict = No
        os.unlink(training_file)

    return JSONResponse(
        content=({"success": True, "message": "Successfully deleted faces."}),
        content=({"success": True, "message": "Successfully categorized image."}),
        status_code=200,
    )


@router.post(
    "/classification/{name}/dataset/{category}/create",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Create an empty classification category folder",
    description="""Creates an empty folder for a classification category.
    This is used to create folders for categories that don't have images yet.
    Returns a success message or an error if the name is invalid.""",
)
def create_classification_category(request: Request, name: str, category: str):
    config: FrigateConfig = request.app.frigate_config

    if name not in config.classification.custom:
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": f"{name} is not a known classification model.",
                }
            ),
            status_code=404,
        )

    category_folder = os.path.join(
        CLIPS_DIR, sanitize_filename(name), "dataset", sanitize_filename(category)
    )

    os.makedirs(category_folder, exist_ok=True)

    return JSONResponse(
        content=(
            {
                "success": True,
                "message": f"Successfully created category folder: {category}",
            }
        ),
        status_code=200,
    )

@@ -761,7 +943,7 @@ def delete_classification_train_images(request: Request, name: str, body: dict =
        os.unlink(file_path)

    return JSONResponse(
        content=({"success": True, "message": "Successfully deleted faces."}),
        content=({"success": True, "message": "Successfully deleted images."}),
        status_code=200,
    )

@@ -804,3 +986,44 @@ async def generate_object_examples(request: Request, body: GenerateObjectExample
        content={"success": True, "message": "Example generation completed"},
        status_code=200,
    )


@router.delete(
    "/classification/{name}",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Delete a classification model",
    description="""Deletes a specific classification model and all its associated data.
    Works even if the model is not in the config (e.g., partially created during wizard).
    Returns a success message.""",
)
def delete_classification_model(request: Request, name: str):
    sanitized_name = sanitize_filename(name)

    # Delete the classification model's data directory in clips
    data_dir = os.path.join(CLIPS_DIR, sanitized_name)
    if os.path.exists(data_dir):
        try:
            shutil.rmtree(data_dir)
            logger.info(f"Deleted classification data directory for {name}")
        except Exception as e:
            logger.debug(f"Failed to delete data directory for {name}: {e}")

    # Delete the classification model's files in model_cache
    model_dir = os.path.join(MODEL_CACHE_DIR, sanitized_name)
    if os.path.exists(model_dir):
        try:
            shutil.rmtree(model_dir)
            logger.info(f"Deleted classification model directory for {name}")
        except Exception as e:
            logger.debug(f"Failed to delete model directory for {name}: {e}")

    return JSONResponse(
        content=(
            {
                "success": True,
                "message": f"Successfully deleted classification model {name}.",
            }
        ),
        status_code=200,
    )
@@ -2,6 +2,7 @@

import base64
import datetime
import json
import logging
import os
import random
@@ -21,6 +22,7 @@ from peewee import JOIN, DoesNotExist, fn, operator
from playhouse.shortcuts import model_to_dict

from frigate.api.auth import (
    allow_any_authenticated,
    get_allowed_cameras_for_filter,
    require_camera_access,
    require_role,
@@ -57,8 +59,8 @@ from frigate.const import CLIPS_DIR, TRIGGER_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event, ReviewSegment, Timeline, Trigger
from frigate.track.object_processing import TrackedObject
from frigate.util.builtin import get_tz_modifiers
from frigate.util.path import get_event_thumbnail_bytes
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.time import get_dst_transitions, get_tz_modifiers

logger = logging.getLogger(__name__)
@@ -68,6 +70,7 @@ router = APIRouter(tags=[Tags.events])
@router.get(
    "/events",
    response_model=list[EventResponse],
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get events",
    description="Returns a list of events.",
)
@@ -342,6 +345,7 @@ def events(
@router.get(
    "/events/explore",
    response_model=list[EventResponse],
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get summary of objects.",
    description="""Gets a summary of objects from the database.
    Returns a list of objects with a max of `limit` objects for each label.
@@ -434,6 +438,7 @@ def events_explore(
@router.get(
    "/event_ids",
    response_model=list[EventResponse],
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get events by ids.",
    description="""Gets events by a list of ids.
    Returns a list of events.
@@ -467,6 +472,7 @@ async def event_ids(ids: str, request: Request):

@router.get(
    "/events/search",
    dependencies=[Depends(allow_any_authenticated())],
    summary="Search events.",
    description="""Searches for events in the database.
    Returns a list of events.
@@ -807,13 +813,12 @@ def events_search(
    return JSONResponse(content=processed_events)
@router.get("/events/summary")
|
||||
@router.get("/events/summary", dependencies=[Depends(allow_any_authenticated())])
|
||||
def events_summary(
|
||||
params: EventsSummaryQueryParams = Depends(),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
tz_name = params.timezone
|
||||
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(tz_name)
|
||||
has_clip = params.has_clip
|
||||
has_snapshot = params.has_snapshot
|
||||
|
||||
@ -828,38 +833,97 @@ def events_summary(
|
||||
if len(clauses) == 0:
|
||||
clauses.append((True))
|
||||
|
||||
groups = (
|
||||
time_range_query = (
|
||||
Event.select(
|
||||
Event.camera,
|
||||
Event.label,
|
||||
Event.sub_label,
|
||||
Event.data,
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Event.start_time, "unixepoch", hour_modifier, minute_modifier
|
||||
),
|
||||
).alias("day"),
|
||||
Event.zones,
|
||||
fn.COUNT(Event.id).alias("count"),
|
||||
fn.MIN(Event.start_time).alias("min_time"),
|
||||
fn.MAX(Event.start_time).alias("max_time"),
|
||||
)
|
||||
.where(reduce(operator.and_, clauses) & (Event.camera << allowed_cameras))
|
||||
.group_by(
|
||||
Event.camera,
|
||||
Event.label,
|
||||
Event.sub_label,
|
||||
Event.data,
|
||||
(Event.start_time + seconds_offset).cast("int") / (3600 * 24),
|
||||
Event.zones,
|
||||
)
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
return JSONResponse(content=[e for e in groups.dicts()])
|
||||
min_time = time_range_query.get("min_time")
|
||||
max_time = time_range_query.get("max_time")
|
||||
|
||||
if min_time is None or max_time is None:
|
||||
return JSONResponse(content=[])
|
||||
|
||||
dst_periods = get_dst_transitions(tz_name, min_time, max_time)
|
||||
|
||||
grouped: dict[tuple, dict] = {}
|
||||
|
||||
for period_start, period_end, period_offset in dst_periods:
|
||||
hours_offset = int(period_offset / 60 / 60)
|
||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||
period_hour_modifier = f"{hours_offset} hour"
|
||||
period_minute_modifier = f"{minutes_offset} minute"
|
||||
|
||||
period_groups = (
|
||||
Event.select(
|
||||
Event.camera,
|
||||
Event.label,
|
||||
Event.sub_label,
|
||||
Event.data,
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Event.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("day"),
|
||||
Event.zones,
|
||||
fn.COUNT(Event.id).alias("count"),
|
||||
)
|
||||
.where(
|
||||
reduce(operator.and_, clauses)
|
||||
& (Event.camera << allowed_cameras)
|
||||
& (Event.start_time >= period_start)
|
||||
& (Event.start_time <= period_end)
|
||||
)
|
||||
.group_by(
|
||||
Event.camera,
|
||||
Event.label,
|
||||
Event.sub_label,
|
||||
Event.data,
|
||||
(Event.start_time + period_offset).cast("int") / (3600 * 24),
|
||||
Event.zones,
|
||||
)
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
for g in period_groups:
|
||||
key = (
|
||||
g.camera,
|
||||
g.label,
|
||||
g.sub_label,
|
||||
json.dumps(g.data, sort_keys=True) if g.data is not None else None,
|
||||
g.day,
|
||||
json.dumps(g.zones, sort_keys=True) if g.zones is not None else None,
|
||||
)
|
||||
|
||||
if key in grouped:
|
||||
grouped[key]["count"] += int(g.count or 0)
|
||||
else:
|
||||
grouped[key] = {
|
||||
"camera": g.camera,
|
||||
"label": g.label,
|
||||
"sub_label": g.sub_label,
|
||||
"data": g.data,
|
||||
"day": g.day,
|
||||
"zones": g.zones,
|
||||
"count": int(g.count or 0),
|
||||
}
|
||||
|
||||
return JSONResponse(content=sorted(grouped.values(), key=lambda x: x["day"]))
|
||||
|
||||
|
||||
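The rewritten summary endpoints in this change all follow the same pattern: find the min/max timestamps once, ask get_dst_transitions for (period_start, period_end, utc_offset_seconds) tuples, run one grouped query per period with that period's fixed offset, and merge rows that land on the same day key at a DST boundary. A rough sketch of what such a transition splitter could look like using only the standard library; this illustrates the contract inferred from the call sites above, not Frigate's actual implementation:

    from datetime import datetime
    from zoneinfo import ZoneInfo

    def split_by_utc_offset(tz_name: str, start_ts: float, end_ts: float):
        """Return (period_start, period_end, offset_seconds) tuples, constant offset per period."""
        tz = ZoneInfo(tz_name)
        periods = []
        cursor = start_ts
        offset = datetime.fromtimestamp(cursor, tz).utcoffset().total_seconds()
        probe = cursor
        while probe < end_ts:
            probe = min(probe + 3600, end_ts)  # hourly probing is enough to catch DST jumps
            new_offset = datetime.fromtimestamp(probe, tz).utcoffset().total_seconds()
            if new_offset != offset:
                periods.append((cursor, probe, offset))
                cursor, offset = probe, new_offset
        periods.append((cursor, end_ts, offset))
        return periods

Because each period carries its own fixed offset, the per-period SQL can keep using SQLite's cheap "N hour"/"N minute" datetime modifiers instead of evaluating the timezone per row.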
@router.get(
    "/events/{event_id}",
    response_model=EventResponse,
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get event by id.",
    description="Gets an event by its id.",
)
@@ -903,6 +967,7 @@ def set_retain(event_id: str):
@router.post(
    "/events/{event_id}/plus",
    response_model=EventUploadPlusResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Send event to Frigate+.",
    description="""Sends an event to Frigate+.
    Returns a success message or an error if the event is not found.
@@ -1043,6 +1108,7 @@ async def send_to_plus(request: Request, event_id: str, body: SubmitPlusBody = N
@router.put(
    "/events/{event_id}/false_positive",
    response_model=EventUploadPlusResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Submit false positive to Frigate+",
    description="""Submit an event as a false positive to Frigate+.
    This endpoint is the same as the standard Frigate+ submission endpoint,
@@ -1695,7 +1761,7 @@ def create_trigger_embedding(
            body.data, (base64.b64encode(thumbnail).decode("ASCII"))
        )

        if embedding is None:
        if not embedding:
            return JSONResponse(
                content={
                    "success": False,
@@ -1723,9 +1789,8 @@ def create_trigger_embedding(
            logger.debug(
                f"Writing thumbnail for trigger with data {body.data} in {camera_name}."
            )
        except Exception as e:
            logger.error(e.with_traceback())
            logger.error(
        except Exception:
            logger.exception(
                f"Failed to write thumbnail for trigger with data {body.data} in {camera_name}"
            )

@@ -1749,8 +1814,8 @@ def create_trigger_embedding(
            status_code=200,
        )

    except Exception as e:
        logger.error(e.with_traceback())
    except Exception:
        logger.exception("Error creating trigger embedding")
        return JSONResponse(
            content={
                "success": False,
@@ -1831,7 +1896,7 @@ def update_trigger_embedding(
            body.data, (base64.b64encode(thumbnail).decode("ASCII"))
        )

        if embedding is None:
        if not embedding:
            return JSONResponse(
                content={
                    "success": False,
@@ -1859,9 +1924,8 @@ def update_trigger_embedding(
            logger.debug(
                f"Deleted thumbnail for trigger with data {trigger.data} in {camera_name}."
            )
        except Exception as e:
            logger.error(e.with_traceback())
            logger.error(
        except Exception:
            logger.exception(
                f"Failed to delete thumbnail for trigger with data {trigger.data} in {camera_name}"
            )

@@ -1900,9 +1964,8 @@ def update_trigger_embedding(
            logger.debug(
                f"Writing thumbnail for trigger with data {body.data} in {camera_name}."
            )
        except Exception as e:
            logger.error(e.with_traceback())
            logger.error(
        except Exception:
            logger.exception(
                f"Failed to write thumbnail for trigger with data {body.data} in {camera_name}"
            )

@@ -1914,8 +1977,8 @@ def update_trigger_embedding(
            status_code=200,
        )

    except Exception as e:
        logger.error(e.with_traceback())
    except Exception:
        logger.exception("Error updating trigger embedding")
        return JSONResponse(
            content={
                "success": False,
@@ -1975,9 +2038,8 @@ def delete_trigger_embedding(
            logger.debug(
                f"Deleted thumbnail for trigger with data {trigger.data} in {camera_name}."
            )
        except Exception as e:
            logger.error(e.with_traceback())
            logger.error(
        except Exception:
            logger.exception(
                f"Failed to delete thumbnail for trigger with data {trigger.data} in {camera_name}"
            )

@@ -1989,8 +2051,8 @@ def delete_trigger_embedding(
            status_code=200,
        )

    except Exception as e:
        logger.error(e.with_traceback())
    except Exception:
        logger.exception("Error deleting trigger embedding")
        return JSONResponse(
            content={
                "success": False,
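The trigger-embedding hunks above are a small but worthwhile cleanup: logger.error(e.with_traceback()) was broken twice over, since Exception.with_traceback() requires a traceback argument and returns the exception object rather than a formatted string. Inside an except block, logging.Logger.exception logs at ERROR level and appends the current traceback automatically:

    import logging

    logger = logging.getLogger(__name__)

    try:
        risky_operation()  # placeholder for the real work
    except Exception:
        # logs the message plus the active exception's traceback; no arguments needed
        logger.exception("Error creating trigger embedding")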
@@ -14,6 +14,7 @@ from peewee import DoesNotExist
from playhouse.shortcuts import model_to_dict

from frigate.api.auth import (
    allow_any_authenticated,
    get_allowed_cameras_for_filter,
    require_camera_access,
    require_role,
@@ -34,7 +35,7 @@ from frigate.record.export import (
    PlaybackSourceEnum,
    RecordingExporter,
)
from frigate.util.builtin import is_current_hour
from frigate.util.time import is_current_hour

logger = logging.getLogger(__name__)

@@ -44,6 +45,7 @@ router = APIRouter(tags=[Tags.export])
@router.get(
    "/exports",
    response_model=ExportsResponse,
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get exports",
    description="""Gets all exports from the database for cameras the user has access to.
    Returns a list of exports ordered by date (most recent first).""",
@@ -272,6 +274,7 @@ async def export_delete(event_id: str, request: Request):
@router.get(
    "/exports/{export_id}",
    response_model=ExportModel,
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get a single export",
    description="""Gets a specific export by ID. The user must have access to the camera
    associated with the export.""",
@@ -2,7 +2,7 @@ import logging
import re
from typing import Optional

from fastapi import FastAPI, Request
from fastapi import Depends, FastAPI, Request
from fastapi.responses import JSONResponse
from joserfc.jwk import OctKey
from playhouse.sqliteq import SqliteQueueDatabase
@@ -24,7 +24,7 @@ from frigate.api import (
    preview,
    review,
)
from frigate.api.auth import get_jwt_secret, limiter
from frigate.api.auth import get_jwt_secret, limiter, require_admin_by_default
from frigate.comms.event_metadata_updater import (
    EventMetadataPublisher,
)
@@ -62,11 +62,15 @@ def create_fastapi_app(
    stats_emitter: StatsEmitter,
    event_metadata_updater: EventMetadataPublisher,
    config_publisher: CameraConfigUpdatePublisher,
    enforce_default_admin: bool = True,
):
    logger.info("Starting FastAPI app")
    app = FastAPI(
        debug=False,
        swagger_ui_parameters={"apisSorter": "alpha", "operationsSorter": "alpha"},
        dependencies=[Depends(require_admin_by_default())]
        if enforce_default_admin
        else [],
    )

    # update the request_address with the x-forwarded-for header from nginx
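Passing dependencies= to the FastAPI constructor runs the given dependency on every route registered on the app, which is how admin-by-default gets enforced app-wide here (and skipped entirely when enforce_default_admin is False, e.g. in contexts that manage auth differently); presumably require_admin_by_default itself decides which requests actually need the admin role. A minimal sketch of the constructor-level toggle, where verify_admin is an illustrative stand-in rather than Frigate's real dependency:

    from fastapi import Depends, FastAPI, HTTPException, Request

    def verify_admin(request: Request) -> None:
        # placeholder check; a real dependency would inspect the authenticated user's role
        if request.headers.get("x-role") != "admin":
            raise HTTPException(status_code=403, detail="admin required")

    def create_app(enforce_default_admin: bool = True) -> FastAPI:
        # an app-level dependency runs for every route on the app
        return FastAPI(
            dependencies=[Depends(verify_admin)] if enforce_default_admin else [],
        )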
@@ -22,7 +22,11 @@ from pathvalidate import sanitize_filename
from peewee import DoesNotExist, fn, operator
from tzlocal import get_localzone_name

from frigate.api.auth import get_allowed_cameras_for_filter, require_camera_access
from frigate.api.auth import (
    allow_any_authenticated,
    get_allowed_cameras_for_filter,
    require_camera_access,
)
from frigate.api.defs.query.media_query_parameters import (
    Extension,
    MediaEventsSnapshotQueryParams,
@@ -44,9 +48,9 @@ from frigate.const import (
)
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
from frigate.track.object_processing import TrackedObjectProcessor
from frigate.util.builtin import get_tz_modifiers
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import get_image_from_recording
from frigate.util.path import get_event_thumbnail_bytes
from frigate.util.time import get_dst_transitions

logger = logging.getLogger(__name__)
@@ -393,7 +397,7 @@ async def submit_recording_snapshot_to_plus(
    )


@router.get("/recordings/storage")
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
def get_recordings_storage_usage(request: Request):
    recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
        "storage"
@@ -417,14 +421,13 @@ def get_recordings_storage_usage(request: Request):
    return JSONResponse(content=camera_usages)


@router.get("/recordings/summary")
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
def all_recordings_summary(
    request: Request,
    params: MediaRecordingsSummaryQueryParams = Depends(),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Returns true/false by day indicating if recordings exist"""
    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)

    cameras = params.cameras
    if cameras != "all":
@@ -432,43 +435,72 @@ def all_recordings_summary(
        filtered = requested.intersection(allowed_cameras)
        if not filtered:
            return JSONResponse(content={})
        cameras = ",".join(filtered)
        camera_list = list(filtered)
    else:
        cameras = allowed_cameras
        camera_list = allowed_cameras

    query = (
    time_range_query = (
        Recordings.select(
            fn.strftime(
                "%Y-%m-%d",
                fn.datetime(
                    Recordings.start_time + seconds_offset,
                    "unixepoch",
                    hour_modifier,
                    minute_modifier,
                ),
            ).alias("day")
            fn.MIN(Recordings.start_time).alias("min_time"),
            fn.MAX(Recordings.start_time).alias("max_time"),
        )
        .group_by(
            fn.strftime(
                "%Y-%m-%d",
                fn.datetime(
                    Recordings.start_time + seconds_offset,
                    "unixepoch",
                    hour_modifier,
                    minute_modifier,
                ),
            )
        )
        .order_by(Recordings.start_time.desc())
        .where(Recordings.camera << camera_list)
        .dicts()
        .get()
    )

    if params.cameras != "all":
        query = query.where(Recordings.camera << cameras.split(","))
    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")

    recording_days = query.namedtuples()
    days = {day.day: True for day in recording_days}
    if min_time is None or max_time is None:
        return JSONResponse(content={})

    return JSONResponse(content=days)
    dst_periods = get_dst_transitions(params.timezone, min_time, max_time)

    days: dict[str, bool] = {}

    for period_start, period_end, period_offset in dst_periods:
        hours_offset = int(period_offset / 60 / 60)
        minutes_offset = int(period_offset / 60 - hours_offset * 60)
        period_hour_modifier = f"{hours_offset} hour"
        period_minute_modifier = f"{minutes_offset} minute"

        period_query = (
            Recordings.select(
                fn.strftime(
                    "%Y-%m-%d",
                    fn.datetime(
                        Recordings.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("day")
            )
            .where(
                (Recordings.camera << camera_list)
                & (Recordings.end_time >= period_start)
                & (Recordings.start_time <= period_end)
            )
            .group_by(
                fn.strftime(
                    "%Y-%m-%d",
                    fn.datetime(
                        Recordings.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                )
            )
            .order_by(Recordings.start_time.desc())
            .namedtuples()
        )

        for g in period_query:
            days[g.day] = True

    return JSONResponse(content=dict(sorted(days.items())))
@router.get(
@@ -476,61 +508,103 @@ def all_recordings_summary(
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
    """Returns hourly summary for recordings of given camera"""
    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(timezone)
    recording_groups = (

    time_range_query = (
        Recordings.select(
            fn.strftime(
                "%Y-%m-%d %H",
                fn.datetime(
                    Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
                ),
            ).alias("hour"),
            fn.SUM(Recordings.duration).alias("duration"),
            fn.SUM(Recordings.motion).alias("motion"),
            fn.SUM(Recordings.objects).alias("objects"),
            fn.MIN(Recordings.start_time).alias("min_time"),
            fn.MAX(Recordings.start_time).alias("max_time"),
        )
        .where(Recordings.camera == camera_name)
        .group_by((Recordings.start_time + seconds_offset).cast("int") / 3600)
        .order_by(Recordings.start_time.desc())
        .namedtuples()
        .dicts()
        .get()
    )

    event_groups = (
        Event.select(
            fn.strftime(
                "%Y-%m-%d %H",
                fn.datetime(
                    Event.start_time, "unixepoch", hour_modifier, minute_modifier
                ),
            ).alias("hour"),
            fn.COUNT(Event.id).alias("count"),
    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")

    days: dict[str, dict] = {}

    if min_time is None or max_time is None:
        return JSONResponse(content=list(days.values()))

    dst_periods = get_dst_transitions(timezone, min_time, max_time)

    for period_start, period_end, period_offset in dst_periods:
        hours_offset = int(period_offset / 60 / 60)
        minutes_offset = int(period_offset / 60 - hours_offset * 60)
        period_hour_modifier = f"{hours_offset} hour"
        period_minute_modifier = f"{minutes_offset} minute"

        recording_groups = (
            Recordings.select(
                fn.strftime(
                    "%Y-%m-%d %H",
                    fn.datetime(
                        Recordings.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("hour"),
                fn.SUM(Recordings.duration).alias("duration"),
                fn.SUM(Recordings.motion).alias("motion"),
                fn.SUM(Recordings.objects).alias("objects"),
            )
            .where(
                (Recordings.camera == camera_name)
                & (Recordings.end_time >= period_start)
                & (Recordings.start_time <= period_end)
            )
            .group_by((Recordings.start_time + period_offset).cast("int") / 3600)
            .order_by(Recordings.start_time.desc())
            .namedtuples()
        )
        .where(Event.camera == camera_name, Event.has_clip)
        .group_by((Event.start_time + seconds_offset).cast("int") / 3600)
        .namedtuples()
    )

    event_map = {g.hour: g.count for g in event_groups}
        event_groups = (
            Event.select(
                fn.strftime(
                    "%Y-%m-%d %H",
                    fn.datetime(
                        Event.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("hour"),
                fn.COUNT(Event.id).alias("count"),
            )
            .where(Event.camera == camera_name, Event.has_clip)
            .where(
                (Event.start_time >= period_start) & (Event.start_time <= period_end)
            )
            .group_by((Event.start_time + period_offset).cast("int") / 3600)
            .namedtuples()
        )

    days = {}
        event_map = {g.hour: g.count for g in event_groups}

    for recording_group in recording_groups:
        parts = recording_group.hour.split()
        hour = parts[1]
        day = parts[0]
        events_count = event_map.get(recording_group.hour, 0)
        hour_data = {
            "hour": hour,
            "events": events_count,
            "motion": recording_group.motion,
            "objects": recording_group.objects,
            "duration": round(recording_group.duration),
        }
        if day not in days:
            days[day] = {"events": events_count, "hours": [hour_data], "day": day}
        else:
            days[day]["events"] += events_count
            days[day]["hours"].append(hour_data)
        for recording_group in recording_groups:
            parts = recording_group.hour.split()
            hour = parts[1]
            day = parts[0]
            events_count = event_map.get(recording_group.hour, 0)
            hour_data = {
                "hour": hour,
                "events": events_count,
                "motion": recording_group.motion,
                "objects": recording_group.objects,
                "duration": round(recording_group.duration),
            }
            if day in days:
                # merge counts if already present (edge-case at DST boundary)
                days[day]["events"] += events_count or 0
                days[day]["hours"].append(hour_data)
            else:
                days[day] = {
                    "events": events_count or 0,
                    "hours": [hour_data],
                    "day": day,
                }

    return JSONResponse(content=list(days.values()))
@@ -565,7 +639,11 @@ async def recordings(
    return JSONResponse(content=list(recordings))


@router.get("/recordings/unavailable", response_model=list[dict])
@router.get(
    "/recordings/unavailable",
    response_model=list[dict],
    dependencies=[Depends(allow_any_authenticated())],
)
async def no_recordings(
    request: Request,
    params: MediaRecordingsAvailabilityQueryParams = Depends(),
@@ -692,6 +770,15 @@ async def recording_clip(
        .order_by(Recordings.start_time.asc())
    )

    if recordings.count() == 0:
        return JSONResponse(
            content={
                "success": False,
                "message": "No recordings found for the specified time range",
            },
            status_code=400,
        )

    file_name = sanitize_filename(f"playlist_{camera_name}_{start_ts}-{end_ts}.txt")
    file_path = os.path.join(CACHE_DIR, file_name)
    with open(file_path, "w") as file:
@@ -770,6 +857,7 @@ async def vod_ts(camera_name: str, start_ts: float, end_ts: float):

    clips = []
    durations = []
    min_duration_ms = 100  # Minimum 100ms to ensure at least one video frame
    max_duration_ms = MAX_SEGMENT_DURATION * 1000

    recording: Recordings
@@ -787,11 +875,11 @@ async def vod_ts(camera_name: str, start_ts: float, end_ts: float):
        if recording.end_time > end_ts:
            duration -= int((recording.end_time - end_ts) * 1000)

        if duration <= 0:
            # skip if the clip has no valid duration
        if duration < min_duration_ms:
            # skip if the clip has no valid duration (too short to contain frames)
            continue

        if 0 < duration < max_duration_ms:
        if min_duration_ms <= duration < max_duration_ms:
            clip["keyFrameDurations"] = [duration]
            clips.append(clip)
            durations.append(duration)
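The vod_ts change replaces the old duration <= 0 guard with a 100 ms floor: a clip shorter than one frame interval yields a playlist entry with no decodable video, so anything below min_duration_ms is skipped outright and the keyFrameDurations branch only accepts durations inside the valid window. The guard reduces to a simple range check:

    MIN_DURATION_MS = 100               # shortest clip that still contains a frame
    MAX_DURATION_MS = 10 * 60 * 1000    # illustrative stand-in for MAX_SEGMENT_DURATION * 1000

    def usable_duration(duration_ms: int) -> bool:
        return MIN_DURATION_MS <= duration_ms < MAX_DURATION_MS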
@@ -857,6 +945,7 @@ async def vod_hour(

@router.get(
    "/vod/event/{event_id}",
    dependencies=[Depends(allow_any_authenticated())],
    description="Returns an HLS playlist for the specified object. Append /master.m3u8 or /index.m3u8 for HLS playback.",
)
async def vod_event(
@@ -973,7 +1062,10 @@ async def event_snapshot(
    )


@router.get("/events/{event_id}/thumbnail.{extension}")
@router.get(
    "/events/{event_id}/thumbnail.{extension}",
    dependencies=[Depends(require_camera_access)],
)
async def event_thumbnail(
    request: Request,
    event_id: str,
@@ -1171,7 +1263,10 @@ def grid_snapshot(
    )


@router.get("/events/{event_id}/snapshot-clean.webp")
@router.get(
    "/events/{event_id}/snapshot-clean.webp",
    dependencies=[Depends(require_camera_access)],
)
def event_snapshot_clean(request: Request, event_id: str, download: bool = False):
    webp_bytes = None
    try:
@@ -1295,7 +1390,9 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False
    )


@router.get("/events/{event_id}/clip.mp4")
@router.get(
    "/events/{event_id}/clip.mp4", dependencies=[Depends(require_camera_access)]
)
async def event_clip(
    request: Request,
    event_id: str,
@@ -1323,7 +1420,9 @@ async def event_clip(
    )


@router.get("/events/{event_id}/preview.gif")
@router.get(
    "/events/{event_id}/preview.gif", dependencies=[Depends(require_camera_access)]
)
def event_preview(request: Request, event_id: str):
    try:
        event: Event = Event.get(Event.id == event_id)
@@ -1676,7 +1775,7 @@ def preview_mp4(
    )


@router.get("/review/{event_id}/preview")
@router.get("/review/{event_id}/preview", dependencies=[Depends(require_camera_access)])
def review_preview(
    request: Request,
    event_id: str,
@@ -1702,8 +1801,12 @@ def review_preview(
    return preview_mp4(request, review.camera, start_ts, end_ts)


@router.get("/preview/{file_name}/thumbnail.jpg")
@router.get("/preview/{file_name}/thumbnail.webp")
@router.get(
    "/preview/{file_name}/thumbnail.jpg", dependencies=[Depends(require_camera_access)]
)
@router.get(
    "/preview/{file_name}/thumbnail.webp", dependencies=[Depends(require_camera_access)]
)
def preview_thumbnail(file_name: str):
    """Get a thumbnail from the cached preview frames."""
    if len(file_name) > 1000:
@@ -5,11 +5,12 @@ import os
from typing import Any

from cryptography.hazmat.primitives import serialization
from fastapi import APIRouter, Request
from fastapi import APIRouter, Depends, Request
from fastapi.responses import JSONResponse
from peewee import DoesNotExist
from py_vapid import Vapid01, utils

from frigate.api.auth import allow_any_authenticated
from frigate.api.defs.tags import Tags
from frigate.const import CONFIG_DIR
from frigate.models import User
@@ -21,6 +22,7 @@ router = APIRouter(tags=[Tags.notifications])

@router.get(
    "/notifications/pubkey",
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get VAPID public key",
    description="""Gets the VAPID public key for the notifications.
    Returns the public key or an error if notifications are not enabled.
@@ -47,6 +49,7 @@ def get_vapid_pub_key(request: Request):

@router.post(
    "/notifications/register",
    dependencies=[Depends(allow_any_authenticated())],
    summary="Register notifications",
    description="""Registers a notifications subscription.
    Returns a success message or an error if the subscription is not provided.
@@ -14,6 +14,7 @@ from peewee import Case, DoesNotExist, IntegrityError, fn, operator
from playhouse.shortcuts import model_to_dict

from frigate.api.auth import (
    allow_any_authenticated,
    get_allowed_cameras_for_filter,
    get_current_user,
    require_camera_access,
@@ -36,14 +37,18 @@ from frigate.config import FrigateConfig
from frigate.embeddings import EmbeddingsContext
from frigate.models import Recordings, ReviewSegment, UserReviewStatus
from frigate.review.types import SeverityEnum
from frigate.util.builtin import get_tz_modifiers
from frigate.util.time import get_dst_transitions

logger = logging.getLogger(__name__)

router = APIRouter(tags=[Tags.review])


@router.get("/review", response_model=list[ReviewSegmentResponse])
@router.get(
    "/review",
    response_model=list[ReviewSegmentResponse],
    dependencies=[Depends(allow_any_authenticated())],
)
async def review(
    params: ReviewQueryParams = Depends(),
    current_user: dict = Depends(get_current_user),
@@ -152,7 +157,11 @@ async def review(
    return JSONResponse(content=[r for r in review_query])


@router.get("/review_ids", response_model=list[ReviewSegmentResponse])
@router.get(
    "/review_ids",
    response_model=list[ReviewSegmentResponse],
    dependencies=[Depends(allow_any_authenticated())],
)
async def review_ids(request: Request, ids: str):
    ids = ids.split(",")

@@ -186,7 +195,11 @@ async def review_ids(request: Request, ids: str):
    )


@router.get("/review/summary", response_model=ReviewSummaryResponse)
@router.get(
    "/review/summary",
    response_model=ReviewSummaryResponse,
    dependencies=[Depends(allow_any_authenticated())],
)
async def review_summary(
    params: ReviewSummaryQueryParams = Depends(),
    current_user: dict = Depends(get_current_user),
@@ -197,7 +210,6 @@ async def review_summary(

    user_id = current_user["username"]

    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
    day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp()

    cameras = params.cameras
@@ -329,94 +341,144 @@ async def review_summary(
        )
        clauses.append(reduce(operator.or_, label_clauses))

    day_in_seconds = 60 * 60 * 24
    last_month_query = (
    # Find the time range of available data
    time_range_query = (
        ReviewSegment.select(
            fn.strftime(
                "%Y-%m-%d",
                fn.datetime(
                    ReviewSegment.start_time,
                    "unixepoch",
                    hour_modifier,
                    minute_modifier,
                ),
            ).alias("day"),
            fn.SUM(
                Case(
                    None,
                    [
                        (
                            (ReviewSegment.severity == SeverityEnum.alert)
                            & (UserReviewStatus.has_been_reviewed == True),
                            1,
                        )
                    ],
                    0,
                )
            ).alias("reviewed_alert"),
            fn.SUM(
                Case(
                    None,
                    [
                        (
                            (ReviewSegment.severity == SeverityEnum.detection)
                            & (UserReviewStatus.has_been_reviewed == True),
                            1,
                        )
                    ],
                    0,
                )
            ).alias("reviewed_detection"),
            fn.SUM(
                Case(
                    None,
                    [
                        (
                            (ReviewSegment.severity == SeverityEnum.alert),
                            1,
                        )
                    ],
                    0,
                )
            ).alias("total_alert"),
            fn.SUM(
                Case(
                    None,
                    [
                        (
                            (ReviewSegment.severity == SeverityEnum.detection),
                            1,
                        )
                    ],
                    0,
                )
            ).alias("total_detection"),
        )
        .left_outer_join(
            UserReviewStatus,
            on=(
                (ReviewSegment.id == UserReviewStatus.review_segment)
                & (UserReviewStatus.user_id == user_id)
            ),
            fn.MIN(ReviewSegment.start_time).alias("min_time"),
            fn.MAX(ReviewSegment.start_time).alias("max_time"),
        )
        .where(reduce(operator.and_, clauses) if clauses else True)
        .group_by(
            (ReviewSegment.start_time + seconds_offset).cast("int") / day_in_seconds
        )
        .order_by(ReviewSegment.start_time.desc())
        .dicts()
        .get()
    )

    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")

    data = {
        "last24Hours": last_24_query,
    }

    for e in last_month_query.dicts().iterator():
        data[e["day"]] = e
    # If no data, return early
    if min_time is None or max_time is None:
        return JSONResponse(content=data)

    # Get DST transition periods
    dst_periods = get_dst_transitions(params.timezone, min_time, max_time)

    day_in_seconds = 60 * 60 * 24

    # Query each DST period separately with the correct offset
    for period_start, period_end, period_offset in dst_periods:
        # Calculate hour/minute modifiers for this period
        hours_offset = int(period_offset / 60 / 60)
        minutes_offset = int(period_offset / 60 - hours_offset * 60)
        period_hour_modifier = f"{hours_offset} hour"
        period_minute_modifier = f"{minutes_offset} minute"

        # Build clauses including time range for this period
        period_clauses = clauses.copy()
        period_clauses.append(
            (ReviewSegment.start_time >= period_start)
            & (ReviewSegment.start_time <= period_end)
        )

        period_query = (
            ReviewSegment.select(
                fn.strftime(
                    "%Y-%m-%d",
                    fn.datetime(
                        ReviewSegment.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("day"),
                fn.SUM(
                    Case(
                        None,
                        [
                            (
                                (ReviewSegment.severity == SeverityEnum.alert)
                                & (UserReviewStatus.has_been_reviewed == True),
                                1,
                            )
                        ],
                        0,
                    )
                ).alias("reviewed_alert"),
                fn.SUM(
                    Case(
                        None,
                        [
                            (
                                (ReviewSegment.severity == SeverityEnum.detection)
                                & (UserReviewStatus.has_been_reviewed == True),
                                1,
                            )
                        ],
                        0,
                    )
                ).alias("reviewed_detection"),
                fn.SUM(
                    Case(
                        None,
                        [
                            (
                                (ReviewSegment.severity == SeverityEnum.alert),
                                1,
                            )
                        ],
                        0,
                    )
                ).alias("total_alert"),
                fn.SUM(
                    Case(
                        None,
                        [
                            (
                                (ReviewSegment.severity == SeverityEnum.detection),
                                1,
                            )
                        ],
                        0,
                    )
                ).alias("total_detection"),
            )
            .left_outer_join(
                UserReviewStatus,
                on=(
                    (ReviewSegment.id == UserReviewStatus.review_segment)
                    & (UserReviewStatus.user_id == user_id)
                ),
            )
            .where(reduce(operator.and_, period_clauses))
            .group_by(
                (ReviewSegment.start_time + period_offset).cast("int") / day_in_seconds
            )
            .order_by(ReviewSegment.start_time.desc())
        )

        # Merge results from this period
        for e in period_query.dicts().iterator():
            day_key = e["day"]
            if day_key in data:
                # Merge counts if day already exists (edge case at DST boundary)
                data[day_key]["reviewed_alert"] += e["reviewed_alert"] or 0
                data[day_key]["reviewed_detection"] += e["reviewed_detection"] or 0
                data[day_key]["total_alert"] += e["total_alert"] or 0
                data[day_key]["total_detection"] += e["total_detection"] or 0
            else:
                data[day_key] = e

    return JSONResponse(content=data)
@router.post("/reviews/viewed", response_model=GenericResponse)
|
||||
@router.post(
|
||||
"/reviews/viewed",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
)
|
||||
async def set_multiple_reviewed(
|
||||
request: Request,
|
||||
body: ReviewModifyMultipleBody,
|
||||
@ -515,7 +577,9 @@ def delete_reviews(body: ReviewModifyMultipleBody):
|
||||
|
||||
|
||||
@router.get(
|
||||
"/review/activity/motion", response_model=list[ReviewActivityMotionResponse]
|
||||
"/review/activity/motion",
|
||||
response_model=list[ReviewActivityMotionResponse],
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
)
|
||||
def motion_activity(
|
||||
params: ReviewActivityMotionQueryParams = Depends(),
|
||||
@ -599,7 +663,11 @@ def motion_activity(
|
||||
return JSONResponse(content=normalized)
|
||||
|
||||
|
||||
@router.get("/review/event/{event_id}", response_model=ReviewSegmentResponse)
|
||||
@router.get(
|
||||
"/review/event/{event_id}",
|
||||
response_model=ReviewSegmentResponse,
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
)
|
||||
async def get_review_from_event(request: Request, event_id: str):
|
||||
try:
|
||||
review = ReviewSegment.get(
|
||||
@ -614,7 +682,11 @@ async def get_review_from_event(request: Request, event_id: str):
|
||||
)
|
||||
|
||||
|
||||
@router.get("/review/{review_id}", response_model=ReviewSegmentResponse)
|
||||
@router.get(
|
||||
"/review/{review_id}",
|
||||
response_model=ReviewSegmentResponse,
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
)
|
||||
async def get_review(request: Request, review_id: str):
|
||||
try:
|
||||
review = ReviewSegment.get(ReviewSegment.id == review_id)
|
||||
@ -627,7 +699,11 @@ async def get_review(request: Request, review_id: str):
|
||||
)
|
||||
|
||||
|
||||
@router.delete("/review/{review_id}/viewed", response_model=GenericResponse)
|
||||
@router.delete(
|
||||
"/review/{review_id}/viewed",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
)
|
||||
async def set_not_reviewed(
|
||||
review_id: str,
|
||||
current_user: dict = Depends(get_current_user),
|
||||
@ -665,6 +741,7 @@ async def set_not_reviewed(
|
||||
|
||||
@router.post(
|
||||
"/review/summarize/start/{start_ts}/end/{end_ts}",
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
description="Use GenAI to summarize review items over a period of time.",
|
||||
)
|
||||
def generate_review_summary(request: Request, start_ts: float, end_ts: float):
|
||||
|
||||
@@ -136,6 +136,7 @@ class CameraMaintainer(threading.Thread):
                self.ptz_metrics[name],
                self.region_grids[name],
                self.stop_event,
                self.config.logger,
            )
            self.camera_processes[config.name] = camera_process
            camera_process.start()
@@ -156,7 +157,11 @@ class CameraMaintainer(threading.Thread):
            self.frame_manager.create(f"{config.name}_frame{i}", frame_size)

        capture_process = CameraCapture(
            config, count, self.camera_metrics[name], self.stop_event
            config,
            count,
            self.camera_metrics[name],
            self.stop_event,
            self.config.logger,
        )
        capture_process.daemon = True
        self.capture_processes[name] = capture_process

@@ -23,6 +23,7 @@ from frigate.const import (
    NOTIFICATION_TEST,
    REQUEST_REGION_GRID,
    UPDATE_AUDIO_ACTIVITY,
    UPDATE_AUDIO_TRANSCRIPTION_STATE,
    UPDATE_BIRDSEYE_LAYOUT,
    UPDATE_CAMERA_ACTIVITY,
    UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
@@ -61,6 +62,7 @@ class Dispatcher:
        self.model_state: dict[str, ModelStatusTypesEnum] = {}
        self.embeddings_reindex: dict[str, Any] = {}
        self.birdseye_layout: dict[str, Any] = {}
        self.audio_transcription_state: str = "idle"
        self._camera_settings_handlers: dict[str, Callable] = {
            "audio": self._on_audio_command,
            "audio_transcription": self._on_audio_transcription_command,
@@ -178,6 +180,19 @@ class Dispatcher:
        def handle_model_state() -> None:
            self.publish("model_state", json.dumps(self.model_state.copy()))

        def handle_update_audio_transcription_state() -> None:
            if payload:
                self.audio_transcription_state = payload
                self.publish(
                    "audio_transcription_state",
                    json.dumps(self.audio_transcription_state),
                )

        def handle_audio_transcription_state() -> None:
            self.publish(
                "audio_transcription_state", json.dumps(self.audio_transcription_state)
            )

        def handle_update_embeddings_reindex_progress() -> None:
            self.embeddings_reindex = payload
            self.publish(
@@ -264,10 +279,12 @@ class Dispatcher:
            UPDATE_MODEL_STATE: handle_update_model_state,
            UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress,
            UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout,
            UPDATE_AUDIO_TRANSCRIPTION_STATE: handle_update_audio_transcription_state,
            NOTIFICATION_TEST: handle_notification_test,
            "restart": handle_restart,
            "embeddingsReindexProgress": handle_embeddings_reindex_progress,
            "modelState": handle_model_state,
            "audioTranscriptionState": handle_audio_transcription_state,
            "birdseyeLayout": handle_birdseye_layout,
            "onConnect": handle_on_connect,
        }
@@ -375,7 +375,19 @@ class WebPushClient(Communicator):
        ended = state == "end" or state == "genai"

        if state == "genai" and payload["after"]["data"]["metadata"]:
            title = payload["after"]["data"]["metadata"]["title"]
            base_title = payload["after"]["data"]["metadata"]["title"]
            threat_level = payload["after"]["data"]["metadata"].get(
                "potential_threat_level", 0
            )

            # Add prefix for threat levels 1 and 2
            if threat_level == 1:
                title = f"Needs Review: {base_title}"
            elif threat_level == 2:
                title = f"Security Concern: {base_title}"
            else:
                title = base_title

            message = payload["after"]["data"]["metadata"]["scene"]
        else:
            title = f"{titlecase(', '.join(sorted_objects).replace('_', ' '))}{' was' if state == 'end' else ''} detected in {titlecase(', '.join(payload['after']['data']['zones']).replace('_', ' '))}"
@@ -177,6 +177,12 @@ class CameraConfig(FrigateBaseModel):
    def ffmpeg_cmds(self) -> list[dict[str, list[str]]]:
        return self._ffmpeg_cmds

    def get_formatted_name(self) -> str:
        """Return the friendly name if set, otherwise return a formatted version of the camera name."""
        if self.friendly_name:
            return self.friendly_name
        return self.name.replace("_", " ").title() if self.name else ""

    def create_ffmpeg_cmds(self):
        if "_ffmpeg_cmds" in self:
            return

@@ -13,6 +13,9 @@ logger = logging.getLogger(__name__)

class ZoneConfig(BaseModel):
    friendly_name: Optional[str] = Field(
        None, title="Zone friendly name used in the Frigate UI."
    )
    filters: dict[str, FilterConfig] = Field(
        default_factory=dict, title="Zone filters."
    )
@@ -53,6 +56,12 @@ class ZoneConfig(BaseModel):
    def contour(self) -> np.ndarray:
        return self._contour

    def get_formatted_name(self, zone_name: str) -> str:
        """Return the friendly name if set, otherwise return a formatted version of the zone name."""
        if self.friendly_name:
            return self.friendly_name
        return zone_name.replace("_", " ").title()
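Both helpers implement the same display rule: prefer the operator-supplied friendly_name, otherwise derive a label from the config key by swapping underscores for spaces and title-casing. Expected behavior, with illustrative values:

    # CameraConfig(name="front_door", friendly_name=None).get_formatted_name()        -> "Front Door"
    # CameraConfig(name="front_door", friendly_name="Porch Cam").get_formatted_name() -> "Porch Cam"
    # ZoneConfig(friendly_name=None).get_formatted_name("side_yard")                  -> "Side Yard"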
    @field_validator("objects", mode="before")
    @classmethod
    def validate_objects(cls, v):

@@ -792,6 +792,10 @@ class FrigateConfig(FrigateBaseModel):
            # copy over auth and proxy config in case auth needs to be enforced
            safe_config["auth"] = config.get("auth", {})
            safe_config["proxy"] = config.get("proxy", {})

            # copy over database config for auth and so a new db is not created
            safe_config["database"] = config.get("database", {})

            return cls.parse_object(safe_config, **context)

        # Validate and return the config dict.
@@ -113,6 +113,7 @@ CLEAR_ONGOING_REVIEW_SEGMENTS = "clear_ongoing_review_segments"
UPDATE_CAMERA_ACTIVITY = "update_camera_activity"
UPDATE_AUDIO_ACTIVITY = "update_audio_activity"
EXPIRE_AUDIO_ACTIVITY = "expire_audio_activity"
UPDATE_AUDIO_TRANSCRIPTION_STATE = "update_audio_transcription_state"
UPDATE_EVENT_DESCRIPTION = "update_event_description"
UPDATE_REVIEW_DESCRIPTION = "update_review_description"
UPDATE_MODEL_STATE = "update_model_state"

@@ -4,7 +4,6 @@ import logging
import os

import sherpa_onnx
from faster_whisper.utils import download_model

from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import MODEL_CACHE_DIR
@@ -25,6 +24,9 @@ class AudioTranscriptionModelRunner:

        if model_size == "large":
            # use the Whisper download function instead of our own
            # Import dynamically to avoid crashes on systems without AVX support
            from faster_whisper.utils import download_model

            logger.debug("Downloading Whisper audio transcription model")
            download_model(
                size_or_id="small" if device == "cuda" else "tiny",

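The in-function import above is a deliberate pattern: modules that need AVX instructions are only loaded when actually used, so process startup survives on CPUs without them. A minimal standalone sketch of the idea (model size and device here are illustrative):

```python
# Sketch: defer a CPU-feature-sensitive import until first use.
def transcribe(audio_path: str) -> str:
    # Importing here, not at module top, avoids crashing at startup
    # on systems whose CPU lacks the instructions faster-whisper needs.
    from faster_whisper import WhisperModel

    model = WhisperModel("tiny", device="cpu")
    segments, _info = model.transcribe(audio_path)
    return " ".join(segment.text for segment in segments)
```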
@@ -14,8 +14,8 @@ from typing import Any, List, Optional, Tuple

import cv2
import numpy as np
from Levenshtein import distance, jaro_winkler
from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
from rapidfuzz.distance import JaroWinkler, Levenshtein
from shapely.geometry import Polygon

from frigate.comms.event_metadata_updater import (
@@ -1123,7 +1123,9 @@ class LicensePlateProcessingMixin:
        for i, plate in enumerate(plates):
            merged = False
            for j, cluster in enumerate(clusters):
                sims = [jaro_winkler(plate["plate"], v["plate"]) for v in cluster]
                sims = [
                    JaroWinkler.similarity(plate["plate"], v["plate"]) for v in cluster
                ]
                if len(sims) > 0:
                    avg_sim = sum(sims) / len(sims)
                    if avg_sim >= self.cluster_threshold:
@@ -1500,7 +1502,7 @@ class LicensePlateProcessingMixin:
                and current_time - data["last_seen"]
                <= self.config.cameras[camera].lpr.expire_time
            ):
                similarity = jaro_winkler(data["plate"], top_plate)
                similarity = JaroWinkler.similarity(data["plate"], top_plate)
                if similarity >= self.similarity_threshold:
                    plate_id = existing_id
                    logger.debug(
@@ -1580,7 +1582,8 @@ class LicensePlateProcessingMixin:
            for label, plates_list in self.lpr_config.known_plates.items()
            if any(
                re.match(f"^{plate}$", rep_plate)
                or distance(plate, rep_plate) <= self.lpr_config.match_distance
                or Levenshtein.distance(plate, rep_plate)
                <= self.lpr_config.match_distance
                for plate in plates_list
            )
        ),

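These hunks swap the `Levenshtein` package's `jaro_winkler`/`distance` for the `rapidfuzz` equivalents. The replacement API, for reference (output values approximate):

```python
from rapidfuzz.distance import JaroWinkler, Levenshtein

# Similarity in [0, 1]; higher means the two plate reads are more alike.
print(JaroWinkler.similarity("ABC123", "ABC128"))  # ~0.93

# Edit distance: number of single-character changes between reads.
print(Levenshtein.distance("ABC123", "ABC128"))  # 1
```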
@@ -6,15 +6,14 @@ import threading
import time
from typing import Optional

from faster_whisper import WhisperModel
from peewee import DoesNotExist

from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.const import (
    CACHE_DIR,
    MODEL_CACHE_DIR,
    UPDATE_AUDIO_TRANSCRIPTION_STATE,
    UPDATE_EVENT_DESCRIPTION,
)
from frigate.data_processing.types import PostProcessDataEnum
@@ -32,11 +31,13 @@ class AudioTranscriptionPostProcessor(PostProcessorApi):
        self,
        config: FrigateConfig,
        requestor: InterProcessRequestor,
        embeddings,
        metrics: DataProcessorMetrics,
    ):
        super().__init__(config, metrics, None)
        self.config = config
        self.requestor = requestor
        self.embeddings = embeddings
        self.recognizer = None
        self.transcription_lock = threading.Lock()
        self.transcription_thread = None
@@ -50,6 +51,9 @@ class AudioTranscriptionPostProcessor(PostProcessorApi):

    def __build_recognizer(self) -> None:
        try:
            # Import dynamically to avoid crashes on systems without AVX support
            from faster_whisper import WhisperModel

            self.recognizer = WhisperModel(
                model_size_or_path="small",
                device="cuda"
@@ -128,10 +132,7 @@ class AudioTranscriptionPostProcessor(PostProcessorApi):
            )

            # Embed the description
            self.requestor.send_data(
                EmbeddingsRequestEnum.embed_description.value,
                {"id": event_id, "description": transcription},
            )
            self.embeddings.embed_description(event_id, transcription)

        except DoesNotExist:
            logger.debug("No recording found for audio transcription post-processing")
@@ -190,6 +191,8 @@ class AudioTranscriptionPostProcessor(PostProcessorApi):
        self.transcription_running = False
        self.transcription_thread = None

        self.requestor.send_data(UPDATE_AUDIO_TRANSCRIPTION_STATE, "idle")

    def handle_request(self, topic: str, request_data: dict[str, any]) -> str | None:
        if topic == "transcribe_audio":
            event = request_data["event"]
@@ -203,6 +206,8 @@ class AudioTranscriptionPostProcessor(PostProcessorApi):

        # Mark as running and start the thread
        self.transcription_running = True
        self.requestor.send_data(UPDATE_AUDIO_TRANSCRIPTION_STATE, "processing")

        self.transcription_thread = threading.Thread(
            target=self._transcription_wrapper, args=(event,), daemon=True
        )

@@ -20,8 +20,8 @@ from frigate.genai import GenAIClient
from frigate.models import Event
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import create_thumbnail, ensure_jpeg_bytes
from frigate.util.path import get_event_thumbnail_bytes

if TYPE_CHECKING:
    from frigate.embeddings import Embeddings

@@ -12,10 +12,12 @@ from typing import Any

import cv2
from peewee import DoesNotExist
from titlecase import titlecase

from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.config.camera import CameraConfig
from frigate.config.camera.review import GenAIReviewConfig, ImageSourceEnum
from frigate.const import CACHE_DIR, CLIPS_DIR, UPDATE_REVIEW_DESCRIPTION
from frigate.data_processing.types import PostProcessDataEnum
@@ -30,6 +32,7 @@ from ..types import DataProcessorMetrics
logger = logging.getLogger(__name__)

RECORDING_BUFFER_EXTENSION_PERCENT = 0.10
MIN_RECORDING_DURATION = 10


class ReviewDescriptionProcessor(PostProcessorApi):
@@ -90,7 +93,8 @@ class ReviewDescriptionProcessor(PostProcessorApi):
        pixels_per_image = width * height
        tokens_per_image = pixels_per_image / 1250
        prompt_tokens = 3500
        available_tokens = context_size * 0.98 - prompt_tokens
        response_tokens = 300
        available_tokens = context_size - prompt_tokens - response_tokens
        max_frames = int(available_tokens / tokens_per_image)

        return min(max(max_frames, 3), 20)
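A worked example of the revised frame budget, with all input values purely illustrative:

```python
# Sketch of the frame-budget arithmetic above with illustrative values.
width, height, context_size = 640, 360, 8192
tokens_per_image = (width * height) / 1250             # ~184.3 tokens per frame
available_tokens = context_size - 3500 - 300           # reserve prompt + response
max_frames = int(available_tokens / tokens_per_image)  # 23
print(min(max(max_frames, 3), 20))                     # clamped to the 3..20 range -> 20
```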
@@ -129,7 +133,15 @@ class ReviewDescriptionProcessor(PostProcessorApi):

        if image_source == ImageSourceEnum.recordings:
            duration = final_data["end_time"] - final_data["start_time"]
            buffer_extension = duration * RECORDING_BUFFER_EXTENSION_PERCENT
            buffer_extension = min(5, duration * RECORDING_BUFFER_EXTENSION_PERCENT)

            # Ensure minimum total duration for short review items
            # This provides better context for brief events
            total_duration = duration + (2 * buffer_extension)
            if total_duration < MIN_RECORDING_DURATION:
                # Expand buffer to reach minimum duration, still respecting max of 5s per side
                additional_buffer_per_side = (MIN_RECORDING_DURATION - duration) / 2
                buffer_extension = min(5, additional_buffer_per_side)

            thumbs = self.get_recording_frames(
                camera,
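How the padding behaves under these rules, as a standalone sketch (constants inlined from above):

```python
# Sketch: per-side recording padding for a review item.
def clip_padding(duration: float) -> float:
    buffer = min(5, duration * 0.10)  # RECORDING_BUFFER_EXTENSION_PERCENT
    if duration + 2 * buffer < 10:    # MIN_RECORDING_DURATION
        # stretch short clips toward the 10s minimum, still max 5s per side
        buffer = min(5, (10 - duration) / 2)
    return buffer

print(clip_padding(4.0))   # 3.0 -> the padded clip reaches the full 10s minimum
print(clip_padding(60.0))  # 5.0 -> long clips are capped at 5s per side
```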
@@ -181,7 +193,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
                self.requestor,
                self.genai_client,
                self.review_desc_speed,
                camera,
                camera_config,
                final_data,
                thumbs,
                camera_config.review.genai,
@@ -410,7 +422,7 @@ def run_analysis(
    requestor: InterProcessRequestor,
    genai_client: GenAIClient,
    review_inference_speed: InferenceSpeed,
    camera: str,
    camera_config: CameraConfig,
    final_data: dict[str, str],
    thumbs: list[bytes],
    genai_config: GenAIReviewConfig,
@@ -418,10 +430,19 @@ def run_analysis(
    attribute_labels: list[str],
) -> None:
    start = datetime.datetime.now().timestamp()

    # Format zone names using zone config friendly names if available
    formatted_zones = []
    for zone_name in final_data["data"]["zones"]:
        if zone_name in camera_config.zones:
            formatted_zones.append(
                camera_config.zones[zone_name].get_formatted_name(zone_name)
            )

    analytics_data = {
        "id": final_data["id"],
        "camera": camera,
        "zones": final_data["data"]["zones"],
        "camera": camera_config.get_formatted_name(),
        "zones": formatted_zones,
        "start": datetime.datetime.fromtimestamp(final_data["start_time"]).strftime(
            "%A, %I:%M %p"
        ),
@@ -435,14 +456,14 @@ def run_analysis(

    for i, verified_label in enumerate(final_data["data"]["verified_objects"]):
        object_type = verified_label.replace("-verified", "").replace("_", " ")
        name = sub_labels_list[i].replace("_", " ").title()
        name = titlecase(sub_labels_list[i].replace("_", " "))
        unified_objects.append(f"{name} ({object_type})")

    for label in objects_list:
        if "-verified" in label:
            continue
        elif label in labelmap_objects:
            object_type = label.replace("_", " ").title()
            object_type = titlecase(label.replace("_", " "))

            if label in attribute_labels:
                unified_objects.append(f"{object_type} (delivery/service)")

@@ -22,7 +22,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.embeddings.util import ZScoreNormalization
from frigate.models import Event, Trigger
from frigate.util.builtin import cosine_distance
from frigate.util.path import get_event_thumbnail_bytes
from frigate.util.file import get_event_thumbnail_bytes

from ..post.api import PostProcessorApi
from ..types import DataProcessorMetrics

@@ -1,6 +1,7 @@
"""Real time processor that works with classification tflite models."""

import datetime
import json
import logging
import os
from typing import Any
@@ -21,6 +22,7 @@ from frigate.config.classification import (
)
from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR
from frigate.log import redirect_output_to_logger
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels
from frigate.util.object import box_overlaps, calculate_region

@@ -97,6 +99,42 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
        if self.inference_speed:
            self.inference_speed.update(duration)

    def _should_save_image(
        self, camera: str, detected_state: str, score: float = 1.0
    ) -> bool:
        """
        Determine if we should save the image for training.
        Save when:
        - State is changing or being verified (regardless of score)
        - Score is less than 100% (even if state matches, useful for training)
        Don't save when:
        - State is stable (matches current_state) AND score is 100%
        """
        if camera not in self.state_history:
            # First detection for this camera, save it
            return True

        verification = self.state_history[camera]
        current_state = verification.get("current_state")
        pending_state = verification.get("pending_state")

        # Save if there's a pending state change being verified
        if pending_state is not None:
            return True

        # Save if the detected state differs from the current verified state
        # (state is changing)
        if current_state is not None and detected_state != current_state:
            return True

        # If score is less than 100%, save even if state matches
        # (useful for training to improve confidence)
        if score < 1.0:
            return True

        # Don't save if state is stable (detected_state == current_state) AND score is 100%
        return False

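The save policy, spelled out on sample inputs (the `state_history` entries mirror the structure read above; `proc` stands in for a processor instance and is purely illustrative):

```python
# Illustrative calls against the policy in _should_save_image.
proc.state_history = {}
proc._should_save_image("yard", "open")           # True: first detection for camera

proc.state_history["yard"] = {"current_state": "open", "pending_state": None}
proc._should_save_image("yard", "open", 1.0)      # False: stable state, perfect score
proc._should_save_image("yard", "open", 0.85)     # True: imperfect score helps training
proc._should_save_image("yard", "closed", 1.0)    # True: state is changing
```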
    def verify_state_change(self, camera: str, detected_state: str) -> str | None:
        """
        Verify state change requires 3 consecutive identical states before publishing.
@@ -210,14 +248,16 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
            return

        if self.interpreter is None:
            write_classification_attempt(
                self.train_dir,
                cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
                "none-none",
                now,
                "unknown",
                0.0,
            )
            # When interpreter is None, always save (score is 0.0, which is < 1.0)
            if self._should_save_image(camera, "unknown", 0.0):
                write_classification_attempt(
                    self.train_dir,
                    cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
                    "none-none",
                    now,
                    "unknown",
                    0.0,
                )
            return

        input = np.expand_dims(resized_frame, axis=0)
@@ -227,18 +267,24 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
            self.tensor_output_details[0]["index"]
        )[0]
        probs = res / res.sum(axis=0)
        logger.debug(
            f"{self.model_config.name} Ran state classification with probabilities: {probs}"
        )
        best_id = np.argmax(probs)
        score = round(probs[best_id], 2)
        self.__update_metrics(datetime.datetime.now().timestamp() - now)

        write_classification_attempt(
            self.train_dir,
            cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
            "none-none",
            now,
            self.labelmap[best_id],
            score,
        )
        detected_state = self.labelmap[best_id]

        if self._should_save_image(camera, detected_state, score):
            write_classification_attempt(
                self.train_dir,
                cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
                "none-none",
                now,
                detected_state,
                score,
            )

        if score < self.model_config.threshold:
            logger.debug(
@@ -246,7 +292,6 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
            )
            return

        detected_state = self.labelmap[best_id]
        verified_state = self.verify_state_change(camera, detected_state)

        if verified_state is not None:
@@ -281,6 +326,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
        config: FrigateConfig,
        model_config: CustomClassificationConfig,
        sub_label_publisher: EventMetadataPublisher,
        requestor: InterProcessRequestor,
        metrics: DataProcessorMetrics,
    ):
        super().__init__(config, metrics)
@@ -289,6 +335,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
        self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
        self.interpreter: Interpreter | None = None
        self.sub_label_publisher = sub_label_publisher
        self.requestor = requestor
        self.tensor_input_details: dict[str, Any] | None = None
        self.tensor_output_details: dict[str, Any] | None = None
        self.classification_history: dict[str, list[tuple[str, float, float]]] = {}
@@ -398,9 +445,6 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
        if obj_data.get("end_time") is not None:
            return

        if obj_data.get("stationary"):
            return

        object_id = obj_data["id"]

        if (
@@ -418,8 +462,8 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
            obj_data["box"][2],
            obj_data["box"][3],
            max(
                obj_data["box"][1] - obj_data["box"][0],
                obj_data["box"][3] - obj_data["box"][2],
                obj_data["box"][2] - obj_data["box"][0],
                obj_data["box"][3] - obj_data["box"][1],
            ),
            1.0,
        )
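The corrected `max(...)` now measures the box's true width and height; assuming `box` is `[x1, y1, x2, y2]`, the square crop side comes from the larger dimension:

```python
# Sketch: square crop side from a bounding box (values illustrative).
box = (120, 80, 220, 260)  # x1, y1, x2, y2
width = box[2] - box[0]    # 100
height = box[3] - box[1]   # 180
print(max(width, height))  # 180 -> request a 180x180 region around the object
```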
@@ -455,6 +499,9 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
            self.tensor_output_details[0]["index"]
        )[0]
        probs = res / res.sum(axis=0)
        logger.debug(
            f"{self.model_config.name} Ran object classification with probabilities: {probs}"
        )
        best_id = np.argmax(probs)
        score = round(probs[best_id], 2)
        self.__update_metrics(datetime.datetime.now().timestamp() - now)
@@ -466,6 +513,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
            now,
            self.labelmap[best_id],
            score,
            max_files=200,
        )

        if score < self.model_config.threshold:
@@ -479,6 +527,8 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
        )

        if consensus_label is not None:
            camera = obj_data["camera"]

            if (
                self.model_config.object_config.classification_type
                == ObjectClassificationType.sub_label
@@ -487,6 +537,20 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
                    (object_id, consensus_label, consensus_score),
                    EventMetadataTypeEnum.sub_label,
                )
                self.requestor.send_data(
                    "tracked_object_update",
                    json.dumps(
                        {
                            "type": TrackedObjectUpdateTypesEnum.classification,
                            "id": object_id,
                            "camera": camera,
                            "timestamp": now,
                            "model": self.model_config.name,
                            "sub_label": consensus_label,
                            "score": consensus_score,
                        }
                    ),
                )
            elif (
                self.model_config.object_config.classification_type
                == ObjectClassificationType.attribute
@@ -500,6 +564,20 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
                    ),
                    EventMetadataTypeEnum.attribute.value,
                )
                self.requestor.send_data(
                    "tracked_object_update",
                    json.dumps(
                        {
                            "type": TrackedObjectUpdateTypesEnum.classification,
                            "id": object_id,
                            "camera": camera,
                            "timestamp": now,
                            "model": self.model_config.name,
                            "attribute": consensus_label,
                            "score": consensus_score,
                        }
                    ),
                )

    def handle_request(self, topic, request_data):
        if topic == EmbeddingsRequestEnum.reload_classification_model.value:
@@ -529,6 +607,7 @@ def write_classification_attempt(
    timestamp: float,
    label: str,
    score: float,
    max_files: int = 100,
) -> None:
    if "-" in label:
        label = label.replace("-", "_")
@@ -544,5 +623,8 @@ def write_classification_attempt(
    )

    # delete oldest face image if maximum is reached
    if len(files) > 100:
        os.unlink(os.path.join(folder, files[-1]))
    try:
        if len(files) > max_files:
            os.unlink(os.path.join(folder, files[-1]))
    except FileNotFoundError:
        pass

@@ -166,6 +166,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
        camera = obj_data["camera"]

        if not self.config.cameras[camera].face_recognition.enabled:
            logger.debug(f"Face recognition disabled for camera {camera}, skipping")
            return

        start = datetime.datetime.now().timestamp()
@@ -208,6 +209,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
            person_box = obj_data.get("box")

            if not person_box:
                logger.debug(f"No person box available for {id}")
                return

            rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
@@ -233,7 +235,8 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):

            try:
                face_frame = cv2.cvtColor(face_frame, cv2.COLOR_RGB2BGR)
            except Exception:
            except Exception as e:
                logger.debug(f"Failed to convert face frame color for {id}: {e}")
                return
        else:
            # don't run for object without attributes
@@ -251,6 +254,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):

            # no faces detected in this frame
            if not face:
                logger.debug(f"No face attributes found for {id}")
                return

            face_box = face.get("box")
@@ -274,6 +278,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
        res = self.recognizer.classify(face_frame)

        if not res:
            logger.debug(f"Face recognizer returned no result for {id}")
            self.__update_metrics(datetime.datetime.now().timestamp() - start)
            return

@@ -330,6 +335,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
    def handle_request(self, topic, request_data) -> dict[str, Any] | None:
        if topic == EmbeddingsRequestEnum.clear_face_classifier.value:
            self.recognizer.clear()
            return {"success": True, "message": "Face classifier cleared."}
        elif topic == EmbeddingsRequestEnum.recognize_face.value:
            img = cv2.imdecode(
                np.frombuffer(base64.b64decode(request_data["image"]), dtype=np.uint8),
@@ -417,7 +423,10 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
            res = self.recognizer.classify(img)

            if not res:
                return
                return {
                    "message": "Model is still training, please try again in a few moments.",
                    "success": False,
                }

            sub_label, score = res

@@ -436,6 +445,13 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
            )
            shutil.move(current_file, new_file)

            return {
                "message": f"Successfully reprocessed face. Result: {sub_label} (score: {score:.2f})",
                "success": True,
                "face_name": sub_label,
                "score": score,
            }

    def expire_object(self, object_id: str, camera: str):
        if object_id in self.person_face_history:
            self.person_face_history.pop(object_id)

@@ -3,6 +3,7 @@
import logging
import os
import platform
import threading
from abc import ABC, abstractmethod
from typing import Any

@@ -161,12 +162,12 @@ class CudaGraphRunner(BaseModelRunner):
    """

    @staticmethod
    def is_complex_model(model_type: str) -> bool:
    def is_model_supported(model_type: str) -> bool:
        # Import here to avoid circular imports
        from frigate.detectors.detector_config import ModelTypeEnum
        from frigate.embeddings.types import EnrichmentModelTypeEnum

        return model_type in [
        return model_type not in [
            ModelTypeEnum.yolonas.value,
            EnrichmentModelTypeEnum.paddleocr.value,
            EnrichmentModelTypeEnum.jina_v1.value,
@@ -234,11 +235,36 @@ class OpenVINOModelRunner(BaseModelRunner):
        # Import here to avoid circular imports
        from frigate.embeddings.types import EnrichmentModelTypeEnum

        return model_type in [EnrichmentModelTypeEnum.paddleocr.value]
        return model_type in [
            EnrichmentModelTypeEnum.paddleocr.value,
            EnrichmentModelTypeEnum.jina_v2.value,
        ]

    @staticmethod
    def is_model_npu_supported(model_type: str) -> bool:
        # Import here to avoid circular imports
        from frigate.embeddings.types import EnrichmentModelTypeEnum

        return model_type not in [
            EnrichmentModelTypeEnum.paddleocr.value,
            EnrichmentModelTypeEnum.jina_v1.value,
            EnrichmentModelTypeEnum.jina_v2.value,
            EnrichmentModelTypeEnum.arcface.value,
        ]

    def __init__(self, model_path: str, device: str, model_type: str, **kwargs):
        self.model_path = model_path
        self.device = device
        self.model_type = model_type

        if device == "NPU" and not OpenVINOModelRunner.is_model_npu_supported(
            model_type
        ):
            logger.warning(
                f"OpenVINO model {model_type} is not supported on NPU, using GPU instead"
            )
            device = "GPU"

        self.complex_model = OpenVINOModelRunner.is_complex_model(model_type)

        if not os.path.isfile(model_path):
@@ -266,6 +292,10 @@ class OpenVINOModelRunner(BaseModelRunner):
        self.infer_request = self.compiled_model.create_infer_request()
        self.input_tensor: ov.Tensor | None = None

        # Thread lock to prevent concurrent inference (needed for JinaV2 which shares
        # one runner between text and vision embeddings called from different threads)
        self._inference_lock = threading.Lock()

        if not self.complex_model:
            try:
                input_shape = self.compiled_model.inputs[0].get_shape()
@@ -309,57 +339,81 @@ class OpenVINOModelRunner(BaseModelRunner):
        Returns:
            List of output tensors
        """
        # Handle single input case for backward compatibility
        if (
            len(inputs) == 1
            and len(self.compiled_model.inputs) == 1
            and self.input_tensor is not None
        ):
            # Single input case - use the pre-allocated tensor for efficiency
            input_data = list(inputs.values())[0]
            np.copyto(self.input_tensor.data, input_data)
            self.infer_request.infer(self.input_tensor)
        else:
            if self.complex_model:
        # Lock prevents concurrent access to infer_request
        # Needed for JinaV2: genai thread (text) + embeddings thread (vision)
        with self._inference_lock:
            from frigate.embeddings.types import EnrichmentModelTypeEnum

            if self.model_type in [EnrichmentModelTypeEnum.arcface.value]:
                # For face recognition models, create a fresh infer_request
                # for each inference to avoid state pollution that causes incorrect results.
                self.infer_request = self.compiled_model.create_infer_request()

            # Handle single input case for backward compatibility
            if (
                len(inputs) == 1
                and len(self.compiled_model.inputs) == 1
                and self.input_tensor is not None
            ):
                # Single input case - use the pre-allocated tensor for efficiency
                input_data = list(inputs.values())[0]
                np.copyto(self.input_tensor.data, input_data)
                self.infer_request.infer(self.input_tensor)
            else:
                if self.complex_model:
                    try:
                        # This ensures the model starts with a clean state for each sequence
                        # Important for RNN models like PaddleOCR recognition
                        self.infer_request.reset_state()
                    except Exception:
                        # this will raise an exception for models with AUTO set as the device
                        pass

                # Multiple inputs case - set each input by name
                for input_name, input_data in inputs.items():
                    # Find the input by name and its index
                    input_port = None
                    input_index = None
                    for idx, port in enumerate(self.compiled_model.inputs):
                        if port.get_any_name() == input_name:
                            input_port = port
                            input_index = idx
                            break

                    if input_port is None:
                        raise ValueError(f"Input '{input_name}' not found in model")

                    # Create tensor with the correct element type
                    input_element_type = input_port.get_element_type()

                    # Ensure input data matches the expected dtype to prevent type mismatches
                    # that can occur with models like Jina-CLIP v2 running on OpenVINO
                    expected_dtype = input_element_type.to_dtype()
                    if input_data.dtype != expected_dtype:
                        logger.debug(
                            f"Converting input '{input_name}' from {input_data.dtype} to {expected_dtype}"
                        )
                        input_data = input_data.astype(expected_dtype)

                    input_tensor = ov.Tensor(input_element_type, input_data.shape)
                    np.copyto(input_tensor.data, input_data)

                    # Set the input tensor for the specific port index
                    self.infer_request.set_input_tensor(input_index, input_tensor)

                # Run inference
                try:
                    # This ensures the model starts with a clean state for each sequence
                    # Important for RNN models like PaddleOCR recognition
                    self.infer_request.reset_state()
                except Exception:
                    # this will raise an exception for models with AUTO set as the device
                    pass
                    self.infer_request.infer()
                except Exception as e:
                    logger.error(f"Error during OpenVINO inference: {e}")
                    return []

            # Multiple inputs case - set each input by name
            for input_name, input_data in inputs.items():
                # Find the input by name and its index
                input_port = None
                input_index = None
                for idx, port in enumerate(self.compiled_model.inputs):
                    if port.get_any_name() == input_name:
                        input_port = port
                        input_index = idx
                        break
            # Get all output tensors
            outputs = []
            for i in range(len(self.compiled_model.outputs)):
                outputs.append(self.infer_request.get_output_tensor(i).data)

                if input_port is None:
                    raise ValueError(f"Input '{input_name}' not found in model")

                # Create tensor with the correct element type
                input_element_type = input_port.get_element_type()
                input_tensor = ov.Tensor(input_element_type, input_data.shape)
                np.copyto(input_tensor.data, input_data)

                # Set the input tensor for the specific port index
                self.infer_request.set_input_tensor(input_index, input_tensor)

            # Run inference
            self.infer_request.infer()

            # Get all output tensors
            outputs = []
            for i in range(len(self.compiled_model.outputs)):
                outputs.append(self.infer_request.get_output_tensor(i).data)

            return outputs
        return outputs


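The essence of the locking change, reduced to a standalone pattern (the wrapper class below is illustrative, not Frigate code):

```python
import threading

# Sketch: serialize access to one shared inference context across threads.
class LockedRunner:
    def __init__(self, runner):
        self._runner = runner
        self._lock = threading.Lock()

    def run(self, inputs):
        # Text and vision embedding threads may call run() concurrently;
        # the lock keeps them from interleaving on the shared infer_request.
        with self._lock:
            return self._runner.run(inputs)
```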
class RKNNModelRunner(BaseModelRunner):
@@ -487,7 +541,7 @@ def get_optimized_runner(
        return OpenVINOModelRunner(model_path, device, model_type, **kwargs)

    if (
        not CudaGraphRunner.is_complex_model(model_type)
        CudaGraphRunner.is_model_supported(model_type)
        and providers[0] == "CUDAExecutionProvider"
    ):
        options[0] = {

@@ -2,7 +2,6 @@ import glob
import logging
import os
import shutil
import time
import urllib.request
import zipfile
from queue import Queue
@@ -17,7 +16,7 @@ from frigate.detectors.detector_config import (
    BaseDetectorConfig,
    ModelTypeEnum,
)
from frigate.util.model import post_process_yolo
from frigate.util.file import FileLock

logger = logging.getLogger(__name__)

@@ -55,6 +54,9 @@ class MemryXDetector(DetectionApi):
            )
            return

        # Initialize stop_event as None, will be set later by set_stop_event()
        self.stop_event = None

        model_cfg = getattr(detector_config, "model", None)

        # Check if model_type was explicitly set by the user
@@ -177,44 +179,14 @@ class MemryXDetector(DetectionApi):
            logger.error(f"Failed to initialize MemryX model: {e}")
            raise

    def _acquire_file_lock(self, lock_path: str, timeout: int = 60, poll: float = 0.2):
        """
        Create an exclusive lock file. Blocks (with polling) until it can acquire,
        or raises TimeoutError. Uses only stdlib (os.O_EXCL).
        """
        start = time.time()
        while True:
            try:
                fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
                os.close(fd)
                return
            except FileExistsError:
                if time.time() - start > timeout:
                    raise TimeoutError(f"Timeout waiting for lock: {lock_path}")
                time.sleep(poll)

    def _release_file_lock(self, lock_path: str):
        """Best-effort removal of the lock file."""
        try:
            os.remove(lock_path)
        except FileNotFoundError:
            pass

    def load_yolo_constants(self):
        base = f"{self.cache_dir}/{self.model_folder}"
        # constants for yolov9 post-processing
        self.const_A = np.load(f"{base}/_model_22_Constant_9_output_0.npy")
        self.const_B = np.load(f"{base}/_model_22_Constant_10_output_0.npy")
        self.const_C = np.load(f"{base}/_model_22_Constant_12_output_0.npy")

    def check_and_prepare_model(self):
        if not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir, exist_ok=True)

        lock_path = os.path.join(self.cache_dir, f".{self.model_folder}.lock")
        self._acquire_file_lock(lock_path)
        lock = FileLock(lock_path, timeout=60)

        try:
        with lock:
            # ---------- CASE 1: user provided a custom model path ----------
            if self.memx_model_path:
                if not self.memx_model_path.endswith(".zip"):
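The hand-rolled polling lock above is replaced by a `FileLock` context manager from `frigate.util.file`; its implementation isn't shown in this diff, but judging from usage it acquires on entry, releases on exit, and times out after `timeout` seconds. A stdlib-only sketch of such a lock:

```python
import os
import time

# Sketch of a FileLock-style context manager built on atomic O_EXCL creation.
class SimpleFileLock:
    def __init__(self, path: str, timeout: float = 60, poll: float = 0.2):
        self.path, self.timeout, self.poll = path, timeout, poll

    def __enter__(self):
        deadline = time.monotonic() + self.timeout
        while True:
            try:
                # O_EXCL makes creation atomic: exactly one process wins.
                os.close(os.open(self.path, os.O_CREAT | os.O_EXCL | os.O_RDWR))
                return self
            except FileExistsError:
                if time.monotonic() > deadline:
                    raise TimeoutError(f"Timeout waiting for lock: {self.path}")
                time.sleep(self.poll)

    def __exit__(self, *exc):
        try:
            os.remove(self.path)  # best-effort release
        except FileNotFoundError:
            pass
```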
@@ -258,7 +230,6 @@ class MemryXDetector(DetectionApi):

                # Handle post model requirements by model type
                if self.memx_model_type in [
                    ModelTypeEnum.yologeneric,
                    ModelTypeEnum.yolonas,
                    ModelTypeEnum.ssd,
                ]:
@@ -267,7 +238,10 @@ class MemryXDetector(DetectionApi):
                        f"No *_post.onnx file found in custom model zip for {self.memx_model_type.name}."
                    )
                    self.memx_post_model = post_candidates[0]
                elif self.memx_model_type == ModelTypeEnum.yolox:
                elif self.memx_model_type in [
                    ModelTypeEnum.yolox,
                    ModelTypeEnum.yologeneric,
                ]:
                    # Explicitly ignore any post model even if present
                    self.memx_post_model = None
                else:
@@ -295,8 +269,6 @@ class MemryXDetector(DetectionApi):
                    logger.info("Using cached models.")
                    self.memx_model_path = dfp_path
                    self.memx_post_model = post_path
                    if self.memx_model_type == ModelTypeEnum.yologeneric:
                        self.load_yolo_constants()
                    return

                # ---------- CASE 3: download MemryX model (no cache) ----------
@@ -325,9 +297,6 @@ class MemryXDetector(DetectionApi):
                        else None
                    )

                    if self.memx_model_type == ModelTypeEnum.yologeneric:
                        self.load_yolo_constants()

                finally:
                    if os.path.exists(zip_path):
                        try:
@@ -338,9 +307,6 @@ class MemryXDetector(DetectionApi):
                                f"Failed to remove downloaded zip {zip_path}: {e}"
                            )

        finally:
            self._release_file_lock(lock_path)

    def send_input(self, connection_id, tensor_input: np.ndarray):
        """Pre-process (if needed) and send frame to MemryX input queue"""
        if tensor_input is None:
@@ -399,26 +365,43 @@ class MemryXDetector(DetectionApi):
    def process_input(self):
        """Input callback function: wait for frames in the input queue, preprocess, and send to MX3 (return)"""
        while True:
            # Check if shutdown is requested
            if self.stop_event and self.stop_event.is_set():
                logger.debug("[process_input] Stop event detected, returning None")
                return None
            try:
                # Wait for a frame from the queue (blocking call)
                frame = self.capture_queue.get(
                    block=True
                )  # Blocks until data is available
                # Wait for a frame from the queue with timeout to check stop_event periodically
                frame = self.capture_queue.get(block=True, timeout=0.5)

                return frame

            except Exception as e:
                logger.info(f"[process_input] Error processing input: {e}")
                time.sleep(0.1)  # Prevent busy waiting in case of error
                # Silently handle queue.Empty timeouts (expected during normal operation)
                # Log any other unexpected exceptions
                if "Empty" not in str(type(e).__name__):
                    logger.warning(f"[process_input] Unexpected error: {e}")
                # Loop continues and will check stop_event at the top

    def receive_output(self):
        """Retrieve processed results from MemryX output queue + a copy of the original frame"""
        connection_id = (
            self.capture_id_queue.get()
        )  # Get the corresponding connection ID
        detections = self.output_queue.get()  # Get detections from MemryX
        try:
            # Get connection ID with timeout
            connection_id = self.capture_id_queue.get(
                block=True, timeout=1.0
            )  # Get the corresponding connection ID
            detections = self.output_queue.get()  # Get detections from MemryX

        return connection_id, detections
            return connection_id, detections

        except Exception as e:
            # On timeout or stop event, return None
            if self.stop_event and self.stop_event.is_set():
                logger.debug("[receive_output] Stop event detected, exiting")
            # Silently handle queue.Empty timeouts, they're expected during normal operation
            elif "Empty" not in str(type(e).__name__):
                logger.warning(f"[receive_output] Error receiving output: {e}")

            return None, None

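Both loops now use the same idiom: a blocking `Queue.get` with a short timeout so the stop event is observed promptly. As a standalone pattern (catching `queue.Empty` directly rather than matching on the exception's type name):

```python
import queue
import threading

# Sketch: a worker loop that drains a queue but stays responsive to shutdown.
def worker(q: queue.Queue, stop_event: threading.Event) -> None:
    while not stop_event.is_set():
        try:
            item = q.get(block=True, timeout=0.5)  # wake twice a second
        except queue.Empty:
            continue  # nothing queued; re-check the stop event
        print("processing", item)
```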
    def post_process_yolonas(self, output):
        predictions = output[0]
@@ -625,127 +608,232 @@ class MemryXDetector(DetectionApi):

        self.output_queue.put(final_detections)

    def onnx_reshape_with_allowzero(
        self, data: np.ndarray, shape: np.ndarray, allowzero: int = 0
    def _generate_anchors(self, sizes=[80, 40, 20]):
        """Generate anchor points for YOLOv9 style processing"""
        yscales = []
        xscales = []
        for s in sizes:
            r = np.arange(s) + 0.5
            yscales.append(np.repeat(r, s))
            xscales.append(np.repeat(r[None, ...], s, axis=0).flatten())

        yscales = np.concatenate(yscales)
        xscales = np.concatenate(xscales)
        anchors = np.stack([xscales, yscales], axis=1)
        return anchors

    def _generate_scales(self, sizes=[80, 40, 20]):
        """Generate scaling factors for each detection level"""
        factors = [8, 16, 32]
        s = np.concatenate([np.ones([int(s * s)]) * f for s, f in zip(sizes, factors)])
        return s[:, None]

    @staticmethod
    def _softmax(x: np.ndarray, axis: int) -> np.ndarray:
        """Efficient softmax implementation"""
        x = x - np.max(x, axis=axis, keepdims=True)
        np.exp(x, out=x)
        x /= np.sum(x, axis=axis, keepdims=True)
        return x

    def dfl(self, x: np.ndarray) -> np.ndarray:
        """Distribution Focal Loss decoding - YOLOv9 style"""
        x = x.reshape(-1, 4, 16)
        weights = np.arange(16, dtype=np.float32)
        p = self._softmax(x, axis=2)
        p = p * weights[None, None, :]
        out = np.sum(p, axis=2, keepdims=False)
        return out

    def dist2bbox(
        self, x: np.ndarray, anchors: np.ndarray, scales: np.ndarray
    ) -> np.ndarray:
        shape = shape.astype(int)
        input_shape = data.shape
        output_shape = []
        """Convert distances to bounding boxes - YOLOv9 style"""
        lt = x[:, :2]
        rb = x[:, 2:]

        for i, dim in enumerate(shape):
            if dim == 0 and allowzero == 0:
                output_shape.append(input_shape[i])  # Copy dimension from input
            else:
                output_shape.append(dim)
        x1y1 = anchors - lt
        x2y2 = anchors + rb

        # Now let NumPy infer any -1 if needed
        reshaped = np.reshape(data, output_shape)
        wh = x2y2 - x1y1
        c_xy = (x1y1 + x2y2) / 2

        return reshaped
        out = np.concatenate([c_xy, wh], axis=1)
        out = out * scales
        return out

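For intuition, DFL decoding turns each box side's 16-bin logit distribution into an expected distance: softmax over the bins, then a dot product with `0..15`. A tiny worked example:

```python
import numpy as np

# Sketch of dfl() on a single box side: 16 logits -> one expected distance.
logits = np.full(16, -10.0, dtype=np.float32)
logits[[3, 4]] = 5.0                        # probability mass split across bins 3 and 4

p = np.exp(logits - logits.max())
p /= p.sum()                                # softmax over the 16 bins
distance = float((p * np.arange(16)).sum()) # expectation over bin indices
print(round(distance, 2))                   # ~3.5
```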
    def post_process_yolo_optimized(self, outputs):
        """
        Custom YOLOv9 post-processing optimized for MemryX ONNX outputs.
        Implements DFL decoding, confidence filtering, and NMS in pure NumPy.
        """
        # YOLOv9 outputs: 6 outputs (lbox, lcls, mbox, mcls, sbox, scls)
        conv_out1, conv_out2, conv_out3, conv_out4, conv_out5, conv_out6 = outputs

        # Determine grid sizes based on input resolution
        # YOLOv9 uses 3 detection heads with strides [8, 16, 32]
        # Grid sizes = input_size / stride
        sizes = [
            self.memx_model_height
            // 8,  # Large objects (e.g., 80 for 640x640, 40 for 320x320)
            self.memx_model_height
            // 16,  # Medium objects (e.g., 40 for 640x640, 20 for 320x320)
            self.memx_model_height
            // 32,  # Small objects (e.g., 20 for 640x640, 10 for 320x320)
        ]

        # Generate anchors and scales if not already done
        if not hasattr(self, "anchors"):
            self.anchors = self._generate_anchors(sizes)
            self.scales = self._generate_scales(sizes)

        # Process outputs in YOLOv9 format: reshape and moveaxis for ONNX format
        lbox = np.moveaxis(conv_out1, 1, -1)  # Large boxes
        lcls = np.moveaxis(conv_out2, 1, -1)  # Large classes
        mbox = np.moveaxis(conv_out3, 1, -1)  # Medium boxes
        mcls = np.moveaxis(conv_out4, 1, -1)  # Medium classes
        sbox = np.moveaxis(conv_out5, 1, -1)  # Small boxes
        scls = np.moveaxis(conv_out6, 1, -1)  # Small classes

        # Determine number of classes dynamically from the class output shape
        # lcls shape should be (batch, height, width, num_classes)
        num_classes = lcls.shape[-1]

        # Validate that all class outputs have the same number of classes
        if not (mcls.shape[-1] == num_classes and scls.shape[-1] == num_classes):
            raise ValueError(
                f"Class output shapes mismatch: lcls={lcls.shape}, mcls={mcls.shape}, scls={scls.shape}"
            )

        # Concatenate boxes and classes
        boxes = np.concatenate(
            [
                lbox.reshape(-1, 64),  # 64 is for 4 bbox coords * 16 DFL bins
                mbox.reshape(-1, 64),
                sbox.reshape(-1, 64),
            ],
            axis=0,
        )

        classes = np.concatenate(
            [
                lcls.reshape(-1, num_classes),
                mcls.reshape(-1, num_classes),
                scls.reshape(-1, num_classes),
            ],
            axis=0,
        )

        # Apply sigmoid to classes
        classes = self.sigmoid(classes)

        # Apply DFL to box predictions
        boxes = self.dfl(boxes)

        # YOLOv9 postprocessing with confidence filtering and NMS
        confidence_thres = 0.4
        iou_thres = 0.6

        # Find the class with the highest score for each detection
        max_scores = np.max(classes, axis=1)  # Maximum class score for each detection
        class_ids = np.argmax(classes, axis=1)  # Index of the best class

        # Filter out detections with scores below the confidence threshold
        valid_indices = np.where(max_scores >= confidence_thres)[0]
        if len(valid_indices) == 0:
            # Return empty detections array
            final_detections = np.zeros((20, 6), np.float32)
            return final_detections

        # Select only valid detections
        valid_boxes = boxes[valid_indices]
        valid_class_ids = class_ids[valid_indices]
        valid_scores = max_scores[valid_indices]

        # Convert distances to actual bounding boxes using anchors and scales
        valid_boxes = self.dist2bbox(
            valid_boxes, self.anchors[valid_indices], self.scales[valid_indices]
        )

        # Convert bounding box coordinates from (x_center, y_center, w, h) to (x_min, y_min, x_max, y_max)
        x_center, y_center, width, height = (
            valid_boxes[:, 0],
            valid_boxes[:, 1],
            valid_boxes[:, 2],
            valid_boxes[:, 3],
        )
        x_min = x_center - width / 2
        y_min = y_center - height / 2
        x_max = x_center + width / 2
        y_max = y_center + height / 2

        # Convert to format expected by cv2.dnn.NMSBoxes: [x, y, width, height]
        boxes_for_nms = []
        scores_for_nms = []

        for i in range(len(valid_indices)):
            # Ensure coordinates are within bounds and positive
            x_min_clipped = max(0, x_min[i])
            y_min_clipped = max(0, y_min[i])
            x_max_clipped = min(self.memx_model_width, x_max[i])
            y_max_clipped = min(self.memx_model_height, y_max[i])

            width_clipped = x_max_clipped - x_min_clipped
            height_clipped = y_max_clipped - y_min_clipped

            if width_clipped > 0 and height_clipped > 0:
                boxes_for_nms.append(
                    [x_min_clipped, y_min_clipped, width_clipped, height_clipped]
                )
                scores_for_nms.append(float(valid_scores[i]))

        final_detections = np.zeros((20, 6), np.float32)

        if len(boxes_for_nms) == 0:
            return final_detections

        # Apply NMS using OpenCV
        indices = cv2.dnn.NMSBoxes(
            boxes_for_nms, scores_for_nms, confidence_thres, iou_thres
        )

        if len(indices) > 0:
            # Flatten indices if they are returned as a list of arrays
            if isinstance(indices[0], list) or isinstance(indices[0], np.ndarray):
                indices = [i[0] for i in indices]

            # Limit to top 20 detections
            indices = indices[:20]

            # Convert to Frigate format: [class_id, confidence, y_min, x_min, y_max, x_max] (normalized)
            for i, idx in enumerate(indices):
                class_id = valid_class_ids[idx]
                confidence = valid_scores[idx]

                # Get the box coordinates
                box = boxes_for_nms[idx]
                x_min_norm = box[0] / self.memx_model_width
                y_min_norm = box[1] / self.memx_model_height
                x_max_norm = (box[0] + box[2]) / self.memx_model_width
                y_max_norm = (box[1] + box[3]) / self.memx_model_height

                final_detections[i] = [
                    class_id,
                    confidence,
                    y_min_norm,  # Frigate expects y_min first
                    x_min_norm,
                    y_max_norm,
                    x_max_norm,
                ]

        return final_detections

    def process_output(self, *outputs):
        """Output callback function -- receives frames from the MX3 and triggers post-processing"""
        if self.memx_model_type == ModelTypeEnum.yologeneric:
            if not self.memx_post_model:
                conv_out1 = outputs[0]
                conv_out2 = outputs[1]
                conv_out3 = outputs[2]
                conv_out4 = outputs[3]
                conv_out5 = outputs[4]
                conv_out6 = outputs[5]
                # Use complete YOLOv9-style postprocessing (includes NMS)
                final_detections = self.post_process_yolo_optimized(outputs)

                concat_1 = self.onnx_concat([conv_out1, conv_out2], axis=1)
                concat_2 = self.onnx_concat([conv_out3, conv_out4], axis=1)
                concat_3 = self.onnx_concat([conv_out5, conv_out6], axis=1)

                shape = np.array([1, 144, -1], dtype=np.int64)

                reshaped_1 = self.onnx_reshape_with_allowzero(
                    concat_1, shape, allowzero=0
                )
                reshaped_2 = self.onnx_reshape_with_allowzero(
                    concat_2, shape, allowzero=0
                )
                reshaped_3 = self.onnx_reshape_with_allowzero(
                    concat_3, shape, allowzero=0
                )

                concat_4 = self.onnx_concat([reshaped_1, reshaped_2, reshaped_3], 2)

                axis = 1
                split_sizes = [64, 80]

                # Calculate indices at which to split
                indices = np.cumsum(split_sizes)[
                    :-1
                ]  # [64] — split before the second chunk

                # Perform split along axis 1
                split_0, split_1 = np.split(concat_4, indices, axis=axis)

                num_boxes = 2100 if self.memx_model_height == 320 else 8400
                shape1 = np.array([1, 4, 16, num_boxes])
                reshape_4 = self.onnx_reshape_with_allowzero(
                    split_0, shape1, allowzero=0
                )

                transpose_1 = reshape_4.transpose(0, 2, 1, 3)

                axis = 1  # As per ONNX softmax node

                # Subtract max for numerical stability
                x_max = np.max(transpose_1, axis=axis, keepdims=True)
                x_exp = np.exp(transpose_1 - x_max)
                x_sum = np.sum(x_exp, axis=axis, keepdims=True)
                softmax_output = x_exp / x_sum

                # Weight W from the ONNX initializer (1, 16, 1, 1) with values 0 to 15
                W = np.arange(16, dtype=np.float32).reshape(
                    1, 16, 1, 1
                )  # (1, 16, 1, 1)

                # Apply 1x1 convolution: this is a weighted sum over channels
                conv_output = np.sum(
                    softmax_output * W, axis=1, keepdims=True
                )  # shape: (1, 1, 4, 8400)

                shape2 = np.array([1, 4, num_boxes])
                reshape_5 = self.onnx_reshape_with_allowzero(
                    conv_output, shape2, allowzero=0
                )

                # ONNX Slice — get first 2 channels: [0:2] along axis 1
                slice_output1 = reshape_5[:, 0:2, :]  # Result: (1, 2, 8400)

                # Slice channels 2 to 4 → axis = 1
                slice_output2 = reshape_5[:, 2:4, :]

                # Perform Subtraction
                sub_output = self.const_A - slice_output1  # Equivalent to ONNX Sub

                # Perform the ONNX-style Add
                add_output = self.const_B + slice_output2

                sub1 = add_output - sub_output

                add1 = sub_output + add_output

                div_output = add1 / 2.0

                concat_5 = self.onnx_concat([div_output, sub1], axis=1)

                # Expand B to (1, 1, 8400) so it can broadcast across axis=1 (4 channels)
                const_C_expanded = self.const_C[:, np.newaxis, :]  # Shape: (1, 1, 8400)

                # Perform ONNX-style element-wise multiplication
                mul_output = concat_5 * const_C_expanded  # Result: (1, 4, 8400)

                sigmoid_output = self.sigmoid(split_1)
                outputs = self.onnx_concat([mul_output, sigmoid_output], axis=1)

                final_detections = post_process_yolo(
                    outputs, self.memx_model_width, self.memx_model_height
                )
            self.output_queue.put(final_detections)

        elif self.memx_model_type == ModelTypeEnum.yolonas:
@@ -762,6 +850,19 @@ class MemryXDetector(DetectionApi):
                f"{self.memx_model_type} is currently not supported for memryx. See the docs for more info on supported models."
            )

    def set_stop_event(self, stop_event):
        """Set the stop event for graceful shutdown."""
        self.stop_event = stop_event

    def shutdown(self):
        """Gracefully shutdown the MemryX accelerator"""
        try:
            if hasattr(self, "accl") and self.accl is not None:
                self.accl.shutdown()
            logger.info("MemryX accelerator shutdown complete")
        except Exception as e:
            logger.error(f"Error during MemryX shutdown: {e}")

    def detect_raw(self, tensor_input: np.ndarray):
        """Removed synchronous detect_raw() function so that we only use async"""
        return 0

@@ -29,7 +29,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event, Trigger
from frigate.types import ModelStatusTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, serialize
from frigate.util.path import get_event_thumbnail_bytes
from frigate.util.file import get_event_thumbnail_bytes

from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding
from .onnx.jina_v2_embedding import JinaV2Embedding
@@ -472,7 +472,7 @@ class Embeddings:
                )
                thumbnail_missing = True
            except DoesNotExist:
                logger.warning(
                logger.debug(
                    f"Event ID {trigger.data} for trigger {trigger_name} does not exist."
                )
                continue

@@ -62,8 +62,8 @@ from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
from frigate.genai import get_genai_client
from frigate.models import Event, Recordings, ReviewSegment, Trigger
from frigate.util.builtin import serialize
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import SharedMemoryFrameManager
from frigate.util.path import get_event_thumbnail_bytes

from .embeddings import Embeddings

@@ -158,11 +158,13 @@ class EmbeddingMaintainer(threading.Thread):
        self.realtime_processors: list[RealTimeProcessorApi] = []

        if self.config.face_recognition.enabled:
            logger.debug("Face recognition enabled, initializing FaceRealTimeProcessor")
            self.realtime_processors.append(
                FaceRealTimeProcessor(
                    self.config, self.requestor, self.event_metadata_publisher, metrics
                )
            )
            logger.debug("FaceRealTimeProcessor initialized successfully")

        if self.config.classification.bird.enabled:
            self.realtime_processors.append(
@@ -193,6 +195,7 @@ class EmbeddingMaintainer(threading.Thread):
                    self.config,
                    model_config,
                    self.event_metadata_publisher,
                    self.requestor,
                    self.metrics,
                )
            )
@@ -224,7 +227,9 @@ class EmbeddingMaintainer(threading.Thread):
            for c in self.config.cameras.values()
        ):
            self.post_processors.append(
                AudioTranscriptionPostProcessor(self.config, self.requestor, metrics)
                AudioTranscriptionPostProcessor(
                    self.config, self.requestor, self.embeddings, metrics
                )
            )

        semantic_trigger_processor: SemanticTriggerProcessor | None = None
@@ -283,44 +288,66 @@ class EmbeddingMaintainer(threading.Thread):
        logger.info("Exiting embeddings maintenance...")

    def _check_classification_config_updates(self) -> None:
        """Check for classification config updates and add new processors."""
        """Check for classification config updates and add/remove processors."""
        topic, model_config = self.classification_config_subscriber.check_for_update()

        if topic and model_config:
        if topic:
            model_name = topic.split("/")[-1]
            self.config.classification.custom[model_name] = model_config

            # Check if processor already exists
            for processor in self.realtime_processors:
                if isinstance(
                    processor,
                    (
                        CustomStateClassificationProcessor,
                        CustomObjectClassificationProcessor,
                    ),
                ):
                    if processor.model_config.name == model_name:
                        logger.debug(
                            f"Classification processor for model {model_name} already exists, skipping"
            if model_config is None:
                self.realtime_processors = [
                    processor
                    for processor in self.realtime_processors
                    if not (
                        isinstance(
                            processor,
                            (
                                CustomStateClassificationProcessor,
                                CustomObjectClassificationProcessor,
                            ),
                        )
                        return
                        and processor.model_config.name == model_name
                    )
                ]

            if model_config.state_config is not None:
                processor = CustomStateClassificationProcessor(
                    self.config, model_config, self.requestor, self.metrics
                logger.info(
                    f"Successfully removed classification processor for model: {model_name}"
                )
            else:
                processor = CustomObjectClassificationProcessor(
                    self.config,
                    model_config,
                    self.event_metadata_publisher,
                    self.metrics,
                )
                self.config.classification.custom[model_name] = model_config

                self.realtime_processors.append(processor)
                logger.info(
                    f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
                )
                # Check if processor already exists
                for processor in self.realtime_processors:
                    if isinstance(
                        processor,
                        (
                            CustomStateClassificationProcessor,
                            CustomObjectClassificationProcessor,
                        ),
                    ):
                        if processor.model_config.name == model_name:
                            logger.debug(
                                f"Classification processor for model {model_name} already exists, skipping"
                            )
                            return

                if model_config.state_config is not None:
                    processor = CustomStateClassificationProcessor(
                        self.config, model_config, self.requestor, self.metrics
                    )
                else:
                    processor = CustomObjectClassificationProcessor(
                        self.config,
                        model_config,
                        self.event_metadata_publisher,
                        self.requestor,
                        self.metrics,
                    )

                self.realtime_processors.append(processor)
                logger.info(
                    f"Added classification processor for model: {model_name} (type: {type(processor).__name__})"
                )

    def _process_requests(self) -> None:
        """Process embeddings requests"""
@@ -374,7 +401,14 @@ class EmbeddingMaintainer(threading.Thread):

        source_type, _, camera, frame_name, data = update

        logger.debug(
            f"Received update - source_type: {source_type}, camera: {camera}, data label: {data.get('label') if data else 'None'}"
        )

        if not camera or source_type != EventTypeEnum.tracked_object:
            logger.debug(
                f"Skipping update - camera: {camera}, source_type: {source_type}"
            )
            return

        if self.config.semantic_search.enabled:
@@ -384,6 +418,9 @@ class EmbeddingMaintainer(threading.Thread):

        # no need to process updated objects if no processors are active
        if len(self.realtime_processors) == 0 and len(self.post_processors) == 0:
            logger.debug(
                f"No processors active - realtime: {len(self.realtime_processors)}, post: {len(self.post_processors)}"
            )
            return

        # Create our own thumbnail based on the bounding box and the frame time
@@ -392,6 +429,7 @@ class EmbeddingMaintainer(threading.Thread):
                frame_name, camera_config.frame_shape_yuv
            )
        except FileNotFoundError:
            logger.debug(f"Frame {frame_name} not found for camera {camera}")
            pass

        if yuv_frame is None:
@@ -400,7 +438,11 @@ class EmbeddingMaintainer(threading.Thread):
            )
            return

        logger.debug(
            f"Processing {len(self.realtime_processors)} realtime processors for object {data.get('id')} (label: {data.get('label')})"
        )
        for processor in self.realtime_processors:
            logger.debug(f"Calling process_frame on {processor.__class__.__name__}")
            processor.process_frame(data, yuv_frame)

        for processor in self.post_processors:

@ -12,7 +12,7 @@ from frigate.config import FrigateConfig
from frigate.const import CLIPS_DIR
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event, Timeline
from frigate.util.path import delete_event_snapshot, delete_event_thumbnail
from frigate.util.file import delete_event_snapshot, delete_event_thumbnail

logger = logging.getLogger(__name__)

@ -51,8 +51,7 @@ class GenAIClient:
def get_concern_prompt() -> str:
if concerns:
concern_list = "\n - ".join(concerns)
return f"""
- `other_concerns` (list of strings): Include a list of any of the following concerns that are occurring:
return f"""- `other_concerns` (list of strings): Include a list of any of the following concerns that are occurring:
- {concern_list}"""
else:
return ""
@ -70,7 +69,7 @@ class GenAIClient:
return "\n- (No objects detected)"

context_prompt = f"""
Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"].replace("_", " ")} security camera.
Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"]} security camera.

## Normal Activity Patterns for This Property

@ -100,8 +99,8 @@ When forming your description:
## Response Format

Your response MUST be a flat JSON object with:
- `title` (string): A concise, direct title that describes the purpose or overall action, not just what you literally see. Use names from "Objects in Scene" based on what you visually observe. If you see both a name and an unidentified object of the same type but visually observe only one person/object, use ONLY the name. Examples: "Joe walking dog", "Person taking out trash", "Joe accessing vehicle", "Joe and person on front porch".
- `scene` (string): A narrative description of what happens across the sequence from start to finish. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign.
- `title` (string): A concise, direct title that describes the primary action or event in the sequence, not just what you literally see. Use spatial context when available to make titles more meaningful. When multiple objects/actions are present, prioritize whichever is most prominent or occurs first. Use names from "Objects in Scene" based on what you visually observe. If you see both a name and an unidentified object of the same type but visually observe only one person/object, use ONLY the name. Examples: "Joe walking dog", "Person taking out trash", "Vehicle arriving in driveway", "Joe accessing vehicle", "Person leaving porch for driveway".
- `scene` (string): A narrative description of what happens across the sequence from start to finish, in chronological order. Start by describing how the sequence begins, then describe the progression of events. **Describe all significant movements and actions in the order they occur.** For example, if a vehicle arrives and then a person exits, describe both actions sequentially. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign.
- `confidence` (float): 0-1 confidence in your analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous. Lower confidence when the sequence is unclear, objects are partially obscured, or context is ambiguous.
- `potential_threat_level` (integer): 0, 1, or 2 as defined in "Normal Activity Patterns for This Property" above. Your threat level must be consistent with your scene description and the guidance above.
{get_concern_prompt()}
@ -110,11 +109,11 @@ Your response MUST be a flat JSON object with:

- Frame 1 = earliest, Frame {len(thumbnails)} = latest
- Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds
- Zones involved: {", ".join(z.replace("_", " ").title() for z in review_data["zones"]) or "None"}
- Zones involved: {", ".join(review_data["zones"]) if review_data["zones"] else "None"}

## Objects in Scene

Each line represents a detection state, not necessarily unique individuals. Objects with names in parentheses (e.g., "Name (person)") are verified identities. Objects without names (e.g., "Person") are detected but not identified.
Each line represents a detection state, not necessarily unique individuals. Parentheses indicate object type or category, use only the name/label in your response, not the parentheses.

**CRITICAL: When you see both recognized and unrecognized entries of the same type (e.g., "Joe (person)" and "Person"), visually count how many distinct people/objects you actually see based on appearance and clothing. If you observe only ONE person throughout the sequence, use ONLY the recognized name (e.g., "Joe"). The same person may be recognized in some frames but not others. Only describe both if you visually see MULTIPLE distinct people with clearly different appearances.**

@ -206,14 +205,20 @@ Rules for the report:
- Group bullets under subheadings when multiple events fall into the same category (e.g., Vehicle Activity, Porch Activity, Unusual Behavior).

- Threat levels
- Always show (threat level: X) for each event.
- Always show the threat level for each event using these labels:
- Threat level 0: "Normal"
- Threat level 1: "Needs review"
- Threat level 2: "Security concern"
- Format as (threat level: Normal), (threat level: Needs review), or (threat level: Security concern).
- If multiple events at the same time share the same threat level, only state it once.

- Final assessment
- End with a Final Assessment section.
- If all events are threat level 1 with no escalation:
- If all events are threat level 0:
Final assessment: Only normal residential activity observed during this period.
- If threat level 2+ events are present, clearly summarize them as Potential concerns requiring review.
- If threat level 1 events are present:
Final assessment: Some activity requires review but no security concerns identified.
- If threat level 2 events are present, clearly summarize them as Security concerns requiring immediate attention.

- Conciseness
- Do not repeat benign clothing/appearance details unless they distinguish individuals.
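
For illustration, here is a hypothetical response that satisfies the flat-JSON contract described in the prompt above; every field value is invented, and the check is just a sanity test of the stated constraints:

import json

# Invented example of the flat JSON object the prompt asks the model to return.
raw = json.dumps(
    {
        "title": "Joe walking dog",
        "scene": "Joe enters from the driveway with a dog on a leash and walks toward the street.",
        "confidence": 0.87,
        "potential_threat_level": 0,
        "other_concerns": [],
    }
)

resp = json.loads(raw)
assert 0.0 <= resp["confidence"] <= 1.0
assert resp["potential_threat_level"] in (0, 1, 2)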
@ -18,6 +18,7 @@ class OpenAIClient(GenAIClient):
"""Generative AI client for Frigate using OpenAI."""

provider: OpenAI
context_size: Optional[int] = None

def _init_provider(self):
"""Initialize the client."""
@ -69,5 +70,33 @@ class OpenAIClient(GenAIClient):

def get_context_size(self) -> int:
"""Get the context window size for OpenAI."""
# OpenAI GPT-4 Vision models have 128K token context window
return 128000
if self.context_size is not None:
return self.context_size

try:
models = self.provider.models.list()
for model in models.data:
if model.id == self.genai_config.model:
if hasattr(model, "max_model_len") and model.max_model_len:
self.context_size = model.max_model_len
logger.debug(
f"Retrieved context size {self.context_size} for model {self.genai_config.model}"
)
return self.context_size

except Exception as e:
logger.debug(
f"Failed to fetch model context size from API: {e}, using default"
)

# Default to 128K for ChatGPT models, 8K for others
model_name = self.genai_config.model.lower()
if "gpt" in model_name:
self.context_size = 128000
else:
self.context_size = 8192

logger.debug(
f"Using default context size {self.context_size} for model {self.genai_config.model}"
)
return self.context_size
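
The method above is a lazily cached lookup with a heuristic fallback. A standalone sketch of the same shape, where `fetch_max_len` is a hypothetical stand-in for the models-list API call:

from typing import Callable, Optional

def make_context_size_getter(
    model: str, fetch_max_len: Callable[[str], Optional[int]]
) -> Callable[[], int]:
    cached: Optional[int] = None

    def get_context_size() -> int:
        nonlocal cached
        if cached is not None:
            return cached  # only pay for the lookup once
        try:
            cached = fetch_max_len(model)
        except Exception:
            cached = None
        if cached is None:
            # heuristic default when the backend reports nothing
            cached = 128000 if "gpt" in model.lower() else 8192
        return cached

    return get_context_size

getter = make_context_size_getter("gpt-4o", lambda _m: None)
assert getter() == 128000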
@ -9,6 +9,7 @@ from multiprocessing import Queue, Value
from multiprocessing.synchronize import Event as MpEvent

import numpy as np
import zmq

from frigate.comms.object_detector_signaler import (
ObjectDetectorPublisher,
@ -42,6 +43,7 @@ class BaseLocalDetector(ObjectDetector):
self,
detector_config: BaseDetectorConfig = None,
labels: str = None,
stop_event: MpEvent = None,
):
self.fps = EventsPerSecond()
if labels is None:
@ -59,6 +61,10 @@ class BaseLocalDetector(ObjectDetector):

self.detect_api = create_detector(detector_config)

# If the detector supports stop_event, pass it
if hasattr(self.detect_api, "set_stop_event") and stop_event:
self.detect_api.set_stop_event(stop_event)

def _transform_input(self, tensor_input: np.ndarray) -> np.ndarray:
if self.input_transform:
tensor_input = np.transpose(tensor_input, self.input_transform)
@ -239,6 +245,10 @@ class AsyncDetectorRunner(FrigateProcess):
while not self.stop_event.is_set():
connection_id, detections = self._detector.async_receive_output()

# Handle timeout case (queue.Empty) - just continue
if connection_id is None:
continue

if not self.send_times:
# guard; shouldn't happen if send/recv are balanced
continue
@ -265,21 +275,38 @@ class AsyncDetectorRunner(FrigateProcess):

self._frame_manager = SharedMemoryFrameManager()
self._publisher = ObjectDetectorPublisher()
self._detector = AsyncLocalObjectDetector(detector_config=self.detector_config)
self._detector = AsyncLocalObjectDetector(
detector_config=self.detector_config, stop_event=self.stop_event
)

for name in self.cameras:
self.create_output_shm(name)

t_detect = threading.Thread(target=self._detect_worker, daemon=True)
t_result = threading.Thread(target=self._result_worker, daemon=True)
t_detect = threading.Thread(target=self._detect_worker, daemon=False)
t_result = threading.Thread(target=self._result_worker, daemon=False)
t_detect.start()
t_result.start()

while not self.stop_event.is_set():
time.sleep(0.5)
try:
while not self.stop_event.is_set():
time.sleep(0.5)

self._publisher.stop()
logger.info("Exited async detection process...")
logger.info(
"Stop event detected, waiting for detector threads to finish..."
)

# Wait for threads to finish processing
t_detect.join(timeout=5)
t_result.join(timeout=5)

# Shutdown the AsyncDetector
self._detector.detect_api.shutdown()

self._publisher.stop()
except Exception as e:
logger.error(f"Error during async detector shutdown: {e}")
finally:
logger.info("Exited Async detection process...")


class ObjectDetectProcess:
@ -307,7 +334,7 @@ class ObjectDetectProcess:
# if the process has already exited on its own, just return
if self.detect_process and self.detect_process.exitcode:
return
self.detect_process.terminate()

logging.info("Waiting for detection process to exit gracefully...")
self.detect_process.join(timeout=30)
if self.detect_process.exitcode is None:
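
The switch from daemon to non-daemon workers above pairs with explicit joins at shutdown. A minimal sketch of that stop-event-plus-bounded-join pattern, independent of Frigate's classes:

import threading
import time

stop_event = threading.Event()

def worker() -> None:
    while not stop_event.is_set():
        time.sleep(0.1)  # stand-in for real work

# non-daemon threads are not killed on interpreter exit, so they must be joined
t = threading.Thread(target=worker, daemon=False)
t.start()

stop_event.set()
t.join(timeout=5)  # bounded wait keeps shutdown from hanging forever
if t.is_alive():
    print("worker did not exit within the timeout")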
@ -377,6 +404,15 @@ class RemoteObjectDetector:
if self.stop_event.is_set():
return detections

# Drain any stale detection results from the ZMQ buffer before making a new request
# This prevents reading detection results from a previous request
# NOTE: This should never happen, but can in some rare cases
while True:
try:
self.detector_subscriber.socket.recv_string(flags=zmq.NOBLOCK)
except zmq.Again:
break

# copy input to shared memory
self.np_shm[:] = tensor_input[:]
self.detection_queue.put(self.name)
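
Non-blocking receives until `zmq.Again` is raised is the standard pyzmq idiom for flushing a socket's buffer, as in the hunk above. A self-contained sketch (the PULL socket and endpoint are illustrative):

import zmq

def drain(socket: zmq.Socket) -> int:
    """Discard every message already buffered on the socket."""
    dropped = 0
    while True:
        try:
            socket.recv(flags=zmq.NOBLOCK)
            dropped += 1
        except zmq.Again:
            return dropped  # buffer is empty

ctx = zmq.Context.instance()
sock = ctx.socket(zmq.PULL)
sock.bind("tcp://127.0.0.1:5555")
assert drain(sock) == 0  # nothing was sent, so nothing to drop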
@ -190,7 +190,11 @@ class OnvifController:
ptz: ONVIFService = await onvif.create_ptz_service()
self.cams[camera_name]["ptz"] = ptz

imaging: ONVIFService = await onvif.create_imaging_service()
try:
imaging: ONVIFService = await onvif.create_imaging_service()
except (Fault, ONVIFError, TransportError, Exception) as e:
logger.debug(f"Imaging service not supported for {camera_name}: {e}")
imaging = None
self.cams[camera_name]["imaging"] = imaging
try:
video_sources = await media.GetVideoSources()
@ -381,7 +385,10 @@ class OnvifController:
f"Disabling autotracking zooming for {camera_name}: Absolute zoom not supported. Exception: {e}"
)

if self.cams[camera_name]["video_source_token"] is not None:
if (
self.cams[camera_name]["video_source_token"] is not None
and imaging is not None
):
try:
imaging_capabilities = await imaging.GetImagingSettings(
{"VideoSourceToken": self.cams[camera_name]["video_source_token"]}
@ -421,6 +428,7 @@ class OnvifController:
if (
"focus" in self.cams[camera_name]["features"]
and self.cams[camera_name]["video_source_token"]
and self.cams[camera_name]["imaging"] is not None
):
try:
stop_request = self.cams[camera_name]["imaging"].create_type("Stop")
@ -648,6 +656,7 @@ class OnvifController:
if (
"focus" not in self.cams[camera_name]["features"]
or not self.cams[camera_name]["video_source_token"]
or self.cams[camera_name]["imaging"] is None
):
logger.error(f"{camera_name} does not support ONVIF continuous focus.")
return
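
The imaging changes follow an optional-capability pattern: probe the service once, store `None` on failure, and gate every later call on that stored value. A tiny self-contained sketch of the shape (`create_imaging_service` here is a hypothetical stand-in for the real ONVIF call):

import asyncio

async def create_imaging_service() -> object:
    # hypothetical stand-in that fails like a camera without imaging support
    raise RuntimeError("imaging not supported")

async def probe_imaging() -> object | None:
    try:
        return await create_imaging_service()
    except Exception:
        return None  # record the absence instead of failing camera setup

imaging = asyncio.run(probe_imaging())
assert imaging is None  # later code checks `imaging is not None` before any use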
@ -14,7 +14,8 @@ from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR
from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus
from frigate.record.util import remove_empty_directories, sync_recordings
from frigate.util.builtin import clear_and_unlink, get_tomorrow_at_time
from frigate.util.builtin import clear_and_unlink
from frigate.util.time import get_tomorrow_at_time

logger = logging.getLogger(__name__)


@ -28,7 +28,7 @@ from frigate.ffmpeg_presets import (
parse_preset_hardware_acceleration_encode,
)
from frigate.models import Export, Previews, Recordings
from frigate.util.builtin import is_current_hour
from frigate.util.time import is_current_hour

logger = logging.getLogger(__name__)
@ -407,6 +407,19 @@ class ReviewSegmentMaintainer(threading.Thread):
segment.last_detection_time = frame_time

for object in activity.get_all_objects():
# Alert-level objects should always be added (they extend/upgrade the segment)
# Detection-level objects should only be added if:
# - The segment is a detection segment (matching severity), OR
# - The segment is an alert AND the object started before the alert ended
# (objects starting after will be in the new detection segment)
is_alert_object = object in activity.categorized_objects["alerts"]

if not is_alert_object and segment.severity == SeverityEnum.alert:
# This is a detection-level object
# Only add if it started during the alert's active period
if object["start_time"] > segment.last_alert_time:
continue

if not object["sub_label"]:
segment.detections[object["id"]] = object["label"]
elif object["sub_label"][0] in self.config.model.all_attributes:
@ -362,7 +362,7 @@ def stats_snapshot(
stats["embeddings"]["review_description_speed"] = round(
embeddings_metrics.review_desc_speed.value * 1000, 2
)
stats["embeddings"]["review_descriptions"] = round(
stats["embeddings"]["review_description_events_per_second"] = round(
embeddings_metrics.review_desc_dps.value, 2
)

@ -370,7 +370,7 @@ def stats_snapshot(
stats["embeddings"]["object_description_speed"] = round(
embeddings_metrics.object_desc_speed.value * 1000, 2
)
stats["embeddings"]["object_descriptions"] = round(
stats["embeddings"]["object_description_events_per_second"] = round(
embeddings_metrics.object_desc_dps.value, 2
)

@ -378,7 +378,7 @@ def stats_snapshot(
stats["embeddings"][f"{key}_classification_speed"] = round(
embeddings_metrics.classification_speeds[key].value * 1000, 2
)
stats["embeddings"][f"{key}_classification"] = round(
stats["embeddings"][f"{key}_classification_events_per_second"] = round(
embeddings_metrics.classification_cps[key].value, 2
)
@ -113,6 +113,7 @@ class StorageMaintainer(threading.Thread):
recordings: Recordings = (
Recordings.select(
Recordings.id,
Recordings.camera,
Recordings.start_time,
Recordings.end_time,
Recordings.segment_size,
@ -137,7 +138,7 @@ class StorageMaintainer(threading.Thread):
)

event_start = 0
deleted_recordings = set()
deleted_recordings = []
for recording in recordings:
# check if 1 hour of storage has been reclaimed
if deleted_segments_size > hourly_bandwidth:
@ -172,7 +173,7 @@ class StorageMaintainer(threading.Thread):
if not keep:
try:
clear_and_unlink(Path(recording.path), missing_ok=False)
deleted_recordings.add(recording.id)
deleted_recordings.append(recording)
deleted_segments_size += recording.segment_size
except FileNotFoundError:
# this file was not found so we must assume no space was cleaned up
@ -186,6 +187,9 @@ class StorageMaintainer(threading.Thread):
recordings = (
Recordings.select(
Recordings.id,
Recordings.camera,
Recordings.start_time,
Recordings.end_time,
Recordings.path,
Recordings.segment_size,
)
@ -201,7 +205,7 @@ class StorageMaintainer(threading.Thread):
try:
clear_and_unlink(Path(recording.path), missing_ok=False)
deleted_segments_size += recording.segment_size
deleted_recordings.add(recording.id)
deleted_recordings.append(recording)
except FileNotFoundError:
# this file was not found so we must assume no space was cleaned up
pass
@ -211,7 +215,50 @@ class StorageMaintainer(threading.Thread):
logger.debug(f"Expiring {len(deleted_recordings)} recordings")
# delete up to 100,000 at a time
max_deletes = 100000
deleted_recordings_list = list(deleted_recordings)

# Update has_clip for events that overlap with deleted recordings
if deleted_recordings:
# Group deleted recordings by camera
camera_recordings = {}
for recording in deleted_recordings:
if recording.camera not in camera_recordings:
camera_recordings[recording.camera] = {
"min_start": recording.start_time,
"max_end": recording.end_time,
}
else:
camera_recordings[recording.camera]["min_start"] = min(
camera_recordings[recording.camera]["min_start"],
recording.start_time,
)
camera_recordings[recording.camera]["max_end"] = max(
camera_recordings[recording.camera]["max_end"],
recording.end_time,
)

# Find all events that overlap with deleted recordings time range per camera
events_to_update = []
for camera, time_range in camera_recordings.items():
overlapping_events = Event.select(Event.id).where(
Event.camera == camera,
Event.has_clip == True,
Event.start_time < time_range["max_end"],
Event.end_time > time_range["min_start"],
)

for event in overlapping_events:
events_to_update.append(event.id)

# Update has_clip to False for overlapping events
if events_to_update:
for i in range(0, len(events_to_update), max_deletes):
batch = events_to_update[i : i + max_deletes]
Event.update(has_clip=False).where(Event.id << batch).execute()
logger.debug(
f"Updated has_clip to False for {len(events_to_update)} events"
)

deleted_recordings_list = [r.id for r in deleted_recordings]
for i in range(0, len(deleted_recordings_list), max_deletes):
Recordings.delete().where(
Recordings.id << deleted_recordings_list[i : i + max_deletes]
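
Two small techniques carry the hunk above: the standard interval-overlap test (`start < other_end and end > other_start`) and chunked batch updates to cap statement size. A standalone sketch with illustrative data:

def overlaps(a_start: float, a_end: float, b_start: float, b_end: float) -> bool:
    # two intervals intersect iff each one starts before the other ends
    return a_start < b_end and a_end > b_start

assert overlaps(0, 10, 5, 15)
assert not overlaps(0, 10, 10, 20)  # merely touching does not count

ids = list(range(10))
batch_size = 4  # stands in for the 100,000-row cap used above
batches = [ids[i : i + batch_size] for i in range(0, len(ids), batch_size)]
assert batches == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]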
@ -3,6 +3,8 @@ import logging
import os
import unittest

from fastapi import Request
from fastapi.testclient import TestClient
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
@ -16,6 +18,20 @@ from frigate.review.types import SeverityEnum
from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS


class AuthTestClient(TestClient):
"""TestClient that automatically adds auth headers to all requests."""

def request(self, *args, **kwargs):
# Add default auth headers if not already present
headers = kwargs.get("headers") or {}
if "remote-user" not in headers:
headers["remote-user"] = "admin"
if "remote-role" not in headers:
headers["remote-role"] = "admin"
kwargs["headers"] = headers
return super().request(*args, **kwargs)


class BaseTestHttp(unittest.TestCase):
def setUp(self, models):
# setup clean database for each test run
@ -113,7 +129,9 @@ class BaseTestHttp(unittest.TestCase):
pass

def create_app(self, stats=None, event_metadata_publisher=None):
return create_fastapi_app(
from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user

app = create_fastapi_app(
FrigateConfig(**self.minimal_config),
self.db,
None,
@ -123,8 +141,33 @@ class BaseTestHttp(unittest.TestCase):
stats,
event_metadata_publisher,
None,
enforce_default_admin=False,
)

# Default test mocks for authentication
# Tests can override these in their setUp if needed
# This mock uses headers set by AuthTestClient
async def mock_get_current_user(request: Request):
username = request.headers.get("remote-user")
role = request.headers.get("remote-role")
if not username or not role:
from fastapi.responses import JSONResponse

return JSONResponse(
content={"message": "No authorization headers."}, status_code=401
)
return {"username": username, "role": role}

async def mock_get_allowed_cameras_for_filter(request: Request):
return list(self.minimal_config.get("cameras", {}).keys())

app.dependency_overrides[get_current_user] = mock_get_current_user
app.dependency_overrides[get_allowed_cameras_for_filter] = (
mock_get_allowed_cameras_for_filter
)

return app

def insert_mock_event(
self,
id: str,
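
Subclassing Starlette's TestClient to inject default headers, as AuthTestClient does above, keeps individual tests free of auth boilerplate while still letting any test override a header. A minimal self-contained sketch of the same pattern (the app and endpoint are invented for illustration):

from fastapi import FastAPI, Request
from fastapi.testclient import TestClient

app = FastAPI()

@app.get("/whoami")
async def whoami(request: Request):
    return {"user": request.headers.get("remote-user")}

class HeaderInjectingClient(TestClient):
    """Add a default header to every request unless the test supplies one."""

    def request(self, *args, **kwargs):
        headers = kwargs.get("headers") or {}
        headers.setdefault("remote-user", "admin")
        kwargs["headers"] = headers
        return super().request(*args, **kwargs)

with HeaderInjectingClient(app) as client:
    assert client.get("/whoami").json() == {"user": "admin"}
    # a per-test override still wins
    assert client.get("/whoami", headers={"remote-user": "viewer"}).json() == {
        "user": "viewer"
    }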
@ -1,10 +1,8 @@
from unittest.mock import Mock

from fastapi.testclient import TestClient

from frigate.models import Event, Recordings, ReviewSegment
from frigate.stats.emitter import StatsEmitter
from frigate.test.http_api.base_http_test import BaseTestHttp
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp


class TestHttpApp(BaseTestHttp):
@ -20,7 +18,7 @@ class TestHttpApp(BaseTestHttp):
stats.get_latest_stats.return_value = self.test_stats
app = super().create_app(stats)

with TestClient(app) as client:
with AuthTestClient(app) as client:
response = client.get("/stats")
response_json = response.json()
assert response_json == self.test_stats
@ -1,14 +1,13 @@
from unittest.mock import patch

from fastapi import HTTPException, Request
from fastapi.testclient import TestClient

from frigate.api.auth import (
get_allowed_cameras_for_filter,
get_current_user,
)
from frigate.models import Event, Recordings, ReviewSegment
from frigate.test.http_api.base_http_test import BaseTestHttp
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp


class TestCameraAccessEventReview(BaseTestHttp):
@ -16,9 +15,17 @@ class TestCameraAccessEventReview(BaseTestHttp):
super().setUp([Event, ReviewSegment, Recordings])
self.app = super().create_app()

# Mock get_current_user to return valid user for all tests
async def mock_get_current_user():
return {"username": "test_user", "role": "user"}
# Mock get_current_user for all tests
async def mock_get_current_user(request: Request):
username = request.headers.get("remote-user")
role = request.headers.get("remote-role")
if not username or not role:
from fastapi.responses import JSONResponse

return JSONResponse(
content={"message": "No authorization headers."}, status_code=401
)
return {"username": username, "role": role}

self.app.dependency_overrides[get_current_user] = mock_get_current_user
@ -30,21 +37,25 @@ class TestCameraAccessEventReview(BaseTestHttp):
super().insert_mock_event("event1", camera="front_door")
super().insert_mock_event("event2", camera="back_door")

self.app.dependency_overrides[get_allowed_cameras_for_filter] = lambda: [
"front_door"
]
with TestClient(self.app) as client:
async def mock_cameras(request: Request):
return ["front_door"]

self.app.dependency_overrides[get_allowed_cameras_for_filter] = mock_cameras
with AuthTestClient(self.app) as client:
resp = client.get("/events")
assert resp.status_code == 200
ids = [e["id"] for e in resp.json()]
assert "event1" in ids
assert "event2" not in ids

self.app.dependency_overrides[get_allowed_cameras_for_filter] = lambda: [
"front_door",
"back_door",
]
with TestClient(self.app) as client:
async def mock_cameras(request: Request):
return [
"front_door",
"back_door",
]

self.app.dependency_overrides[get_allowed_cameras_for_filter] = mock_cameras
with AuthTestClient(self.app) as client:
resp = client.get("/events")
assert resp.status_code == 200
ids = [e["id"] for e in resp.json()]
@ -54,21 +65,25 @@ class TestCameraAccessEventReview(BaseTestHttp):
super().insert_mock_review_segment("rev1", camera="front_door")
super().insert_mock_review_segment("rev2", camera="back_door")

self.app.dependency_overrides[get_allowed_cameras_for_filter] = lambda: [
"front_door"
]
with TestClient(self.app) as client:
async def mock_cameras(request: Request):
return ["front_door"]

self.app.dependency_overrides[get_allowed_cameras_for_filter] = mock_cameras
with AuthTestClient(self.app) as client:
resp = client.get("/review")
assert resp.status_code == 200
ids = [r["id"] for r in resp.json()]
assert "rev1" in ids
assert "rev2" not in ids

self.app.dependency_overrides[get_allowed_cameras_for_filter] = lambda: [
"front_door",
"back_door",
]
with TestClient(self.app) as client:
async def mock_cameras(request: Request):
return [
"front_door",
"back_door",
]

self.app.dependency_overrides[get_allowed_cameras_for_filter] = mock_cameras
with AuthTestClient(self.app) as client:
resp = client.get("/review")
assert resp.status_code == 200
ids = [r["id"] for r in resp.json()]
@ -84,7 +99,7 @@ class TestCameraAccessEventReview(BaseTestHttp):
raise HTTPException(status_code=403, detail="Access denied")

with patch("frigate.api.event.require_camera_access", mock_require_allowed):
with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
resp = client.get("/events/event1")
assert resp.status_code == 200
assert resp.json()["id"] == "event1"
@ -94,7 +109,7 @@ class TestCameraAccessEventReview(BaseTestHttp):
raise HTTPException(status_code=403, detail="Access denied")

with patch("frigate.api.event.require_camera_access", mock_require_disallowed):
with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
resp = client.get("/events/event1")
assert resp.status_code == 403

@ -108,7 +123,7 @@ class TestCameraAccessEventReview(BaseTestHttp):
raise HTTPException(status_code=403, detail="Access denied")

with patch("frigate.api.review.require_camera_access", mock_require_allowed):
with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
resp = client.get("/review/rev1")
assert resp.status_code == 200
assert resp.json()["id"] == "rev1"
@ -118,7 +133,7 @@ class TestCameraAccessEventReview(BaseTestHttp):
raise HTTPException(status_code=403, detail="Access denied")

with patch("frigate.api.review.require_camera_access", mock_require_disallowed):
with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
resp = client.get("/review/rev1")
assert resp.status_code == 403

@ -126,21 +141,25 @@ class TestCameraAccessEventReview(BaseTestHttp):
super().insert_mock_event("event1", camera="front_door")
super().insert_mock_event("event2", camera="back_door")

self.app.dependency_overrides[get_allowed_cameras_for_filter] = lambda: [
"front_door"
]
with TestClient(self.app) as client:
async def mock_cameras(request: Request):
return ["front_door"]

self.app.dependency_overrides[get_allowed_cameras_for_filter] = mock_cameras
with AuthTestClient(self.app) as client:
resp = client.get("/events", params={"cameras": "all"})
assert resp.status_code == 200
ids = [e["id"] for e in resp.json()]
assert "event1" in ids
assert "event2" not in ids

self.app.dependency_overrides[get_allowed_cameras_for_filter] = lambda: [
"front_door",
"back_door",
]
with TestClient(self.app) as client:
async def mock_cameras(request: Request):
return [
"front_door",
"back_door",
]

self.app.dependency_overrides[get_allowed_cameras_for_filter] = mock_cameras
with AuthTestClient(self.app) as client:
resp = client.get("/events", params={"cameras": "all"})
assert resp.status_code == 200
ids = [e["id"] for e in resp.json()]
@ -150,20 +169,24 @@ class TestCameraAccessEventReview(BaseTestHttp):
super().insert_mock_event("event1", camera="front_door")
super().insert_mock_event("event2", camera="back_door")

self.app.dependency_overrides[get_allowed_cameras_for_filter] = lambda: [
"front_door"
]
with TestClient(self.app) as client:
async def mock_cameras(request: Request):
return ["front_door"]

self.app.dependency_overrides[get_allowed_cameras_for_filter] = mock_cameras
with AuthTestClient(self.app) as client:
resp = client.get("/events/summary")
assert resp.status_code == 200
summary_list = resp.json()
assert len(summary_list) == 1

self.app.dependency_overrides[get_allowed_cameras_for_filter] = lambda: [
"front_door",
"back_door",
]
with TestClient(self.app) as client:
async def mock_cameras(request: Request):
return [
"front_door",
"back_door",
]

self.app.dependency_overrides[get_allowed_cameras_for_filter] = mock_cameras
with AuthTestClient(self.app) as client:
resp = client.get("/events/summary")
summary_list = resp.json()
assert len(summary_list) == 2
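
The tests above lean on FastAPI's dependency_overrides mapping, which swaps a dependency callable without touching the endpoint; async override functions are accepted even when the original dependency is sync, which is why the lambdas were replaced by `async def mock_cameras`. A minimal sketch (app, endpoint, and camera names are invented):

from fastapi import Depends, FastAPI
from fastapi.testclient import TestClient

app = FastAPI()

def get_allowed_cameras() -> list[str]:
    raise NotImplementedError  # the real lookup lives in the auth layer

@app.get("/cameras")
def cameras(allowed: list[str] = Depends(get_allowed_cameras)):
    return allowed

async def mock_cameras() -> list[str]:
    return ["front_door"]

app.dependency_overrides[get_allowed_cameras] = mock_cameras

with TestClient(app) as client:
    assert client.get("/cameras").json() == ["front_door"]

app.dependency_overrides.clear()  # always reset between tests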
@ -2,14 +2,13 @@ from datetime import datetime
from typing import Any
from unittest.mock import Mock

from fastapi.testclient import TestClient
from playhouse.shortcuts import model_to_dict

from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user
from frigate.comms.event_metadata_updater import EventMetadataPublisher
from frigate.models import Event, Recordings, ReviewSegment, Timeline
from frigate.stats.emitter import StatsEmitter
from frigate.test.http_api.base_http_test import BaseTestHttp
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp, Request
from frigate.test.test_storage import _insert_mock_event


@ -18,14 +17,26 @@ class TestHttpApp(BaseTestHttp):
super().setUp([Event, Recordings, ReviewSegment, Timeline])
self.app = super().create_app()

# Mock auth to bypass camera access for tests
async def mock_get_current_user(request: Any):
return {"username": "test_user", "role": "admin"}
# Mock get_current_user for all tests
async def mock_get_current_user(request: Request):
username = request.headers.get("remote-user")
role = request.headers.get("remote-role")
if not username or not role:
from fastapi.responses import JSONResponse

return JSONResponse(
content={"message": "No authorization headers."}, status_code=401
)
return {"username": username, "role": role}

self.app.dependency_overrides[get_current_user] = mock_get_current_user
self.app.dependency_overrides[get_allowed_cameras_for_filter] = lambda: [
"front_door"
]

async def mock_get_allowed_cameras_for_filter(request: Request):
return ["front_door"]

self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
mock_get_allowed_cameras_for_filter
)

def tearDown(self):
self.app.dependency_overrides.clear()
@ -35,20 +46,20 @@ class TestHttpApp(BaseTestHttp):
################################### GET /events Endpoint #########################################################
####################################################################################################################
def test_get_event_list_no_events(self):
with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
events = client.get("/events").json()
assert len(events) == 0

def test_get_event_list_no_match_event_id(self):
id = "123456.random"
with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
super().insert_mock_event(id)
events = client.get("/events", params={"event_id": "abc"}).json()
assert len(events) == 0

def test_get_event_list_match_event_id(self):
id = "123456.random"
with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
super().insert_mock_event(id)
events = client.get("/events", params={"event_id": id}).json()
assert len(events) == 1
@ -58,7 +69,7 @@ class TestHttpApp(BaseTestHttp):
now = int(datetime.now().timestamp())

id = "123456.random"
with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
super().insert_mock_event(id, now, now + 1)
events = client.get(
"/events", params={"max_length": 1, "min_length": 1}
@ -69,7 +80,7 @@ class TestHttpApp(BaseTestHttp):
def test_get_event_list_no_match_max_length(self):
now = int(datetime.now().timestamp())

with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
id = "123456.random"
super().insert_mock_event(id, now, now + 2)
events = client.get("/events", params={"max_length": 1}).json()
@ -78,7 +89,7 @@ class TestHttpApp(BaseTestHttp):
def test_get_event_list_no_match_min_length(self):
now = int(datetime.now().timestamp())

with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
id = "123456.random"
super().insert_mock_event(id, now, now + 2)
events = client.get("/events", params={"min_length": 3}).json()
@ -88,7 +99,7 @@ class TestHttpApp(BaseTestHttp):
id = "123456.random"
id2 = "54321.random"

with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
super().insert_mock_event(id)
events = client.get("/events").json()
assert len(events) == 1
@ -108,14 +119,14 @@ class TestHttpApp(BaseTestHttp):
def test_get_event_list_no_match_has_clip(self):
now = int(datetime.now().timestamp())

with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
id = "123456.random"
super().insert_mock_event(id, now, now + 2)
events = client.get("/events", params={"has_clip": 0}).json()
assert len(events) == 0

def test_get_event_list_has_clip(self):
with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
id = "123456.random"
super().insert_mock_event(id, has_clip=True)
events = client.get("/events", params={"has_clip": 1}).json()
@ -123,7 +134,7 @@ class TestHttpApp(BaseTestHttp):
assert events[0]["id"] == id

def test_get_event_list_sort_score(self):
with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
id = "123456.random"
id2 = "54321.random"
super().insert_mock_event(id, top_score=37, score=37, data={"score": 50})
@ -141,7 +152,7 @@ class TestHttpApp(BaseTestHttp):
def test_get_event_list_sort_start_time(self):
now = int(datetime.now().timestamp())

with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
id = "123456.random"
id2 = "54321.random"
super().insert_mock_event(id, start_time=now + 3)
@ -159,7 +170,7 @@ class TestHttpApp(BaseTestHttp):
def test_get_good_event(self):
id = "123456.random"

with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
super().insert_mock_event(id)
event = client.get(f"/events/{id}").json()

@ -171,7 +182,7 @@ class TestHttpApp(BaseTestHttp):
id = "123456.random"
bad_id = "654321.other"

with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
super().insert_mock_event(id)
event_response = client.get(f"/events/{bad_id}")
assert event_response.status_code == 404
@ -180,7 +191,7 @@ class TestHttpApp(BaseTestHttp):
def test_delete_event(self):
id = "123456.random"

with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
super().insert_mock_event(id)
event = client.get(f"/events/{id}").json()
assert event
@ -193,7 +204,7 @@ class TestHttpApp(BaseTestHttp):
def test_event_retention(self):
id = "123456.random"

with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
super().insert_mock_event(id)
client.post(f"/events/{id}/retain", headers={"remote-role": "admin"})
event = client.get(f"/events/{id}").json()
@ -212,12 +223,11 @@ class TestHttpApp(BaseTestHttp):
morning = 1656590400 # 06/30/2022 6 am (GMT)
evening = 1656633600 # 06/30/2022 6 pm (GMT)

with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
super().insert_mock_event(morning_id, morning)
super().insert_mock_event(evening_id, evening)
# both events come back
events = client.get("/events").json()
print("events!!!", events)
assert events
assert len(events) == 2
# morning event is excluded
@ -248,7 +258,7 @@ class TestHttpApp(BaseTestHttp):

mock_event_updater.publish.side_effect = update_event

with TestClient(app) as client:
with AuthTestClient(app) as client:
super().insert_mock_event(id)
new_sub_label_response = client.post(
f"/events/{id}/sub_label",
@ -285,7 +295,7 @@ class TestHttpApp(BaseTestHttp):

mock_event_updater.publish.side_effect = update_event

with TestClient(app) as client:
with AuthTestClient(app) as client:
super().insert_mock_event(id)
client.post(
f"/events/{id}/sub_label",
@ -301,7 +311,7 @@ class TestHttpApp(BaseTestHttp):
####################################################################################################################
def test_get_metrics(self):
"""ensure correct prometheus metrics api response"""
with TestClient(self.app) as client:
with AuthTestClient(self.app) as client:
ts_start = datetime.now().timestamp()
ts_end = ts_start + 30
_insert_mock_event(
405
frigate/test/http_api/test_http_media.py
Normal file
@ -0,0 +1,405 @@
|
||||
"""Unit tests for recordings/media API endpoints."""
|
||||
|
||||
from datetime import datetime, timezone
|
||||
|
||||
import pytz
|
||||
from fastapi import Request
|
||||
|
||||
from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user
|
||||
from frigate.models import Recordings
|
||||
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp
|
||||
|
||||
|
||||
class TestHttpMedia(BaseTestHttp):
|
||||
"""Test media API endpoints, particularly recordings with DST handling."""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures."""
|
||||
super().setUp([Recordings])
|
||||
self.app = super().create_app()
|
||||
|
||||
# Mock get_current_user for all tests
|
||||
async def mock_get_current_user(request: Request):
|
||||
username = request.headers.get("remote-user")
|
||||
role = request.headers.get("remote-role")
|
||||
if not username or not role:
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
return JSONResponse(
|
||||
content={"message": "No authorization headers."}, status_code=401
|
||||
)
|
||||
return {"username": username, "role": role}
|
||||
|
||||
self.app.dependency_overrides[get_current_user] = mock_get_current_user
|
||||
|
||||
async def mock_get_allowed_cameras_for_filter(request: Request):
|
||||
return ["front_door"]
|
||||
|
||||
self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
|
||||
mock_get_allowed_cameras_for_filter
|
||||
)
|
||||
|
||||
def tearDown(self):
|
||||
"""Clean up after tests."""
|
||||
self.app.dependency_overrides.clear()
|
||||
super().tearDown()
|
||||
|
||||
def test_recordings_summary_across_dst_spring_forward(self):
|
||||
"""
|
||||
Test recordings summary across spring DST transition (spring forward).
|
||||
|
||||
In 2024, DST in America/New_York transitions on March 10, 2024 at 2:00 AM
|
||||
Clocks spring forward from 2:00 AM to 3:00 AM (EST to EDT)
|
||||
"""
|
||||
tz = pytz.timezone("America/New_York")
|
||||
|
||||
# March 9, 2024 at 12:00 PM EST (before DST)
|
||||
march_9_noon = tz.localize(datetime(2024, 3, 9, 12, 0, 0)).timestamp()
|
||||
|
||||
# March 10, 2024 at 12:00 PM EDT (after DST transition)
|
||||
march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()
|
||||
|
||||
# March 11, 2024 at 12:00 PM EDT (after DST)
|
||||
march_11_noon = tz.localize(datetime(2024, 3, 11, 12, 0, 0)).timestamp()
|
||||
|
||||
with AuthTestClient(self.app) as client:
|
||||
# Insert recordings for each day
|
||||
Recordings.insert(
|
||||
id="recording_march_9",
|
||||
path="/media/recordings/march_9.mp4",
|
||||
camera="front_door",
|
||||
start_time=march_9_noon,
|
||||
end_time=march_9_noon + 3600, # 1 hour recording
|
||||
duration=3600,
|
||||
motion=100,
|
||||
objects=5,
|
||||
).execute()
|
||||
|
||||
Recordings.insert(
|
||||
id="recording_march_10",
|
||||
path="/media/recordings/march_10.mp4",
|
||||
camera="front_door",
|
||||
start_time=march_10_noon,
|
||||
end_time=march_10_noon + 3600,
|
||||
duration=3600,
|
||||
motion=150,
|
||||
objects=8,
|
||||
).execute()
|
||||
|
||||
Recordings.insert(
|
||||
id="recording_march_11",
|
||||
path="/media/recordings/march_11.mp4",
|
||||
camera="front_door",
|
||||
start_time=march_11_noon,
|
||||
end_time=march_11_noon + 3600,
|
||||
duration=3600,
|
||||
motion=200,
|
||||
objects=10,
|
||||
).execute()
|
||||
|
||||
# Test recordings summary with America/New_York timezone
|
||||
response = client.get(
|
||||
"/recordings/summary",
|
||||
params={"timezone": "America/New_York", "cameras": "all"},
|
||||
)
|
||||
|
||||
assert response.status_code == 200
|
||||
summary = response.json()
|
||||
|
||||
# Verify we get exactly 3 days
|
||||
assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
|
||||
|
||||
# Verify the correct dates are returned (API returns dict with True values)
|
||||
assert "2024-03-09" in summary, f"Expected 2024-03-09 in {summary}"
|
||||
assert "2024-03-10" in summary, f"Expected 2024-03-10 in {summary}"
|
||||
assert "2024-03-11" in summary, f"Expected 2024-03-11 in {summary}"
|
||||
assert summary["2024-03-09"] is True
|
||||
assert summary["2024-03-10"] is True
|
||||
assert summary["2024-03-11"] is True
|
||||
|
||||
def test_recordings_summary_across_dst_fall_back(self):
|
||||
"""
|
||||
Test recordings summary across fall DST transition (fall back).
|
||||
|
||||
In 2024, DST in America/New_York transitions on November 3, 2024 at 2:00 AM
|
||||
Clocks fall back from 2:00 AM to 1:00 AM (EDT to EST)
|
||||
"""
|
||||
tz = pytz.timezone("America/New_York")
|
||||
|
||||
# November 2, 2024 at 12:00 PM EDT (before DST transition)
|
||||
nov_2_noon = tz.localize(datetime(2024, 11, 2, 12, 0, 0)).timestamp()
|
||||
|
||||
# November 3, 2024 at 12:00 PM EST (after DST transition)
|
||||
# Need to specify is_dst=False to get the time after fall back
|
||||
nov_3_noon = tz.localize(
|
||||
datetime(2024, 11, 3, 12, 0, 0), is_dst=False
|
||||
).timestamp()
|
||||
|
||||
# November 4, 2024 at 12:00 PM EST (after DST)
|
||||
nov_4_noon = tz.localize(datetime(2024, 11, 4, 12, 0, 0)).timestamp()
|
||||
|
||||
with AuthTestClient(self.app) as client:
|
||||
# Insert recordings for each day
|
||||
Recordings.insert(
|
||||
id="recording_nov_2",
|
||||
path="/media/recordings/nov_2.mp4",
|
||||
camera="front_door",
|
||||
start_time=nov_2_noon,
|
||||
end_time=nov_2_noon + 3600,
|
||||
duration=3600,
|
||||
motion=100,
|
||||
objects=5,
|
||||
).execute()
|
||||
|
||||
Recordings.insert(
|
||||
id="recording_nov_3",
|
||||
path="/media/recordings/nov_3.mp4",
|
||||
camera="front_door",
|
||||
start_time=nov_3_noon,
|
||||
end_time=nov_3_noon + 3600,
|
||||
duration=3600,
|
||||
motion=150,
|
||||
objects=8,
|
||||
).execute()
|
||||
|
||||
Recordings.insert(
|
||||
id="recording_nov_4",
|
||||
path="/media/recordings/nov_4.mp4",
|
||||
camera="front_door",
|
||||
start_time=nov_4_noon,
|
||||
end_time=nov_4_noon + 3600,
|
||||
duration=3600,
|
||||
motion=200,
|
||||
objects=10,
|
||||
).execute()
|
||||
|
||||
# Test recordings summary with America/New_York timezone
|
||||
response = client.get(
|
||||
"/recordings/summary",
|
||||
params={"timezone": "America/New_York", "cameras": "all"},
|
||||
)
|
||||
|
||||
assert response.status_code == 200
|
||||
summary = response.json()
|
||||
|
||||
# Verify we get exactly 3 days
|
||||
assert len(summary) == 3, f"Expected 3 days, got {len(summary)}"
|
||||
|
||||
# Verify the correct dates are returned (API returns dict with True values)
|
||||
assert "2024-11-02" in summary, f"Expected 2024-11-02 in {summary}"
|
||||
assert "2024-11-03" in summary, f"Expected 2024-11-03 in {summary}"
|
||||
assert "2024-11-04" in summary, f"Expected 2024-11-04 in {summary}"
|
||||
assert summary["2024-11-02"] is True
|
||||
assert summary["2024-11-03"] is True
|
||||
assert summary["2024-11-04"] is True
|
||||
|
||||
def test_recordings_summary_multiple_cameras_across_dst(self):
|
||||
"""
|
||||
Test recordings summary with multiple cameras across DST boundary.
|
||||
"""
|
||||
tz = pytz.timezone("America/New_York")
|
||||
|
||||
# March 9, 2024 at 10:00 AM EST (before DST)
|
||||
march_9_morning = tz.localize(datetime(2024, 3, 9, 10, 0, 0)).timestamp()
|
||||
|
||||
# March 10, 2024 at 3:00 PM EDT (after DST transition)
|
||||
march_10_afternoon = tz.localize(datetime(2024, 3, 10, 15, 0, 0)).timestamp()
|
||||
|
||||
with AuthTestClient(self.app) as client:
|
||||
# Override allowed cameras for this test to include both
|
||||
async def mock_get_allowed_cameras_for_filter(_request: Request):
|
||||
return ["front_door", "back_door"]
|
||||
|
||||
self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
|
||||
mock_get_allowed_cameras_for_filter
|
||||
)
|
||||
|
||||
# Insert recordings for front_door on March 9
|
||||
Recordings.insert(
|
||||
id="front_march_9",
|
||||
path="/media/recordings/front_march_9.mp4",
|
||||
camera="front_door",
|
||||
start_time=march_9_morning,
|
||||
end_time=march_9_morning + 3600,
|
||||
duration=3600,
|
||||
motion=100,
|
||||
objects=5,
|
||||
).execute()
|
||||
|
||||
# Insert recordings for back_door on March 10
|
||||
Recordings.insert(
|
||||
id="back_march_10",
|
||||
path="/media/recordings/back_march_10.mp4",
|
||||
camera="back_door",
|
||||
start_time=march_10_afternoon,
|
||||
end_time=march_10_afternoon + 3600,
|
||||
duration=3600,
|
||||
motion=150,
|
||||
objects=8,
|
||||
).execute()
|
||||
|
||||
# Test with all cameras
|
||||
            response = client.get(
                "/recordings/summary",
                params={"timezone": "America/New_York", "cameras": "all"},
            )

            assert response.status_code == 200
            summary = response.json()

            # Verify we get both days
            assert len(summary) == 2, f"Expected 2 days, got {len(summary)}"
            assert "2024-03-09" in summary
            assert "2024-03-10" in summary
            assert summary["2024-03-09"] is True
            assert summary["2024-03-10"] is True

            # Reset dependency override back to default single camera for other tests
            async def reset_allowed_cameras(_request: Request):
                return ["front_door"]

            self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
                reset_allowed_cameras
            )

    def test_recordings_summary_at_dst_transition_time(self):
        """
        Test recordings that span the exact DST transition time.
        """
        tz = pytz.timezone("America/New_York")

        # March 10, 2024 at 1:00 AM EST (1 hour before DST transition)
        # At 2:00 AM, clocks jump to 3:00 AM
        before_transition = tz.localize(datetime(2024, 3, 10, 1, 0, 0)).timestamp()

        # Recording that spans the transition (1:00 AM to 3:30 AM EDT)
        # This is 1.5 hours of actual time but spans the "missing" hour
        after_transition = tz.localize(datetime(2024, 3, 10, 3, 30, 0)).timestamp()

        with AuthTestClient(self.app) as client:
            Recordings.insert(
                id="recording_during_transition",
                path="/media/recordings/transition.mp4",
                camera="front_door",
                start_time=before_transition,
                end_time=after_transition,
                duration=after_transition - before_transition,
                motion=100,
                objects=5,
            ).execute()

            response = client.get(
                "/recordings/summary",
                params={"timezone": "America/New_York", "cameras": "all"},
            )

            assert response.status_code == 200
            summary = response.json()

            # The recording should appear on March 10
            assert len(summary) == 1
            assert "2024-03-10" in summary
            assert summary["2024-03-10"] is True

    def test_recordings_summary_utc_timezone(self):
        """
        Test recordings summary with UTC timezone (no DST).
        """
        # Use UTC timestamps directly
        march_9_utc = datetime(2024, 3, 9, 17, 0, 0, tzinfo=timezone.utc).timestamp()
        march_10_utc = datetime(2024, 3, 10, 17, 0, 0, tzinfo=timezone.utc).timestamp()

        with AuthTestClient(self.app) as client:
            Recordings.insert(
                id="recording_march_9_utc",
                path="/media/recordings/march_9_utc.mp4",
                camera="front_door",
                start_time=march_9_utc,
                end_time=march_9_utc + 3600,
                duration=3600,
                motion=100,
                objects=5,
            ).execute()

            Recordings.insert(
                id="recording_march_10_utc",
                path="/media/recordings/march_10_utc.mp4",
                camera="front_door",
                start_time=march_10_utc,
                end_time=march_10_utc + 3600,
                duration=3600,
                motion=150,
                objects=8,
            ).execute()

            # Test with UTC timezone
            response = client.get(
                "/recordings/summary", params={"timezone": "utc", "cameras": "all"}
            )

            assert response.status_code == 200
            summary = response.json()

            # Verify we get both days
            assert len(summary) == 2
            assert "2024-03-09" in summary
            assert "2024-03-10" in summary
            assert summary["2024-03-09"] is True
            assert summary["2024-03-10"] is True

    def test_recordings_summary_no_recordings(self):
        """
        Test recordings summary when no recordings exist.
        """
        with AuthTestClient(self.app) as client:
            response = client.get(
                "/recordings/summary",
                params={"timezone": "America/New_York", "cameras": "all"},
            )

            assert response.status_code == 200
            summary = response.json()
            assert len(summary) == 0

    def test_recordings_summary_single_camera_filter(self):
        """
        Test recordings summary filtered to a single camera.
        """
        tz = pytz.timezone("America/New_York")
        march_10_noon = tz.localize(datetime(2024, 3, 10, 12, 0, 0)).timestamp()

        with AuthTestClient(self.app) as client:
            # Insert recordings for both cameras
            Recordings.insert(
                id="front_recording",
                path="/media/recordings/front.mp4",
                camera="front_door",
                start_time=march_10_noon,
                end_time=march_10_noon + 3600,
                duration=3600,
                motion=100,
                objects=5,
            ).execute()

            Recordings.insert(
                id="back_recording",
                path="/media/recordings/back.mp4",
                camera="back_door",
                start_time=march_10_noon,
                end_time=march_10_noon + 3600,
                duration=3600,
                motion=150,
                objects=8,
            ).execute()

            # Test with only front_door camera
            response = client.get(
                "/recordings/summary",
                params={"timezone": "America/New_York", "cameras": "front_door"},
            )

            assert response.status_code == 200
            summary = response.json()
            assert len(summary) == 1
            assert "2024-03-10" in summary
            assert summary["2024-03-10"] is True
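For context on the fixtures above (not part of the diff): `pytz`'s `localize()` is what makes the spring-forward tests meaningful, since a naive datetime carries no offset and the wall clock skips from 2:00 AM to 3:00 AM. A minimal sketch of the offset change these tests rely on:

```python
from datetime import datetime

import pytz

tz = pytz.timezone("America/New_York")

# 1:00 AM on 2024-03-10 is still EST (UTC-5); 3:30 AM is EDT (UTC-4).
before = tz.localize(datetime(2024, 3, 10, 1, 0, 0))
after = tz.localize(datetime(2024, 3, 10, 3, 30, 0))

print(before.utcoffset())  # -1 day, 19:00:00  (i.e. -05:00)
print(after.utcoffset())   # -1 day, 20:00:00  (i.e. -04:00)

# Only 1.5 hours of real time elapse across the "missing" hour.
print((after - before).total_seconds() / 3600)  # 1.5
```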
@ -1,12 +1,12 @@
from datetime import datetime, timedelta

from fastapi.testclient import TestClient
from fastapi import Request
from peewee import DoesNotExist

from frigate.api.auth import get_allowed_cameras_for_filter, get_current_user
from frigate.models import Event, Recordings, ReviewSegment, UserReviewStatus
from frigate.review.types import SeverityEnum
from frigate.test.http_api.base_http_test import BaseTestHttp
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp


class TestHttpReview(BaseTestHttp):
@ -16,14 +16,26 @@ class TestHttpReview(BaseTestHttp):
        self.user_id = "admin"

        # Mock get_current_user for all tests
        async def mock_get_current_user():
            return {"username": self.user_id, "role": "admin"}
        # This mock uses headers set by AuthTestClient
        async def mock_get_current_user(request: Request):
            username = request.headers.get("remote-user")
            role = request.headers.get("remote-role")
            if not username or not role:
                from fastapi.responses import JSONResponse

                return JSONResponse(
                    content={"message": "No authorization headers."}, status_code=401
                )
            return {"username": username, "role": role}

        self.app.dependency_overrides[get_current_user] = mock_get_current_user

        self.app.dependency_overrides[get_allowed_cameras_for_filter] = lambda: [
            "front_door"
        ]
        async def mock_get_allowed_cameras_for_filter(request: Request):
            return ["front_door"]

        self.app.dependency_overrides[get_allowed_cameras_for_filter] = (
            mock_get_allowed_cameras_for_filter
        )

    def tearDown(self):
        self.app.dependency_overrides.clear()
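`AuthTestClient` itself is imported from `base_http_test` and is not shown in this compare. Judging by the `remote-user`/`remote-role` headers the new `mock_get_current_user` reads back, it is presumably a thin `TestClient` wrapper that injects those headers by default. A hypothetical sketch, under that assumption only:

```python
from fastapi.testclient import TestClient


class AuthTestClient(TestClient):
    """Hypothetical sketch: a TestClient that sends auth headers by default."""

    def __init__(self, app, username: str = "admin", role: str = "admin", **kwargs):
        super().__init__(app, **kwargs)
        # Headers the mocked get_current_user dependency reads back out.
        self.headers.update({"remote-user": username, "remote-role": role})
```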
@ -57,7 +69,7 @@ class TestHttpReview(BaseTestHttp):
        but ends after is included in the results."""
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            super().insert_mock_review_segment("123456.random", now, now + 2)
            response = client.get("/review")
            assert response.status_code == 200
@ -67,7 +79,7 @@ class TestHttpReview(BaseTestHttp):
    def test_get_review_no_filters(self):
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now - 2, now - 1)
            response = client.get("/review")
@ -81,7 +93,7 @@ class TestHttpReview(BaseTestHttp):
        """Test that review items outside the range are not returned."""
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now - 2, now - 1)
            super().insert_mock_review_segment(f"{id}2", now + 4, now + 5)
@ -97,7 +109,7 @@ class TestHttpReview(BaseTestHttp):
    def test_get_review_with_time_filter(self):
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now, now + 2)
            params = {
@ -113,7 +125,7 @@ class TestHttpReview(BaseTestHttp):
    def test_get_review_with_limit_filter(self):
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            id = "123456.random"
            id2 = "654321.random"
            super().insert_mock_review_segment(id, now, now + 2)
@ -132,7 +144,7 @@ class TestHttpReview(BaseTestHttp):
    def test_get_review_with_severity_filters_no_matches(self):
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now, now + 2, SeverityEnum.detection)
            params = {
@ -149,7 +161,7 @@ class TestHttpReview(BaseTestHttp):
    def test_get_review_with_severity_filters(self):
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now, now + 2, SeverityEnum.detection)
            params = {
@ -165,7 +177,7 @@ class TestHttpReview(BaseTestHttp):
    def test_get_review_with_all_filters(self):
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now, now + 2)
            params = {
@ -188,7 +200,7 @@ class TestHttpReview(BaseTestHttp):
    ################################### GET /review/summary Endpoint #################################################
    ####################################################################################################################
    def test_get_review_summary_all_filters(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            super().insert_mock_review_segment("123456.random")
            params = {
                "cameras": "front_door",
@ -219,7 +231,7 @@ class TestHttpReview(BaseTestHttp):
            self.assertEqual(response_json, expected_response)

    def test_get_review_summary_no_filters(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            super().insert_mock_review_segment("123456.random")
            response = client.get("/review/summary")
            assert response.status_code == 200
@ -247,7 +259,7 @@ class TestHttpReview(BaseTestHttp):
        now = datetime.now()
        five_days_ago = datetime.today() - timedelta(days=5)

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            super().insert_mock_review_segment(
                "123456.random", now.timestamp() - 2, now.timestamp() - 1
            )
@ -291,7 +303,7 @@ class TestHttpReview(BaseTestHttp):
        now = datetime.now()
        five_days_ago = datetime.today() - timedelta(days=5)

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            super().insert_mock_review_segment("123456.random", now.timestamp())
            five_days_ago_ts = five_days_ago.timestamp()
            for i in range(20):
@ -342,7 +354,7 @@ class TestHttpReview(BaseTestHttp):
    def test_get_review_summary_multiple_in_same_day_with_reviewed(self):
        five_days_ago = datetime.today() - timedelta(days=5)

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            five_days_ago_ts = five_days_ago.timestamp()
            for i in range(10):
                id = f"123456_{i}.random_alert_not_reviewed"
@ -393,14 +405,14 @@ class TestHttpReview(BaseTestHttp):
    ####################################################################################################################

    def test_post_reviews_viewed_no_body(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            super().insert_mock_review_segment("123456.random")
            response = client.post("/reviews/viewed")
            # Missing ids
            assert response.status_code == 422

    def test_post_reviews_viewed_no_body_ids(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            super().insert_mock_review_segment("123456.random")
            body = {"ids": [""]}
            response = client.post("/reviews/viewed", json=body)
@ -408,7 +420,7 @@ class TestHttpReview(BaseTestHttp):
            assert response.status_code == 422

    def test_post_reviews_viewed_non_existent_id(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id)
            body = {"ids": ["1"]}
@ -425,7 +437,7 @@ class TestHttpReview(BaseTestHttp):
            )

    def test_post_reviews_viewed(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id)
            body = {"ids": [id]}
@ -445,14 +457,14 @@ class TestHttpReview(BaseTestHttp):
    ################################### POST reviews/delete Endpoint ################################################
    ####################################################################################################################
    def test_post_reviews_delete_no_body(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            super().insert_mock_review_segment("123456.random")
            response = client.post("/reviews/delete", headers={"remote-role": "admin"})
            # Missing ids
            assert response.status_code == 422

    def test_post_reviews_delete_no_body_ids(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            super().insert_mock_review_segment("123456.random")
            body = {"ids": [""]}
            response = client.post(
@ -462,7 +474,7 @@ class TestHttpReview(BaseTestHttp):
            assert response.status_code == 422

    def test_post_reviews_delete_non_existent_id(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id)
            body = {"ids": ["1"]}
@ -479,7 +491,7 @@ class TestHttpReview(BaseTestHttp):
            assert review_ids_in_db_after[0].id == id

    def test_post_reviews_delete(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id)
            body = {"ids": [id]}
@ -495,7 +507,7 @@ class TestHttpReview(BaseTestHttp):
            assert len(review_ids_in_db_after) == 0

    def test_post_reviews_delete_many(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            ids = ["123456.random", "654321.random"]
            for id in ids:
                super().insert_mock_review_segment(id)
@ -527,7 +539,7 @@ class TestHttpReview(BaseTestHttp):
    def test_review_activity_motion_no_data_for_time_range(self):
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            params = {
                "after": now,
                "before": now + 3,
@ -540,7 +552,7 @@ class TestHttpReview(BaseTestHttp):
    def test_review_activity_motion(self):
        now = int(datetime.now().timestamp())

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            one_m = int((datetime.now() + timedelta(minutes=1)).timestamp())
            id = "123456.random"
            id2 = "123451.random"
@ -573,7 +585,7 @@ class TestHttpReview(BaseTestHttp):
    ################################### GET /review/event/{event_id} Endpoint #######################################
    ####################################################################################################################
    def test_review_event_not_found(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            response = client.get("/review/event/123456.random")
            assert response.status_code == 404
            response_json = response.json()
@ -585,7 +597,7 @@ class TestHttpReview(BaseTestHttp):
    def test_review_event_not_found_in_data(self):
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            id = "123456.random"
            super().insert_mock_review_segment(id, now + 1, now + 2)
            response = client.get(f"/review/event/{id}")
@ -599,7 +611,7 @@ class TestHttpReview(BaseTestHttp):
    def test_review_get_specific_event(self):
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            event_id = "123456.event.random"
            super().insert_mock_event(event_id)
            review_id = "123456.review.random"
@ -626,7 +638,7 @@ class TestHttpReview(BaseTestHttp):
    ################################### GET /review/{review_id} Endpoint #######################################
    ####################################################################################################################
    def test_review_not_found(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            response = client.get("/review/123456.random")
            assert response.status_code == 404
            response_json = response.json()
@ -638,7 +650,7 @@ class TestHttpReview(BaseTestHttp):
    def test_get_review(self):
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            review_id = "123456.review.random"
            super().insert_mock_review_segment(review_id, now + 1, now + 2)
            response = client.get(f"/review/{review_id}")
@ -662,7 +674,7 @@ class TestHttpReview(BaseTestHttp):
    ####################################################################################################################

    def test_delete_review_viewed_review_not_found(self):
        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            review_id = "123456.random"
            response = client.delete(f"/review/{review_id}/viewed")
            assert response.status_code == 404
@ -675,7 +687,7 @@ class TestHttpReview(BaseTestHttp):
    def test_delete_review_viewed(self):
        now = datetime.now().timestamp()

        with TestClient(self.app) as client:
        with AuthTestClient(self.app) as client:
            review_id = "123456.review.random"
            super().insert_mock_review_segment(review_id, now + 1, now + 2)
            self._insert_user_review_status(review_id, reviewed=True)
@ -109,6 +109,7 @@ class TimelineProcessor(threading.Thread):
                        event_data["region"],
                    ),
                    "attribute": "",
                    "score": event_data["score"],
                },
            }
@ -23,9 +23,11 @@ class ModelStatusTypesEnum(str, Enum):
    error = "error"
    training = "training"
    complete = "complete"
    failed = "failed"


class TrackedObjectUpdateTypesEnum(str, Enum):
    description = "description"
    face = "face"
    lpr = "lpr"
    classification = "classification"
@ -15,12 +15,9 @@ from collections.abc import Mapping
from multiprocessing.sharedctypes import Synchronized
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
from zoneinfo import ZoneInfoNotFoundError

import numpy as np
import pytz
from ruamel.yaml import YAML
from tzlocal import get_localzone

from frigate.const import REGEX_HTTP_CAMERA_USER_PASS, REGEX_RTSP_CAMERA_USER_PASS
@ -157,17 +154,6 @@ def load_labels(path: Optional[str], encoding="utf-8", prefill=91):
    return labels


def get_tz_modifiers(tz_name: str) -> Tuple[str, str, float]:
    seconds_offset = (
        datetime.datetime.now(pytz.timezone(tz_name)).utcoffset().total_seconds()
    )
    hours_offset = int(seconds_offset / 60 / 60)
    minutes_offset = int(seconds_offset / 60 - hours_offset * 60)
    hour_modifier = f"{hours_offset} hour"
    minute_modifier = f"{minutes_offset} minute"
    return hour_modifier, minute_modifier, seconds_offset


def to_relative_box(
    width: int, height: int, box: Tuple[int, int, int, int]
) -> Tuple[int | float, int | float, int | float, int | float]:
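The removed `get_tz_modifiers` split a UTC offset into whole hours plus leftover minutes, producing strings shaped like the modifiers accepted by SQLite's date functions (presumably their consumer). An illustrative check of the arithmetic, not part of the diff:

```python
# e.g. America/New_York in winter: offset = -18000 seconds
seconds_offset = -18000
hours_offset = int(seconds_offset / 60 / 60)                   # -5
minutes_offset = int(seconds_offset / 60 - hours_offset * 60)  # 0
print(f"{hours_offset} hour", f"{minutes_offset} minute")      # "-5 hour" "0 minute"

# A half-hour zone such as Asia/Kolkata: offset = 19800 seconds
seconds_offset = 19800
hours_offset = int(seconds_offset / 60 / 60)                   # 5
minutes_offset = int(seconds_offset / 60 - hours_offset * 60)  # 30
```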
@ -298,34 +284,6 @@ def find_by_key(dictionary, target_key):
    return None


def get_tomorrow_at_time(hour: int) -> datetime.datetime:
    """Returns the datetime of the following day at the given hour."""
    try:
        tomorrow = datetime.datetime.now(get_localzone()) + datetime.timedelta(days=1)
    except ZoneInfoNotFoundError:
        tomorrow = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
            days=1
        )
        logger.warning(
            "Using utc for maintenance due to missing or incorrect timezone set"
        )

    return tomorrow.replace(hour=hour, minute=0, second=0).astimezone(
        datetime.timezone.utc
    )


def is_current_hour(timestamp: int) -> bool:
    """Returns if timestamp is in the current UTC hour."""
    start_of_next_hour = (
        datetime.datetime.now(datetime.timezone.utc).replace(
            minute=0, second=0, microsecond=0
        )
        + datetime.timedelta(hours=1)
    ).timestamp()
    return timestamp < start_of_next_hour


def clear_and_unlink(file: Path, missing_ok: bool = True) -> None:
    """clear file then unlink to avoid space retained by file descriptors."""
    if not missing_ok and not file.exists():
@ -1,5 +1,7 @@
"""Util for classification models."""

import datetime
import json
import logging
import os
import random
@ -20,17 +22,103 @@ from frigate.const import (
from frigate.log import redirect_output_to_logger
from frigate.models import Event, Recordings, ReviewSegment
from frigate.types import ModelStatusTypesEnum
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import get_image_from_recording
from frigate.util.path import get_event_thumbnail_bytes
from frigate.util.process import FrigateProcess

BATCH_SIZE = 16
EPOCHS = 50
LEARNING_RATE = 0.001
TRAINING_METADATA_FILE = ".training_metadata.json"

logger = logging.getLogger(__name__)

def write_training_metadata(model_name: str, image_count: int) -> None:
    """
    Write training metadata to a hidden file in the model's clips directory.

    Args:
        model_name: Name of the classification model
        image_count: Number of images used in training
    """
    clips_model_dir = os.path.join(CLIPS_DIR, model_name)
    os.makedirs(clips_model_dir, exist_ok=True)

    metadata_path = os.path.join(clips_model_dir, TRAINING_METADATA_FILE)
    metadata = {
        "last_training_date": datetime.datetime.now().isoformat(),
        "last_training_image_count": image_count,
    }

    try:
        with open(metadata_path, "w") as f:
            json.dump(metadata, f, indent=2)
        logger.info(f"Wrote training metadata for {model_name}: {image_count} images")
    except Exception as e:
        logger.error(f"Failed to write training metadata for {model_name}: {e}")


def read_training_metadata(model_name: str) -> dict[str, any] | None:
    """
    Read training metadata from the hidden file in the model's clips directory.

    Args:
        model_name: Name of the classification model

    Returns:
        Dictionary with last_training_date and last_training_image_count, or None if not found
    """
    clips_model_dir = os.path.join(CLIPS_DIR, model_name)
    metadata_path = os.path.join(clips_model_dir, TRAINING_METADATA_FILE)

    if not os.path.exists(metadata_path):
        return None

    try:
        with open(metadata_path, "r") as f:
            metadata = json.load(f)
        return metadata
    except Exception as e:
        logger.error(f"Failed to read training metadata for {model_name}: {e}")
        return None


def get_dataset_image_count(model_name: str) -> int:
    """
    Count the total number of images in the model's dataset directory.

    Args:
        model_name: Name of the classification model

    Returns:
        Total count of images across all categories
    """
    dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")

    if not os.path.exists(dataset_dir):
        return 0

    total_count = 0
    try:
        for category in os.listdir(dataset_dir):
            category_dir = os.path.join(dataset_dir, category)
            if not os.path.isdir(category_dir):
                continue

            image_files = [
                f
                for f in os.listdir(category_dir)
                if f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))
            ]
            total_count += len(image_files)
    except Exception as e:
        logger.error(f"Failed to count dataset images for {model_name}: {e}")
        return 0

    return total_count

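A quick usage sketch for the helpers above (illustrative only; the real caller is the training code later in this diff, and the model name here is hypothetical):

```python
# After a training run completes:
write_training_metadata("bird_classifier", get_dataset_image_count("bird_classifier"))

# Later, e.g. to decide whether retraining is worthwhile:
meta = read_training_metadata("bird_classifier")
if meta is not None:
    trained_on = meta["last_training_image_count"]
    current = get_dataset_image_count("bird_classifier")
    print(f"{current - trained_on} new images since {meta['last_training_date']}")
```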
class ClassificationTrainingProcess(FrigateProcess):
    def __init__(self, model_name: str) -> None:
        super().__init__(
@ -42,7 +130,8 @@ class ClassificationTrainingProcess(FrigateProcess):

    def run(self) -> None:
        self.pre_run_setup()
        self.__train_classification_model()
        success = self.__train_classification_model()
        exit(0 if success else 1)

    def __generate_representative_dataset_factory(self, dataset_dir: str):
        def generate_representative_dataset():
@ -65,85 +154,117 @@ class ClassificationTrainingProcess(FrigateProcess):
    @redirect_output_to_logger(logger, logging.DEBUG)
    def __train_classification_model(self) -> bool:
        """Train a classification model."""
        try:
        # import in the function so that tensorflow is not initialized multiple times
        import tensorflow as tf
        from tensorflow.keras import layers, models, optimizers
        from tensorflow.keras.applications import MobileNetV2
        from tensorflow.keras.preprocessing.image import ImageDataGenerator

            # import in the function so that tensorflow is not initialized multiple times
            import tensorflow as tf
            from tensorflow.keras import layers, models, optimizers
            from tensorflow.keras.applications import MobileNetV2
            from tensorflow.keras.preprocessing.image import ImageDataGenerator
            dataset_dir = os.path.join(CLIPS_DIR, self.model_name, "dataset")
            model_dir = os.path.join(MODEL_CACHE_DIR, self.model_name)
            os.makedirs(model_dir, exist_ok=True)

        logger.info(f"Kicking off classification training for {self.model_name}.")
        dataset_dir = os.path.join(CLIPS_DIR, self.model_name, "dataset")
        model_dir = os.path.join(MODEL_CACHE_DIR, self.model_name)
        os.makedirs(model_dir, exist_ok=True)
            num_classes = len(
                [
                    d
                    for d in os.listdir(dataset_dir)
                    if os.path.isdir(os.path.join(dataset_dir, d))
                ]
            )
        num_classes = len(
            [
                d
                for d in os.listdir(dataset_dir)
                if os.path.isdir(os.path.join(dataset_dir, d))
            ]
        )

        # Start with imagenet base model with 35% of channels in each layer
        base_model = MobileNetV2(
            input_shape=(224, 224, 3),
            include_top=False,
            weights="imagenet",
            alpha=0.35,
        )
        base_model.trainable = False  # Freeze pre-trained layers
            if num_classes < 2:
                logger.error(
                    f"Training failed for {self.model_name}: Need at least 2 classes, found {num_classes}"
                )
                return False

        model = models.Sequential(
            [
                base_model,
                layers.GlobalAveragePooling2D(),
                layers.Dense(128, activation="relu"),
                layers.Dropout(0.3),
                layers.Dense(num_classes, activation="softmax"),
            ]
        )
            # Start with imagenet base model with 35% of channels in each layer
            base_model = MobileNetV2(
                input_shape=(224, 224, 3),
                include_top=False,
                weights="imagenet",
                alpha=0.35,
            )
            base_model.trainable = False  # Freeze pre-trained layers

        model.compile(
            optimizer=optimizers.Adam(learning_rate=LEARNING_RATE),
            loss="categorical_crossentropy",
            metrics=["accuracy"],
        )
            model = models.Sequential(
                [
                    base_model,
                    layers.GlobalAveragePooling2D(),
                    layers.Dense(128, activation="relu"),
                    layers.Dropout(0.3),
                    layers.Dense(num_classes, activation="softmax"),
                ]
            )

        # create training set
        datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2)
        train_gen = datagen.flow_from_directory(
            dataset_dir,
            target_size=(224, 224),
            batch_size=BATCH_SIZE,
            class_mode="categorical",
            subset="training",
        )
            model.compile(
                optimizer=optimizers.Adam(learning_rate=LEARNING_RATE),
                loss="categorical_crossentropy",
                metrics=["accuracy"],
            )

        # write labelmap
        class_indices = train_gen.class_indices
        index_to_class = {v: k for k, v in class_indices.items()}
        sorted_classes = [index_to_class[i] for i in range(len(index_to_class))]
        with open(os.path.join(model_dir, "labelmap.txt"), "w") as f:
            for class_name in sorted_classes:
                f.write(f"{class_name}\n")
            # create training set
            datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2)
            train_gen = datagen.flow_from_directory(
                dataset_dir,
                target_size=(224, 224),
                batch_size=BATCH_SIZE,
                class_mode="categorical",
                subset="training",
            )

        # train the model
        model.fit(train_gen, epochs=EPOCHS, verbose=0)
            total_images = train_gen.samples
            logger.debug(
                f"Training {self.model_name}: {total_images} images across {num_classes} classes"
            )

        # convert model to tflite
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = (
            self.__generate_representative_dataset_factory(dataset_dir)
        )
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.uint8
        converter.inference_output_type = tf.uint8
        tflite_model = converter.convert()
            # write labelmap
            class_indices = train_gen.class_indices
            index_to_class = {v: k for k, v in class_indices.items()}
            sorted_classes = [index_to_class[i] for i in range(len(index_to_class))]
            with open(os.path.join(model_dir, "labelmap.txt"), "w") as f:
                for class_name in sorted_classes:
                    f.write(f"{class_name}\n")

        # write model
        with open(os.path.join(model_dir, "model.tflite"), "wb") as f:
            f.write(tflite_model)
            # train the model
            logger.debug(f"Training {self.model_name} for {EPOCHS} epochs...")
            model.fit(train_gen, epochs=EPOCHS, verbose=0)
            logger.debug(f"Converting {self.model_name} to TFLite...")

            # convert model to tflite
            converter = tf.lite.TFLiteConverter.from_keras_model(model)
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.representative_dataset = (
                self.__generate_representative_dataset_factory(dataset_dir)
            )
            converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
            converter.inference_input_type = tf.uint8
            converter.inference_output_type = tf.uint8
            tflite_model = converter.convert()

            # write model
            model_path = os.path.join(model_dir, "model.tflite")
            with open(model_path, "wb") as f:
                f.write(tflite_model)

            # verify model file was written successfully
            if not os.path.exists(model_path) or os.path.getsize(model_path) == 0:
                logger.error(
                    f"Training failed for {self.model_name}: Model file was not created or is empty"
                )
                return False

            # write training metadata with image count
            dataset_image_count = get_dataset_image_count(self.model_name)
            write_training_metadata(self.model_name, dataset_image_count)

            logger.info(f"Finished training {self.model_name}")
            return True

        except Exception as e:
            logger.error(f"Training failed for {self.model_name}: {e}", exc_info=True)
            return False


def kickoff_model_training(
@ -165,18 +286,36 @@ def kickoff_model_training(
    training_process.start()
    training_process.join()

    # reload model and mark training as complete
    embeddingRequestor.send_data(
        EmbeddingsRequestEnum.reload_classification_model.value,
        {"model_name": model_name},
    )
    requestor.send_data(
        UPDATE_MODEL_STATE,
        {
            "model": model_name,
            "state": ModelStatusTypesEnum.complete,
        },
    )
    # check if training succeeded by examining the exit code
    training_success = training_process.exitcode == 0

    if training_success:
        # reload model and mark training as complete
        embeddingRequestor.send_data(
            EmbeddingsRequestEnum.reload_classification_model.value,
            {"model_name": model_name},
        )
        requestor.send_data(
            UPDATE_MODEL_STATE,
            {
                "model": model_name,
                "state": ModelStatusTypesEnum.complete,
            },
        )
    else:
        logger.error(
            f"Training subprocess failed for {model_name} (exit code: {training_process.exitcode})"
        )
        # mark training as failed so UI shows error state
        # don't reload the model since it failed
        requestor.send_data(
            UPDATE_MODEL_STATE,
            {
                "model": model_name,
                "state": ModelStatusTypesEnum.failed,
            },
        )

    requestor.stop()
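The success check relies on standard `multiprocessing` semantics: after `join()` returns, `Process.exitcode` holds the child's exit status, so the `exit(0 if success else 1)` in `run()` surfaces the boolean across the process boundary. A minimal standalone illustration:

```python
import multiprocessing


def worker(ok: bool) -> None:
    # Mirrors run(): raise SystemExit with the status we want the parent to see.
    exit(0 if ok else 1)


if __name__ == "__main__":
    p = multiprocessing.Process(target=worker, args=(False,))
    p.start()
    p.join()
    print(p.exitcode)  # 1 -> treated as a failed training run
```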
@ -348,7 +348,7 @@ def migrate_016_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]


def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]:
    """Handle migrating frigate config to 0.16-0"""
    """Handle migrating frigate config to 0.17-0"""
    new_config = config.copy()

    # migrate global to new recording configuration
@ -380,16 +380,17 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]

    if global_genai:
        new_genai_config = {}
        new_object_config = config.get("objects", {})
        new_object_config = new_config.get("objects", {})
        new_object_config["genai"] = {}

        for key in global_genai.keys():
            if key not in ["enabled", "model", "provider", "base_url", "api_key"]:
                new_object_config["genai"][key] = global_genai[key]
            else:
            if key in ["model", "provider", "base_url", "api_key"]:
                new_genai_config[key] = global_genai[key]
            else:
                new_object_config["genai"][key] = global_genai[key]

        config["genai"] = new_genai_config
        new_config["genai"] = new_genai_config
        new_config["objects"] = new_object_config

    for name, camera in config.get("cameras", {}).items():
        camera_config: dict[str, dict[str, Any]] = camera.copy()
@ -415,8 +416,9 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]
        camera_genai = camera_config.get("genai", {})

        if camera_genai:
            new_object_config = config.get("objects", {})
            new_object_config["genai"] = camera_genai
            camera_object_config = camera_config.get("objects", {})
            camera_object_config["genai"] = camera_genai
            camera_config["objects"] = camera_object_config
            del camera_config["genai"]

        new_config["cameras"][name] = camera_config
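To make the genai split concrete (a hand-worked example with illustrative keys, not output from the migration itself): provider credentials stay under a top-level `genai` section while the remaining options, including `enabled`, move under `objects.genai`:

```python
old = {
    "genai": {
        "enabled": True,
        "provider": "ollama",
        "base_url": "http://localhost:11434",
        "model": "llava",
        "object_prompt": "Describe the {label}",  # illustrative behavioral key
    }
}

# After migrate_017_0, the equivalent sections look like:
new = {
    "genai": {
        "provider": "ollama",
        "base_url": "http://localhost:11434",
        "model": "llava",
    },
    "objects": {
        "genai": {
            "enabled": True,
            "object_prompt": "Describe the {label}",
        }
    },
}
```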
@ -1,7 +1,6 @@
import logging
import os
import threading
import time
from pathlib import Path
from typing import Callable, List

@ -10,40 +9,11 @@ import requests
from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import UPDATE_MODEL_STATE
from frigate.types import ModelStatusTypesEnum
from frigate.util.file import FileLock

logger = logging.getLogger(__name__)


class FileLock:
    def __init__(self, path):
        self.path = path
        self.lock_file = f"{path}.lock"

        # we have not acquired the lock yet so it should not exist
        if os.path.exists(self.lock_file):
            try:
                os.remove(self.lock_file)
            except Exception:
                pass

    def acquire(self):
        parent_dir = os.path.dirname(self.lock_file)
        os.makedirs(parent_dir, exist_ok=True)

        while True:
            try:
                with open(self.lock_file, "x"):
                    return
            except FileExistsError:
                time.sleep(0.1)

    def release(self):
        try:
            os.remove(self.lock_file)
        except FileNotFoundError:
            pass


class ModelDownloader:
    def __init__(
        self,
@ -81,15 +51,13 @@ class ModelDownloader:
    def _download_models(self):
        for file_name in self.file_names:
            path = os.path.join(self.download_path, file_name)
            lock = FileLock(path)
            lock_path = f"{path}.lock"
            lock = FileLock(lock_path, cleanup_stale_on_init=True)

            if not os.path.exists(path):
                lock.acquire()
                try:
                with lock:
                    if not os.path.exists(path):
                        self.download_func(path)
                finally:
                    lock.release()

            self.requestor.send_data(
                UPDATE_MODEL_STATE,
276
frigate/util/file.py
Normal file
@ -0,0 +1,276 @@
"""Path and file utilities."""

import base64
import fcntl
import logging
import os
import time
from pathlib import Path
from typing import Optional

import cv2
from numpy import ndarray

from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.models import Event

logger = logging.getLogger(__name__)


def get_event_thumbnail_bytes(event: Event) -> bytes | None:
    if event.thumbnail:
        return base64.b64decode(event.thumbnail)
    else:
        try:
            with open(
                os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"), "rb"
            ) as f:
                return f.read()
        except Exception:
            return None


def get_event_snapshot(event: Event) -> ndarray:
    media_name = f"{event.camera}-{event.id}"
    return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")


### Deletion


def delete_event_images(event: Event) -> bool:
    return delete_event_snapshot(event) and delete_event_thumbnail(event)


def delete_event_snapshot(event: Event) -> bool:
    media_name = f"{event.camera}-{event.id}"
    media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")

    try:
        media_path.unlink(missing_ok=True)
        media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.webp")
        media_path.unlink(missing_ok=True)
        # also delete clean.png (legacy) for backward compatibility
        media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
        media_path.unlink(missing_ok=True)
        return True
    except OSError:
        return False


def delete_event_thumbnail(event: Event) -> bool:
    if event.thumbnail:
        return True
    else:
        Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")).unlink(
            missing_ok=True
        )
        return True


### File Locking


class FileLock:
    """
    A file-based lock for coordinating access to resources across processes.

    Uses fcntl.flock() for proper POSIX file locking on Linux. Supports timeouts,
    stale lock detection, and can be used as a context manager.

    Example:
        ```python
        # Using as a context manager (recommended)
        with FileLock("/path/to/resource.lock", timeout=60):
            # Critical section
            do_something()

        # Manual acquisition and release
        lock = FileLock("/path/to/resource.lock")
        if lock.acquire(timeout=60):
            try:
                do_something()
            finally:
                lock.release()
        ```

    Attributes:
        lock_path: Path to the lock file
        timeout: Maximum time to wait for lock acquisition (seconds)
        poll_interval: Time to wait between lock acquisition attempts (seconds)
        stale_timeout: Time after which a lock is considered stale (seconds)
    """

    def __init__(
        self,
        lock_path: str | Path,
        timeout: int = 300,
        poll_interval: float = 1.0,
        stale_timeout: int = 600,
        cleanup_stale_on_init: bool = False,
    ):
        """
        Initialize a FileLock.

        Args:
            lock_path: Path to the lock file
            timeout: Maximum time to wait for lock acquisition in seconds (default: 300)
            poll_interval: Time to wait between lock attempts in seconds (default: 1.0)
            stale_timeout: Time after which a lock is considered stale in seconds (default: 600)
            cleanup_stale_on_init: Whether to clean up stale locks on initialization (default: False)
        """
        self.lock_path = Path(lock_path)
        self.timeout = timeout
        self.poll_interval = poll_interval
        self.stale_timeout = stale_timeout
        self._fd: Optional[int] = None
        self._acquired = False

        if cleanup_stale_on_init:
            self._cleanup_stale_lock()

    def _cleanup_stale_lock(self) -> bool:
        """
        Clean up a stale lock file if it exists and is old.

        Returns:
            True if lock was cleaned up, False otherwise
        """
        try:
            if self.lock_path.exists():
                # Check if lock file is older than stale_timeout
                lock_age = time.time() - self.lock_path.stat().st_mtime
                if lock_age > self.stale_timeout:
                    logger.warning(
                        f"Removing stale lock file: {self.lock_path} (age: {lock_age:.1f}s)"
                    )
                    self.lock_path.unlink()
                    return True
        except Exception as e:
            logger.error(f"Error cleaning up stale lock: {e}")

        return False

    def is_stale(self) -> bool:
        """
        Check if the lock file is stale (older than stale_timeout).

        Returns:
            True if lock is stale, False otherwise
        """
        try:
            if self.lock_path.exists():
                lock_age = time.time() - self.lock_path.stat().st_mtime
                return lock_age > self.stale_timeout
        except Exception:
            pass

        return False

    def acquire(self, timeout: Optional[int] = None) -> bool:
        """
        Acquire the file lock using fcntl.flock().

        Args:
            timeout: Maximum time to wait for lock in seconds (uses instance timeout if None)

        Returns:
            True if lock acquired, False if timeout or error
        """
        if self._acquired:
            logger.warning(f"Lock already acquired: {self.lock_path}")
            return True

        if timeout is None:
            timeout = self.timeout

        # Ensure parent directory exists
        self.lock_path.parent.mkdir(parents=True, exist_ok=True)

        # Clean up stale lock before attempting to acquire
        self._cleanup_stale_lock()

        try:
            self._fd = os.open(self.lock_path, os.O_CREAT | os.O_RDWR)

            start_time = time.time()
            while time.time() - start_time < timeout:
                try:
                    fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    self._acquired = True
                    logger.debug(f"Acquired lock: {self.lock_path}")
                    return True
                except (OSError, IOError):
                    # Lock is held by another process
                    if time.time() - start_time >= timeout:
                        logger.warning(f"Timeout waiting for lock: {self.lock_path}")
                        os.close(self._fd)
                        self._fd = None
                        return False

                    time.sleep(self.poll_interval)

            # Timeout reached
            if self._fd is not None:
                os.close(self._fd)
                self._fd = None
            return False

        except Exception as e:
            logger.error(f"Error acquiring lock: {e}")
            if self._fd is not None:
                try:
                    os.close(self._fd)
                except Exception:
                    pass
                self._fd = None
            return False

    def release(self) -> None:
        """
        Release the file lock.

        This closes the file descriptor and removes the lock file.
        """
        if not self._acquired:
            return

        try:
            # Close file descriptor and release fcntl lock
            if self._fd is not None:
                try:
                    fcntl.flock(self._fd, fcntl.LOCK_UN)
                    os.close(self._fd)
                except Exception as e:
                    logger.warning(f"Error closing lock file descriptor: {e}")
                finally:
                    self._fd = None

            # Remove lock file
            if self.lock_path.exists():
                self.lock_path.unlink()
            logger.debug(f"Released lock: {self.lock_path}")

        except FileNotFoundError:
            # Lock file already removed, that's fine
            pass
        except Exception as e:
            logger.error(f"Error releasing lock: {e}")
        finally:
            self._acquired = False

    def __enter__(self):
        """Context manager entry - acquire the lock."""
        if not self.acquire():
            raise TimeoutError(f"Failed to acquire lock: {self.lock_path}")
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit - release the lock."""
        self.release()
        return False

    def __del__(self):
        """Destructor - ensure lock is released."""
        if self._acquired:
            self.release()
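The non-blocking acquisition path above leans on `fcntl.flock` raising when the lock is already held elsewhere. A small demonstration of that primitive, independent of the class and written against the standard library only (the `/tmp/demo.lock` path is arbitrary):

```python
import fcntl
import os

fd1 = os.open("/tmp/demo.lock", os.O_CREAT | os.O_RDWR)
fd2 = os.open("/tmp/demo.lock", os.O_CREAT | os.O_RDWR)

fcntl.flock(fd1, fcntl.LOCK_EX | fcntl.LOCK_NB)  # succeeds

try:
    # A second, independent open file description cannot take the lock.
    fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
    print("second descriptor blocked, as FileLock.acquire() expects")

fcntl.flock(fd1, fcntl.LOCK_UN)  # now fd2 could acquire it
```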
@ -369,6 +369,10 @@ def get_ort_providers(
                    "enable_cpu_mem_arena": False,
                }
            )
        elif provider == "AzureExecutionProvider":
            # Skip Azure provider - not typically available on local hardware
            # and prevents fallback to OpenVINO when it's the first provider
            continue
        else:
            providers.append(provider)
            options.append({})
@ -1,62 +0,0 @@
"""Path utilities."""

import base64
import os
from pathlib import Path

import cv2
from numpy import ndarray

from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.models import Event


def get_event_thumbnail_bytes(event: Event) -> bytes | None:
    if event.thumbnail:
        return base64.b64decode(event.thumbnail)
    else:
        try:
            with open(
                os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"), "rb"
            ) as f:
                return f.read()
        except Exception:
            return None


def get_event_snapshot(event: Event) -> ndarray:
    media_name = f"{event.camera}-{event.id}"
    return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")


### Deletion


def delete_event_images(event: Event) -> bool:
    return delete_event_snapshot(event) and delete_event_thumbnail(event)


def delete_event_snapshot(event: Event) -> bool:
    media_name = f"{event.camera}-{event.id}"
    media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")

    try:
        media_path.unlink(missing_ok=True)
        media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.webp")
        media_path.unlink(missing_ok=True)
        # also delete clean.png (legacy) for backward compatibility
        media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png")
        media_path.unlink(missing_ok=True)
        return True
    except OSError:
        return False


def delete_event_thumbnail(event: Event) -> bool:
    if event.thumbnail:
        return True
    else:
        Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")).unlink(
            missing_ok=True
        )
        return True