Mirror of https://github.com/blakeblackshear/frigate.git (synced 2026-03-23 16:48:23 +03:00)

Compare commits: v0.17.0-rc...dev (259 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 7b6d0c5e42 |  |
|  | 57c0473e6e |  |
|  | 6251b758b4 |  |
|  | 59f570f436 |  |
|  | ec7040bed5 |  |
|  | b6c03c99de |  |
|  | 74c89beaf9 |  |
|  | 6d2b84e202 |  |
|  | a8da4c4521 |  |
|  | acd10d0e08 |  |
|  | 373bcadef8 |  |
|  | 34a06ac77b |  |
|  | 23820718ee |  |
|  | 411798004a |  |
|  | 68de18f10d |  |
|  | cedcbdba07 |  |
|  | c6991db432 |  |
|  | a9a2eecebb |  |
|  | ede8b74371 |  |
|  | e2bfa26719 |  |
|  | c93dad9bd9 |  |
|  | 416a9b7692 |  |
|  | d11c26970d |  |
|  | e78da2758d |  |
|  | a05f35c747 |  |
|  | 7972b3240c |  |
|  | c1f315a250 |  |
|  | 7cec1f81b9 |  |
|  | 12dc698357 |  |
|  | e23a57db3e |  |
|  | 5e3a625227 |  |
|  | 97186e9819 |  |
|  | 94a0d0a72b |  |
|  | 04201ce63a |  |
|  | 861c36957f |  |
|  | 45b4e3736d |  |
|  | 5cf6adbff4 |  |
|  | bcc493680f |  |
|  | b9ecb1f433 |  |
|  | cb0563823d |  |
|  | 97718b8760 |  |
|  | 1b7ead885e |  |
|  | 6e21caaa0e |  |
|  | ae9b307dfc |  |
|  | 7700d73f09 |  |
|  | f658dbb158 |  |
|  | 2ace8d3670 |  |
|  | dc27d4ad16 |  |
|  | 7708523865 |  |
|  | aea91a91d5 |  |
|  | 01c16a9250 |  |
|  | d4731c1dea |  |
|  | 65ca90db20 |  |
|  | 722ef6a1fe |  |
|  | bd289f3146 |  |
|  | c08ec9652f |  |
|  | 7485b48f0e |  |
|  | 6d7b1ce384 |  |
|  | e80da2b297 |  |
|  | 49ffd0b01a |  |
|  | bf2dcfd622 |  |
|  | 09df43a675 |  |
|  | dfc6ff9202 |  |
|  | bc29c4ba71 |  |
|  | 3ec2305e6a |  |
|  | 5a214eb0d1 |  |
|  | 310b5dfe05 |  |
|  | 8b035be132 |  |
|  | be79ad89b6 |  |
|  | 687fefb343 |  |
|  | 7864c446fc |  |
|  | b147b53522 |  |
|  | d2b2faa2d7 |  |
|  | 614a6b39d4 |  |
|  | f29ee53fb4 |  |
|  | 324953d3a5 |  |
|  | 0c988da485 |  |
|  | 544d3c6139 |  |
|  | 104e623923 |  |
|  | 192aba901a |  |
|  | 947ddfa542 |  |
|  | 9eb037c369 |  |
|  | 59fc8449ed |  |
|  | a0b8271532 |  |
|  | 19480867fb |  |
|  | 5254bfd00e |  |
|  | e75b8ca6cc |  |
|  | 29ffcea851 |  |
|  | 925cb049ce |  |
|  | c575e6b38f |  |
|  | d568207b1e |  |
|  | 29d071f676 |  |
|  | e5494bbb0f |  |
|  | 8d010c52e0 |  |
|  | 39c878b871 |  |
|  | c0c64feae3 |  |
|  | f0ecc5b0da |  |
|  | a8ea77bebc |  |
|  | b9a35dc873 |  |
|  | dbf997692f |  |
|  | f6f4c67372 |  |
|  | 36efcf938d |  |
|  | 665fc76397 |  |
|  | 00fd1ccee3 |  |
|  | 9d3bc82e87 |  |
|  | 983ee58b40 |  |
|  | 34b5e3c5b0 |  |
|  | 4c5124e615 |  |
|  | d8d5f1c0b2 |  |
|  | 7f061ba5b1 |  |
|  | bfb275fc47 |  |
|  | d838d13d3a |  |
|  | f79e07da9e |  |
|  | 6291107532 |  |
|  | acbec0bd59 |  |
|  | fe22d994bc |  |
|  | 5de553ca06 |  |
|  | 680361c35c |  |
|  | e6405a2566 |  |
|  | 5f75289c5b |  |
|  | 0723380d4a |  |
|  | a2eabdbeef |  |
|  | ec49ccf83a |  |
|  | 6007ec82b5 |  |
|  | 0a6c440f24 |  |
|  | aa5085caa0 |  |
|  | 3dd584b5ef |  |
|  | e1ae32640c |  |
|  | 68fe7884b7 |  |
|  | 7b2c75be49 |  |
|  | b405877587 |  |
|  | d88c0e260e |  |
|  | 90ad5213c7 |  |
|  | 6c47e837e3 |  |
|  | 5cec1be91d |  |
|  | e8b9225175 |  |
|  | 119137c4fe |  |
|  | 1188d87588 |  |
|  | 9cbd80d981 |  |
|  | c4a5ac0e77 |  |
|  | dd9497baf2 |  |
|  | e930492ccc |  |
|  | 41e290449e |  |
|  | b6f78bd1f2 |  |
|  | bde518e861 |  |
|  | b2c7840c29 |  |
|  | df27e04c0f |  |
|  | ef07563d0a |  |
|  | a705f254e5 |  |
|  | 4e71a835cb |  |
|  | 537e723c30 |  |
|  | acdfed40a9 |  |
|  | 889dfca36c |  |
|  | dda9f7bfed |  |
|  | f316244495 |  |
|  | d1f3a807d3 |  |
|  | c2e667c0dd |  |
|  | c9bd907721 |  |
|  | 34cc1208a6 |  |
|  | 2babfd2ec9 |  |
|  | ab3cef813c |  |
|  | 229436c94a |  |
|  | 02678f4a09 |  |
|  | 65db9b0aec |  |
|  | 2782931c72 |  |
|  | b2118382cb |  |
|  | 95956a690b |  |
|  | 5e7d426768 |  |
|  | c338533c83 |  |
|  | 8c67704ffb |  |
|  | d311974949 |  |
|  | c3c27d036f |  |
|  | 0dd1e94d60 |  |
|  | 720e949fef |  |
|  | 1f1d546326 |  |
|  | 4232cc483d |  |
|  | 6a21b2952d |  |
|  | c687aa5119 |  |
|  | e064024a31 |  |
|  | 96c70eee4c |  |
|  | fa1f9a1fa4 |  |
|  | e7250f24cb |  |
|  | eeefbf2bb5 |  |
|  | ba0e7bbc1a |  |
|  | e16763cff9 |  |
|  | b88186983a |  |
|  | b4eac11cbd |  |
|  | 9c3a74b4f5 |  |
|  | 91714b8743 |  |
|  | e5087b092d |  |
|  | 5f02e33e55 |  |
|  | 84760c42cb |  |
|  | bb6e889449 |  |
|  | 12506f8c80 |  |
|  | fef1fb36cc |  |
|  | 2db0269825 |  |
|  | a4362caa0a |  |
|  | fa0feebd03 |  |
|  | c78ab2dc87 |  |
|  | e76b48f98b |  |
|  | af2339b35c |  |
|  | 9b7cee18db |  |
|  | d3260e34b6 |  |
|  | ee2c96c793 |  |
|  | 542295dcb3 |  |
|  | 56c7a13fbe |  |
|  | 88348bf535 |  |
|  | b66e69efc9 |  |
|  | 63e7bf8b28 |  |
|  | 39ad565f81 |  |
|  | 9ef8b70208 |  |
|  | 6b77952b72 |  |
|  | 3745f5ff93 |  |
|  | 3297cab347 |  |
|  | fc3545310c |  |
|  | dde738cfdc |  |
|  | 004bb7d80d |  |
|  | 85feb4edcb |  |
|  | cffa54c80d |  |
|  | 48164f6dfc |  |
|  | bc457743b6 |  |
|  | 451d6f5c22 |  |
|  | d24b96d3bb |  |
|  | 0310a9654d |  |
|  | 7df3622243 |  |
|  | a0d6cb5c15 |  |
|  | dd8282ff3c |  |
|  | 352d271fe4 |  |
|  | a6e11a59d6 |  |
|  | 984d654c40 |  |
|  | a7d8d13d9a |  |
|  | 4d51f7a1bb |  |
|  | c9be98f935 |  |
|  | 85ed8c6432 |  |
|  | f0d69f7856 |  |
|  | 5b16978430 |  |
|  | b6142e3017 |  |
|  | e1a6f69c4e |  |
|  | d940ff3341 |  |
|  | 1f14f1cda0 |  |
|  | 806c5892b6 |  |
|  | 8bc82060f1 |  |
|  | 3cc8311b48 |  |
|  | 71139ef842 |  |
|  | f4f32a3f59 |  |
|  | 29a4076589 |  |
|  | d4d4164f99 |  |
|  | 5cc81bc7a1 |  |
|  | 1856e62ad0 |  |
|  | 252f1a6eb9 |  |
|  | ad076aefff |  |
|  | 8a95cd2472 |  |
|  | 3aeeb09834 |  |
|  | 8c98b4c9d0 |  |
|  | a2d6e04f45 |  |
|  | 7c11747ab3 |  |
|  | 5f2536dcd8 |  |
|  | ef5608a970 |  |
|  | 3101d5f27b |  |
@@ -229,6 +229,7 @@ Reolink
restream
restreamed
restreaming
RJSF
rkmpp
rknn
rkrga

16  .github/copilot-instructions.md (vendored)

@@ -324,6 +324,12 @@ try:
    value = await sensor.read()
except Exception:  # ❌ Too broad
    logger.error("Failed")
+
+# Returning exceptions in JSON responses
+except ValueError as e:
+    return JSONResponse(
+        content={"success": False, "message": str(e)},
+    )
```

### ✅ Use These Instead

@@ -353,6 +359,16 @@ try:
    value = await sensor.read()
except SensorException as err:  # ✅ Specific
    logger.exception("Failed to read sensor")
+
+# Safe error responses
+except ValueError:
+    logger.exception("Invalid parameters for API request")
+    return JSONResponse(
+        content={
+            "success": False,
+            "message": "Invalid request parameters",
+        },
+    )
```

## Project-Specific Conventions
49  .github/pull_request_template.md (vendored)

@@ -1,17 +1,17 @@
_Please read the [contributing guidelines](https://github.com/blakeblackshear/frigate/blob/dev/CONTRIBUTING.md) before submitting a PR._

## Proposed change
<!--
Thank you!

-Describe what this pull request does and how it will benefit users of Frigate.
-Please describe in detail any considerations, breaking changes, etc.
-
-If you're introducing a new feature or significantly refactoring existing functionality,
-we encourage you to start a discussion first. This helps ensure your idea aligns with
-Frigate's development goals.
+Describe what this pull request does and how it will benefit users of Frigate.
+Please describe in detail any considerations, breaking changes, etc. that are
+made in this pull request.
-->


## Type of change

- [ ] Dependency upgrade

@@ -26,6 +26,44 @@
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
+
+## For new features
+
+<!--
+Every new feature adds scope that maintainers must test, maintain, and support long-term.
+We try to be thoughtful about what we take on, and sometimes that means saying no to
+good code if the feature isn't the right fit — or saying yes to something we weren't sure
+about. These calls are sometimes subjective, and we won't always get them right. We're
+happy to discuss and reconsider.
+
+Linking to an existing feature request or discussion with community interest helps us
+understand demand, but a great idea is a great idea even without a crowd behind it.
+
+You can delete this section for bugfixes and non-feature changes.
+-->
+
+- [ ] There is an existing feature request or discussion with community interest for this change.
+  - Link:
+
+## AI disclosure
+
+<!--
+We welcome contributions that use AI tools, but we need to understand your relationship
+with the code you're submitting. See our AI usage policy in CONTRIBUTING.md for details.
+
+Be honest — this won't disqualify your PR. Trust matters more than method.
+-->
+
+- [ ] No AI tools were used in this PR.
+- [ ] AI tools were used in this PR. Details below:
+
+**AI tool(s) used** (e.g., Claude, Copilot, ChatGPT, Cursor):
+
+**How AI was used** (e.g., code generation, code review, debugging, documentation):
+
+**Extent of AI involvement** (e.g., generated entire implementation, assisted with specific functions, suggested fixes):
+
+**Human oversight**: Describe what manual review, testing, and validation you performed on the AI-generated portions.
+

## Checklist

<!--

@@ -35,5 +73,6 @@
- [ ] The code change is tested and works locally.
- [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
- [ ] There is no commented out code in this PR.
+- [ ] I can explain every line of code in this PR if asked.
- [ ] UI changes including text have used i18n keys and have been added to the `en` locale.
- [ ] The code has been formatted using Ruff (`ruff format frigate`)
16  .github/workflows/ci.yml (vendored)

@@ -32,7 +32,7 @@ jobs:
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push amd64 standard build
-       uses: docker/build-push-action@v5
+       uses: docker/build-push-action@v7
        with:
          context: .
          file: docker/main/Dockerfile
@@ -56,7 +56,7 @@ jobs:
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push arm64 standard build
-       uses: docker/build-push-action@v5
+       uses: docker/build-push-action@v7
        with:
          context: .
          file: docker/main/Dockerfile
@@ -67,7 +67,7 @@ jobs:
            ${{ steps.setup.outputs.image-name }}-standard-arm64
          cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
      - name: Build and push RPi build
-       uses: docker/bake-action@v6
+       uses: docker/bake-action@v7
        with:
          source: .
          push: true
@@ -96,7 +96,7 @@ jobs:
          BASE_IMAGE: nvcr.io/nvidia/tensorrt:23.12-py3-igpu
          SLIM_BASE: nvcr.io/nvidia/tensorrt:23.12-py3-igpu
          TRT_BASE: nvcr.io/nvidia/tensorrt:23.12-py3-igpu
-       uses: docker/bake-action@v6
+       uses: docker/bake-action@v7
        with:
          source: .
          push: true
@@ -124,7 +124,7 @@ jobs:
      - name: Build and push TensorRT (x86 GPU)
        env:
          COMPUTE_LEVEL: "50 60 70 80 90"
-       uses: docker/bake-action@v6
+       uses: docker/bake-action@v7
        with:
          source: .
          push: true
@@ -137,7 +137,7 @@ jobs:
      - name: AMD/ROCm general build
        env:
          HSA_OVERRIDE: 0
-       uses: docker/bake-action@v6
+       uses: docker/bake-action@v7
        with:
          source: .
          push: true
@@ -163,7 +163,7 @@ jobs:
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push Rockchip build
-       uses: docker/bake-action@v6
+       uses: docker/bake-action@v7
        with:
          source: .
          push: true
@@ -188,7 +188,7 @@ jobs:
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push Synaptics build
-       uses: docker/bake-action@v6
+       uses: docker/bake-action@v7
        with:
          source: .
          push: true
3  .github/workflows/pull_request.yml (vendored)

@@ -27,6 +27,9 @@ jobs:
      - name: Lint
        run: npm run lint
        working-directory: ./web
+     - name: Check i18n keys
+       run: npm run i18n:extract:ci
+       working-directory: ./web

  web_test:
    name: Web - Test
2  .gitignore (vendored)

@@ -3,6 +3,8 @@ __pycache__
.mypy_cache
*.swp
debug
+.claude/*
+.mcp.json
.vscode/*
!.vscode/launch.json
config/*
17  .vscode/launch.json (vendored)

@@ -6,6 +6,23 @@
      "type": "debugpy",
      "request": "launch",
      "module": "frigate"
    },
+   {
+     "type": "editor-browser",
+     "request": "launch",
+     "name": "Vite: Launch in integrated browser",
+     "url": "http://localhost:5173"
+   },
+   {
+     "type": "editor-browser",
+     "request": "launch",
+     "name": "Nginx: Launch in integrated browser",
+     "url": "http://localhost:5000"
+   },
+   {
+     "type": "editor-browser",
+     "request": "attach",
+     "name": "Attach to integrated browser"
+   }
  ]
}
140  CONTRIBUTING.md (new file)

@@ -0,0 +1,140 @@
# Contributing to Frigate

Thank you for your interest in contributing to Frigate. This document covers the expectations and guidelines for contributions. Please read it before submitting a pull request.

## Before you start

### Bugfixes

If you've found a bug and want to fix it, go for it. Link to the relevant issue in your PR if one exists, or describe the bug in the PR description.

### New features

Every new feature adds scope that the maintainers must test, maintain, and support long-term. Before writing code for a new feature:

1. **Check for existing discussion.** Search [feature requests](https://github.com/blakeblackshear/frigate/issues) and [discussions](https://github.com/blakeblackshear/frigate/discussions) to see if it's been proposed or discussed. Pinned feature requests are on our radar — we plan to get to them, but we don't maintain a public roadmap or timeline. Check in with us first if you have interest in contributing to one.
2. **Start a discussion or feature request first.** This helps ensure your idea aligns with Frigate's direction before you invest time building it. Community interest in a feature request helps us gauge demand, though a great idea is a great idea even without a crowd behind it.
3. **Be open to "no".** We try to be thoughtful about what we take on, and sometimes that means saying no to good code if the feature isn't the right fit for the project. These calls are sometimes subjective, and we won't always get them right. We're happy to discuss and reconsider.

## AI usage policy

AI tools are a reality of modern development and we're not opposed to their use. But we need to understand your relationship with the code you're submitting. The more AI was involved, the more important it is that you've genuinely reviewed, tested, and understood what it produced.

### Requirements when AI is used

If AI is used to generate any portion of the code, contributors must adhere to the following requirements:

1. **Explicitly disclose the manner in which AI was employed.** The PR template asks for this. Be honest — this won't automatically disqualify your PR. We'd rather have an honest disclosure than find out later. Trust matters more than method.
2. **Perform a comprehensive manual review prior to submitting the pull request.** Don't submit code you haven't read carefully and tested locally.
3. **Be prepared to explain every line of code you submitted when asked about it by a maintainer.** If you can't explain why something works the way it does, you're not ready to submit it.
4. **It is strictly prohibited to use AI to write your posts for you** (bug reports, feature requests, pull request descriptions, GitHub discussions, responding to humans, etc.). We need to hear from _you_, not your AI assistant. These are the spaces where we build trust and understanding with contributors, and that only works if we're talking to each other.

### Established contributors

Contributors with a long history of thoughtful, quality contributions to Frigate have earned trust through that track record. The level of scrutiny we apply to AI usage naturally reflects that trust. This isn't a formal exemption — it's just how trust works. If you've been around, we know how you think and how you work. If you're new, we're still getting to know you, and clear disclosure helps build that relationship.

### What this means in practice

We're not trying to gatekeep how you write code. Use whatever tools make you productive. But there's a difference between using AI as a tool to implement something you understand and handing a feature request to an AI and submitting whatever comes back. The former is fine. The latter creates maintenance risk for the project.

Some honest context: when we review a PR, we're not just evaluating whether the code works today. We're evaluating whether we can maintain it, debug it, and extend it long-term — often without the original author's involvement. Code that the author doesn't deeply understand is code that nobody understands, and that's a liability.

## Pull request guidelines

### Before submitting

- **Search for existing PRs** to avoid duplicating effort.
- **Test your changes locally.** Your PR cannot be merged unless tests pass.
- **Format your code.** Run `ruff format frigate` for Python and `npm run prettier:write` from the `web/` directory for frontend changes.
- **Run the linter.** Run `ruff check frigate` for Python and `npm run lint` from `web/` for frontend.
- **One concern per PR.** Don't combine unrelated changes. A bugfix and a new feature should be separate PRs.

### What we look for in review

- **Does it work?** Tested locally, tests pass, no regressions.
- **Is it maintainable?** Clear code, appropriate complexity, good separation of concerns.
- **Does it fit?** Consistent with Frigate's architecture and design philosophy.
- **Is it scoped well?** Solves the stated problem without unnecessary additions.

### After submitting

- Be responsive to review feedback. We may ask for changes.
- Expect honest, direct feedback. We try to be respectful but we also try to be efficient.
- If your PR goes stale, rebase it on the latest `dev` branch.

## Coding standards

### Python (backend)

- **Python** — use modern language features (type hints, pattern matching, f-strings, dataclasses)
- **Formatting**: Ruff (configured in `pyproject.toml`)
- **Linting**: Ruff
- **Testing**: `python3 -u -m unittest`
- **Logging**: Use module-level `logger = logging.getLogger(__name__)` with lazy formatting
- **Async**: All external I/O must be async. No blocking calls in async functions.
- **Error handling**: Use specific exception types. Keep try blocks minimal.
- **Language**: American English for all code, comments, and documentation
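To make the logging and error-handling conventions above concrete, here is a minimal sketch (the `sensor` object and `read_sensor` helper are hypothetical, not code from the Frigate tree; only the conventions themselves come from this guide):

```python
import asyncio
import logging

# Module-level logger, named after the module
logger = logging.getLogger(__name__)


async def read_sensor(sensor) -> float | None:
    """Read one value while following the conventions above."""
    try:
        # Keep the try block minimal: only the call that can fail
        value = await asyncio.wait_for(sensor.read(), timeout=1.0)
    except asyncio.TimeoutError:
        # Specific exception type, plus lazy %-style formatting: the message
        # is only built if the record is actually emitted
        logger.exception("Timed out reading sensor %s", sensor)
        return None
    return value
```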
### TypeScript/React (frontend)

- **Linting**: ESLint (`npm run lint` from `web/`)
- **Formatting**: Prettier (`npm run prettier:write` from `web/`)
- **Type safety**: TypeScript strict mode. Avoid `any`.
- **i18n**: All user-facing strings must use `react-i18next`. Never hardcode display text in components. Add English strings to the appropriate files in `web/public/locales/en/`.
- **Components**: Use Radix UI/shadcn primitives and TailwindCSS with the `cn()` utility.

### Development commands

```bash
# Python
python3 -u -m unittest                                     # Run all tests
python3 -u -m unittest frigate.test.test_ffmpeg_presets   # Run specific test
ruff format frigate                                        # Format
ruff check frigate                                         # Lint

# Frontend (from web/ directory)
npm run build           # Build
npm run lint            # Lint
npm run lint:fix        # Lint + fix
npm run prettier:write  # Format
```

## Project structure

```
frigate/          # Python backend
  api/            # FastAPI route handlers
  config/         # Configuration parsing and validation
  detectors/      # Object detection backends
  events/         # Event management and storage
  test/           # Backend tests
  util/           # Shared utilities
web/              # React/TypeScript frontend
  src/
    api/          # API client functions
    components/   # Reusable components
    hooks/        # Custom React hooks
    pages/        # Route components
    types/        # TypeScript type definitions
    views/        # Complex view components
docker/           # Docker build files
docs/             # Documentation site
migrations/       # Database migrations
```

## Translations

Frigate uses [Weblate](https://hosted.weblate.org/projects/frigate-nvr/) for managing language translations. If you'd like to help translate Frigate into your language:

1. Visit the [Frigate project on Weblate](https://hosted.weblate.org/projects/frigate-nvr/).
2. Create an account or log in.
3. Browse the available languages and select the one you'd like to contribute to, or request a new language.
4. Translate strings directly in the Weblate interface — no code changes or pull requests needed.

Translation contributions through Weblate are automatically synced to the repository. Please do not submit pull requests for translation changes — use Weblate instead so that translations are properly tracked and coordinated.

## Resources

- [Documentation](https://docs.frigate.video)
- [Discussions, Support, and Bug Reports](https://github.com/blakeblackshear/frigate/discussions)
- [Feature Requests](https://github.com/blakeblackshear/frigate/issues)
5  Makefile

@@ -1,7 +1,7 @@
default_target: local

COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
-VERSION = 0.17.0
+VERSION = 0.18.0
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
BOARDS= #Initialized empty

@@ -49,7 +49,8 @@ push: push-boards
	--push

run: local
-	docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest
+	docker run --rm --publish=5000:5000 --publish=8971:8971 \
+		--volume=${PWD}/config:/config frigate:latest

run_tests: local
	docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \
@@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
FROM scratch AS go2rtc
ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin
-ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.10/go2rtc_linux_${TARGETARCH}" go2rtc
+ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc

FROM wget AS tempio
ARG TARGETARCH

@@ -266,6 +266,12 @@ RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
    pip3 install -U /deps/wheels/*.whl

+# Install Axera Engine
+RUN pip3 install https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3-frigate/axengine-0.1.3-py3-none-any.whl
+
+ENV PATH="${PATH}:/usr/bin/axcl"
+ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/lib/axcl"
+
# Install MemryX runtime (requires libgomp (OpenMP) in the final docker image)
RUN --mount=type=bind,source=docker/main/install_memryx.sh,target=/deps/install_memryx.sh \
    bash -c "bash /deps/install_memryx.sh"
@@ -73,6 +73,7 @@ cd /tmp/nginx
    --with-file-aio \
    --with-http_sub_module \
    --with-http_ssl_module \
+   --with-http_v2_module \
    --with-http_auth_request_module \
    --with-http_realip_module \
    --with-threads \
@@ -52,7 +52,7 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
    tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 amd64/bin/ffmpeg amd64/bin/ffprobe
    rm -rf ffmpeg.tar.xz
    mkdir -p /usr/lib/ffmpeg/7.0
-   wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
+   wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2026-03-19-13-03/ffmpeg-n7.1.3-43-g5a1f107b4c-linux64-gpl-7.1.tar.xz"
    tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 amd64/bin/ffmpeg amd64/bin/ffprobe
    rm -rf ffmpeg.tar.xz
fi
@@ -64,7 +64,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
    tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 arm64/bin/ffmpeg arm64/bin/ffprobe
    rm -f ffmpeg.tar.xz
    mkdir -p /usr/lib/ffmpeg/7.0
-   wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
+   wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2026-03-19-13-03/ffmpeg-n7.1.3-43-g5a1f107b4c-linuxarm64-gpl-7.1.tar.xz"
    tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 arm64/bin/ffmpeg arm64/bin/ffprobe
    rm -f ffmpeg.tar.xz
fi
@@ -105,9 +105,9 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
    # install legacy and standard intel icd and level-zero-gpu
    # see https://github.com/intel/compute-runtime/blob/master/LEGACY_PLATFORMS.md for more info
    # needed core package
-   wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/libigdgmm12_22.5.5_amd64.deb
-   dpkg -i libigdgmm12_22.5.5_amd64.deb
-   rm libigdgmm12_22.5.5_amd64.deb
+   wget https://github.com/intel/compute-runtime/releases/download/25.13.33276.19/libigdgmm12_22.7.0_amd64.deb
+   dpkg -i libigdgmm12_22.7.0_amd64.deb
+   rm libigdgmm12_22.7.0_amd64.deb

    # legacy packages
    wget https://github.com/intel/compute-runtime/releases/download/24.35.30872.36/intel-opencl-icd-legacy1_24.35.30872.36_amd64.deb
@@ -115,18 +115,19 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
    wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-opencl_1.0.17537.24_amd64.deb
    wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.17537.24/intel-igc-core_1.0.17537.24_amd64.deb
    # standard packages
-   wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-opencl-icd_24.52.32224.5_amd64.deb
-   wget https://github.com/intel/compute-runtime/releases/download/24.52.32224.5/intel-level-zero-gpu_1.6.32224.5_amd64.deb
-   wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-opencl-2_2.5.6+18417_amd64.deb
-   wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.5.6/intel-igc-core-2_2.5.6+18417_amd64.deb
+   wget https://github.com/intel/compute-runtime/releases/download/25.13.33276.19/intel-opencl-icd_25.13.33276.19_amd64.deb
+   wget https://github.com/intel/compute-runtime/releases/download/25.13.33276.19/intel-level-zero-gpu_1.6.33276.19_amd64.deb
+   wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.10.10/intel-igc-opencl-2_2.10.10+18926_amd64.deb
+   wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.10.10/intel-igc-core-2_2.10.10+18926_amd64.deb
    # npu packages
-   wget https://github.com/oneapi-src/level-zero/releases/download/v1.21.9/level-zero_1.21.9+u22.04_amd64.deb
-   wget https://github.com/intel/linux-npu-driver/releases/download/v1.17.0/intel-driver-compiler-npu_1.17.0.20250508-14912879441_ubuntu22.04_amd64.deb
-   wget https://github.com/intel/linux-npu-driver/releases/download/v1.17.0/intel-fw-npu_1.17.0.20250508-14912879441_ubuntu22.04_amd64.deb
-   wget https://github.com/intel/linux-npu-driver/releases/download/v1.17.0/intel-level-zero-npu_1.17.0.20250508-14912879441_ubuntu22.04_amd64.deb
+   wget https://github.com/oneapi-src/level-zero/releases/download/v1.28.2/level-zero_1.28.2+u22.04_amd64.deb
+   wget https://github.com/intel/linux-npu-driver/releases/download/v1.19.0/intel-driver-compiler-npu_1.19.0.20250707-16111289554_ubuntu22.04_amd64.deb
+   wget https://github.com/intel/linux-npu-driver/releases/download/v1.19.0/intel-fw-npu_1.19.0.20250707-16111289554_ubuntu22.04_amd64.deb
+   wget https://github.com/intel/linux-npu-driver/releases/download/v1.19.0/intel-level-zero-npu_1.19.0.20250707-16111289554_ubuntu22.04_amd64.deb

    dpkg -i *.deb
    rm *.deb
+   apt-get -qq install -f -y
fi

if [[ "${TARGETARCH}" == "arm64" ]]; then
@@ -10,7 +10,8 @@ echo "[INFO] Starting certsync..."

lefile="/etc/letsencrypt/live/frigate/fullchain.pem"

-tls_enabled=`python3 /usr/local/nginx/get_listen_settings.py | jq -r .tls.enabled`
+tls_enabled=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .tls.enabled`
+listen_external_port=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .listen.external_port`

while true
do
@@ -34,7 +35,7 @@ do
        ;;
    esac

-   liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:8971 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'`
+   liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:$listen_external_port 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'`

    case "$liveprint" in
        *Fingerprint*)
@@ -55,7 +55,7 @@ function setup_homekit_config() {

    if [[ ! -f "${config_path}" ]]; then
        echo "[INFO] Creating empty config file for HomeKit..."
-       echo '{}' > "${config_path}"
+       : > "${config_path}"
    fi

    # Convert YAML to JSON for jq processing
@@ -65,23 +65,25 @@ function setup_homekit_config() {
        return 0
    }

-   # Use jq to filter and keep only the homekit section
-   local cleaned_json="/tmp/cache/homekit_cleaned.json"
-   jq '
-       # Keep only the homekit section if it exists, otherwise empty object
-       if has("homekit") then {homekit: .homekit} else {} end
-   ' "${temp_json}" > "${cleaned_json}" 2>/dev/null || {
-       echo '{}' > "${cleaned_json}"
-   }
+   # Use jq to extract the homekit section, if it exists
+   local homekit_json
+   homekit_json=$(jq '
+       if has("homekit") then {homekit: .homekit} else null end
+   ' "${temp_json}" 2>/dev/null) || homekit_json="null"

-   # Convert back to YAML and write to the config file
-   yq eval -P "${cleaned_json}" > "${config_path}" 2>/dev/null || {
-       echo "[WARNING] Failed to convert cleaned config to YAML, creating minimal config"
-       echo '{}' > "${config_path}"
-   }
+   # If no homekit section, write an empty config file
+   if [[ "${homekit_json}" == "null" ]]; then
+       : > "${config_path}"
+   else
+       # Convert homekit JSON back to YAML and write to the config file
+       echo "${homekit_json}" | yq eval -P - > "${config_path}" 2>/dev/null || {
+           echo "[WARNING] Failed to convert cleaned config to YAML, creating minimal config"
+           : > "${config_path}"
+       }
+   fi

    # Clean up temp files
-   rm -f "${temp_json}" "${cleaned_json}"
+   rm -f "${temp_json}"
}

set_libva_version
@@ -80,12 +80,12 @@ if [ ! \( -f "$letsencrypt_path/privkey.pem" -a -f "$letsencrypt_path/fullchain.
fi

# build templates for optional FRIGATE_BASE_PATH environment variable
-python3 /usr/local/nginx/get_base_path.py | \
+python3 /usr/local/nginx/get_nginx_settings.py | \
    tempio -template /usr/local/nginx/templates/base_path.gotmpl \
        -out /usr/local/nginx/conf/base_path.conf

-# build templates for optional TLS support
-python3 /usr/local/nginx/get_listen_settings.py | \
+# build templates for additional network settings
+python3 /usr/local/nginx/get_nginx_settings.py | \
    tempio -template /usr/local/nginx/templates/listen.gotmpl \
        -out /usr/local/nginx/conf/listen.conf
|
||||
@ -63,6 +63,9 @@ http {
|
||||
server {
|
||||
include listen.conf;
|
||||
|
||||
# enable HTTP/2 for TLS connections to eliminate browser 6-connection limit
|
||||
http2 on;
|
||||
|
||||
# vod settings
|
||||
vod_base_url '';
|
||||
vod_segments_base_url '';
|
||||
|
||||
11  docker/main/rootfs/usr/local/nginx/get_base_path.py (deleted)

@@ -1,11 +0,0 @@
-"""Prints the base path as json to stdout."""
-
-import json
-import os
-from typing import Any
-
-base_path = os.environ.get("FRIGATE_BASE_PATH", "")
-
-result: dict[str, Any] = {"base_path": base_path}
-
-print(json.dumps(result))
35  docker/main/rootfs/usr/local/nginx/get_listen_settings.py (deleted)

@@ -1,35 +0,0 @@
-"""Prints the tls config as json to stdout."""
-
-import json
-import sys
-from typing import Any
-
-from ruamel.yaml import YAML
-
-sys.path.insert(0, "/opt/frigate")
-from frigate.util.config import find_config_file
-
-sys.path.remove("/opt/frigate")
-
-yaml = YAML()
-
-config_file = find_config_file()
-
-try:
-    with open(config_file) as f:
-        raw_config = f.read()
-
-    if config_file.endswith((".yaml", ".yml")):
-        config: dict[str, Any] = yaml.load(raw_config)
-    elif config_file.endswith(".json"):
-        config: dict[str, Any] = json.loads(raw_config)
-except FileNotFoundError:
-    config: dict[str, Any] = {}
-
-tls_config: dict[str, any] = config.get("tls", {"enabled": True})
-networking_config = config.get("networking", {})
-ipv6_config = networking_config.get("ipv6", {"enabled": False})
-
-output = {"tls": tls_config, "ipv6": ipv6_config}
-
-print(json.dumps(output))
62  docker/main/rootfs/usr/local/nginx/get_nginx_settings.py (new file)

@@ -0,0 +1,62 @@
"""Prints the nginx settings as json to stdout."""

import json
import os
import sys
from typing import Any

from ruamel.yaml import YAML

sys.path.insert(0, "/opt/frigate")
from frigate.util.config import find_config_file

sys.path.remove("/opt/frigate")

yaml = YAML()

config_file = find_config_file()

try:
    with open(config_file) as f:
        raw_config = f.read()

    if config_file.endswith((".yaml", ".yml")):
        config: dict[str, Any] = yaml.load(raw_config)
    elif config_file.endswith(".json"):
        config: dict[str, Any] = json.loads(raw_config)
except FileNotFoundError:
    config: dict[str, Any] = {}

tls_config: dict[str, Any] = config.get("tls", {})
tls_config.setdefault("enabled", True)

networking_config: dict[str, Any] = config.get("networking", {})
ipv6_config: dict[str, Any] = networking_config.get("ipv6", {})
ipv6_config.setdefault("enabled", False)

listen_config: dict[str, Any] = networking_config.get("listen", {})
listen_config.setdefault("internal", 5000)
listen_config.setdefault("external", 8971)

# handle case where internal port is a string with ip:port
internal_port = listen_config["internal"]
if type(internal_port) is str:
    internal_port = int(internal_port.split(":")[-1])
listen_config["internal_port"] = internal_port

# handle case where external port is a string with ip:port
external_port = listen_config["external"]
if type(external_port) is str:
    external_port = int(external_port.split(":")[-1])
listen_config["external_port"] = external_port

base_path = os.environ.get("FRIGATE_BASE_PATH", "")

result: dict[str, Any] = {
    "tls": tls_config,
    "ipv6": ipv6_config,
    "listen": listen_config,
    "base_path": base_path,
}

print(json.dumps(result))
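The ip:port handling in the script above is the part most worth understanding; as a standalone sketch of that normalization (a hypothetical helper, not part of the shipped script):

```python
def normalize_port(value: int | str) -> int:
    """Return the bare port from either 8971 or "127.0.0.1:8971"."""
    if isinstance(value, str):
        # keep only the part after the last colon, so "ip:port" works
        return int(value.split(":")[-1])
    return value


assert normalize_port(8971) == 8971
assert normalize_port("127.0.0.1:5000") == 5000
```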
@@ -7,7 +7,7 @@ location ^~ {{ .base_path }}/ {
    # remove base_url from the path before passing upstream
    rewrite ^{{ .base_path }}/(.*) /$1 break;

-   proxy_pass $scheme://127.0.0.1:8971;
+   proxy_pass $scheme://127.0.0.1:{{ .listen.external_port }};
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
@@ -1,15 +1,12 @@

# Internal (IPv4 always; IPv6 optional)
-listen 5000;
-{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:5000;{{ end }}{{ end }}
+listen {{ .listen.internal }};
+{{ if .ipv6.enabled }}listen [::]:{{ .listen.internal_port }};{{ end }}

# intended for external traffic, protected by auth
-{{ if .tls }}
-{{ if .tls.enabled }}
+{{ if .tls.enabled }}

# external HTTPS (IPv4 always; IPv6 optional)
-listen 8971 ssl;
-{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971 ssl;{{ end }}{{ end }}
+listen {{ .listen.external }} ssl;
+{{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }} ssl;{{ end }}

ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;

@@ -32,14 +29,8 @@ listen 5000;
    default_type "text/plain";
    root /etc/letsencrypt/www;
}
{{ else }}
-# external HTTP (IPv4 always; IPv6 optional)
-listen 8971;
-{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
-{{ end }}
-{{ else }}
-# (No tls section) default to HTTP (IPv4 always; IPv6 optional)
-listen 8971;
-{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
+# (No tls) default to HTTP (IPv4 always; IPv6 optional)
+listen {{ .listen.external }};
+{{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }};{{ end }}
{{ end }}
@@ -13,7 +13,7 @@ ARG ROCM

RUN apt update -qq && \
    apt install -y wget gpg && \
-   wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.1.1/ubuntu/jammy/amdgpu-install_7.1.1.70101-1_all.deb && \
+   wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.2/ubuntu/jammy/amdgpu-install_7.2.70200-1_all.deb && \
    apt install -y ./rocm.deb && \
    apt update && \
    apt install -qq -y rocm
@@ -56,13 +56,17 @@ FROM scratch AS rocm-dist

ARG ROCM

+# Copy HIP headers required for MIOpen JIT (BuildHip) / HIPRTC at runtime
+COPY --from=rocm /opt/rocm-${ROCM}/include/ /opt/rocm-${ROCM}/include/
COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/
-# Copy MIOpen database files for gfx10xx and gfx11xx only (RDNA2/RDNA3)
+# Copy MIOpen database files for gfx10xx, gfx11xx, and gfx12xx only (RDNA2/RDNA3/RDNA4)
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx10* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx11* /opt/rocm-$ROCM/share/miopen/db/
+COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx12* /opt/rocm-$ROCM/share/miopen/db/
-# Copy rocBLAS library files for gfx10xx and gfx11xx only
+# Copy rocBLAS library files for gfx10xx, gfx11xx, and gfx12xx only
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*gfx10* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*gfx11* /opt/rocm-$ROCM/lib/rocblas/library/
+COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*gfx12* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-dist/ /

#######################################################################
@@ -1 +1 @@
-onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.1.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl
+onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.2.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl
@@ -1,5 +1,5 @@
variable "ROCM" {
-  default = "7.1.1"
+  default = "7.2.0"
}
variable "HSA_OVERRIDE_GFX_VERSION" {
  default = ""
@@ -1,18 +1,18 @@
-# NVidia TensorRT Support (amd64 only)
+# Nvidia ONNX Runtime GPU Support
--extra-index-url 'https://pypi.nvidia.com'
cython==3.0.*; platform_machine == 'x86_64'
-nvidia_cuda_cupti_cu12==12.5.82; platform_machine == 'x86_64'
-nvidia-cublas-cu12==12.5.3.*; platform_machine == 'x86_64'
-nvidia-cudnn-cu12==9.3.0.*; platform_machine == 'x86_64'
-nvidia-cufft-cu12==11.2.3.*; platform_machine == 'x86_64'
-nvidia-curand-cu12==10.3.6.*; platform_machine == 'x86_64'
-nvidia_cuda_nvcc_cu12==12.5.82; platform_machine == 'x86_64'
-nvidia-cuda-nvrtc-cu12==12.5.82; platform_machine == 'x86_64'
-nvidia_cuda_runtime_cu12==12.5.82; platform_machine == 'x86_64'
-nvidia_cusolver_cu12==11.6.3.*; platform_machine == 'x86_64'
-nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64'
-nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64'
-nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64'
+nvidia-cuda-cupti-cu12==12.9.79; platform_machine == 'x86_64'
+nvidia-cublas-cu12==12.9.1.*; platform_machine == 'x86_64'
+nvidia-cudnn-cu12==9.19.0.*; platform_machine == 'x86_64'
+nvidia-cufft-cu12==11.4.1.*; platform_machine == 'x86_64'
+nvidia-curand-cu12==10.3.10.*; platform_machine == 'x86_64'
+nvidia-cuda-nvcc-cu12==12.9.86; platform_machine == 'x86_64'
+nvidia-cuda-nvrtc-cu12==12.9.86; platform_machine == 'x86_64'
+nvidia-cuda-runtime-cu12==12.9.79; platform_machine == 'x86_64'
+nvidia-cusolver-cu12==11.7.5.*; platform_machine == 'x86_64'
+nvidia-cusparse-cu12==12.5.10.*; platform_machine == 'x86_64'
+nvidia-nccl-cu12==2.29.7; platform_machine == 'x86_64'
+nvidia-nvjitlink-cu12==12.9.86; platform_machine == 'x86_64'
onnx==1.16.*; platform_machine == 'x86_64'
-onnxruntime-gpu==1.22.*; platform_machine == 'x86_64'
+onnxruntime-gpu==1.24.*; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'
@@ -44,13 +44,21 @@ go2rtc:

### `environment_vars`

-This section can be used to set environment variables for those unable to modify the environment of the container, like within Home Assistant OS.
+This section can be used to set environment variables for those unable to modify the environment of the container, like within Home Assistant OS. Docker users should set environment variables in their `docker run` command (`-e FRIGATE_MQTT_PASSWORD=secret`) or `docker-compose.yml` file (`environment:` section) instead. Note that values set here are stored in plain text in your config file, so if the goal is to keep credentials out of your configuration, use Docker environment variables or Docker secrets instead.
+
+Variables prefixed with `FRIGATE_` can be referenced in config fields that support environment variable substitution (such as MQTT host and credentials, camera stream URLs, and ONVIF host and credentials) using the `{FRIGATE_VARIABLE_NAME}` syntax.

Example:

```yaml
environment_vars:
-  VARIABLE_NAME: variable_value
+  FRIGATE_MQTT_USER: my_mqtt_user
+  FRIGATE_MQTT_PASSWORD: my_mqtt_password
+
+mqtt:
+  host: "{FRIGATE_MQTT_HOST}"
+  user: "{FRIGATE_MQTT_USER}"
+  password: "{FRIGATE_MQTT_PASSWORD}"
```

#### TensorFlow Thread Configuration
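For intuition, the `{FRIGATE_VARIABLE_NAME}` substitution described in this hunk behaves roughly like the sketch below (an illustration of the syntax only, not Frigate's implementation):

```python
import os
import re


def substitute(value: str) -> str:
    """Replace {FRIGATE_FOO} placeholders with the FRIGATE_FOO environment variable."""
    return re.sub(
        r"\{(FRIGATE_[A-Z0-9_]+)\}",
        lambda m: os.environ.get(m.group(1), m.group(0)),
        value,
    )


os.environ["FRIGATE_MQTT_USER"] = "my_mqtt_user"
assert substitute("user: {FRIGATE_MQTT_USER}") == "user: my_mqtt_user"
```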
@@ -155,34 +163,33 @@ services:

### Enabling IPv6

-IPv6 is disabled by default, to enable IPv6 listen.gotmpl needs to be bind mounted with IPv6 enabled. For example:
-
-```
-{{ if not .enabled }}
-# intended for external traffic, protected by auth
-listen 8971;
-{{ else }}
-# intended for external traffic, protected by auth
-listen 8971 ssl;
-
-# intended for internal traffic, not protected by auth
-listen 5000;
-```
-
-becomes
-
-```
-{{ if not .enabled }}
-# intended for external traffic, protected by auth
-listen [::]:8971 ipv6only=off;
-{{ else }}
-# intended for external traffic, protected by auth
-listen [::]:8971 ipv6only=off ssl;
-
-# intended for internal traffic, not protected by auth
-listen [::]:5000 ipv6only=off;
-```
+IPv6 is disabled by default, to enable IPv6 modify your Frigate configuration as follows:
+
+```yaml
+networking:
+  ipv6:
+    enabled: True
+```
+
+### Listen on different ports
+
+You can change the ports Nginx uses for listening using Frigate's configuration file. The internal port (unauthenticated) and external port (authenticated) can be changed independently. You can also specify an IP address using the format `ip:port` if you wish to bind the port to a specific interface. This may be useful for example to prevent exposing the internal port outside the container.
+
+For example:
+
+```yaml
+networking:
+  listen:
+    internal: 127.0.0.1:5000
+    external: 8971
+```
+
+:::warning
+
+This setting is for advanced users. For the majority of use cases it's recommended to change the `ports` section of your Docker compose file or use the Docker `run` `--publish` option instead, e.g. `-p 443:8971`. Changing Frigate's ports may break some integrations.
+
+:::

## Base path

By default, Frigate runs at the root path (`/`). However, some setups require running Frigate under a custom path prefix (e.g. `/frigate`), especially when Frigate is located behind a reverse proxy that requires path-based routing.
@@ -234,7 +241,7 @@ To do this:

### Custom go2rtc version

-Frigate currently includes go2rtc v1.9.10, there may be certain cases where you want to run a different version of go2rtc.
+Frigate currently includes go2rtc v1.9.13, there may be certain cases where you want to run a different version of go2rtc.

To do this:
@@ -86,7 +86,7 @@ Frigate looks for a JWT token secret in the following order:

1. An environment variable named `FRIGATE_JWT_SECRET`
2. A file named `FRIGATE_JWT_SECRET` in the directory specified by the `CREDENTIALS_DIRECTORY` environment variable (defaults to the Docker Secrets directory: `/run/secrets/`)
-3. A `jwt_secret` option from the Home Assistant Add-on options
+3. A `jwt_secret` option from the Home Assistant App options
4. A `.jwt_secret` file in the config directory

If no secret is found on startup, Frigate generates one and stores it in a `.jwt_secret` file in the config directory.
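The four-step search order documented above can be summarized as a sketch (hypothetical helper names; this is not Frigate's actual code):

```python
import os
from pathlib import Path


def find_jwt_secret(config_dir: Path, addon_options: dict) -> str | None:
    """Sketch of the documented search order for the JWT secret."""
    # 1. environment variable
    if secret := os.environ.get("FRIGATE_JWT_SECRET"):
        return secret
    # 2. file in CREDENTIALS_DIRECTORY (defaults to the Docker Secrets directory)
    cred = Path(os.environ.get("CREDENTIALS_DIRECTORY", "/run/secrets")) / "FRIGATE_JWT_SECRET"
    if cred.is_file():
        return cred.read_text().strip()
    # 3. Home Assistant App options
    if secret := addon_options.get("jwt_secret"):
        return secret
    # 4. .jwt_secret file in the config directory
    dotfile = config_dir / ".jwt_secret"
    if dotfile.is_file():
        return dotfile.read_text().strip()
    # caller generates a new secret and stores it in .jwt_secret
    return None
```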
@@ -232,7 +232,7 @@ The viewer role provides read-only access to all cameras in the UI and API. Custom

### Role Configuration Example

-```yaml
+```yaml {11-16}
cameras:
  front_door:
    # ... camera config
@@ -24,7 +24,7 @@ A custom icon can be added to the birdseye background by providing a 180x180 ima

If you want to include a camera in Birdseye view only for specific circumstances, or just don't include it at all, the Birdseye setting can be set at the camera level.

-```yaml
+```yaml {8-10,12-14}
# Include all cameras by default in Birdseye view
birdseye:
  enabled: True

@@ -48,6 +48,7 @@ By default birdseye shows all cameras that have had the configured activity in t
```yaml
birdseye:
  enabled: True
+  # highlight-next-line
  inactivity_threshold: 15
```

@@ -78,9 +79,11 @@ birdseye:
cameras:
  front:
    birdseye:
+     # highlight-next-line
      order: 1
  back:
    birdseye:
+     # highlight-next-line
      order: 2
```

@@ -92,7 +95,7 @@ It is possible to limit the number of cameras shown on birdseye at one time. Whe

For example, this can be configured to only show the most recently active camera.

-```yaml
+```yaml {3-4}
birdseye:
  enabled: True
  layout:

@@ -103,7 +106,7 @@ birdseye:

By default birdseye tries to fit 2 cameras in each row and then double in size until a suitable layout is found. The scaling can be configured with a value between 1.0 and 5.0 depending on use case.

-```yaml
+```yaml {3-4}
birdseye:
  enabled: True
  layout:
@@ -23,6 +23,7 @@ Some cameras support h265 with different formats, but Safari only supports the a
cameras:
  h265_cam: # <------ Doesn't matter what the camera is called
    ffmpeg:
+     # highlight-next-line
      apple_compatibility: true # <- Adds compatibility with MacOS and iPhone
```

@@ -30,7 +31,7 @@ cameras:

Note that mjpeg cameras require encoding the video into h264 for recording, and restream roles. This will use significantly more CPU than if the cameras supported h264 feeds directly. It is recommended to use the restream role to create an h264 restream and then use that as the source for ffmpeg.

-```yaml
+```yaml {3,10}
go2rtc:
  streams:
    mjpeg_cam: "ffmpeg:http://your_mjpeg_stream_url#video=h264#hardware" # <- use hardware acceleration to create an h264 stream usable for other components.

@@ -96,6 +97,7 @@ This camera is H.265 only. To be able to play clips on some devices (like MacOs
cameras:
  annkec800: # <------ Name the camera
    ffmpeg:
+     # highlight-next-line
      apple_compatibility: true # <- Adds compatibility with MacOS and iPhone
      output_args:
        record: preset-record-generic-audio-aac

@@ -244,7 +246,7 @@ go2rtc:
      - rtspx://192.168.1.1:7441/abcdefghijk
```

-[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-rtsp)
+[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-rtsp)

In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect.

@@ -274,7 +276,7 @@ To use a USB camera (webcam) with Frigate, the recommendation is to use go2rtc's

- In your Frigate Configuration File, add the go2rtc stream and roles as appropriate:

-```
+```yaml {4,11-12}
go2rtc:
  streams:
    usb_camera:
@@ -66,7 +66,7 @@ Not every PTZ supports ONVIF, which is the standard protocol Frigate uses to com

Add the onvif section to your camera in your configuration file:

-```yaml
+```yaml {4-8}
cameras:
  back:
    ffmpeg: ...
@@ -7,11 +7,11 @@ Object classification allows you to train a custom MobileNetV2 classification mo

## Minimum System Requirements

-Object classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.
+Object classification models are lightweight and run very fast on CPU.

Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.

-A CPU with AVX instructions is required for training and inference.
+A CPU with AVX + AVX2 instructions is required for training and inference.

## Classes

@@ -27,7 +27,6 @@ For object classification:
### Classification Type

- **Sub label**:
-
  - Applied to the object's `sub_label` field.
  - Ideal for a single, more specific identity or type.
  - Example: `cat` → `Leo`, `Charlie`, `None`.

@@ -119,6 +118,7 @@ Enable debug logs for classification models by adding `frigate.data_processing.r
logger:
  default: info
  logs:
+   # highlight-next-line
    frigate.data_processing.real_time.custom_classification: debug
```
@@ -7,11 +7,11 @@ State classification allows you to train a custom MobileNetV2 classification mod

## Minimum System Requirements

-State classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.
+State classification models are lightweight and run very fast on CPU.

Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.

-A CPU with AVX instructions is required for training and inference.
+A CPU with AVX + AVX2 instructions is required for training and inference.

## Classes

@@ -85,6 +85,7 @@ Enable debug logs for classification models by adding `frigate.data_processing.r
logger:
  default: info
  logs:
+   # highlight-next-line
    frigate.data_processing.real_time.custom_classification: debug
```
@@ -32,6 +32,8 @@ All of these features run locally on your system.

## Minimum System Requirements

+A CPU with AVX + AVX2 instructions is required to run Face Recognition.
+
The `small` model is optimized for efficiency and runs on the CPU, most CPUs should run the model efficiently.

The `large` model is optimized for accuracy, an integrated or discrete GPU / NPU is required. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.

@@ -143,17 +145,14 @@ Start with the [Usage](#usage) section and re-read the [Model Requirements](#mod
1. Ensure `person` is being _detected_. A `person` will automatically be scanned by Frigate for a face. Any detected faces will appear in the Recent Recognitions tab in the Frigate UI's Face Library.

   If you are using a Frigate+ or `face` detecting model:

   - Watch the debug view (Settings --> Debug) to ensure that `face` is being detected along with `person`.
   - You may need to adjust the `min_score` for the `face` object if faces are not being detected.

   If you are **not** using a Frigate+ or `face` detecting model:

   - Check your `detect` stream resolution and ensure it is sufficiently high enough to capture face details on `person` objects.
   - You may need to lower your `detection_threshold` if faces are not being detected.

2. Any detected faces will then be _recognized_.

   - Make sure you have trained at least one face per the recommendations above.
   - Adjust `recognition_threshold` settings per the suggestions [above](#advanced-configuration).
@ -5,39 +5,31 @@ title: Configuring Generative AI
|
||||
|
||||
## Configuration
|
||||
|
||||
A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
|
||||
A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 4 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI-Compatible section below.
|
||||
|
||||
To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
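For example, a key written as `{FRIGATE_GEMINI_API_KEY}` is resolved from the environment variable of the same name (a minimal sketch; the provider and model shown are placeholders):

```yaml
genai:
  provider: gemini
  api_key: "{FRIGATE_GEMINI_API_KEY}" # substituted from the FRIGATE_GEMINI_API_KEY environment variable
  model: gemini-2.5-flash
```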
|
||||
|
||||
## Ollama
|
||||
## Local Providers
|
||||
|
||||
Local providers run on your own hardware and keep all data processing private. These require a GPU or dedicated hardware for best performance.
|
||||
|
||||
:::warning
|
||||
|
||||
Using Ollama on CPU is not recommended, high inference times make using Generative AI impractical.
|
||||
Running Generative AI models on CPU is not recommended, as high inference times make using Generative AI impractical.
|
||||
|
||||
:::
|
||||
|
||||
[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It is highly recommended to host this server on a machine with an Nvidia graphics card, or on an Apple silicon Mac for best performance.
|
||||
### Recommended Local Models
|
||||
|
||||
Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.
|
||||
You must use a vision-capable model with Frigate. The following models are recommended for local deployment:
|
||||
|
||||
Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://docs.ollama.com/faq#how-does-ollama-handle-concurrent-requests).
|
||||
|
||||
### Model Types: Instruct vs Thinking
|
||||
|
||||
Most vision-language models are available as **instruct** models, which are fine-tuned to follow instructions and respond concisely to prompts. However, some models (such as certain Qwen-VL or minigpt variants) offer both **instruct** and **thinking** versions.
|
||||
|
||||
- **Instruct models** are always recommended for use with Frigate. These models generate direct, relevant, actionable descriptions that best fit Frigate's object and event summary use case.
|
||||
- **Thinking models** are fine-tuned for more free-form, open-ended, and speculative outputs, which are typically not concise and may not provide the practical summaries Frigate expects. For this reason, Frigate does **not** recommend or support using thinking models.
|
||||
|
||||
Some models are labeled as **hybrid** (capable of both thinking and instruct tasks). In these cases, Frigate will always use instruct-style prompts and specifically disables thinking-mode behaviors to ensure concise, useful responses.
|
||||
|
||||
**Recommendation:**
|
||||
Always select the `-instruct` or documented instruct/tagged variant of any model you use in your Frigate configuration. If in doubt, refer to your model provider’s documentation or model library for guidance on the correct model variant to use.
|
||||
|
||||
### Supported Models
|
||||
|
||||
You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). Note that Frigate will not automatically download the model you specify in your config; Ollama will try to download the model, but it may take longer than the timeout, so it is recommended to pull the model beforehand by running `ollama pull your_model` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
|
||||
| Model | Notes |
|
||||
| ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `qwen3-vl` | Strong visual and situational understanding, strong ability to identify smaller objects and interactions with objects. |
|
||||
| `qwen3.5` | Strong situational understanding, but missing DeepStack from qwen3-vl, leading to worse performance when identifying objects in people's hands and other small details. |
|
||||
| `Intern3.5VL` | Relatively fast with good vision comprehension |
|
||||
| `gemma3` | Slower model with good vision and temporal understanding |
|
||||
| `qwen2.5-vl` | Fast but capable model with good vision comprehension |
|
||||
|
||||
:::info
|
||||
|
||||
@ -45,108 +37,79 @@ Each model is available in multiple parameter sizes (3b, 4b, 8b, etc.). Larger s
|
||||
|
||||
:::
|
||||
|
||||
:::note
|
||||
|
||||
You should have at least 8 GB of RAM available (or VRAM if running on GPU) to run the 7B models, 16 GB to run the 13B models, and 24 GB to run the 33B models.
|
||||
|
||||
:::
|
||||
|
||||
### Model Types: Instruct vs Thinking
|
||||
|
||||
Most vision-language models are available as **instruct** models, which are fine-tuned to follow instructions and respond concisely to prompts. However, some models (such as certain Qwen-VL or minigpt variants) offer both **instruct** and **thinking** versions.
|
||||
|
||||
- **Instruct models** are always recommended for use with Frigate. These models generate direct, relevant, actionable descriptions that best fit Frigate's object and event summary use case.
|
||||
- **Reasoning / Thinking models** are fine-tuned for more free-form, open-ended, and speculative outputs, which are typically not concise and may not provide the practical summaries Frigate expects. For this reason, Frigate does **not** recommend or support using thinking models.
|
||||
|
||||
Some models are labeled as **hybrid** (capable of both thinking and instruct tasks). In these cases, it is recommended to disable reasoning / thinking, which is generally model-specific (see your model's documentation).
|
||||
|
||||
**Recommendation:**
|
||||
Always select the `-instruct` or documented instruct/tagged variant of any model you use in your Frigate configuration. If in doubt, refer to your model provider's documentation or model library for guidance on the correct model variant to use.
|
||||
|
||||
### llama.cpp
|
||||
|
||||
[llama.cpp](https://github.com/ggml-org/llama.cpp) is a C++ implementation of LLaMA that provides a high-performance inference server.
|
||||
|
||||
It is highly recommended to host the llama.cpp server on a machine with a discrete graphics card, or on an Apple silicon Mac for best performance.
|
||||
|
||||
#### Supported Models
|
||||
|
||||
You must use a vision capable model with Frigate. The llama.cpp server supports various vision models in GGUF format.
|
||||
|
||||
#### Configuration
|
||||
|
||||
All llama.cpp native options can be passed through `provider_options`, including `temperature`, `top_k`, `top_p`, `min_p`, `repeat_penalty`, `repeat_last_n`, `seed`, `grammar`, and more. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for a complete list of available parameters.
|
||||
|
||||
```yaml
|
||||
genai:
|
||||
provider: llamacpp
|
||||
base_url: http://localhost:8080
|
||||
model: your-model-name
|
||||
provider_options:
|
||||
context_size: 16000 # Tell Frigate your context size so it can send the appropriate amount of information.
|
||||
```
|
||||
|
||||
### Ollama
|
||||
|
||||
[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It is highly recommended to host this server on a machine with an Nvidia graphics card, or on an Apple silicon Mac for best performance.
|
||||
|
||||
Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.
|
||||
|
||||
Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://docs.ollama.com/faq#how-does-ollama-handle-concurrent-requests).
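As a sketch, these can be set as environment variables on the Ollama service when running the Docker container (the queue and loaded-model limits below are example values to tune for your hardware):

```yaml
services:
  ollama:
    image: ollama/ollama
    environment:
      - OLLAMA_NUM_PARALLEL=1 # required for reliable use with Frigate
      - OLLAMA_MAX_QUEUE=64 # example value; tune for your hardware
      - OLLAMA_MAX_LOADED_MODELS=1 # example value
```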
|
||||
|
||||
:::tip
|
||||
|
||||
If you are trying to use a single model for Frigate and HomeAssistant, it will need to support vision and tools calling. qwen3-VL supports vision and tools simultaneously in Ollama.
|
||||
|
||||
:::
|
||||
|
||||
The following models are recommended:
|
||||
Note that Frigate will not automatically download the model you specify in your config. Ollama will try to download the model but it may take longer than the timeout, so it is recommended to pull the model beforehand by running `ollama pull your_model` on your Ollama server/Docker container. The model specified in Frigate's config must match the downloaded model tag.
|
||||
|
||||
| Model | Notes |
|
||||
| ------------- | -------------------------------------------------------------------- |
|
||||
| `qwen3-vl` | Strong visual and situational understanding, higher VRAM requirement |
|
||||
| `Intern3.5VL` | Relatively fast with good vision comprehension |
|
||||
| `gemma3` | Strong frame-to-frame understanding, slower inference times |
|
||||
| `qwen2.5-vl` | Fast but capable model with good vision comprehension |
|
||||
|
||||
:::note
|
||||
|
||||
You should have at least 8 GB of RAM available (or VRAM if running on GPU) to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
|
||||
|
||||
:::
|
||||
|
||||
#### Ollama Cloud models
|
||||
|
||||
Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).
|
||||
|
||||
### Configuration
|
||||
#### Configuration
|
||||
|
||||
```yaml
|
||||
genai:
|
||||
provider: ollama
|
||||
base_url: http://localhost:11434
|
||||
model: qwen3-vl:4b
|
||||
provider_options: # other Ollama client options can be defined
|
||||
keep_alive: -1
|
||||
options:
|
||||
num_ctx: 8192 # make sure the context matches other services that are using ollama
|
||||
```
|
||||
|
||||
## Google Gemini
|
||||
### OpenAI-Compatible
|
||||
|
||||
Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API, however the limits may not be sufficient for standard Frigate usage. Choose a plan appropriate for your installation.
|
||||
|
||||
### Supported Models
|
||||
|
||||
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini).
|
||||
|
||||
### Get API Key
|
||||
|
||||
To start using Gemini, you must first get an API key from [Google AI Studio](https://aistudio.google.com).
|
||||
|
||||
1. Accept the Terms of Service
|
||||
2. Click "Get API Key" from the right hand navigation
|
||||
3. Click "Create API key in new project"
|
||||
4. Copy the API key for use in your config
|
||||
|
||||
### Configuration
|
||||
|
||||
```yaml
|
||||
genai:
|
||||
provider: gemini
|
||||
api_key: "{FRIGATE_GEMINI_API_KEY}"
|
||||
model: gemini-2.5-flash
|
||||
```
|
||||
|
||||
:::note
|
||||
|
||||
To use a different Gemini-compatible API endpoint, set the `provider_options` with the `base_url` key to your provider's API URL. For example:
|
||||
|
||||
```
|
||||
genai:
|
||||
provider: gemini
|
||||
...
|
||||
provider_options:
|
||||
base_url: https://...
|
||||
```
|
||||
|
||||
Other HTTP options are available, see the [python-genai documentation](https://github.com/googleapis/python-genai).
|
||||
|
||||
:::
|
||||
|
||||
## OpenAI
|
||||
|
||||
OpenAI does not have a free tier for their API. With the release of gpt-4o, pricing has been reduced and each generation should cost fractions of a cent if you choose to go this route.
|
||||
|
||||
### Supported Models
|
||||
|
||||
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models).
|
||||
|
||||
### Get API Key
|
||||
|
||||
To start using OpenAI, you must first [create an API key](https://platform.openai.com/api-keys) and [configure billing](https://platform.openai.com/settings/organization/billing/overview).
|
||||
|
||||
### Configuration
|
||||
|
||||
```yaml
|
||||
genai:
|
||||
provider: openai
|
||||
api_key: "{FRIGATE_OPENAI_API_KEY}"
|
||||
model: gpt-4o
|
||||
```
|
||||
|
||||
:::note
|
||||
|
||||
To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.
|
||||
|
||||
:::
|
||||
Frigate supports any provider that implements the OpenAI API standard. This includes self-hosted solutions like [vLLM](https://docs.vllm.ai/), [LocalAI](https://localai.io/), and other OpenAI-compatible servers.
|
||||
|
||||
:::tip
|
||||
|
||||
@ -165,19 +128,134 @@ This ensures Frigate uses the correct context window size when generating prompt
|
||||
|
||||
:::
|
||||
|
||||
## Azure OpenAI
|
||||
#### Configuration
|
||||
|
||||
```yaml
|
||||
genai:
|
||||
provider: openai
|
||||
base_url: http://your-server:port
|
||||
api_key: your-api-key # May not be required for local servers
|
||||
model: your-model-name
|
||||
```
|
||||
|
||||
To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.
|
||||
|
||||
## Cloud Providers
|
||||
|
||||
Cloud providers run on remote infrastructure and require an API key for authentication. These services handle all model inference on their servers.
|
||||
|
||||
### Ollama Cloud
|
||||
|
||||
Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).
|
||||
|
||||
#### Configuration
|
||||
|
||||
```yaml
|
||||
genai:
|
||||
provider: ollama
|
||||
base_url: http://localhost:11434
|
||||
model: cloud-model-name
|
||||
```
|
||||
|
||||
### Google Gemini
|
||||
|
||||
Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API, however the limits may not be sufficient for standard Frigate usage. Choose a plan appropriate for your installation.
|
||||
|
||||
#### Supported Models
|
||||
|
||||
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini).
|
||||
|
||||
#### Get API Key
|
||||
|
||||
To start using Gemini, you must first get an API key from [Google AI Studio](https://aistudio.google.com).
|
||||
|
||||
1. Accept the Terms of Service
|
||||
2. Click "Get API Key" from the right hand navigation
|
||||
3. Click "Create API key in new project"
|
||||
4. Copy the API key for use in your config
|
||||
|
||||
#### Configuration
|
||||
|
||||
```yaml
|
||||
genai:
|
||||
provider: gemini
|
||||
api_key: "{FRIGATE_GEMINI_API_KEY}"
|
||||
model: gemini-2.5-flash
|
||||
```
|
||||
|
||||
:::note
|
||||
|
||||
To use a different Gemini-compatible API endpoint, set the `provider_options` with the `base_url` key to your provider's API URL. For example:
|
||||
|
||||
```yaml {4,5}
|
||||
genai:
|
||||
provider: gemini
|
||||
...
|
||||
provider_options:
|
||||
base_url: https://...
|
||||
```
|
||||
|
||||
Other HTTP options are available, see the [python-genai documentation](https://github.com/googleapis/python-genai).
|
||||
|
||||
:::
|
||||
|
||||
### OpenAI
|
||||
|
||||
OpenAI does not have a free tier for their API. With the release of gpt-4o, pricing has been reduced and each generation should cost fractions of a cent if you choose to go this route.
|
||||
|
||||
#### Supported Models
|
||||
|
||||
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models).
|
||||
|
||||
#### Get API Key
|
||||
|
||||
To start using OpenAI, you must first [create an API key](https://platform.openai.com/api-keys) and [configure billing](https://platform.openai.com/settings/organization/billing/overview).
|
||||
|
||||
#### Configuration
|
||||
|
||||
```yaml
|
||||
genai:
|
||||
provider: openai
|
||||
api_key: "{FRIGATE_OPENAI_API_KEY}"
|
||||
model: gpt-4o
|
||||
```
|
||||
|
||||
:::note
|
||||
|
||||
To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.
|
||||
|
||||
:::
|
||||
|
||||
:::tip
|
||||
|
||||
For OpenAI-compatible servers (such as llama.cpp) that don't expose the configured context size in the API response, you can manually specify the context size in `provider_options`:
|
||||
|
||||
```yaml {5,6}
|
||||
genai:
|
||||
provider: openai
|
||||
base_url: http://your-llama-server
|
||||
model: your-model-name
|
||||
provider_options:
|
||||
context_size: 8192 # Specify the configured context size
|
||||
```
|
||||
|
||||
This ensures Frigate uses the correct context window size when generating prompts.
|
||||
|
||||
:::
|
||||
|
||||
### Azure OpenAI
|
||||
|
||||
Microsoft offers several vision models through Azure OpenAI. A subscription is required.
|
||||
|
||||
### Supported Models
|
||||
#### Supported Models
|
||||
|
||||
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models).
|
||||
|
||||
### Create Resource and Get API Key
|
||||
#### Create Resource and Get API Key
|
||||
|
||||
To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key, model name, and resource URL, which must include the `api-version` parameter (see the example below).
|
||||
|
||||
### Configuration
|
||||
#### Configuration
|
||||
|
||||
```yaml
|
||||
genai:
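  # The lines below are a hedged sketch completing this example; confirm the exact
  # provider name and URL shape against the Azure docs linked above.
  provider: azure_openai
  base_url: https://example-endpoint.openai.azure.com/openai/deployments/your-deployment?api-version=2024-02-15-preview # hypothetical resource URL; note the required api-version parameter
  api_key: "{FRIGATE_AZURE_API_KEY}"
```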
|
||||
|
||||
@ -11,7 +11,7 @@ By default, descriptions will be generated for all tracked objects and all zones
|
||||
|
||||
Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
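A minimal sketch of enabling snapshot-based descriptions, assuming object descriptions are configured under the `objects` Generative AI settings as on this page:

```yaml
objects:
  genai:
    enabled: true
    use_snapshot: true # send the snapshot instead of thumbnails collected over the object's lifetime
```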
|
||||
|
||||
Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset).
|
||||
Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt#frigatecamera_nameobject_descriptionsset).
|
||||
|
||||
## Usage and Best Practices
|
||||
|
||||
|
||||
@ -7,7 +7,7 @@ Generative AI can be used to automatically generate structured summaries of revi
|
||||
|
||||
Summary requests are sent automatically to your AI provider for alert review items when the activity has ended; they can optionally be enabled for detections as well.
|
||||
|
||||
Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt/#frigatecamera_namereviewdescriptionsset).
|
||||
Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt#frigatecamera_namereview_descriptionsset).
|
||||
|
||||
## Review Summary Usage and Best Practices
|
||||
|
||||
@ -80,6 +80,7 @@ By default, review summaries use preview images (cached preview frames) which ha
|
||||
review:
|
||||
genai:
|
||||
enabled: true
|
||||
# highlight-next-line
|
||||
image_source: recordings # Options: "preview" (default) or "recordings"
|
||||
```
|
||||
|
||||
@ -104,7 +105,7 @@ If recordings are not available for a given time period, the system will automat
|
||||
|
||||
Along with suspicious activity or immediate threats, you may have other concerns, such as animals in your garden or a gate being left open. These concerns can be configured so that the review summaries will make note of them if the activity requires additional review. For example:
|
||||
|
||||
```yaml
|
||||
```yaml {4,5}
|
||||
review:
|
||||
genai:
|
||||
enabled: true
|
||||
@ -116,7 +117,7 @@ review:
|
||||
|
||||
By default, review summaries are generated in English. You can configure Frigate to generate summaries in your preferred language by setting the `preferred_language` option:
|
||||
|
||||
```yaml
|
||||
```yaml {4}
|
||||
review:
|
||||
genai:
|
||||
enabled: true
|
||||
|
||||
@ -12,23 +12,20 @@ Some of Frigate's enrichments can use a discrete GPU or integrated GPU for accel
|
||||
Object detection and enrichments (like Semantic Search, Face Recognition, and License Plate Recognition) are independent features. To use a GPU / NPU for object detection, see the [Object Detectors](/configuration/object_detectors.md) documentation. If you want to use your GPU for any supported enrichments, you must choose the appropriate Frigate Docker image for your GPU / NPU and configure the enrichment according to its specific documentation.
|
||||
|
||||
- **AMD**
|
||||
|
||||
- ROCm support in the `-rocm` Frigate image is automatically detected for enrichments, but only some enrichment models are available due to ROCm's focus on LLMs and limited stability with certain neural network models. Frigate disables models that perform poorly or are unstable to ensure reliable operation, so only compatible enrichments may be active.
|
||||
|
||||
- **Intel**
|
||||
|
||||
- OpenVINO will automatically be detected and used for enrichments in the default Frigate image.
|
||||
- **Note:** Intel NPUs have limited model support for enrichments. GPU is recommended for enrichments when available.
|
||||
|
||||
- **Nvidia**
|
||||
|
||||
- Nvidia GPUs will automatically be detected and used for enrichments in the `-tensorrt` Frigate image.
|
||||
- Jetson devices will automatically be detected and used for enrichments in the `-tensorrt-jp6` Frigate image.
|
||||
|
||||
- **RockChip**
|
||||
- RockChip NPU will automatically be detected and used for semantic search v1 and face recognition in the `-rk` Frigate image.
|
||||
|
||||
Utilizing a GPU for enrichments does not require you to use the same GPU for object detection. For example, you can run the `tensorrt` Docker image for enrichments and still use other dedicated hardware like a Coral or Hailo for object detection. However, one combination that is not supported is TensorRT for object detection and OpenVINO for enrichments.
|
||||
Utilizing a GPU for enrichments does not require you to use the same GPU for object detection. For example, you can run the `tensorrt` Docker image to run enrichments on an Nvidia GPU and still use other dedicated hardware like a Coral or Hailo for object detection. However, one combination that is not supported is the `tensorrt` image for object detection on an Nvidia GPU and Intel iGPU for enrichments.
|
||||
|
||||
:::note
|
||||
|
||||
|
||||
@ -10,6 +10,7 @@ import CommunityBadge from '@site/src/components/CommunityBadge';
|
||||
It is highly recommended to use an integrated or discrete GPU for hardware-accelerated video decoding in Frigate.
|
||||
|
||||
Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg. To verify that hardware acceleration is working:
|
||||
|
||||
- Check the logs: A message will either say that hardware acceleration was automatically detected, or there will be a warning that no hardware acceleration was automatically detected.
|
||||
- If hardware acceleration is specified in the config, verification can be done by ensuring the logs are free from errors. There is no CPU fallback for hardware acceleration.
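If no hardware acceleration is detected automatically, it can be specified with an ffmpeg preset (a minimal sketch; `preset-vaapi` is shown as an example, choose the preset matching your hardware from the per-vendor sections below):

```yaml
ffmpeg:
  hwaccel_args: preset-vaapi # example preset for Intel/AMD VAAPI
```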
|
||||
|
||||
@ -67,7 +68,7 @@ Frigate can utilize most Intel integrated GPUs and Arc GPUs to accelerate video
|
||||
|
||||
:::note
|
||||
|
||||
The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars).
|
||||
The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `config.yml` for HA App users](advanced.md#environment_vars).
|
||||
|
||||
See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/000005505/processors.html) to figure out what generation your CPU is.
|
||||
|
||||
@ -116,12 +117,13 @@ services:
|
||||
frigate:
|
||||
...
|
||||
image: ghcr.io/blakeblackshear/frigate:stable
|
||||
# highlight-next-line
|
||||
privileged: true
|
||||
```
|
||||
|
||||
##### Docker Run CLI - Privileged
|
||||
|
||||
```bash
|
||||
```bash {4}
|
||||
docker run -d \
|
||||
--name frigate \
|
||||
...
|
||||
@ -135,7 +137,7 @@ Only recent versions of Docker support the `CAP_PERFMON` capability. You can tes
|
||||
|
||||
##### Docker Compose - CAP_PERFMON
|
||||
|
||||
```yaml
|
||||
```yaml {5,6}
|
||||
services:
|
||||
frigate:
|
||||
...
|
||||
@ -146,7 +148,7 @@ services:
|
||||
|
||||
##### Docker Run CLI - CAP_PERFMON
|
||||
|
||||
```bash
|
||||
```bash {4}
|
||||
docker run -d \
|
||||
--name frigate \
|
||||
...
|
||||
@ -188,7 +190,7 @@ Frigate can utilize modern AMD integrated GPUs and AMD GPUs to accelerate video
|
||||
|
||||
### Configuring Radeon Driver
|
||||
|
||||
You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars).
|
||||
You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `config.yml` for HA App users](advanced.md#environment_vars).
|
||||
|
||||
### Via VAAPI
|
||||
|
||||
@ -213,7 +215,7 @@ Additional configuration is needed for the Docker container to be able to access
|
||||
|
||||
#### Docker Compose - Nvidia GPU
|
||||
|
||||
```yaml
|
||||
```yaml {5-12}
|
||||
services:
|
||||
frigate:
|
||||
...
|
||||
@ -230,7 +232,7 @@ services:
|
||||
|
||||
#### Docker Run CLI - Nvidia GPU
|
||||
|
||||
```bash
|
||||
```bash {4}
|
||||
docker run -d \
|
||||
--name frigate \
|
||||
...
|
||||
@ -292,7 +294,7 @@ These instructions were originally based on the [Jellyfin documentation](https:/
|
||||
## Raspberry Pi 3/4
|
||||
|
||||
Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory).
|
||||
If you are using the HA Add-on, you may need to use the full access variant and turn off _Protection mode_ for hardware acceleration.
|
||||
If you are using the HA App, you may need to use the full access variant and turn off _Protection mode_ for hardware acceleration.
|
||||
|
||||
```yaml
|
||||
# if you want to decode an h264 stream
|
||||
@ -309,7 +311,7 @@ ffmpeg:
|
||||
If running Frigate through Docker, you either need to run in privileged mode or
|
||||
map the `/dev/video*` devices to Frigate. With Docker Compose add:
|
||||
|
||||
```yaml
|
||||
```yaml {4-5}
|
||||
services:
|
||||
frigate:
|
||||
...
|
||||
@ -319,7 +321,7 @@ services:
|
||||
|
||||
Or with `docker run`:
|
||||
|
||||
```bash
|
||||
```bash {4}
|
||||
docker run -d \
|
||||
--name frigate \
|
||||
...
|
||||
@ -351,7 +353,7 @@ You will need to use the image with the nvidia container runtime:
|
||||
|
||||
### Docker Run CLI - Jetson
|
||||
|
||||
```bash
|
||||
```bash {3}
|
||||
docker run -d \
|
||||
...
|
||||
--runtime nvidia
|
||||
@ -360,7 +362,7 @@ docker run -d \
|
||||
|
||||
### Docker Compose - Jetson
|
||||
|
||||
```yaml
|
||||
```yaml {5}
|
||||
services:
|
||||
frigate:
|
||||
...
|
||||
@ -451,14 +453,14 @@ Restarting ffmpeg...
|
||||
|
||||
you should try to upgrade to FFmpeg 7. This can be done using this config option:
|
||||
|
||||
```
|
||||
```yaml
|
||||
ffmpeg:
|
||||
path: "7.0"
|
||||
```
|
||||
|
||||
You can set this option globally to use FFmpeg 7 for all cameras, or at the camera level to use it only for specific cameras. Do not confuse this option with:
|
||||
|
||||
```
|
||||
```yaml
|
||||
cameras:
|
||||
name:
|
||||
ffmpeg:
|
||||
@ -480,7 +482,7 @@ Make sure to follow the [Synaptics specific installation instructions](/frigate/
|
||||
|
||||
Add one of the following FFmpeg presets to your `config.yml` to enable hardware video processing:
|
||||
|
||||
```yaml
|
||||
```yaml {2}
|
||||
ffmpeg:
|
||||
hwaccel_args: -c:v h264_v4l2m2m
|
||||
input_args: preset-rtsp-restream
|
||||
|
||||
@ -3,7 +3,7 @@ id: index
|
||||
title: Frigate Configuration
|
||||
---
|
||||
|
||||
For Home Assistant Add-on installations, the config file should be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running. See the list of directories [here](#accessing-add-on-config-dir).
|
||||
For Home Assistant App installations, the config file should be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate App you are running. See the list of directories [here](#accessing-app-config-dir).
|
||||
|
||||
For all other installation types, the config file should be mapped to `/config/config.yml` inside the container.
|
||||
|
||||
@ -25,12 +25,12 @@ cameras:
|
||||
- detect
|
||||
```
|
||||
|
||||
## Accessing the Home Assistant Add-on configuration directory {#accessing-add-on-config-dir}
|
||||
## Accessing the Home Assistant App configuration directory {#accessing-app-config-dir}
|
||||
|
||||
When running Frigate through the HA Add-on, the Frigate `/config` directory is mapped to `/addon_configs/<addon_directory>` in the host, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running.
|
||||
When running Frigate through the HA App, the Frigate `/config` directory is mapped to `/addon_configs/<addon_directory>` in the host, where `<addon_directory>` is specific to the variant of the Frigate App you are running.
|
||||
|
||||
| Add-on Variant | Configuration directory |
|
||||
| -------------------------- | -------------------------------------------- |
|
||||
| App Variant | Configuration directory |
|
||||
| -------------------------- | ----------------------------------------- |
|
||||
| Frigate | `/addon_configs/ccab4aaf_frigate` |
|
||||
| Frigate (Full Access) | `/addon_configs/ccab4aaf_frigate-fa` |
|
||||
| Frigate Beta | `/addon_configs/ccab4aaf_frigate-beta` |
|
||||
@ -38,11 +38,11 @@ When running Frigate through the HA Add-on, the Frigate `/config` directory is m
|
||||
|
||||
**Whenever you see `/config` in the documentation, it refers to this directory.**
|
||||
|
||||
If for example you are running the standard Add-on variant and use the [VS Code Add-on](https://github.com/hassio-addons/addon-vscode) to browse your files, you can click _File_ > _Open folder..._ and navigate to `/addon_configs/ccab4aaf_frigate` to access the Frigate `/config` directory and edit the `config.yaml` file. You can also use the built-in file editor in the Frigate UI to edit the configuration file.
|
||||
If for example you are running the standard App variant and use the [VS Code App](https://github.com/hassio-addons/addon-vscode) to browse your files, you can click _File_ > _Open folder..._ and navigate to `/addon_configs/ccab4aaf_frigate` to access the Frigate `/config` directory and edit the `config.yaml` file. You can also use the built-in file editor in the Frigate UI to edit the configuration file.
|
||||
|
||||
## VS Code Configuration Schema
|
||||
|
||||
VS Code supports JSON schemas for automatically validating configuration files. You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VS Code and Frigate as an Add-on, you should use `ccab4aaf-frigate` instead. Make sure to expose the internal unauthenticated port `5000` when accessing the config from VS Code on another machine.
|
||||
VS Code supports JSON schemas for automatically validating configuration files. You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VS Code and Frigate as an App, you should use `ccab4aaf-frigate` instead. Make sure to expose the internal unauthenticated port `5000` when accessing the config from VS Code on another machine.
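For example, the first line of your `config.yml` (shown here with a placeholder host):

```yaml
# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json
mqtt:
  host: "{FRIGATE_MQTT_HOST}"
```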
|
||||
|
||||
## Environment Variable Substitution
|
||||
|
||||
@ -50,6 +50,7 @@ Frigate supports the use of environment variables starting with `FRIGATE_` **onl
|
||||
|
||||
```yaml
|
||||
mqtt:
|
||||
host: "{FRIGATE_MQTT_HOST}"
|
||||
user: "{FRIGATE_MQTT_USER}"
|
||||
password: "{FRIGATE_MQTT_PASSWORD}"
|
||||
```
|
||||
@ -60,7 +61,7 @@ mqtt:
|
||||
|
||||
```yaml
|
||||
onvif:
|
||||
host: 10.0.10.10
|
||||
host: "192.168.1.12"
|
||||
port: 8000
|
||||
user: "{FRIGATE_RTSP_USER}"
|
||||
password: "{FRIGATE_RTSP_PASSWORD}"
|
||||
@ -82,10 +83,10 @@ genai:
|
||||
|
||||
Here are some common starter configuration examples. Refer to the [reference config](./reference.md) for detailed information about all the config values.
|
||||
|
||||
### Raspberry Pi Home Assistant Add-on with USB Coral
|
||||
### Raspberry Pi Home Assistant App with USB Coral
|
||||
|
||||
- Single camera with 720p, 5fps stream for detect
|
||||
- MQTT connected to the Home Assistant Mosquitto Add-on
|
||||
- MQTT connected to the Home Assistant Mosquitto App
|
||||
- Hardware acceleration for decoding video
|
||||
- USB Coral detector
|
||||
- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not
|
||||
@ -109,15 +110,16 @@ detectors:
|
||||
|
||||
record:
|
||||
enabled: True
|
||||
retain:
|
||||
motion:
|
||||
days: 7
|
||||
mode: motion
|
||||
alerts:
|
||||
retain:
|
||||
days: 30
|
||||
mode: motion
|
||||
detections:
|
||||
retain:
|
||||
days: 30
|
||||
mode: motion
|
||||
|
||||
snapshots:
|
||||
enabled: True
|
||||
@ -137,7 +139,10 @@ cameras:
|
||||
- detect
|
||||
motion:
|
||||
mask:
|
||||
- 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400
|
||||
timestamp:
|
||||
friendly_name: "Camera timestamp"
|
||||
enabled: true
|
||||
coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
|
||||
```
|
||||
|
||||
### Standalone Intel Mini PC with USB Coral
|
||||
@ -165,15 +170,16 @@ detectors:
|
||||
|
||||
record:
|
||||
enabled: True
|
||||
retain:
|
||||
motion:
|
||||
days: 7
|
||||
mode: motion
|
||||
alerts:
|
||||
retain:
|
||||
days: 30
|
||||
mode: motion
|
||||
detections:
|
||||
retain:
|
||||
days: 30
|
||||
mode: motion
|
||||
|
||||
snapshots:
|
||||
enabled: True
|
||||
@ -193,7 +199,10 @@ cameras:
|
||||
- detect
|
||||
motion:
|
||||
mask:
|
||||
- 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400
|
||||
timestamp:
|
||||
friendly_name: "Camera timestamp"
|
||||
enabled: true
|
||||
coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
|
||||
```
|
||||
|
||||
### Home Assistant integrated Intel Mini PC with OpenVino
|
||||
@ -231,15 +240,16 @@ model:
|
||||
|
||||
record:
|
||||
enabled: True
|
||||
retain:
|
||||
motion:
|
||||
days: 7
|
||||
mode: motion
|
||||
alerts:
|
||||
retain:
|
||||
days: 30
|
||||
mode: motion
|
||||
detections:
|
||||
retain:
|
||||
days: 30
|
||||
mode: motion
|
||||
|
||||
snapshots:
|
||||
enabled: True
|
||||
@ -259,5 +269,8 @@ cameras:
|
||||
- detect
|
||||
motion:
|
||||
mask:
|
||||
- 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400
|
||||
timestamp:
|
||||
friendly_name: "Camera timestamp"
|
||||
enabled: true
|
||||
coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
|
||||
```
|
||||
|
||||
@ -30,7 +30,7 @@ In the default mode, Frigate's LPR needs to first detect a `car` or `motorcycle`
|
||||
|
||||
## Minimum System Requirements
|
||||
|
||||
License plate recognition works by running AI models locally on your system. The YOLOv9 plate detector model and the OCR models ([PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)) are relatively lightweight and can run on your CPU or GPU, depending on your configuration. At least 4GB of RAM is required.
|
||||
License plate recognition works by running AI models locally on your system. The YOLOv9 plate detector model and the OCR models ([PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)) are relatively lightweight and can run on your CPU or GPU, depending on your configuration. At least 4GB of RAM and a CPU with AVX + AVX2 instructions are required.
|
||||
|
||||
## Configuration
|
||||
|
||||
@ -43,7 +43,7 @@ lpr:
|
||||
|
||||
Like other enrichments in Frigate, LPR **must be enabled globally** to use the feature. You should disable it for specific cameras at the camera level if you don't want to run LPR on cars on those cameras:
|
||||
|
||||
```yaml
|
||||
```yaml {4,5}
|
||||
cameras:
|
||||
garage:
|
||||
...
|
||||
@ -375,7 +375,6 @@ Use `match_distance` to allow small character mismatches. Alternatively, define
|
||||
Start with ["Why isn't my license plate being detected and recognized?"](#why-isnt-my-license-plate-being-detected-and-recognized). If you are still having issues, work through these steps.
|
||||
|
||||
1. Start with a simplified LPR config.
|
||||
|
||||
- Remove or comment out everything in your LPR config, including `min_area`, `min_plate_length`, `format`, `known_plates`, or `enhancement` values so that the only values left are `enabled` and `debug_save_plates`. This will run LPR with Frigate's default values.
|
||||
|
||||
```yaml
|
||||
@ -386,31 +385,28 @@ Start with ["Why isn't my license plate being detected and recognized?"](#why-is
|
||||
```
|
||||
|
||||
2. Enable debug logs to see exactly what Frigate is doing.
|
||||
|
||||
- Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration. These logs are _very_ verbose, so only keep this enabled when necessary. Restart Frigate after this change.
|
||||
|
||||
```yaml
|
||||
logger:
|
||||
default: info
|
||||
logs:
|
||||
# highlight-next-line
|
||||
frigate.data_processing.common.license_plate: debug
|
||||
```
|
||||
|
||||
3. Ensure your plates are being _detected_.
|
||||
|
||||
If you are using a Frigate+ or `license_plate` detecting model:
|
||||
|
||||
- Watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected.
|
||||
- View MQTT messages for `frigate/events` to verify detected plates.
|
||||
- You may need to adjust your `min_score` and/or `threshold` for the `license_plate` object if your plates are not being detected.
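As a sketch, lowering the score filters for the `license_plate` object (the values shown are assumptions to tune for your setup):

```yaml
objects:
  filters:
    license_plate:
      min_score: 0.4 # example value; lower if plates are being missed
      threshold: 0.6 # example value
```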
|
||||
|
||||
If you are **not** using a Frigate+ or `license_plate` detecting model:
|
||||
|
||||
- Watch the debug logs for messages from the YOLOv9 plate detector.
|
||||
- You may need to adjust your `detection_threshold` if your plates are not being detected.
|
||||
|
||||
4. Ensure the characters on detected plates are being _recognized_.
|
||||
|
||||
- Enable `debug_save_plates` to save images of detected text on plates to the clips directory (`/media/frigate/clips/lpr`). Ensure these images are readable and the text is clear.
|
||||
- Watch the debug view to see plates recognized in real-time. For non-dedicated LPR cameras, the `car` or `motorcycle` label will change to the recognized plate when LPR is enabled and working.
|
||||
- Adjust `recognition_threshold` settings per the suggestions [above](#advanced-configuration).
|
||||
|
||||
@ -77,7 +77,7 @@ Configure the `streams` option with a "friendly name" for your stream followed b
|
||||
|
||||
Using Frigate's internal version of go2rtc is required to use this feature. You cannot specify paths in the `streams` configuration, only go2rtc stream names.
|
||||
|
||||
```yaml
|
||||
```yaml {3,6,8,25-29}
|
||||
go2rtc:
|
||||
streams:
|
||||
test_cam:
|
||||
@ -114,9 +114,9 @@ cameras:
|
||||
WebRTC works by creating a TCP or UDP connection on port `8555`. However, it requires additional configuration:
|
||||
|
||||
- For external access, over the internet, set up your router to forward port `8555` to port `8555` on the Frigate device, for both TCP and UDP.
|
||||
- For internal/local access, unless you are running through the HA Add-on, you will also need to set the WebRTC candidates list in the go2rtc config. For example, if `192.168.1.10` is the local IP of the device running Frigate:
|
||||
- For internal/local access, unless you are running through the HA App, you will also need to set the WebRTC candidates list in the go2rtc config. For example, if `192.168.1.10` is the local IP of the device running Frigate:
|
||||
|
||||
```yaml title="config.yml"
|
||||
```yaml title="config.yml" {4-7}
|
||||
go2rtc:
|
||||
streams:
|
||||
test_cam: ...
|
||||
@ -132,9 +132,9 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req
|
||||
|
||||
:::tip
|
||||
|
||||
This extra configuration may not be required if Frigate has been installed as a Home Assistant Add-on, as Frigate uses the Supervisor's API to generate a WebRTC candidate.
|
||||
This extra configuration may not be required if Frigate has been installed as a Home Assistant App, as Frigate uses the Supervisor's API to generate a WebRTC candidate.
|
||||
|
||||
However, if issues occur, it is recommended to define the candidates manually. You should do this if the Frigate Add-on fails to generate a valid candidate. If an error occurs, you will see warnings like the below in the Add-on logs page during initialization:
|
||||
However, if issues occur, it is recommended to define the candidates manually. You should do this if the Frigate App fails to generate a valid candidate. If an error occurs, you will see warnings like the below in the App logs page during initialization:
|
||||
|
||||
```log
|
||||
[WARN] Failed to get IP address from supervisor
|
||||
@ -154,7 +154,7 @@ If not running in host mode, port 8555 will need to be mapped for the container:
|
||||
|
||||
docker-compose.yml
|
||||
|
||||
```yaml
|
||||
```yaml {4-6}
|
||||
services:
|
||||
frigate:
|
||||
...
|
||||
@ -222,34 +222,28 @@ Note that disabling a camera through the config file (`enabled: False`) removes
|
||||
When your browser runs into problems playing back your camera streams, it will log short error messages to the browser console. These indicate playback, codec, or network issues on the client/browser side, not something server-side with Frigate itself. Below are the common messages you may see and simple actions you can take to try to resolve them.
|
||||
|
||||
- **startup**
|
||||
|
||||
- What it means: The player failed to initialize or connect to the live stream (network or startup error).
|
||||
- What to try: Reload the Live view or click _Reset_. Verify `go2rtc` is running and the camera stream is reachable. Try switching to a different stream from the Live UI dropdown (if available) or use a different browser.
|
||||
|
||||
- Possible console messages from the player code:
|
||||
|
||||
- `Error opening MediaSource.`
|
||||
- `Browser reported a network error.`
|
||||
- `Max error count ${errorCount} exceeded.` (the numeric value will vary)
|
||||
|
||||
- **mse-decode**
|
||||
|
||||
- What it means: The browser reported a decoding error while trying to play the stream, which usually is a result of a codec incompatibility or corrupted frames.
|
||||
- What to try: Check the browser console for the supported and negotiated codecs. Ensure your camera/restream is using H.264 video and AAC audio (these are the most compatible). If your camera uses a non-standard audio codec, configure `go2rtc` to transcode the stream to AAC. Try another browser (some browsers have stricter MSE/codec support) and, for iPhone, ensure you're on iOS 17.1 or newer.
|
||||
|
||||
- Possible console messages from the player code:
|
||||
|
||||
- `Safari cannot open MediaSource.`
|
||||
- `Safari reported InvalidStateError.`
|
||||
- `Safari reported decoding errors.`
|
||||
|
||||
- **stalled**
|
||||
|
||||
- What it means: Playback has stalled because the player has fallen too far behind live (extended buffering or no data arriving).
|
||||
- What to try: This is usually indicative of the browser struggling to decode too many high-resolution streams at once. Try selecting a lower-bandwidth stream (substream), reduce the number of live streams open, improve the network connection, or lower the camera resolution. Also check your camera's keyframe (I-frame) interval — shorter intervals make playback start and recover faster. You can also try increasing the timeout value in the UI pane of Frigate's settings.
|
||||
|
||||
- Possible console messages from the player code:
|
||||
|
||||
- `Buffer time (10 seconds) exceeded, browser may not be playing media correctly.`
|
||||
- `Media playback has stalled after <n> seconds due to insufficient buffering or a network interruption.` (the seconds value will vary)
|
||||
|
||||
@ -270,21 +264,18 @@ When your browser runs into problems playing back your camera streams, it will l
|
||||
If you are using continuous streaming or you are loading more than a few high resolution streams at once on the dashboard, your browser may struggle to begin playback of your streams before the timeout. Frigate always prioritizes showing a live stream as quickly as possible, even if it is a lower quality jsmpeg stream. You can use the "Reset" link/button to try loading your high resolution stream again.
|
||||
|
||||
Errors in stream playback (e.g., connection failures, codec issues, or buffering timeouts) that cause the fallback to low bandwidth mode (jsmpeg) are logged to the browser console for easier debugging. These errors may include:
|
||||
|
||||
- Network issues (e.g., MSE or WebRTC network connection problems).
|
||||
- Unsupported codecs or stream formats (e.g., H.265 in WebRTC, which is not supported in some browsers).
|
||||
- Buffering timeouts or low bandwidth conditions causing fallback to jsmpeg.
|
||||
- Browser compatibility problems (e.g., iOS Safari limitations with MSE).
|
||||
|
||||
To view browser console logs:
|
||||
|
||||
1. Open the Frigate Live View in your browser.
|
||||
2. Open the browser's Developer Tools (F12 or right-click > Inspect > Console tab).
|
||||
3. Reproduce the error (e.g., load a problematic stream or simulate network issues).
|
||||
4. Look for messages prefixed with the camera name.
|
||||
|
||||
These logs help identify if the issue is player-specific (MSE vs. WebRTC) or related to camera configuration (e.g., go2rtc streams, codecs). If you see frequent errors:
|
||||
|
||||
- Verify your camera's H.264/AAC settings (see [Frigate's camera settings recommendations](#camera_settings_recommendations)).
|
||||
- Check go2rtc configuration for transcoding (e.g., audio to AAC/OPUS).
|
||||
- Test with a different stream via the UI dropdown (if `live -> streams` is configured).
|
||||
@ -324,9 +315,7 @@ When your browser runs into problems playing back your camera streams, it will l
|
||||
To prevent this, make the `detect` stream match the go2rtc live stream's aspect ratio (resolution does not need to match, just the aspect ratio). You can either adjust the camera's output resolution or set the `width` and `height` values in your config's `detect` section to a resolution with an aspect ratio that matches.
|
||||
|
||||
Example: Resolutions from two streams
|
||||
|
||||
- Mismatched (may cause aspect ratio switching on the dashboard):
|
||||
|
||||
- Live/go2rtc stream: 1920x1080 (16:9)
|
||||
- Detect stream: 640x352 (~1.82:1, not 16:9)
|
||||
|
||||
|
||||
@ -33,18 +33,55 @@ Your config file will be updated with the relative coordinates of the mask/zone:
|
||||
|
||||
```yaml
|
||||
motion:
|
||||
mask: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
|
||||
mask:
|
||||
# Motion mask name (required)
|
||||
mask1:
|
||||
# Optional: A friendly name for the mask
|
||||
friendly_name: "Timestamp area"
|
||||
# Optional: Whether this mask is active (default: true)
|
||||
enabled: true
|
||||
# Required: Coordinates polygon for the mask
|
||||
coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400"
|
||||
```
|
||||
|
||||
Multiple masks can be listed in your config.
|
||||
Multiple motion masks can be listed in your config:
|
||||
|
||||
```yaml
|
||||
motion:
|
||||
mask:
|
||||
- 0.239,1.246,0.175,0.901,0.165,0.805,0.195,0.802
|
||||
- 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456
|
||||
mask1:
|
||||
friendly_name: "Timestamp area"
|
||||
enabled: true
|
||||
coordinates: "0.239,1.246,0.175,0.901,0.165,0.805,0.195,0.802"
|
||||
mask2:
|
||||
friendly_name: "Tree area"
|
||||
enabled: true
|
||||
coordinates: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456"
|
||||
```
|
||||
|
||||
Object filter masks can also be created through the UI or manually in the config. They are configured under the object filters section for each object type:
|
||||
|
||||
```yaml
|
||||
objects:
|
||||
filters:
|
||||
person:
|
||||
mask:
|
||||
person_filter1:
|
||||
friendly_name: "Roof area"
|
||||
enabled: true
|
||||
coordinates: "0.000,0.000,1.000,0.000,1.000,0.400,0.000,0.400"
|
||||
car:
|
||||
mask:
|
||||
car_filter1:
|
||||
friendly_name: "Sidewalk area"
|
||||
enabled: true
|
||||
coordinates: "0.000,0.700,1.000,0.700,1.000,1.000,0.000,1.000"
|
||||
```
|
||||
|
||||
## Enabling/Disabling Masks
|
||||
|
||||
Both motion masks and object filter masks can be toggled on or off without removing them from the configuration. Disabled masks are completely ignored at runtime - they will not affect motion detection or object filtering. This is useful for temporarily disabling a mask during certain seasons or times of day without modifying the configuration.
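For example, a mask can be kept in the config but switched off (a minimal sketch using the keys shown above):

```yaml
motion:
  mask:
    mask1:
      friendly_name: "Timestamp area"
      enabled: false # mask stays in the config but is ignored at runtime
      coordinates: "0.000,0.427,0.002,0.000,0.999,0.000"
```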
|
||||
|
||||
### Further Clarification
|
||||
|
||||
This is a response to a [question posed on reddit](https://www.reddit.com/r/homeautomation/comments/ppxdve/replacing_my_doorbell_with_a_security_camera_a_6/hd876w4?utm_source=share&utm_medium=web2x&context=3):
|
||||
|
||||
@ -38,7 +38,6 @@ Remember that motion detection is just used to determine when object detection s
|
||||
The threshold value dictates how much of a change in a pixel's luminance is required to be considered motion.
|
||||
|
||||
```yaml
|
||||
# default threshold value
|
||||
motion:
|
||||
# Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
|
||||
# Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
|
||||
@ -53,7 +52,6 @@ Watching the motion boxes in the debug view, increase the threshold until you on
|
||||
### Contour Area
|
||||
|
||||
```yaml
|
||||
# default contour_area value
|
||||
motion:
|
||||
# Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below)
|
||||
# Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
|
||||
@ -81,27 +79,49 @@ However, if the preferred day settings do not work well at night it is recommend
|
||||
|
||||
## Tuning For Large Changes In Motion
|
||||
|
||||
### Lightning Threshold
|
||||
|
||||
```yaml
|
||||
# default lightning_threshold:
|
||||
motion:
|
||||
# Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
|
||||
# needs to recalibrate. (default: shown below)
|
||||
# Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
|
||||
# Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching
|
||||
# a doorbell camera.
|
||||
# Optional: The percentage of the image used to detect lightning or
|
||||
# other substantial changes where motion detection needs to
|
||||
# recalibrate. (default: shown below)
|
||||
# Increasing this value will make motion detection more likely
|
||||
# to consider lightning or IR mode changes as valid motion.
|
||||
# Decreasing this value will make motion detection more likely
|
||||
# to ignore large amounts of motion such as a person
|
||||
# approaching a doorbell camera.
|
||||
lightning_threshold: 0.8
|
||||
```
|
||||
|
||||
Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in a pause in object detection. `lightning_threshold` defines the percentage of the image used to detect these substantial changes. Increasing this value makes motion detection more likely to treat large changes (like IR mode switches) as valid motion. Decreasing it makes motion detection more likely to ignore large amounts of motion, such as a person approaching a doorbell camera.
|
||||
|
||||
Note that `lightning_threshold` does **not** stop motion-based recordings from being saved — it only prevents additional motion analysis after the threshold is exceeded, reducing false positive object detections during high-motion periods (e.g. storms or PTZ sweeps) without interfering with recordings.
|
||||
|
||||
:::warning
|
||||
|
||||
Some cameras like doorbell cameras may have missed detections when someone walks directly in front of the camera and the lightning_threshold causes motion detection to be re-calibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed.
|
||||
Some cameras, like doorbell cameras, may have missed detections when someone walks directly in front of the camera and the `lightning_threshold` causes motion detection to recalibrate. In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed.
|
||||
|
||||
:::
|
||||
|
||||
:::note
|
||||
### Skip Motion On Large Scene Changes
|
||||
|
||||
Lightning threshold does not stop motion based recordings from being saved.
|
||||
```yaml
|
||||
motion:
|
||||
# Optional: Fraction of the frame that must change in a single update
|
||||
# before Frigate will completely ignore any motion in that frame.
|
||||
# Values range between 0.0 and 1.0, leave unset (null) to disable.
|
||||
# Setting this to 0.7 would cause Frigate to **skip** reporting
|
||||
# motion boxes when more than 70% of the image appears to change
|
||||
# (e.g. during lightning storms, IR/color mode switches, or other
|
||||
# sudden lighting events).
|
||||
skip_motion_threshold: 0.7
|
||||
```
|
||||
|
||||
This option is handy when you want to prevent large transient changes from triggering recordings or object detection. It differs from `lightning_threshold` because it completely suppresses motion instead of just forcing a recalibration.

:::warning

When the skip threshold is exceeded, **no motion is reported** for that frame, meaning **nothing is recorded** for that frame. That means you can miss something important, like a PTZ camera auto-tracking an object or activity while the camera is moving. If you prefer to guarantee that every frame is saved, leave this unset and accept occasional recordings containing scene noise — they typically only take up a few megabytes and are quick to scan in the timeline UI.

:::

@ -34,7 +34,7 @@ Frigate supports multiple different detectors that work on different types of ha

**Nvidia GPU**

- [ONNX](#onnx): Nvidia GPUs will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.

**Nvidia Jetson** <CommunityBadge />

@ -49,6 +49,11 @@ Frigate supports multiple different detectors that work on different types of ha

- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs.

**AXERA** <CommunityBadge />

- [AXEngine](#axera): axmodels can run on AXERA AI accelerators.

**For Testing**

- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Uses the CPU to run a TFLite model. This is not recommended; in most cases OpenVINO can be used in CPU mode with better results.

@ -65,7 +70,7 @@ This does not affect using hardware for accelerating other tasks such as [semant

# Officially Supported Detectors

Frigate provides a number of builtin detector types. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.

## Edge TPU Detector
@ -157,7 +162,13 @@ A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite`

#### YOLOv9

YOLOv9 models that are compiled for TensorFlow Lite and properly quantized are supported, but not included by default. See the [instructions](#yolov9-for-google-coral-support) for downloading a model with support for the Google Coral.

:::tip

**Frigate+ Users:** Follow the [instructions](/integrations/plus#use-models) to set a model ID in your config file.

:::

<details>
<summary>YOLOv9 Setup & Config</summary>
@ -566,7 +577,7 @@ $ docker run --device=/dev/kfd --device=/dev/dri \

When using Docker Compose:

```yaml {4-6}
services:
  frigate:
    ...
@ -597,7 +608,7 @@ $ docker run -e HSA_OVERRIDE_GFX_VERSION=10.0.0 \

When using Docker Compose:

```yaml {4-5}
services:
  frigate:
    ...
@ -654,11 +665,9 @@ ONNX is an open format for building machine learning models, Frigate supports ru

If the correct build is used for your GPU then the GPU will be detected and used automatically.

- **Intel**

  - OpenVINO will automatically be detected and used with the ONNX detector in the default Frigate image.

- **Nvidia**
@ -1474,6 +1483,42 @@ model:

  input_pixel_format: rgb/bgr # look at the model.json to figure out which to put here
```

## AXERA

Hardware accelerated object detection is supported on the following SoCs:

- AX650N
- AX8850N

This implementation uses the [AXera Pulsar2 Toolchain](https://huggingface.co/AXERA-TECH/Pulsar2).

See the [installation docs](../frigate/installation.md#axera) for information on configuring the AXEngine hardware.

### Configuration

When configuring the AXEngine detector, you must specify the model name.

#### YOLOv9

A yolov9 model is provided in the container at `/axmodels` and is used by this detector type by default.

Use the model configuration shown below when using the axengine detector with the default axmodel:

```yaml
detectors:
  axengine:
    type: axengine

model:
  path: frigate-yolov9-tiny
  model_type: yolo-generic
  width: 320
  height: 320
  input_dtype: int
  input_pixel_format: bgr
  labelmap_path: /labelmap/coco-80.txt
```

# Models

Some model types are not included in Frigate by default.

@ -1514,11 +1559,11 @@ RF-DETR can be exported as ONNX by running the command below. You can copy and p

```sh
docker build . --build-arg MODEL_SIZE=Nano --rm --output . -f- <<'EOF'
FROM python:3.12 AS build
RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/*
COPY --from=ghcr.io/astral-sh/uv:0.10.4 /uv /bin/
WORKDIR /rfdetr
RUN uv pip install --system rfdetr[onnxexport] torch==2.8.0 onnx==1.19.1 transformers==4.57.6 onnxscript
ARG MODEL_SIZE
RUN python3 -c "from rfdetr import RFDETR${MODEL_SIZE}; x = RFDETR${MODEL_SIZE}(resolution=320); x.export(simplify=True)"
FROM scratch
@ -1556,19 +1601,23 @@ cd tensorrt_demos/yolo
python3 yolo_to_onnx.py -m yolov7-320
```

#### YOLOv9 for Google Coral Support

[Download the model](https://github.com/dbro/frigate-detector-edgetpu-yolo9/releases/download/v1.0/yolov9-s-relu6-best_320_int8_edgetpu.tflite), bind mount the file into the container, and provide the path with `model.path`. Note that the linked model requires a 17-label [labelmap file](https://raw.githubusercontent.com/dbro/frigate-detector-edgetpu-yolo9/refs/heads/main/labels-coco17.txt) that includes only 17 COCO classes.

#### YOLOv9 for other detectors

YOLOv9 models can be exported as ONNX using the command below. You can copy and paste the whole thing to your terminal and execute, altering `MODEL_SIZE=t` and `IMG_SIZE=320` in the first line to the [model size](https://github.com/WongKinYiu/yolov9#performance) you would like to convert (available model sizes are `t`, `s`, `m`, `c`, and `e`, common image sizes are `320` and `640`).

```sh
docker build . --build-arg MODEL_SIZE=t --build-arg IMG_SIZE=320 --output . -f- <<'EOF'
FROM python:3.11 AS build
RUN apt-get update && apt-get install --no-install-recommends -y cmake libgl1 && rm -rf /var/lib/apt/lists/*
COPY --from=ghcr.io/astral-sh/uv:0.10.4 /uv /bin/
WORKDIR /yolov9
ADD https://github.com/WongKinYiu/yolov9.git .
RUN uv pip install --system -r requirements.txt
RUN uv pip install --system onnx==1.18.0 onnxruntime onnx-simplifier==0.4.* onnxscript
ARG MODEL_SIZE
ARG IMG_SIZE
ADD https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-${MODEL_SIZE}-converted.pt yolov9-${MODEL_SIZE}.pt


docs/docs/configuration/profiles.md (new file, 188 lines)

@ -0,0 +1,188 @@
---
id: profiles
title: Profiles
---

Profiles allow you to define named sets of camera configuration overrides that can be activated and deactivated at runtime without restarting Frigate. This is useful for scenarios like switching between "Home" and "Away" modes, daytime and nighttime configurations, or any situation where you want to quickly change how multiple cameras behave.

## How Profiles Work

Profiles operate as a two-level system:

1. **Profile definitions** are declared at the top level of your config under `profiles`. Each definition has a machine name (the key) and a `friendly_name` for display in the UI.
2. **Camera profile overrides** are declared under each camera's `profiles` section, keyed by the profile name. Only the settings you want to change need to be specified — everything else is inherited from the camera's base configuration.

When a profile is activated, Frigate merges each camera's profile overrides on top of its base config. When the profile is deactivated, all cameras revert to their original settings. Only one profile can be active at a time.

:::info

Profile changes are applied in-memory and take effect immediately — no restart is required. The active profile is persisted across Frigate restarts (stored in the `/config/.active_profile` file).

:::

## Configuration

The easiest way to define profiles is to use the Frigate UI. Profiles can also be configured manually in your configuration file.

### Using the UI

To create and manage profiles from the UI, open **Settings**. From there you can:

1. **Create a profile** — Navigate to **Profiles**. Click the **Add Profile** button, enter a name (and optionally a profile ID).
2. **Configure overrides** — Navigate to a camera configuration section (e.g. Motion detection, Record, Notifications). In the top right, two buttons will appear - choose a camera and a profile from the profile selector to edit overrides for that camera and section. Only the fields you change will be stored as overrides — fields that require a restart are hidden since profiles are applied at runtime. You can click the **Remove Profile Override** button to clear a section's overrides for the selected profile.
3. **Activate a profile** — Use the **Profiles** option in Frigate's main menu to choose a profile. Alternatively, in Settings, navigate to **Profiles**, then choose a profile in the Active Profile dropdown to activate it. The active profile is also shown in the status bar at the bottom of the screen on desktop browsers.
4. **Delete a profile** — Navigate to **Profiles**, then click the trash icon for a profile. This removes the profile definition and all camera overrides associated with it.

### Defining Profiles in YAML

First, define your profiles at the top level of your Frigate config. Every profile name referenced by a camera must be defined here.

```yaml
profiles:
  home:
    friendly_name: Home
  away:
    friendly_name: Away
  night:
    friendly_name: Night Mode
```

### Camera Profile Overrides

Under each camera, add a `profiles` section with overrides for each profile. You only need to include the settings you want to change.

```yaml
cameras:
  front_door:
    ffmpeg:
      inputs:
        - path: rtsp://camera:554/stream
          roles:
            - detect
            - record
    detect:
      enabled: true
    record:
      enabled: true
    profiles:
      away:
        detect:
          enabled: true
        notifications:
          enabled: true
        objects:
          track:
            - person
            - car
            - package
        review:
          alerts:
            labels:
              - person
              - car
              - package
      home:
        detect:
          enabled: true
        notifications:
          enabled: false
        objects:
          track:
            - person
```

### Supported Override Sections

The following camera configuration sections can be overridden in a profile:

| Section            | Description                                |
| ------------------ | ------------------------------------------ |
| `enabled`          | Enable or disable the camera entirely      |
| `audio`            | Audio detection settings                   |
| `birdseye`         | Birdseye view settings                     |
| `detect`           | Object detection settings                  |
| `face_recognition` | Face recognition settings                  |
| `lpr`              | License plate recognition settings         |
| `motion`           | Motion detection settings                  |
| `notifications`    | Notification settings                      |
| `objects`          | Object tracking and filter settings        |
| `record`           | Recording settings                         |
| `review`           | Review alert and detection settings        |
| `snapshots`        | Snapshot settings                          |
| `zones`            | Zone definitions (merged with base zones)  |

:::note

Only the fields you explicitly set in a profile override are applied. All other fields retain their base configuration values. For zones, profile zones are merged with the camera's base zones — any zone defined in the profile will override or add to the base zones.

:::
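
As an illustrative sketch of the zone merge behavior (the camera and zone names here are hypothetical), a profile can redefine one base zone while any other base zones stay in effect:

```yaml
cameras:
  front_door:
    zones:
      porch:
        coordinates: 0.1,0.5,0.9,0.5,0.9,1.0,0.1,1.0
    profiles:
      night:
        zones:
          # replaces the base "porch" polygon; other base zones are untouched
          porch:
            coordinates: 0.2,0.4,0.8,0.4,0.8,1.0,0.2,1.0
```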

## Activating Profiles

Profiles can be activated and deactivated from the Frigate UI. Open the Settings cog and select **Profiles** from the submenu to see all defined profiles. From there you can activate any profile or deactivate the current one. The active profile is indicated in the UI so you always know which profile is in effect.

## Example: Home / Away Setup

A common use case is having different detection and notification settings based on whether you are home or away.

```yaml
profiles:
  home:
    friendly_name: Home
  away:
    friendly_name: Away

cameras:
  front_door:
    ffmpeg:
      inputs:
        - path: rtsp://camera:554/stream
          roles:
            - detect
            - record
    detect:
      enabled: true
    record:
      enabled: true
    notifications:
      enabled: false
    profiles:
      away:
        notifications:
          enabled: true
        review:
          alerts:
            labels:
              - person
              - car
      home:
        notifications:
          enabled: false

  indoor_cam:
    ffmpeg:
      inputs:
        - path: rtsp://camera:554/indoor
          roles:
            - detect
            - record
    detect:
      enabled: false
    record:
      enabled: false
    profiles:
      away:
        enabled: true
        detect:
          enabled: true
        record:
          enabled: true
      home:
        enabled: false
```

In this example:

- **Away profile**: The front door camera enables notifications and tracks specific alert labels. The indoor camera is fully enabled with detection and recording.
- **Home profile**: The front door camera disables notifications. The indoor camera is completely disabled for privacy.
- **No profile active**: All cameras use their base configuration values.

@ -130,7 +130,7 @@ When exporting a time-lapse the default speed-up is 25x with 30 FPS. This means

To configure the speed-up factor, the frame rate and further custom settings, the configuration parameter `timelapse_args` can be used. The below configuration example would change the time-lapse speed to 60x (for fitting 1 hour of recording into 1 minute of time-lapse) with 25 FPS:

```yaml {3-4}
record:
  enabled: True
  export:
@ -139,7 +139,13 @@ record:

:::tip

When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g. when camera resolution exceeds hardware encoder limits) by setting `cameras.<camera>.record.export.hwaccel_args` with the appropriate settings. Using an unrecognized value or an empty string will fall back to software encoding (libx264).

:::
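
For example, a camera whose resolution exceeds the hardware encoder's limits can be forced back to software encoding (a minimal sketch; the camera name is hypothetical):

```yaml
cameras:
  high_res_cam:
    record:
      export:
        # an empty string falls back to software encoding (libx264)
        hwaccel_args: ""
```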

:::tip

The encoder determines its own behavior so the resulting file size may be undesirably large. To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario.

:::
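
For example, to fit 1 hour of recording into 1 minute at 25 FPS while capping file size with `-qp` (an illustrative sketch; adjust the value for your own quality/size tradeoff):

```yaml
record:
  export:
    # 60x speed-up (1/60 ≈ 0.0167) at 25 FPS with quantisation parameter 28
    timelapse_args: "-vf setpts=0.0167*PTS -r 25 -qp 28"
```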
@ -148,19 +154,16 @@ To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (whe

Apple devices running the Safari browser may fail to playback h.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices.

## Syncing Media Files With Disk

Media files (event snapshots, event thumbnails, review thumbnails, previews, exports, and recordings) can become orphaned when database entries are deleted but the corresponding files remain on disk.

Normal operation may leave small numbers of orphaned files until Frigate's scheduled cleanup, but crashes, configuration changes, or upgrades may cause more orphaned files that Frigate does not clean up. This feature checks the file system for media files and removes any that are not referenced in the database.

The Maintenance pane in the Frigate UI or the API endpoint `POST /api/media/sync` can be used to trigger a media sync. When using the API, a job ID is returned and the operation continues on the server. Status can be checked with the `/api/media/sync/status/{job_id}` endpoint.
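
For example, with `curl` (a minimal sketch; the host, port, and authentication depend on your deployment):

```sh
# Trigger a media sync; the JSON response includes a job ID
curl -X POST http://frigate.local:5000/api/media/sync

# Poll the job status, substituting the ID returned above
curl http://frigate.local:5000/api/media/sync/status/<job_id>
```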

:::warning

This operation uses considerable CPU resources and includes a safety threshold that aborts if more than 50% of files would be deleted. Only run it when necessary. If you set `force: true`, the safety threshold will be bypassed; do not use `force` unless you are certain the deletions are intended.

:::

@ -16,6 +16,8 @@ mqtt:
  # Optional: Enable mqtt server (default: shown below)
  enabled: True
  # Required: host name
  # NOTE: MQTT host can be specified with an environment variable or docker secrets that must begin with 'FRIGATE_'.
  # e.g. host: '{FRIGATE_MQTT_HOST}'
  host: mqtt.server.com
  # Optional: port (default: shown below)
  port: 1883
@ -73,11 +75,19 @@ tls:
  # Optional: Enable TLS for port 8971 (default: shown below)
  enabled: True

# Optional: Networking configuration
networking:
  # Optional: Enable IPv6 on 5000, and 8971 if tls is configured (default: shown below)
  ipv6:
    enabled: False
  # Optional: Override ports Frigate uses for listening (defaults: shown below)
  # An IP address may also be provided to bind to a specific interface, e.g. ip:port
  # NOTE: This setting is for advanced users and may break some integrations. The majority
  # of users should change ports in the docker compose file
  # or use the docker run `--publish` option to select a different port.
  listen:
    internal: 5000
    external: 8971

# Optional: Proxy configuration
proxy:
@ -337,7 +347,15 @@ objects:
  # Optional: mask to prevent all object types from being detected in certain areas (default: no mask)
  # Checks based on the bottom center of the bounding box of the object.
  # NOTE: This mask is COMBINED with the object type specific mask below
  mask:
    # Object filter mask name (required)
    mask1:
      # Optional: A friendly name for the mask
      friendly_name: "Object filter mask area"
      # Optional: Whether this mask is active (default: true)
      enabled: true
      # Required: Coordinates polygon for the mask
      coordinates: "0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278"
  # Optional: filters to reduce false positives for specific object types
  filters:
    person:
@ -357,7 +375,15 @@ objects:
      threshold: 0.7
      # Optional: mask to prevent this object type from being detected in certain areas (default: no mask)
      # Checks based on the bottom center of the bounding box of the object
      mask:
        # Object filter mask name (required)
        mask1:
          # Optional: A friendly name for the mask
          friendly_name: "Object filter mask area"
          # Optional: Whether this mask is active (default: true)
          enabled: true
          # Required: Coordinates polygon for the mask
          coordinates: "0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278"
  # Optional: Configuration for AI generated tracked object descriptions
  genai:
    # Optional: Enable AI object description generation (default: shown below)
@ -456,12 +482,16 @@ motion:
  # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
  # The value should be between 1 and 255.
  threshold: 30
  # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection needs
  # to recalibrate and motion checks stop for that frame. Recordings are unaffected. (default: shown below)
  # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
  # Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera.
  lightning_threshold: 0.8
  # Optional: Fraction of the frame that must change in a single update before motion boxes are completely
  # ignored. Values range between 0.0 and 1.0. When exceeded, no motion boxes are reported and **no motion
  # recording** is created for that frame. Leave unset (null) to disable this feature. Use with care on PTZ
  # cameras or other situations where you require guaranteed frame capture.
  skip_motion_threshold: None
  # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below)
  # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
  # make motion detection more sensitive to smaller moving objects.
@ -481,7 +511,15 @@ motion:
  frame_height: 100
  # Optional: motion mask
  # NOTE: see docs for more detailed info on creating masks
  mask:
    # Motion mask name (required)
    mask1:
      # Optional: A friendly name for the mask
      friendly_name: "Motion mask area"
      # Optional: Whether this mask is active (default: true)
      enabled: true
      # Required: Coordinates polygon for the mask
      coordinates: "0.000,0.469,1.000,0.469,1.000,1.000,0.000,1.000"
  # Optional: improve contrast (default: shown below)
  # Enables dynamic contrast improvement. This should help improve night detections at the cost of making motion detection more sensitive
  # for daytime.
@ -510,8 +548,6 @@ record:
  # Optional: Number of minutes to wait between cleanup runs (default: shown below)
  # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
  expire_interval: 60
  # Optional: Continuous retention settings
  continuous:
    # Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below)
@ -534,6 +570,8 @@ record:
    # The -r (framerate) dictates how smooth the output video is.
    # So the args would be -vf setpts=0.02*PTS -r 30 in that case.
    timelapse_args: "-vf setpts=0.04*PTS -r 30"
    # Optional: Global hardware acceleration settings for timelapse exports. (default: inherit)
    hwaccel_args: auto
  # Optional: Recording Preview Settings
  preview:
    # Optional: Quality of recording preview (default: shown below).
@ -580,13 +618,12 @@ record:
    # never stored, so setting the mode to "all" here won't bring them back.
    mode: motion

# Optional: Configuration for the snapshots written to the clips directory for each tracked object
# Timestamp, bounding_box, crop and height settings are applied by default to API requests for snapshots.
# NOTE: Can be overridden at the camera level
snapshots:
  # Optional: Enable writing snapshot images to /media/frigate/clips (default: shown below)
  enabled: False
  # Optional: save a clean copy of the snapshot image (default: shown below)
  clean_copy: True
  # Optional: print a timestamp on the snapshots (default: shown below)
  timestamp: False
  # Optional: draw bounding box on the snapshots (default: shown below)
@ -604,8 +641,8 @@ snapshots:
  # Optional: Per object retention days
  objects:
    person: 15
  # Optional: quality of the encoded snapshot image, 0-100 (default: shown below)
  quality: 60

# Optional: Configuration for semantic search capability
semantic_search:
@ -752,7 +789,7 @@ classification:
  interval: None

# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.13)
# NOTE: The default go2rtc API port (1984) must be used,
# changing this port for the integrated go2rtc instance is not supported.
go2rtc:
@ -838,6 +875,11 @@ cameras:
    # Optional: camera specific output args (default: inherit)
    # output_args:

    # Optional: camera specific hwaccel args for timelapse export (default: inherit)
    # record:
    #   export:
    #     hwaccel_args:

    # Optional: timeout for highest scoring image before allowing it
    # to be replaced by a newer image. (default: shown below)
    best_image_timeout: 60
@ -853,6 +895,9 @@ cameras:
      front_steps:
        # Optional: A friendly name or descriptive text for the zones
        friendly_name: ""
        # Optional: Whether this zone is active (default: shown below)
        # Disabled zones are completely ignored at runtime - no object tracking or debug drawing
        enabled: True
        # Required: List of x,y coordinates to define the polygon of the zone.
        # NOTE: Presence in a zone is evaluated only based on the bottom center of the objects bounding box.
        coordinates: 0.033,0.306,0.324,0.138,0.439,0.185,0.042,0.428
@ -906,6 +951,8 @@ cameras:
    onvif:
      # Required: host of the camera being connected to.
      # NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
      # NOTE: ONVIF user, and password can be specified with environment variables or docker secrets
      # that must begin with 'FRIGATE_'. e.g. host: '{FRIGATE_ONVIF_USERNAME}'
      host: 0.0.0.0
      # Optional: ONVIF port for device (default: shown below).
      port: 8000
@ -982,6 +1029,49 @@ cameras:
      actions:
        - notification

    # Optional: Named config profiles with partial overrides that can be activated at runtime.
    # NOTE: Profile names must be defined in the top-level 'profiles' section.
    profiles:
      # Required: name of the profile (must match a top-level profile definition)
      away:
        # Optional: Enable or disable the camera when this profile is active (default: not set, inherits base)
        enabled: true
        # Optional: Override audio settings
        audio:
          enabled: true
        # Optional: Override birdseye settings
        # birdseye:
        # Optional: Override detect settings
        detect:
          enabled: true
        # Optional: Override face_recognition settings
        # face_recognition:
        # Optional: Override lpr settings
        # lpr:
        # Optional: Override motion settings
        # motion:
        # Optional: Override notification settings
        notifications:
          enabled: true
        # Optional: Override objects settings
        objects:
          track:
            - person
            - car
        # Optional: Override record settings
        record:
          enabled: true
        # Optional: Override review settings
        review:
          alerts:
            labels:
              - person
              - car
        # Optional: Override snapshot settings
        # snapshots:
        # Optional: Override or add zones (merged with base zones)
        # zones:

# Optional
ui:
  # Optional: Set a timezone to use in the UI (default: use browser local time)
@ -1048,4 +1138,14 @@ camera_groups:
    icon: LuCar
    # Required: index of this group
    order: 0

# Optional: Profile definitions for named config overrides
# NOTE: Profile names defined here can be referenced in camera profiles sections
profiles:
  # Required: name of the profile (machine name used internally)
  home:
    # Required: display name shown in the UI
    friendly_name: Home
  away:
    friendly_name: Away
```

@ -7,7 +7,7 @@ title: Restream

Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.

Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.13) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` key in the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration) for more advanced configurations and features.

:::note

@ -34,7 +34,7 @@ To improve connection speed when using Birdseye via restream you can enable a sm

The go2rtc restream can be secured with RTSP based username / password authentication. Ex:

```yaml {2-4}
go2rtc:
  rtsp:
    username: "admin"
@ -147,6 +147,7 @@ For example:
```yaml
go2rtc:
  streams:
    # highlight-error-line
    my_camera: rtsp://username:$@foo%@192.168.1.100
```

@ -155,6 +156,7 @@ becomes
```yaml
go2rtc:
  streams:
    # highlight-next-line
    my_camera: rtsp://username:$%40foo%25@192.168.1.100
```

@ -206,7 +208,7 @@ Enabling arbitrary exec sources allows execution of arbitrary commands through g

## Advanced Restream Configurations

The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:

:::warning

@ -71,7 +71,7 @@ To exclude a specific camera from alerts or detections, simply provide an empty

For example, to exclude objects on the camera _gatecamera_ from any detections, include this in your config:

```yaml {3-5}
cameras:
  gatecamera:
    review:
@ -13,7 +13,7 @@ Semantic Search is accessed via the _Explore_ view in the Frigate UI.

Semantic Search works by running a large AI model locally on your system. Small or underpowered systems like a Raspberry Pi will not run Semantic Search reliably or at all.

A minimum of 8GB of RAM is required to use Semantic Search. A CPU with AVX + AVX2 instructions is required to run Semantic Search. A GPU is not strictly required but will provide a significant performance increase over CPU-only systems.

For best performance, 16GB or more of RAM and a dedicated GPU are recommended.

@ -76,6 +76,40 @@ Switching between V1 and V2 requires reindexing your embeddings. The embeddings

:::

### GenAI Provider

Frigate can use a GenAI provider for semantic search embeddings when that provider has the `embeddings` role. Currently, only **llama.cpp** supports multimodal embeddings (both text and images).

To use llama.cpp for semantic search:

1. Configure a GenAI provider in your config with `embeddings` in its `roles`.
2. Set `semantic_search.model` to the GenAI config key (e.g. `default`).
3. Start the llama.cpp server with `--embeddings` and `--mmproj` for image support:

```yaml
genai:
  default:
    provider: llamacpp
    base_url: http://localhost:8080
    model: your-model-name
    roles:
      - embeddings
      - vision
      - tools

semantic_search:
  enabled: True
  model: default
```

The llama.cpp server must be started with `--embeddings` for the embeddings API, and a multi-modal embeddings model. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for details.
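
A minimal sketch of such a server invocation (the model filenames here are hypothetical):

```sh
# Serve a multimodal embeddings model with the embeddings API enabled
./llama-server -m embedding-model.gguf --mmproj mmproj-model.gguf --embeddings --port 8080
```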

:::note

Switching between Jina models and a GenAI provider requires reindexing. Embeddings from different backends are incompatible.

:::

### GPU Acceleration

The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation.

@ -3,10 +3,29 @@ id: snapshots
title: Snapshots
---

Frigate can save a snapshot image to `/media/frigate/clips` for each detected object, named `<camera>-<id>-clean.webp`. They are also accessible [via the api](../integrations/api/event-snapshot-events-event-id-snapshot-jpg-get.api.mdx).

Snapshots are accessible in the UI in the Explore pane. This allows for quick submission to the Frigate+ service.

To only save snapshots for objects that enter a specific zone, [see the zone docs](./zones.md#restricting-snapshots-to-specific-zones).

Snapshots sent via MQTT are configured in the [config file](/configuration) under `cameras -> your_camera -> mqtt`.

## Frame Selection

Frigate does not save every frame. It picks a single "best" frame for each tracked object based on detection confidence, object size, and the presence of key attributes like faces or license plates. Frames where the object touches the edge of the frame are deprioritized. That best frame is written to disk once tracking ends.

MQTT snapshots are published more frequently — each time a better thumbnail frame is found during tracking, or when the current best image is older than `best_image_timeout` (default: 60s). These use their own annotation settings configured under `cameras -> your_camera -> mqtt`.
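
A sketch of the per-camera MQTT snapshot options (the option names mirror the snapshot settings and should be verified against the full config reference):

```yaml
cameras:
  your_camera:
    mqtt:
      enabled: True
      # draw the bounding box and crop to the tracked object
      bounding_box: True
      crop: True
      # height of the published image in pixels
      height: 270
```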

## Rendering

Frigate stores a single clean snapshot on disk:

| API / Use                                | Result                                                                                               |
| ---------------------------------------- | ---------------------------------------------------------------------------------------------------- |
| Stored file                              | `<camera>-<id>-clean.webp`, always unannotated                                                        |
| `/api/events/<id>/snapshot.jpg`          | Starts from the camera's `snapshots` defaults, then applies any query param overrides at request time |
| `/api/events/<id>/snapshot-clean.webp`   | Returns the same stored snapshot without annotations                                                  |
| [Frigate+](/plus/first_model) submission | Uses the same stored clean snapshot                                                                   |

MQTT snapshots are configured separately under `cameras -> your_camera -> mqtt` and are unrelated to the stored event snapshot.

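As an illustration of request-time overrides on `snapshot.jpg` (the query parameters shown are assumptions mirroring the camera-level snapshot options; consult the API reference for the authoritative list):

```sh
# Fetch an event snapshot with a bounding box drawn, scaled to 500px tall
curl "http://frigate.local:5000/api/events/<event_id>/snapshot.jpg?bbox=1&h=500" -o snapshot.jpg
```
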
@ -20,7 +20,7 @@ tls:

TLS certificates can be mounted at `/etc/letsencrypt/live/frigate` using a bind mount or docker volume.

```yaml {3-4}
frigate:
  ...
  volumes:
@ -32,7 +32,7 @@ Within the folder, the private key is expected to be named `privkey.pem` and the

Note that certbot uses symlinks, and those can't be followed by the container unless it has access to the targets as well, so if using certbot you'll also have to mount the `archive` folder for your domain, e.g.:

```yaml {3-5}
frigate:
  ...
  volumes:
@ -46,7 +46,7 @@ Frigate automatically compares the fingerprint of the certificate at `/etc/letse

If you issue Frigate valid certificates you will likely want to configure it to run on port 443 so you can access it without a port number like `https://your-frigate-domain.com` by mapping 8971 to 443.

```yaml {3-4}
frigate:
  ...
  ports:

@ -10,6 +10,10 @@ For example, the cat in this image is currently in Zone 1, but **not** Zone 2.

Zones cannot have the same name as a camera. If you have multiple cameras covering the same area, a single zone can span all of them by configuring a zone with the same name on each camera.

## Enabling/Disabling Zones

Zones can be toggled on or off without removing them from the configuration. Disabled zones are completely ignored at runtime - objects will not be tracked for zone presence, and zones will not appear in the debug view. This is useful for temporarily disabling a zone during certain seasons or times of day without modifying the configuration.
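
A sketch of disabling a single zone in place (the camera and zone names are illustrative):

```yaml
cameras:
  name_of_your_camera:
    zones:
      pool_area:
        # the zone stays defined but is ignored at runtime
        enabled: False
        coordinates: ...
```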

During testing, enable the Zones option for the Debug view of your camera (Settings --> Debug) so you can adjust as needed. The zone line will increase in thickness when any object enters the zone.

To create a zone, follow [the steps for a "Motion mask"](masks.md), but use the section of the web UI for creating a zone instead.
@ -18,7 +22,7 @@ To create a zone, follow [the steps for a "Motion mask"](masks.md), but use the

Often you will only want alerts to be created when an object enters areas of interest. This is done using zones along with setting `required_zones`. Let's say you only want to have an alert created when an object enters your `entire_yard` zone; the config would be:

```yaml {6,8}
cameras:
  name_of_your_camera:
    review:
@ -86,7 +90,6 @@ cameras:

Only `car` objects can trigger the `front_yard_street` zone and only `person` can trigger the `entire_yard`. Objects will be tracked for any `person` that enters anywhere in the yard, and for cars only if they enter the street.

### Zone Loitering

Sometimes objects are expected to be passing through a zone, but an object loitering in an area is unexpected. Zones can be configured to have a minimum loitering time after which the object will be considered in the zone.
@ -94,6 +97,7 @@ Sometimes objects are expected to be passing through a zone, but an object loite
:::note

When using loitering zones, a review item will behave in the following way:

- When a person is in a loitering zone, the review item will remain active until the person leaves the loitering zone, regardless of whether they are stationary.
- When any other object is in a loitering zone, the review item will remain active until the loitering time is met. Then if the object is stationary the review item will end.

@ -104,6 +108,7 @@ cameras:
  name_of_your_camera:
    zones:
      sidewalk:
        # highlight-next-line
        loitering_time: 4 # unit is in seconds
        objects:
          - person
@ -118,6 +123,7 @@ cameras:
  name_of_your_camera:
    zones:
      front_yard:
        # highlight-next-line
        inertia: 3
        objects:
          - person
@ -130,6 +136,7 @@ cameras:
  name_of_your_camera:
    zones:
      driveway_entrance:
        # highlight-next-line
        inertia: 1
        objects:
          - car
@ -192,5 +199,6 @@ cameras:
        coordinates: ...
        distances: ...
        inertia: 1
        # highlight-next-line
        speed_threshold: 20 # unit is in kph or mph, depending on how unit_system is set (see above)
```

@ -17,15 +17,15 @@ From here, follow the guides for:
- [Web Interface](#web-interface)
- [Documentation](#documentation)

### Frigate Home Assistant App

This repository holds the Home Assistant App, for use with Home Assistant OS and compatible installations. It is the piece that allows you to run Frigate from your Home Assistant Supervisor tab.

Fork [blakeblackshear/frigate-hass-addons](https://github.com/blakeblackshear/frigate-hass-addons) to your own GitHub profile, then clone the forked repo to your local machine.

### Frigate Home Assistant Integration

This repository holds the custom integration that allows your Home Assistant installation to automatically create entities for your Frigate instance, whether you are running Frigate as a standalone Docker container or as a [Home Assistant App](#frigate-home-assistant-app).

Fork [blakeblackshear/frigate-hass-integration](https://github.com/blakeblackshear/frigate-hass-integration) to your own GitHub profile, then clone the forked repo to your local machine.

@ -89,6 +89,14 @@ After closing VS Code, you may still have containers running. To close everythin

### Testing

#### Unit Tests

GitHub will execute unit tests on new PRs. You must ensure that all tests pass.

```shell
python3 -u -m unittest
```

#### FFMPEG Hardware Acceleration

The following commands are used inside the container to ensure hardware acceleration is working properly.

@ -125,6 +133,28 @@ ffmpeg -hwaccel vaapi -hwaccel_device /dev/dri/renderD128 -hwaccel_output_format
ffmpeg -c:v h264_qsv -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/incoming/720p60.mp4 -f rawvideo -pix_fmt yuv420p pipe: > /dev/null
```

### Submitting a pull request

Code must be formatted, linted and type-checked. GitHub will run these checks on pull requests, so it is advised to run them yourself prior to opening one.

**Formatting**

```shell
ruff format frigate migrations docker *.py
```

**Linting**

```shell
ruff check frigate migrations docker *.py
```

**MyPy Static Typing**

```shell
python3 -u -m mypy --config-file frigate/mypy.ini frigate
```

## Web Interface

### Prerequisites

@ -26,7 +26,7 @@ I may earn a small commission for my endorsement, recommendation, testimonial, o

## Server

My current favorite is the Beelink EQ13 because of the efficient N100 CPU and dual NICs that allow you to set up a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU (with AVX + AVX2 instructions) and capable of running Debian should work fine. As a bonus, you may want to look for devices with an M.2 or PCIe slot that is compatible with the Google Coral, Hailo, or other AI accelerators.

Note that many of these mini PCs come with Windows pre-installed, and you will need to install Linux according to the [getting started guide](../guides/getting_started.md).

@ -41,8 +41,8 @@ If the EQ13 is out of stock, the link below may take you to a suggested alternat
| Name                                                                                                          | Capabilities                                                               | Notes                                               |
| ------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | --------------------------------------------------- |
| Beelink EQ13 (<a href="https://amzn.to/4jn2qVr" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | Can run object detection on several 1080p cameras with low-medium activity | Dual gigabit NICs for easy isolated camera network. |
| Intel 1120p ([Amazon](https://www.amazon.com/Beelink-i3-1220P-Computer-Display-Gigabit/dp/B0DDCKT9YP))         | Can handle a large number of 1080p cameras with high activity               |                                                     |
| Intel 125H ([Amazon](https://www.amazon.com/MINISFORUM-Pro-125H-Barebone-Computer-HDMI2-1/dp/B0FH21FSZM))      | Can handle a significant number of 1080p cameras with high activity         | Includes NPU for more efficient detection in 0.17+  |

## Detectors

@ -86,7 +86,7 @@ Frigate supports multiple different detectors that work on different types of ha

**Nvidia**

- [Nvidia GPU](#nvidia-gpus): Nvidia GPUs can provide efficient object detection.
  - [Supports majority of model architectures via ONNX](../../configuration/object_detectors#onnx-supported-models)
  - Runs well with any size models including large

@ -103,6 +103,10 @@ Frigate supports multiple different detectors that work on different types of ha

- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs to provide efficient object detection.

**AXERA** <CommunityBadge />

- [AXEngine](#axera): axera models can run on AXERA NPUs via AXEngine, delivering highly efficient object detection.

:::

### Hailo-8
@ -172,7 +176,7 @@ Inference speeds vary greatly depending on the CPU or GPU used, some known examp
| Intel Arc A380 | ~ 6 ms | | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | |
| Intel Arc A750 | ~ 4 ms | | 320: ~ 8 ms | | |

### Nvidia GPUs

Frigate is able to utilize an Nvidia GPU which supports the 12.x series of CUDA libraries.
@ -182,27 +186,26 @@ Frigate is able to utilize an Nvidia GPU which supports the 12.x series of CUDA

Make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.

#### Compatibility References:

[NVIDIA TensorRT Support Matrix](https://docs.nvidia.com/deeplearning/tensorrt-rtx/latest/getting-started/support-matrix.html)

[NVIDIA CUDA Compatibility](https://docs.nvidia.com/deploy/cuda-compatibility/index.html)

[NVIDIA GPU Compute Capability](https://developer.nvidia.com/cuda-gpus)

Inference is done with the `onnx` detector type. Speeds will vary greatly depending on the GPU and the model used.
`tiny (t)` variants are faster than the equivalent non-tiny model, some known examples are below:

✅ - Accelerated with CUDA Graphs
❌ - Not accelerated with CUDA Graphs

| Name        | ✅ YOLOv9 Inference Time               | ✅ RF-DETR Inference Time | ❌ YOLO-NAS Inference Time |
| ----------- | ------------------------------------- | ------------------------- | -------------------------- |
| GTX 1070    | s-320: 16 ms                          |                           | 320: 14 ms                 |
| RTX 3050    | t-320: 8 ms s-320: 10 ms s-640: 28 ms | Nano-320: ~ 12 ms         | 320: ~ 10 ms 640: ~ 16 ms  |
| RTX 3070    | t-320: 6 ms s-320: 8 ms s-640: 25 ms  | Nano-320: ~ 9 ms          | 320: ~ 8 ms 640: ~ 14 ms   |
| RTX 5060 Ti | t-320: 5 ms s-320: 7 ms s-640: 22 ms  | Nano-320: ~ 6 ms          |                            |
| RTX A4000   |                                       |                           | 320: ~ 15 ms               |
| Tesla P40   |                                       |                           | 320: ~ 105 ms              |

@ -290,6 +293,14 @@ The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms fo
| ssd mobilenet | ~ 25 ms  |
| yolov5m       | ~ 118 ms |

### AXERA

The default model for **AXEngine** is **yolov9**.

| Name        | AXERA AX650N/AX8850N Inference Time |
| ----------- | ----------------------------------- |
| yolov9-tiny | ~ 4 ms                              |

## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)

This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.

@ -3,11 +3,11 @@ id: installation
title: Installation
---

Frigate is a Docker container that can be run on any Docker host including as a [Home Assistant App](https://www.home-assistant.io/apps/). Note that the Home Assistant App is **not** the same thing as the integration. The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant, whether you are running Frigate as a standalone Docker container or as a Home Assistant App.

:::tip

If you already have Frigate installed as a Home Assistant App, check out the [getting started guide](../guides/getting_started#configuring-frigate) to configure Frigate.

:::

@ -56,7 +56,7 @@ services:
|
||||
volumes:
|
||||
- /path/to/your/config:/config
|
||||
- /path/to/your/storage:/media/frigate
|
||||
- type: tmpfs # Recommended: 1GB of memory
|
||||
- type: tmpfs # 1GB In-memory filesystem for recording segment storage
|
||||
target: /tmp/cache
|
||||
tmpfs:
|
||||
size: 1000000000
|
||||
@ -92,7 +92,7 @@ $ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 20 + 270480) / 1048576
|
||||
253MB
|
||||
```
|
||||
|
||||
The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.
|
||||
The shm size cannot be set per container for Home Assistant Apps. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.
|
||||
|
||||
## Extra Steps for Specific Hardware
|
||||
|
||||
@ -153,6 +153,7 @@ On Raspberry Pi OS **Trixie**, the Hailo driver is no longer shipped with the ke
|
||||
```
|
||||
/lib/modules/6.6.31+rpt-rpi-2712/kernel/drivers/media/pci/hailo/hailo_pci.ko.xz
|
||||
```
|
||||
|
||||
Save the module path to a variable:
|
||||
|
||||
```bash
|
||||
@ -185,7 +186,7 @@ On Raspberry Pi OS **Trixie**, the Hailo driver is no longer shipped with the ke
|
||||
|
||||
This command should return no results.
|
||||
|
||||
3. **Run the installation script**:
|
||||
2. **Run the installation script**:
|
||||
|
||||
Download the installation script:
|
||||
|
||||
@ -206,14 +207,13 @@ On Raspberry Pi OS **Trixie**, the Hailo driver is no longer shipped with the ke
|
||||
```
|
||||
|
||||
The script will:
|
||||
|
||||
- Install necessary build dependencies
|
||||
- Clone and build the Hailo driver from the official repository
|
||||
- Install the driver
|
||||
- Download and install the required firmware
|
||||
- Set up udev rules
|
||||
|
||||
4. **Reboot your system**:
|
||||
3. **Reboot your system**:
|
||||
|
||||
After the script completes successfully, reboot to load the firmware:
|
||||
|
||||
@ -221,7 +221,7 @@ On Raspberry Pi OS **Trixie**, the Hailo driver is no longer shipped with the ke
|
||||
sudo reboot
|
||||
```
|
||||
|
||||
5. **Verify the installation**:
|
||||
4. **Verify the installation**:
|
||||
|
||||
After rebooting, verify that the Hailo device is available:
|
||||
|
||||
@ -439,6 +439,42 @@ or add these options to your `docker run` command:
|
||||
|
||||
Next, you should configure [hardware object detection](/configuration/object_detectors#synaptics) and [hardware video processing](/configuration/hardware_acceleration_video#synaptics).
|
||||
|
||||
### AXERA
|
||||
|
||||
AXERA accelerators are available in an M.2 form factor, compatible with both Raspberry Pi and Orange Pi. This form factor has also been successfully tested on x86 platforms, making it a versatile choice for various computing environments.
|
||||
|
||||
#### Installation
|
||||
|
||||
Using AXERA accelerators requires the installation of the AXCL driver. We provide a convenient Linux script to complete this installation.
|
||||
|
||||
Follow these steps for installation:
|
||||
|
||||
1. Copy or download [this script](https://github.com/ivanshi1108/assets/releases/download/v0.16.2/user_installation.sh).
|
||||
2. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
|
||||
3. Run the script with `./user_installation.sh`
|
||||
|
||||
#### Setup
|
||||
|
||||
To set up Frigate, follow the default installation instructions using the standard image, for example `ghcr.io/blakeblackshear/frigate:stable`.
|
||||
|
||||
Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file:
|
||||
|
||||
```yaml
devices:
  - /dev/axcl_host
  - /dev/ax_mmb_dev
  - /dev/msg_userdev
volumes:
  - /usr/bin/axcl:/usr/bin/axcl
  - /usr/lib/axcl:/usr/lib/axcl
```
|
||||
|
||||
If you are using `docker run`, add these options to your command: `--device /dev/axcl_host --device /dev/ax_mmb_dev --device /dev/msg_userdev`
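For reference, a minimal `docker run` sketch combining the devices and volumes above (the image tag and host paths are illustrative):

```bash
# Illustrative only; adjust paths and the image tag for your setup.
docker run -d \
  --name frigate \
  --device /dev/axcl_host \
  --device /dev/ax_mmb_dev \
  --device /dev/msg_userdev \
  -v /usr/bin/axcl:/usr/bin/axcl \
  -v /usr/lib/axcl:/usr/lib/axcl \
  -v /path/to/your/config:/config \
  ghcr.io/blakeblackshear/frigate:stable
```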
|
||||
|
||||
#### Configuration
|
||||
|
||||
Finally, configure [hardware object detection](/configuration/object_detectors#axera) to complete the setup.
|
||||
|
||||
## Docker
|
||||
|
||||
Running through Docker with Docker Compose is the recommended install method.
|
||||
@ -462,7 +498,7 @@ services:
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- /path/to/your/config:/config
|
||||
- /path/to/your/storage:/media/frigate
|
||||
- type: tmpfs # Recommended: 1GB of memory
|
||||
- type: tmpfs # 1GB In-memory filesystem for recording segment storage
|
||||
target: /tmp/cache
|
||||
tmpfs:
|
||||
size: 1000000000
|
||||
@ -502,15 +538,15 @@ The official docker image tags for the current stable version are:
|
||||
|
||||
- `stable` - Standard Frigate build for amd64 & RPi Optimized Frigate build for arm64. This build includes support for Hailo devices as well.
|
||||
- `stable-standard-arm64` - Standard Frigate build for arm64
|
||||
- `stable-tensorrt` - Frigate build specific for amd64 devices running an nvidia GPU
|
||||
- `stable-tensorrt` - Frigate build specific for amd64 devices running an Nvidia GPU
|
||||
- `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector)
|
||||
|
||||
The community supported docker image tags for the current stable version are:
|
||||
|
||||
- `stable-tensorrt-jp6` - Frigate build optimized for nvidia Jetson devices running Jetpack 6
|
||||
- `stable-tensorrt-jp6` - Frigate build optimized for Nvidia Jetson devices running Jetpack 6
|
||||
- `stable-rk` - Frigate build for SBCs with Rockchip SoC
|
||||
|
||||
## Home Assistant Add-on
|
||||
## Home Assistant App
|
||||
|
||||
:::warning
|
||||
|
||||
@ -521,7 +557,7 @@ There are important limitations in HA OS to be aware of:
|
||||
- Separate local storage for media is not yet supported by Home Assistant
|
||||
- AMD GPUs are not supported because HA OS does not include the mesa driver.
|
||||
- Intel NPUs are not supported because HA OS does not include the NPU firmware.
|
||||
- Nvidia GPUs are not supported because addons do not support the nvidia runtime.
|
||||
- Nvidia GPUs are not supported because HA Apps do not support the Nvidia runtime.
|
||||
|
||||
:::
|
||||
|
||||
@ -531,27 +567,27 @@ See [the network storage guide](/guides/ha_network_storage.md) for instructions
|
||||
|
||||
:::
|
||||
|
||||
Home Assistant OS users can install via the Add-on repository.
|
||||
Home Assistant OS users can install via the App repository.
|
||||
|
||||
1. In Home Assistant, navigate to _Settings_ > _Add-ons_ > _Add-on Store_ > _Repositories_
|
||||
1. In Home Assistant, navigate to _Settings_ > _Apps_ > _App Store_ > _Repositories_
|
||||
2. Add `https://github.com/blakeblackshear/frigate-hass-addons`
|
||||
3. Install the desired variant of the Frigate Add-on (see below)
|
||||
3. Install the desired variant of the Frigate App (see below)
|
||||
4. Setup your network configuration in the `Configuration` tab
|
||||
5. Start the Add-on
|
||||
5. Start the App
|
||||
6. Use the _Open Web UI_ button to access the Frigate UI, then click in the _cog icon_ > _Configuration editor_ and configure Frigate to your liking
|
||||
|
||||
There are several variants of the Add-on available:
|
||||
There are several variants of the App available:
|
||||
|
||||
| Add-on Variant | Description |
|
||||
| App Variant | Description |
|
||||
| -------------------------- | ---------------------------------------------------------- |
|
||||
| Frigate | Current release with protection mode on |
|
||||
| Frigate (Full Access) | Current release with the option to disable protection mode |
|
||||
| Frigate Beta | Beta release with protection mode on |
|
||||
| Frigate Beta (Full Access) | Beta release with the option to disable protection mode |
|
||||
|
||||
If you are using hardware acceleration for ffmpeg, you **may** need to use the _Full Access_ variant of the Add-on. This is because the Frigate Add-on runs in a container with limited access to the host system. The _Full Access_ variant allows you to disable _Protection mode_ and give Frigate full access to the host system.
|
||||
If you are using hardware acceleration for ffmpeg, you **may** need to use the _Full Access_ variant of the App. This is because the Frigate App runs in a container with limited access to the host system. The _Full Access_ variant allows you to disable _Protection mode_ and give Frigate full access to the host system.
|
||||
|
||||
You can also edit the Frigate configuration file through the [VS Code Add-on](https://github.com/hassio-addons/addon-vscode) or similar. In that case, the configuration file will be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running. See the list of directories [here](../configuration/index.md#accessing-add-on-config-dir).
|
||||
You can also edit the Frigate configuration file through the [VS Code App](https://github.com/hassio-addons/addon-vscode) or similar. In that case, the configuration file will be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate App you are running. See the list of directories [here](../configuration/index.md#accessing-app-config-dir).
|
||||
|
||||
## Kubernetes
|
||||
|
||||
@ -689,3 +725,43 @@ docker run \
|
||||
```
|
||||
|
||||
Log in to QNAP and open Container Station. The Frigate container should be listed under 'Overview' and running. Open the Frigate Web UI by clicking the Frigate container, then clicking the URL shown at the top of the detail page.
|
||||
|
||||
## macOS - Apple Silicon
|
||||
|
||||
:::warning
|
||||
|
||||
macOS uses port 5000 for its AirPlay Receiver service. If you want to expose port 5000 in Frigate for local app and API access, the port will need to be mapped to another port on the host, e.g. 5001.
|
||||
|
||||
Failure to remap port 5000 on the host will result in the WebUI and all API endpoints on port 5000 being unreachable, even if port 5000 is exposed correctly in Docker.
|
||||
|
||||
:::
|
||||
|
||||
Docker containers on macOS can be orchestrated by either [Docker Desktop](https://docs.docker.com/desktop/setup/install/mac-install/) or [OrbStack](https://orbstack.dev). The difference in inference speeds is negligible; however, CPU usage, power consumption, and container start times will all be lower on OrbStack because it is a native Swift application.
|
||||
|
||||
To allow Frigate to use the Apple Silicon Neural Engine (NPU), the [Apple Silicon Detector](../configuration/object_detectors.md#apple-silicon-detector) must be running on the host (outside Docker).
|
||||
|
||||
#### Docker Compose example
|
||||
|
||||
```yaml
services:
  frigate:
    container_name: frigate
    image: ghcr.io/blakeblackshear/frigate:stable-standard-arm64
    restart: unless-stopped
    shm_size: "512mb" # update for your cameras based on calculation above
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /path/to/your/config:/config
      - /path/to/your/recordings:/recordings
    ports:
      - "8971:8971"
      # If exposing port 5000 on macOS, map it to a different host port such as 5001 or any other port with no conflicts
      # - "5001:5000" # Internal unauthenticated access. Expose carefully.
      - "8554:8554" # RTSP feeds
    extra_hosts:
      # This is very important
      # It allows Frigate access to the NPU on Apple Silicon via the Apple Silicon Detector
      - "host.docker.internal:host-gateway" # Required to talk to the NPU detector
    environment:
      - FRIGATE_RTSP_PASSWORD=password
```
|
||||
|
||||
@ -34,11 +34,14 @@ For commercial installations it is important to verify the number of supported c
|
||||
|
||||
There are many different hardware options for object detection depending on priorities and available hardware. See [the recommended hardware page](./hardware.md#detectors) for more specifics on what hardware is recommended for object detection.
|
||||
|
||||
### CPU
|
||||
|
||||
Frigate requires a CPU with AVX + AVX2 instructions. Most modern CPUs support AVX (Sandy Bridge, 2011, and later) and AVX2 (Haswell, 2013, and later), but they are generally absent in low-power or budget-oriented processors, particularly older Intel Pentium, Celeron, and Atom-based chips. Specifically, Intel Celeron and Pentium models prior to the 2020 Tiger Lake generation typically lack AVX. Older Intel Xeon models may have AVX, but may lack AVX2.
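To verify support before installing, you can inspect the CPU flags on a Linux host:

```bash
# Print which AVX instruction sets this CPU advertises.
# Empty output means the CPU lacks AVX entirely and cannot run Frigate's default build.
grep -o -w -E 'avx|avx2' /proc/cpuinfo | sort -u
```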
|
||||
|
||||
### Storage
|
||||
|
||||
Storage is an important consideration when planning a new installation. To get a more precise estimate of your storage requirements, you can use an IP camera storage calculator. Websites like [IPConfigure Storage Calculator](https://calculator.ipconfigure.com/) can help you determine the necessary disk space based on your camera settings.
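As a rough rule of thumb before reaching for a calculator (illustrative numbers): continuous recording consumes about `bitrate ÷ 8` bytes per second, so a single 4 Mbps camera writes roughly 0.5 MB/s, or about 43 GB per day, before retention settings are applied.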
|
||||
|
||||
|
||||
#### SSDs (Solid State Drives)
|
||||
|
||||
SSDs are an excellent choice for Frigate, offering high speed and responsiveness. The older concern that SSDs would quickly "wear out" from constant video recording is largely no longer valid for modern consumer and enterprise-grade SSDs.
|
||||
|
||||
@ -7,7 +7,7 @@ title: Updating
|
||||
|
||||
The current stable version of Frigate is **0.17.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.17.0).
|
||||
|
||||
Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups.
|
||||
Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant App, etc.). Below are instructions for the most common setups.
|
||||
|
||||
## Before You Begin
|
||||
|
||||
@ -20,7 +20,6 @@ Keeping Frigate up to date ensures you benefit from the latest features, perform
|
||||
If you’re running Frigate via Docker (recommended method), follow these steps:
|
||||
|
||||
1. **Stop the Container**:
|
||||
|
||||
- If using Docker Compose:
|
||||
```bash
|
||||
docker compose down frigate
|
||||
@ -31,9 +30,8 @@ If you’re running Frigate via Docker (recommended method), follow these steps:
|
||||
```
|
||||
|
||||
2. **Update and Pull the Latest Image**:
|
||||
|
||||
- If using Docker Compose:
|
||||
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.17.0` instead of `0.16.3`). For example:
|
||||
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.17.0` instead of `0.16.4`). For example:
|
||||
```yaml
|
||||
services:
|
||||
frigate:
|
||||
@ -51,7 +49,6 @@ If you’re running Frigate via Docker (recommended method), follow these steps:
|
||||
```
|
||||
|
||||
3. **Start the Container**:
|
||||
|
||||
- If using Docker Compose:
|
||||
```bash
|
||||
docker compose up -d
|
||||
@ -70,33 +67,30 @@ If you’re running Frigate via Docker (recommended method), follow these steps:
|
||||
- If you’ve customized other settings (e.g., `shm-size`), ensure they’re still appropriate after the update.
|
||||
- Docker will automatically use the updated image when you restart the container, as long as you pulled the correct version; a compact sketch of the full cycle is shown below.
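Assuming your Compose service is named `frigate`:

```bash
docker compose pull frigate   # fetch the image referenced by your compose file
docker compose up -d frigate  # recreate the container on the new version
```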
|
||||
|
||||
## Updating the Home Assistant Addon
|
||||
## Updating the Home Assistant App (formerly Addon)
|
||||
|
||||
For users running Frigate as a Home Assistant Addon:
|
||||
For users running Frigate as a Home Assistant App:
|
||||
|
||||
1. **Check for Updates**:
|
||||
|
||||
- Navigate to **Settings > Add-ons** in Home Assistant.
|
||||
- Find your installed Frigate addon (e.g., "Frigate NVR" or "Frigate NVR (Full Access)").
|
||||
- Navigate to **Settings > Apps** in Home Assistant.
|
||||
- Find your installed Frigate app (e.g., "Frigate NVR" or "Frigate NVR (Full Access)").
|
||||
- If an update is available, you’ll see an "Update" button.
|
||||
|
||||
2. **Update the Addon**:
|
||||
|
||||
- Click the "Update" button next to the Frigate addon.
|
||||
2. **Update the App**:
|
||||
- Click the "Update" button next to the Frigate app.
|
||||
- Wait for the process to complete. Home Assistant will handle downloading and installing the new version.
|
||||
|
||||
3. **Restart the Addon**:
|
||||
|
||||
- After updating, go to the addon’s page and click "Restart" to apply the changes.
|
||||
3. **Restart the App**:
|
||||
- After updating, go to the app’s page and click "Restart" to apply the changes.
|
||||
|
||||
4. **Verify the Update**:
|
||||
- Check the addon logs (under the "Log" tab) to ensure Frigate starts without errors.
|
||||
- Check the app logs (under the "Log" tab) to ensure Frigate starts without errors.
|
||||
- Access the Frigate Web UI to confirm the new version is running.
|
||||
|
||||
### Notes
|
||||
|
||||
- Ensure your `/config/frigate.yml` is compatible with the new version by reviewing the [Release notes](https://github.com/blakeblackshear/frigate/releases).
|
||||
- If using custom hardware (e.g., Coral or GPU), verify that configurations still work, as addon updates don’t modify your hardware settings.
|
||||
- If using custom hardware (e.g., Coral or GPU), verify that configurations still work, as app updates don’t modify your hardware settings.
|
||||
|
||||
## Rolling Back
|
||||
|
||||
@ -105,9 +99,9 @@ If an update causes issues:
|
||||
1. Stop Frigate.
|
||||
2. Restore your backed-up config file and database.
|
||||
3. Revert to the previous image version:
|
||||
- For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.3`) in your `docker run` command.
|
||||
- For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.3`), and re-run `docker compose up -d`.
|
||||
- For Home Assistant: Reinstall the previous addon version manually via the repository if needed and restart the addon.
|
||||
- For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.4`) in your `docker run` command.
|
||||
- For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.4`), and re-run `docker compose up -d`.
|
||||
- For Home Assistant: Restore from the app/addon backup you took before you updated.
|
||||
4. Verify the old version is running again.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
@ -37,18 +37,18 @@ The following diagram adds a lot more detail than the simple view explained befo
|
||||
%%{init: {"themeVariables": {"edgeLabelBackground": "transparent"}}}%%
|
||||
|
||||
flowchart TD
|
||||
RecStore[(Recording\nstore)]
|
||||
SnapStore[(Snapshot\nstore)]
|
||||
RecStore[(Recording<br>store)]
|
||||
SnapStore[(Snapshot<br>store)]
|
||||
|
||||
subgraph Acquisition
|
||||
Cam["Camera"] -->|FFmpeg supported| Stream
|
||||
Cam -->|"Other streaming\nprotocols"| go2rtc
|
||||
Cam -->|"Other streaming<br>protocols"| go2rtc
|
||||
go2rtc("go2rtc") --> Stream
|
||||
Stream[Capture main and\nsub streams] --> |detect stream|Decode(Decode and\ndownscale)
|
||||
Stream[Capture main and<br>sub streams] --> |detect stream|Decode(Decode and<br>downscale)
|
||||
end
|
||||
subgraph Motion
|
||||
Decode --> MotionM(Apply\nmotion masks)
|
||||
MotionM --> MotionD(Motion\ndetection)
|
||||
Decode --> MotionM(Apply<br>motion masks)
|
||||
MotionM --> MotionD(Motion<br>detection)
|
||||
end
|
||||
subgraph Detection
|
||||
MotionD --> |motion regions| ObjectD(Object detection)
|
||||
@ -60,8 +60,8 @@ flowchart TD
|
||||
MotionD --> |motion event|Birdseye
|
||||
ObjectZ --> |object event|Birdseye
|
||||
|
||||
MotionD --> |"video segments\n(retain motion)"|RecStore
|
||||
MotionD --> |"video segments<br>(retain motion)"|RecStore
|
||||
ObjectZ --> |detection clip|RecStore
|
||||
Stream -->|"video segments\n(retain all)"| RecStore
|
||||
Stream -->|"video segments<br>(retain all)"| RecStore
|
||||
ObjectZ --> |detection snapshot|SnapStore
|
||||
```
|
||||
|
||||
@ -11,7 +11,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect
|
||||
|
||||
## Setup a go2rtc stream
|
||||
|
||||
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp.
|
||||
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#module-streams), not just rtsp.
|
||||
|
||||
:::tip
|
||||
|
||||
@ -33,22 +33,19 @@ After adding this to the config, restart Frigate and try to watch the live strea
|
||||
### What if my video doesn't play?
|
||||
|
||||
- Check Logs:
|
||||
|
||||
- Access the go2rtc logs in the Frigate UI under Logs in the sidebar.
|
||||
- If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log.
|
||||
|
||||
- Check go2rtc Web Interface: if you don't see any errors in the logs, try viewing the camera through go2rtc's web interface.
|
||||
|
||||
- Navigate to port 1984 in your browser to access go2rtc's web interface.
|
||||
- If using Frigate through Home Assistant, enable the web interface at port 1984.
|
||||
- If using Docker, forward port 1984 before accessing the web interface.
|
||||
- Click `stream` for the specific camera to see if the camera's stream is being received.
|
||||
|
||||
- Check Video Codec:
|
||||
|
||||
- If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
|
||||
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#codecs-madness) in go2rtc documentation.
|
||||
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
|
||||
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#codecs-madness) in go2rtc documentation.
|
||||
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
@ -58,7 +55,6 @@ After adding this to the config, restart Frigate and try to watch the live strea
|
||||
```
|
||||
|
||||
- Switch to FFmpeg if needed:
|
||||
|
||||
- Some camera streams may need to use the ffmpeg module in go2rtc. This has the downside of slower startup times, but has compatibility with more stream types.
|
||||
|
||||
```yaml
|
||||
@ -101,9 +97,9 @@ After adding this to the config, restart Frigate and try to watch the live strea
|
||||
|
||||
:::warning
|
||||
|
||||
To access the go2rtc stream externally when utilizing the Frigate Add-On (for
|
||||
To access the go2rtc stream externally when utilizing the Frigate App (for
|
||||
instance through VLC), you must first enable the RTSP Restream port.
|
||||
You can do this by visiting the Frigate Add-On configuration page within Home
|
||||
You can do this by visiting the Frigate App configuration page within Home
|
||||
Assistant and revealing the hidden options under the "Show disabled ports"
|
||||
section.
|
||||
|
||||
|
||||
@ -9,7 +9,7 @@ title: Getting started
|
||||
|
||||
If you already have an environment with Linux and Docker installed, you can continue to [Installing Frigate](#installing-frigate) below.
|
||||
|
||||
If you already have Frigate installed through Docker or through a Home Assistant Add-on, you can continue to [Configuring Frigate](#configuring-frigate) below.
|
||||
If you already have Frigate installed through Docker or through a Home Assistant App, you can continue to [Configuring Frigate](#configuring-frigate) below.
|
||||
|
||||
:::
|
||||
|
||||
@ -81,7 +81,7 @@ Now you have a minimal Debian server that requires very little maintenance.
|
||||
|
||||
## Installing Frigate
|
||||
|
||||
This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant Add-on or another way, you can continue to [Configuring Frigate](#configuring-frigate).
|
||||
This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant App or another way, you can continue to [Configuring Frigate](#configuring-frigate).
|
||||
|
||||
### Setup directories
|
||||
|
||||
@ -119,7 +119,7 @@ services:
|
||||
volumes:
|
||||
- ./config:/config
|
||||
- ./storage:/media/frigate
|
||||
- type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
|
||||
- type: tmpfs # 1GB In-memory filesystem for recording segment storage
|
||||
target: /tmp/cache
|
||||
tmpfs:
|
||||
size: 1000000000
|
||||
@ -150,7 +150,7 @@ Here is an example configuration with hardware acceleration configured to work w
|
||||
|
||||
`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes)
|
||||
|
||||
```yaml
|
||||
```yaml {4,5}
|
||||
services:
|
||||
frigate:
|
||||
...
|
||||
@ -168,17 +168,57 @@ cameras:
|
||||
name_of_your_camera:
|
||||
ffmpeg:
|
||||
inputs: ...
|
||||
# highlight-next-line
|
||||
hwaccel_args: preset-vaapi
|
||||
detect: ...
|
||||
```
|
||||
|
||||
### Step 4: Configure detectors
|
||||
|
||||
By default, Frigate will use a single CPU detector. If you have a USB Coral, you will need to add a detectors section to your config.
|
||||
By default, Frigate will use a single CPU detector.
|
||||
|
||||
In many cases, the integrated graphics on Intel CPUs provides sufficient performance for typical Frigate setups. If you have an Intel processor, you can follow the configuration below.
|
||||
|
||||
<details>
|
||||
<summary>Use Intel OpenVINO detector</summary>
|
||||
|
||||
You need to refer to **Configure hardware acceleration** above to enable the container to use the GPU.
|
||||
|
||||
```yaml {3-6,9-15,20-21}
mqtt: ...

detectors: # <---- add detectors
  ov:
    type: openvino # <---- use openvino detector
    device: GPU

# We will use the default MobileNet_v2 model from OpenVINO.
model:
  width: 300
  height: 300
  input_tensor: nhwc
  input_pixel_format: bgr
  path: /openvino-model/ssdlite_mobilenet_v2.xml
  labelmap_path: /openvino-model/coco_91cl_bkgr.txt

cameras:
  name_of_your_camera:
    ffmpeg: ...
    detect:
      enabled: True # <---- turn on detection
      ...
```
|
||||
|
||||
</details>
|
||||
|
||||
If you have a USB Coral, you will need to add a detectors section to your config.
|
||||
|
||||
<details>
|
||||
<summary>Use USB Coral detector</summary>
|
||||
|
||||
`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes)
|
||||
|
||||
```yaml
|
||||
```yaml {4-6}
|
||||
services:
|
||||
frigate:
|
||||
...
|
||||
@ -188,7 +228,7 @@ services:
|
||||
...
|
||||
```
|
||||
|
||||
```yaml
|
||||
```yaml {3-6,11-12}
|
||||
mqtt: ...
|
||||
|
||||
detectors: # <---- add detectors
|
||||
@ -204,6 +244,8 @@ cameras:
|
||||
...
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
More details on available detectors can be found [here](../configuration/object_detectors.md).
|
||||
|
||||
Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they will need to be added according to the [configuration file reference](../configuration/reference.md).
|
||||
@ -222,7 +264,7 @@ Note that motion masks should not be used to mark out areas where you do not wan
|
||||
|
||||
Your configuration should look similar to this now.
|
||||
|
||||
```yaml
|
||||
```yaml {16-18}
|
||||
mqtt:
|
||||
enabled: False
|
||||
|
||||
@ -240,7 +282,10 @@ cameras:
|
||||
- detect
|
||||
motion:
|
||||
mask:
|
||||
- 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432
|
||||
motion_area:
|
||||
friendly_name: "Motion mask"
|
||||
enabled: true
|
||||
coordinates: "0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432"
|
||||
```
|
||||
|
||||
### Step 6: Enable recordings
|
||||
@ -249,7 +294,7 @@ In order to review activity in the Frigate UI, recordings need to be enabled.
|
||||
|
||||
To enable recording video, add the `record` role to a stream and enable it in the config. If record is disabled in the config, it won't be possible to enable it in the UI.
|
||||
|
||||
```yaml
|
||||
```yaml {16-17}
|
||||
mqtt: ...
|
||||
|
||||
detectors: ...
|
||||
|
||||
@ -3,7 +3,7 @@ id: ha_network_storage
|
||||
title: Home Assistant network storage
|
||||
---
|
||||
|
||||
As of Home Assistant 2023.6, Network Mounted Storage is supported for Add-ons.
|
||||
As of Home Assistant 2023.6, Network Mounted Storage is supported for Apps.
|
||||
|
||||
## Setting Up Remote Storage For Frigate
|
||||
|
||||
@ -14,7 +14,7 @@ As of Home Assistant 2023.6, Network Mounted Storage is supported for Add-ons.
|
||||
|
||||
### Initial Setup
|
||||
|
||||
1. Stop the Frigate Add-on
|
||||
1. Stop the Frigate App
|
||||
|
||||
### Move current data
|
||||
|
||||
@ -37,4 +37,4 @@ Keeping the current data is optional, but the data will need to be moved regardl
|
||||
4. Fill out the additional required info for your particular NAS
|
||||
5. Connect
|
||||
6. Move files from `/media/frigate_tmp` to `/media/frigate` if they were kept in previous step
|
||||
7. Start the Frigate Add-on
|
||||
7. Start the Frigate App
|
||||
|
||||
@ -16,7 +16,15 @@ See the [MQTT integration
|
||||
documentation](https://www.home-assistant.io/integrations/mqtt/) for more
|
||||
details.
|
||||
|
||||
In addition, MQTT must be enabled in your Frigate configuration file and Frigate must be connected to the same MQTT server as Home Assistant for many of the entities created by the integration to function.
|
||||
In addition, MQTT must be enabled in your Frigate configuration file and Frigate must be connected to the same MQTT server as Home Assistant for many of the entities created by the integration to function, e.g.:
|
||||
|
||||
```yaml
|
||||
mqtt:
|
||||
enabled: True
|
||||
host: mqtt.server.com # the address of your HA server that's running the MQTT integration
|
||||
user: your_mqtt_broker_username
|
||||
password: your_mqtt_broker_password
|
||||
```
|
||||
|
||||
### Integration installation
|
||||
|
||||
@ -91,12 +99,12 @@ services:
|
||||
...
|
||||
```
|
||||
|
||||
### Home Assistant Add-on
|
||||
### Home Assistant App
|
||||
|
||||
If you are using Home Assistant Add-on, the URL should be one of the following depending on which Add-on variant you are using. Note that if you are using the Proxy Add-on, you should NOT point the integration at the proxy URL. Just enter the same URL used to access Frigate directly from your network.
|
||||
If you are using the Home Assistant App, the URL should be one of the following depending on which App variant you are using. Note that if you are using the Proxy App, you should NOT point the integration at the proxy URL. Just enter the same URL used to access Frigate directly from your network.
|
||||
|
||||
| Add-on Variant | URL |
|
||||
| -------------------------- | ----------------------------------------- |
|
||||
| App Variant | URL |
|
||||
| -------------------------- | -------------------------------------- |
|
||||
| Frigate | `http://ccab4aaf-frigate:5000` |
|
||||
| Frigate (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
|
||||
| Frigate Beta | `http://ccab4aaf-frigate-beta:5000` |
|
||||
|
||||
@ -11,7 +11,8 @@ These are the MQTT messages generated by Frigate. The default topic_prefix is `f
|
||||
|
||||
Designed to be used as an availability topic with Home Assistant. Possible messages are:
|
||||
"online": published when Frigate is running (on startup)
|
||||
"offline": published after Frigate has stopped
|
||||
"stopped": published when Frigate is stopped normally
|
||||
"offline": published automatically by the MQTT broker if Frigate disconnects unexpectedly (via MQTT Will Message)
|
||||
|
||||
### `frigate/restart`
|
||||
|
||||
@ -120,7 +121,7 @@ Message published for each changed tracked object. The first message is publishe
|
||||
|
||||
### `frigate/tracked_object_update`
|
||||
|
||||
Message published for updates to tracked object metadata, for example:
|
||||
Message published for updates to tracked object metadata. All messages include an `id` field which is the tracked object's event ID, and can be used to look up the event via the API or match it to items in the UI.
|
||||
|
||||
#### Generative AI Description Update
|
||||
|
||||
@ -134,12 +135,14 @@ Message published for updates to tracked object metadata, for example:
|
||||
|
||||
#### Face Recognition Update
|
||||
|
||||
Published after each recognition attempt, regardless of whether the score meets `recognition_threshold`. See the [Face Recognition](/configuration/face_recognition) documentation for details on how scoring works.
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "face",
|
||||
"id": "1607123955.475377-mxklsc",
|
||||
"name": "John",
|
||||
"score": 0.95,
|
||||
"name": "John", // best matching person, or null if no match
|
||||
"score": 0.95, // running weighted average across all recognition attempts
|
||||
"camera": "front_door_cam",
|
||||
"timestamp": 1607123958.748393
|
||||
}
|
||||
@ -147,15 +150,18 @@ Message published for updates to tracked object metadata, for example:
|
||||
|
||||
#### License Plate Recognition Update
|
||||
|
||||
Published when a license plate is recognized on a car object. See the [License Plate Recognition](/configuration/license_plate_recognition) documentation for details.
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "lpr",
|
||||
"id": "1607123955.475377-mxklsc",
|
||||
"name": "John's Car",
|
||||
"name": "John's Car", // known name for the plate, or null
|
||||
"plate": "123ABC",
|
||||
"score": 0.95,
|
||||
"camera": "driveway_cam",
|
||||
"timestamp": 1607123958.748393
|
||||
"timestamp": 1607123958.748393,
|
||||
"plate_box": [917, 487, 1029, 529] // box coordinates of the detected license plate in the frame
|
||||
}
|
||||
```
|
||||
|
||||
@ -270,6 +276,14 @@ Same data available at `/api/stats` published at a configurable interval.
|
||||
|
||||
Returns data about each camera, its current features, and if it is detecting motion, objects, etc. Can be triggered by publishing to `frigate/onConnect`
|
||||
|
||||
### `frigate/profile/set`
|
||||
|
||||
Topic to activate or deactivate a [profile](/configuration/profiles). Publish a profile name to activate it, or `none` to deactivate the current profile.
|
||||
|
||||
### `frigate/profile/state`
|
||||
|
||||
Topic with the currently active profile name. Published value is the profile name or `none` if no profile is active. This topic is retained.
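For example, with the default `frigate` topic prefix and the `mosquitto_pub` CLI (the broker address and the profile name `away` are placeholders):

```bash
# Activate the "away" profile
mosquitto_pub -h mqtt.server.com -t frigate/profile/set -m "away"

# Deactivate whatever profile is currently active
mosquitto_pub -h mqtt.server.com -t frigate/profile/set -m "none"
```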
|
||||
|
||||
### `frigate/notifications/set`
|
||||
|
||||
Topic to turn notifications on and off. Expected values are `ON` and `OFF`.
|
||||
@ -425,6 +439,30 @@ Topic to adjust motion contour area for a camera. Expected value is an integer.
|
||||
|
||||
Topic with current motion contour area for a camera. Published value is an integer.
|
||||
|
||||
### `frigate/<camera_name>/motion_mask/<mask_name>/set`
|
||||
|
||||
Topic to turn a specific motion mask for a camera on and off. Expected values are `ON` and `OFF`.
|
||||
|
||||
### `frigate/<camera_name>/motion_mask/<mask_name>/state`
|
||||
|
||||
Topic with current state of a specific motion mask for a camera. Published values are `ON` and `OFF`.
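As a sketch (the camera name `front_door` and mask name `driveway` are placeholders):

```bash
# Temporarily disable the "driveway" motion mask on the front_door camera
mosquitto_pub -h mqtt.server.com -t frigate/front_door/motion_mask/driveway/set -m "OFF"
```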
|
||||
|
||||
### `frigate/<camera_name>/object_mask/<mask_name>/set`
|
||||
|
||||
Topic to turn a specific object mask for a camera on and off. Expected values are `ON` and `OFF`.
|
||||
|
||||
### `frigate/<camera_name>/object_mask/<mask_name>/state`
|
||||
|
||||
Topic with current state of a specific object mask for a camera. Published values are `ON` and `OFF`.
|
||||
|
||||
### `frigate/<camera_name>/zone/<zone_name>/set`
|
||||
|
||||
Topic to turn a specific zone for a camera on and off. Expected values are `ON` and `OFF`.
|
||||
|
||||
### `frigate/<camera_name>/zone/<zone_name>/state`
|
||||
|
||||
Topic with current state of a specific zone for a camera. Published values are `ON` and `OFF`.
|
||||
|
||||
### `frigate/<camera_name>/review_status`
|
||||
|
||||
Topic with current activity status of the camera. Possible values are `NONE`, `DETECTION`, or `ALERT`.
|
||||
|
||||
@ -19,11 +19,11 @@ Once logged in, you can generate an API key for Frigate in Settings.
|
||||
|
||||
### Set your API key
|
||||
|
||||
In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `Frigate+` buttons on the Explore page. Home Assistant Addon users can set it under Settings > Add-ons > Frigate > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch).
|
||||
In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `Frigate+` buttons on the Explore page. Home Assistant App users can set it under Settings > Apps > Frigate > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch).
|
||||
|
||||
:::warning
|
||||
|
||||
You cannot use the `environment_vars` section of your Frigate configuration file to set this environment variable. It must be defined as an environment variable in the docker config or Home Assistant Add-on config.
|
||||
You cannot use the `environment_vars` section of your Frigate configuration file to set this environment variable. It must be defined as an environment variable in the docker config or Home Assistant App config.
|
||||
|
||||
:::
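For Docker Compose users, a minimal sketch (the key value is a placeholder):

```yaml
services:
  frigate:
    environment:
      PLUS_API_KEY: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
```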
|
||||
|
||||
@ -54,6 +54,8 @@ Once you have [requested your first model](../plus/first_model.md) and gotten yo
|
||||
You can either choose the new model from the Frigate+ pane in the Settings page of the Frigate UI, or manually set the model at the root level in your config:
|
||||
|
||||
```yaml
detectors: ...

model:
  path: plus://<your_model_id>
```
|
||||
|
||||
@ -42,3 +42,7 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht
|
||||
## [Scrypted - Frigate bridge plugin](https://github.com/apocaliss92/scrypted-frigate-bridge)
|
||||
|
||||
[Scrypted - Frigate bridge](https://github.com/apocaliss92/scrypted-frigate-bridge) is a plugin that ingests Frigate detections, motion, and video clips into Scrypted, and provides templates to export rebroadcast configurations to Frigate.
|
||||
|
||||
## [Strix](https://github.com/eduard256/Strix)
|
||||
|
||||
[Strix](https://github.com/eduard256/Strix) auto-discovers working stream URLs for IP cameras and generates ready-to-use Frigate configs. It tests thousands of URL patterns against your camera and supports cameras without RTSP or ONVIF, covering 67K+ camera models from 3.6K+ brands.
|
||||
|
||||
@ -25,10 +25,9 @@ Yes. Subscriptions to Frigate+ provide access to the infrastructure used to trai
|
||||
|
||||
### Why can't I submit images to Frigate+?
|
||||
|
||||
If you've configured your API key and the Frigate+ Settings page in the UI shows that the key is active, you need to ensure that you've enabled both snapshots and `clean_copy` snapshots for the cameras you'd like to submit images for. Note that `clean_copy` is enabled by default when snapshots are enabled.
|
||||
If you've configured your API key and the Frigate+ Settings page in the UI shows that the key is active, you need to ensure that snapshots are enabled for the cameras you'd like to submit images for.
|
||||
|
||||
```yaml
snapshots:
  enabled: true
  clean_copy: true
```
|
||||
|
||||
@ -24,6 +24,8 @@ You will receive an email notification when your Frigate+ model is ready.
|
||||
Models available in Frigate+ can be used with a special model path. No other information needs to be configured because it fetches the remaining config from Frigate+ automatically.
|
||||
|
||||
```yaml
detectors: ...

model:
  path: plus://<your_model_id>
```
|
||||
|
||||
@ -16,14 +16,14 @@ There are three model types offered in Frigate+, `mobiledet`, `yolonas`, and `yo
|
||||
Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). You can test model types for compatibility and speed on your hardware by using the base models.
|
||||
|
||||
| Model Type | Description |
|
||||
| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
|
||||
| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
|
||||
| `yolov9` | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX, Apple Silicon, and Rockchip NPUs. |
|
||||
| `yolov9` | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on most hardware. |
|
||||
|
||||
### YOLOv9 Details
|
||||
|
||||
YOLOv9 models are available in `s` and `t` sizes. When requesting a `yolov9` model, you will be prompted to choose a size. If you are unsure what size to choose, you should perform some tests with the base models to find the performance level that suits you. The `s` size is most similar to the current `yolonas` models in terms of inference times and accuracy, and a good place to start is the `320x320` resolution model for `yolov9s`.
|
||||
YOLOv9 models are available in `s`, `t`, and `edgetpu` variants. When requesting a `yolov9` model, you will be prompted to choose a variant. If you want the model to be compatible with a Google Coral, you will need to choose the `edgetpu` variant. If you are unsure what variant to choose, you should perform some tests with the base models to find the performance level that suits you. The `s` size is most similar to the current `yolonas` models in terms of inference times and accuracy, and a good place to start is the `320x320` resolution model for `yolov9s`.
|
||||
|
||||
:::info
|
||||
|
||||
@ -37,23 +37,21 @@ If you have a Hailo device, you will need to specify the hardware you have when
|
||||
|
||||
#### Rockchip (RKNN) Support
|
||||
|
||||
For 0.16, YOLOv9 onnx models will need to be manually converted. First, you will need to configure Frigate to use the model id for your YOLOv9 onnx model so it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it. Automatic conversion is available in 0.17 and later.
|
||||
Rockchip models are automatically converted as of 0.17. For 0.16, YOLOv9 onnx models will need to be manually converted. First, you will need to configure Frigate to use the model id for your YOLOv9 onnx model so it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it.
|
||||
|
||||
## Supported detector types
|
||||
|
||||
Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), Hailo (`hailo8l`), and Rockchip\* (`rknn`) detectors.
|
||||
Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), Hailo (`hailo8l`), and Rockchip (`rknn`) detectors.
|
||||
|
||||
| Hardware | Recommended Detector Type | Recommended Model Type |
|
||||
| -------------------------------------------------------------------------------- | ------------------------- | ---------------------- |
|
||||
| [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended) | `cpu` | `mobiledet` |
|
||||
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` |
|
||||
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `yolov9` |
|
||||
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolov9` |
|
||||
| [NVidia GPU](/configuration/object_detectors#onnx) | `onnx` | `yolov9` |
|
||||
| [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector) | `onnx` | `yolov9` |
|
||||
| [Hailo8/Hailo8L/Hailo8R](/configuration/object_detectors#hailo-8) | `hailo8l` | `yolov9` |
|
||||
| [Rockchip NPU](/configuration/object_detectors#rockchip-platform)\* | `rknn` | `yolov9` |
|
||||
|
||||
_\* Requires manual conversion in 0.16. Automatic conversion available in 0.17 and later._
|
||||
| [Rockchip NPU](/configuration/object_detectors#rockchip-platform) | `rknn` | `yolov9` |
|
||||
|
||||
## Improving your model
|
||||
|
||||
@ -81,7 +79,7 @@ Candidate labels are also available for annotation. These labels don't have enou
|
||||
|
||||
Where possible, these labels are mapped to existing labels during training. For example, any `baby` labels are mapped to `person` until support for new labels is added.
|
||||
|
||||
The candidate labels are: `baby`, `bpost`, `badger`, `possum`, `rodent`, `chicken`, `groundhog`, `boar`, `hedgehog`, `tractor`, `golf cart`, `garbage truck`, `bus`, `sports ball`
|
||||
The candidate labels are: `baby`, `bpost`, `badger`, `possum`, `rodent`, `chicken`, `groundhog`, `boar`, `hedgehog`, `tractor`, `golf cart`, `garbage truck`, `bus`, `sports ball`, `la_poste`, `lawnmower`, `heron`, `rickshaw`, `wombat`, `auspost`, `aramex`, `bobcat`, `mustelid`, `transoflex`, `airplane`, `drone`, `mountain_lion`, `crocodile`, `turkey`, `baby_stroller`, `monkey`, `coyote`, `porcupine`, `parcelforce`, `sheep`, `snake`, `helicopter`, `lizard`, `duck`, `hermes`, `cargus`, `fan_courier`, `sameday`
|
||||
|
||||
Candidate labels are not available for automatic suggestions.
|
||||
|
||||
|
||||
@ -3,17 +3,67 @@ id: dummy-camera
|
||||
title: Analyzing Object Detection
|
||||
---
|
||||
|
||||
When investigating object detection or tracking problems, it can be helpful to replay an exported video as a temporary "dummy" camera. This lets you reproduce issues locally, iterate on configuration (detections, zones, enrichment settings), and capture logs and clips for analysis.
|
||||
Frigate provides several tools for investigating object detection and tracking behavior: reviewing recorded detections through the UI, using the built-in Debug Replay feature, and manually setting up a dummy camera for advanced scenarios.
|
||||
|
||||
## When to use
|
||||
## Reviewing Detections in the UI
|
||||
|
||||
- Replaying an exported clip to reproduce incorrect detections
|
||||
- Testing configuration changes (model settings, trackers, filters) against a known clip
|
||||
- Gathering deterministic logs and recordings for debugging or issue reports
|
||||
Before setting up a replay, you can often diagnose detection issues by reviewing existing recordings directly in the Frigate UI.
|
||||
|
||||
## Example Config
|
||||
### Detail View (History)
|
||||
|
||||
Place the clip you want to replay in a location accessible to Frigate (for example `/media/frigate/` or the repository `debug/` folder when developing). Then add a temporary camera to your `config/config.yml` like this:
|
||||
The **Detail Stream** view in History shows recorded video with detection overlays (bounding boxes, path points, and zone highlights) drawn on top. Select a review item to see its tracked objects and lifecycle events. Clicking a lifecycle event seeks the video to that point so you can see exactly what the detector saw.
|
||||
|
||||
### Tracking Details (Explore)
|
||||
|
||||
In **Explore**, clicking a thumbnail opens the **Tracking Details** pane, which shows the full lifecycle of a single tracked object: every detection, zone entry/exit, and attribute change. The video plays back with the bounding box overlaid, letting you step through the object's entire lifecycle.
|
||||
|
||||
### Annotation Offset
|
||||
|
||||
Both views support an **Annotation Offset** setting (`detect.annotation_offset` in your camera config) that shifts the detection overlay in time relative to the recorded video. This compensates for the timing drift between the `detect` and `record` pipelines.
|
||||
|
||||
These streams use fundamentally different clocks with different buffering and latency characteristics, so the detection data and the recorded video are never perfectly synchronized. The annotation offset shifts the overlay to visually align the bounding boxes with the objects in the recorded video.
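A minimal sketch of setting the offset for one camera (the camera name is a placeholder; the ideal value is found by trial and error):

```yaml
cameras:
  front_door:
    detect:
      annotation_offset: 0 # milliseconds; default is 0, adjust until boxes align
```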
|
||||
|
||||
#### Why the offset varies between clips
|
||||
|
||||
The base timing drift between detect and record is roughly constant for a given camera, so a single offset value works well on average. However, you may notice the alignment is not pixel-perfect in every clip. This is normal and caused by several factors:
|
||||
|
||||
- **Keyframe-constrained seeking**: When the browser seeks to a timestamp, it can only land on the nearest keyframe. Each recording segment has keyframes at different positions relative to the detection timestamps, so the same offset may land slightly early in one clip and slightly late in another.
|
||||
- **Segment boundary trimming**: When a recording range starts mid-segment, the video is trimmed to the requested start point. This trim may not align with a keyframe, shifting the effective reference point.
|
||||
- **Capture-time jitter**: Network buffering, camera buffer flushes, and ffmpeg's own buffering mean the system-clock timestamp and the corresponding recorded frame are not always offset by exactly the same amount.
|
||||
|
||||
The per-clip variation is typically quite low and is mostly an artifact of keyframe granularity rather than a change in the true drift. A "perfect" alignment would require per-frame, keyframe-aware offset compensation, which is not practical. Treat the annotation offset as a best-effort average for your camera.
|
||||
|
||||
## Debug Replay
|
||||
|
||||
Debug Replay lets you re-run Frigate's detection pipeline against a section of recorded video without manually configuring a dummy camera. It automatically extracts the recording, creates a temporary camera with the same detection settings as the original, and loops the clip through the pipeline so you can observe detections in real time.
|
||||
|
||||
### When to use
|
||||
|
||||
- Reproducing a detection or tracking issue from a specific time range
|
||||
- Testing configuration changes (model settings, zones, filters, motion) against a known clip
|
||||
- Gathering logs and debug overlays for a bug report
|
||||
|
||||
:::note
|
||||
|
||||
Only one replay session can be active at a time. If a session is already running, you will be prompted to navigate to it or stop it first.
|
||||
|
||||
:::
|
||||
|
||||
### Variables to consider
|
||||
|
||||
- The replay will not always produce identical results to the original run. Different frames may be selected on replay, which can change detections and tracking.
|
||||
- Motion detection depends on the exact frames used; small frame shifts can change motion regions and therefore what gets passed to the detector.
|
||||
- Object detection is not fully deterministic: models and post-processing can yield slightly different results across runs.
|
||||
|
||||
Treat the replay as a close approximation rather than an exact reproduction. Run multiple loops and examine the debug overlays and logs to understand the behavior.
|
||||
|
||||
## Manual Dummy Camera
|
||||
|
||||
For advanced scenarios — such as testing with a clip from a different source, debugging ffmpeg behavior, or running a clip through a completely custom configuration — you can set up a dummy camera manually.
|
||||
|
||||
### Example config
|
||||
|
||||
Place the clip you want to replay in a location accessible to Frigate (for example `/media/frigate/` or the repository `debug/` folder when developing). Then add a temporary camera to your `config/config.yml`:
|
||||
|
||||
```yaml
|
||||
cameras:
|
||||
@ -32,10 +82,10 @@ cameras:
|
||||
enabled: false
|
||||
```
|
||||
|
||||
- `-re -stream_loop -1` tells `ffmpeg` to play the file in realtime and loop indefinitely, which is useful for long debugging sessions.
|
||||
- `-fflags +genpts` helps generate presentation timestamps when they are missing in the file.
|
||||
- `-re -stream_loop -1` tells ffmpeg to play the file in real time and loop indefinitely.
|
||||
- `-fflags +genpts` generates presentation timestamps when they are missing in the file.
|
||||
|
||||
## Steps
|
||||
### Steps
|
||||
|
||||
1. Export or copy the clip you want to replay to the Frigate host (e.g., `/media/frigate/` or `debug/clips/`). Depending on what you are looking to debug, it is often helpful to add some "pre-capture" time (where the tracked object is not yet visible) to the clip when exporting.
|
||||
2. Add the temporary camera to `config/config.yml` (example above). Use a unique name such as `test` or `replay_camera` so it's easy to remove later.
|
||||
@ -45,16 +95,8 @@ cameras:
|
||||
5. Iterate on camera or enrichment settings (model, fps, zones, filters) and re-check the replay until the behavior is resolved.
|
||||
6. Remove the temporary camera from your config after debugging to avoid spurious telemetry or recordings.
|
||||
|
||||
## Variables to consider in object tracking
### Troubleshooting

- The exported video will not always line up exactly with how it originally ran through Frigate (or even with the last loop). Different frames may be used on replay, which can change detections and tracking.
- Motion detection depends on the frames used; small frame shifts can change motion regions and therefore what gets passed to the detector.
- Object detection is not deterministic: models and post-processing can yield different results across runs, so you may not get identical detections or track IDs every time.

When debugging, treat the replay as a close approximation rather than a byte-for-byte replay. Capture multiple runs, enable recording if helpful, and examine logs and saved event clips to understand variability.
## Troubleshooting

- No video: verify the path is correct and accessible from the Frigate process/container.
- FFmpeg errors: check the log output for ffmpeg-specific flags and adjust `input_args` accordingly for your file/container. You may also need to disable hardware acceleration (`hwaccel_args: ""`) for the dummy camera.
- No detections: confirm the camera `roles` include `detect`, and model/detector configuration is enabled.
- **No video**: verify the file path is correct and accessible from the Frigate process/container.
- **FFmpeg errors**: check the log output and adjust `input_args` for your file format. You may also need to disable hardware acceleration (`hwaccel_args: ""`) for the dummy camera.
- **No detections**: confirm the camera `roles` include `detect` and that the model/detector configuration is enabled.
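If hardware acceleration is the suspect, this is a sketch of disabling it for only the dummy camera; the camera name is illustrative:

```yaml
cameras:
  replay_camera: # illustrative name
    ffmpeg:
      hwaccel_args: "" # overrides any global hwaccel preset for this camera only
```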
@@ -32,7 +32,7 @@ The USB coral can draw up to 900mA and this can be too much for some on-device U
The USB coral has different IDs when it is uninitialized and initialized.

- When running Frigate in a VM, Proxmox lxc, etc. you must ensure both device IDs are mapped.
- When running through the Home Assistant OS you may need to run the Full Access variant of the Frigate Add-on with the _Protection mode_ switch disabled so that the coral can be accessed.
- When running through the Home Assistant OS you may need to run the Full Access variant of the Frigate App with the _Protection mode_ switch disabled so that the coral can be accessed.

### Synology 716+II running DSM 7.2.1-69057 Update 5
@@ -83,6 +83,17 @@ const config: Config = {
      },
    },
    prism: {
      magicComments: [
        {
          className: 'theme-code-block-highlighted-line',
          line: 'highlight-next-line',
          block: {start: 'highlight-start', end: 'highlight-end'},
        },
        {
          className: 'code-block-error-line',
          line: 'highlight-error-line',
        },
      ],
      additionalLanguages: ["bash", "json"],
    },
    languageTabs: [
@@ -28,7 +28,7 @@ const sidebars: SidebarsConfig = {
    {
      type: "link",
      label: "Go2RTC Configuration Reference",
      href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration",
      href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration",
    } as PropSidebarItemLink,
  ],
  Detectors: [
@@ -94,6 +94,7 @@ const sidebars: SidebarsConfig = {
  "Extra Configuration": [
    "configuration/authentication",
    "configuration/notifications",
    "configuration/profiles",
    "configuration/ffmpeg_presets",
    "configuration/pwa",
    "configuration/tls",
@@ -234,3 +234,11 @@
  content: "schema";
  color: var(--ifm-color-secondary-contrast-foreground);
}

.code-block-error-line {
  background-color: #ff000020;
  display: block;
  margin: 0 calc(-1 * var(--ifm-pre-padding));
  padding: 0 var(--ifm-pre-padding);
  border-left: 3px solid #ff000080;
}
docs/static/frigate-api.yaml (vendored, 2751 changes; file diff suppressed because it is too large)
@@ -19,6 +19,7 @@ from fastapi import APIRouter, Body, Path, Request, Response
from fastapi.encoders import jsonable_encoder
from fastapi.params import Depends
from fastapi.responses import JSONResponse, PlainTextResponse, StreamingResponse
from filelock import FileLock, Timeout
from markupsafe import escape
from peewee import SQL, fn, operator
from pydantic import ValidationError
@@ -30,22 +31,35 @@ from frigate.api.auth import (
    require_role,
)
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.request.app_body import (
    AppConfigSetBody,
    MediaSyncBody,
)
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera.updater import (
    CameraConfigUpdateEnum,
    CameraConfigUpdateTopic,
)
from frigate.ffmpeg_presets import FFMPEG_HWACCEL_VAAPI, _gpu_selector
from frigate.jobs.media_sync import (
    get_current_media_sync_job,
    get_media_sync_job_by_id,
    start_media_sync_job,
)
from frigate.models import Event, Timeline
from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.types import JobStatusTypesEnum
from frigate.util.builtin import (
    clean_camera_user_pass,
    deep_merge,
    flatten_config_data,
    load_labels,
    process_config_query_string,
    update_yaml_file_bulk,
)
from frigate.util.config import find_config_file
from frigate.util.config import apply_section_update, find_config_file
from frigate.util.schema import get_config_schema
from frigate.util.services import (
    get_nvidia_driver_info,
    process_logs,
@@ -70,9 +84,7 @@ def is_healthy():

@router.get("/config/schema.json", dependencies=[Depends(allow_public())])
def config_schema(request: Request):
    return Response(
        content=request.app.frigate_config.schema_json(), media_type="application/json"
    )
    return JSONResponse(content=get_config_schema(FrigateConfig))


@router.get(
@@ -118,6 +130,10 @@ def config(request: Request):
    config: dict[str, dict[str, Any]] = config_obj.model_dump(
        mode="json", warnings="none", exclude_none=True
    )
    config["detectors"] = {
        name: detector.model_dump(mode="json", warnings="none", exclude_none=True)
        for name, detector in config_obj.detectors.items()
    }

    # remove the mqtt password
    config["mqtt"].pop("password", None)
@@ -141,6 +157,31 @@ def config(request: Request):
        for zone_name, zone in config_obj.cameras[camera_name].zones.items():
            camera_dict["zones"][zone_name]["color"] = zone.color

        # Re-dump profile overrides with exclude_unset so that only
        # explicitly-set fields are returned (not Pydantic defaults).
        # Without this, the frontend merges defaults (e.g. threshold=30)
        # over the camera's actual base values (e.g. threshold=20).
        if camera.profiles:
            for profile_name, profile_config in camera.profiles.items():
                camera_dict.setdefault("profiles", {})[profile_name] = (
                    profile_config.model_dump(
                        mode="json", warnings="none", exclude_unset=True
                    )
                )

        # When a profile is active, the top-level camera sections contain
        # profile-merged (effective) values. Include the original base
        # configs so the frontend settings can display them separately.
        if (
            config_obj.active_profile is not None
            and request.app.profile_manager is not None
        ):
            base_sections = request.app.profile_manager.get_base_configs_for_api(
                camera_name
            )
            if base_sections:
                camera_dict["base_config"] = base_sections

    # remove go2rtc stream passwords
    go2rtc: dict[str, Any] = config_obj.go2rtc.model_dump(
        mode="json", warnings="none", exclude_none=True
@@ -188,6 +229,68 @@ def config(request: Request):
    return JSONResponse(content=config)
@router.get("/profiles", dependencies=[Depends(allow_any_authenticated())])
|
||||
def get_profiles(request: Request):
|
||||
"""List all available profiles and the currently active profile."""
|
||||
profile_manager = request.app.profile_manager
|
||||
return JSONResponse(content=profile_manager.get_profile_info())
|
||||
|
||||
|
||||
@router.get("/profile/active", dependencies=[Depends(allow_any_authenticated())])
|
||||
def get_active_profile(request: Request):
|
||||
"""Get the currently active profile."""
|
||||
config_obj: FrigateConfig = request.app.frigate_config
|
||||
return JSONResponse(content={"active_profile": config_obj.active_profile})
|
||||
|
||||
|
||||
@router.get("/ffmpeg/presets", dependencies=[Depends(allow_any_authenticated())])
|
||||
def ffmpeg_presets():
|
||||
"""Return available ffmpeg preset keys for config UI usage."""
|
||||
|
||||
# Whitelist based on documented presets in ffmpeg_presets.md
|
||||
hwaccel_presets = [
|
||||
"preset-rpi-64-h264",
|
||||
"preset-rpi-64-h265",
|
||||
"preset-vaapi",
|
||||
"preset-intel-qsv-h264",
|
||||
"preset-intel-qsv-h265",
|
||||
"preset-nvidia",
|
||||
"preset-jetson-h264",
|
||||
"preset-jetson-h265",
|
||||
"preset-rkmpp",
|
||||
]
|
||||
input_presets = [
|
||||
"preset-http-jpeg-generic",
|
||||
"preset-http-mjpeg-generic",
|
||||
"preset-http-reolink",
|
||||
"preset-rtmp-generic",
|
||||
"preset-rtsp-generic",
|
||||
"preset-rtsp-restream",
|
||||
"preset-rtsp-restream-low-latency",
|
||||
"preset-rtsp-udp",
|
||||
"preset-rtsp-blue-iris",
|
||||
]
|
||||
record_output_presets = [
|
||||
"preset-record-generic",
|
||||
"preset-record-generic-audio-copy",
|
||||
"preset-record-generic-audio-aac",
|
||||
"preset-record-mjpeg",
|
||||
"preset-record-jpeg",
|
||||
"preset-record-ubiquiti",
|
||||
]
|
||||
|
||||
return JSONResponse(
|
||||
content={
|
||||
"hwaccel_args": hwaccel_presets,
|
||||
"input_args": input_presets,
|
||||
"output_args": {
|
||||
"record": record_output_presets,
|
||||
"detect": [],
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
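As context for the whitelist above, these keys correspond to the documented `ffmpeg` preset fields in Frigate's camera config; a minimal usage sketch (the camera name is illustrative):

```yaml
cameras:
  front_door: # illustrative name
    ffmpeg:
      hwaccel_args: preset-vaapi
      input_args: preset-rtsp-generic
      output_args:
        record: preset-record-generic-audio-copy
```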
@router.get("/config/raw_paths", dependencies=[Depends(require_role(["admin"]))])
|
||||
def config_raw_paths(request: Request):
|
||||
"""Admin-only endpoint that returns camera paths and go2rtc streams without credential masking."""
|
||||
@ -218,7 +321,7 @@ def config_raw_paths(request: Request):
|
||||
return JSONResponse(content=raw_paths)
|
||||
|
||||
|
||||
@router.get("/config/raw", dependencies=[Depends(allow_any_authenticated())])
|
||||
@router.get("/config/raw", dependencies=[Depends(require_role(["admin"]))])
|
||||
def config_raw():
|
||||
config_file = find_config_file()
|
||||
|
||||
@@ -362,10 +465,104 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
    )


def _config_set_in_memory(request: Request, body: AppConfigSetBody) -> JSONResponse:
    """Apply config changes in-memory only, without writing to YAML.

    Used for temporary config changes like debug replay camera tuning.
    Updates the in-memory Pydantic config and publishes ZMQ updates,
    bypassing YAML parsing entirely.
    """
    try:
        updates = {}
        if body.config_data:
            updates = flatten_config_data(body.config_data)
            updates = {k: ("" if v is None else v) for k, v in updates.items()}

        if not updates:
            return JSONResponse(
                content={"success": False, "message": "No configuration data provided"},
                status_code=400,
            )

        config: FrigateConfig = request.app.frigate_config

        # Group flat key paths into nested per-camera, per-section dicts
        grouped: dict[str, dict[str, dict]] = {}
        for key_path, value in updates.items():
            parts = key_path.split(".")
            if len(parts) < 3 or parts[0] != "cameras":
                continue

            cam, section = parts[1], parts[2]
            grouped.setdefault(cam, {}).setdefault(section, {})

            # Build nested dict from remaining path (e.g. "filters.person.threshold")
            target = grouped[cam][section]
            for part in parts[3:-1]:
                target = target.setdefault(part, {})
            if len(parts) > 3:
                target[parts[-1]] = value
            elif isinstance(value, dict):
                grouped[cam][section] = deep_merge(
                    grouped[cam][section], value, override=True
                )
            else:
                grouped[cam][section] = value

        # Apply each section update
        for cam_name, sections in grouped.items():
            camera_config = config.cameras.get(cam_name)
            if not camera_config:
                return JSONResponse(
                    content={
                        "success": False,
                        "message": f"Camera '{cam_name}' not found",
                    },
                    status_code=400,
                )

            for section_name, update in sections.items():
                err = apply_section_update(camera_config, section_name, update)
                if err is not None:
                    return JSONResponse(
                        content={"success": False, "message": err},
                        status_code=400,
                    )

        # Publish ZMQ updates so processing threads pick up changes
        if body.update_topic and body.update_topic.startswith("config/cameras/"):
            _, _, camera, field = body.update_topic.split("/")
            settings = getattr(config.cameras.get(camera, None), field, None)

            if settings is not None:
                request.app.config_publisher.publish_update(
                    CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera),
                    settings,
                )

        return JSONResponse(
            content={"success": True, "message": "Config applied in-memory"},
            status_code=200,
        )
    except Exception as e:
        logger.error(f"Error applying config in-memory: {e}")
        return JSONResponse(
            content={"success": False, "message": "Error applying config"},
            status_code=500,
        )
@router.put("/config/set", dependencies=[Depends(require_role(["admin"]))])
|
||||
def config_set(request: Request, body: AppConfigSetBody):
|
||||
config_file = find_config_file()
|
||||
|
||||
if body.skip_save:
|
||||
return _config_set_in_memory(request, body)
|
||||
|
||||
lock = FileLock(f"{config_file}.lock", timeout=5)
|
||||
|
||||
try:
|
||||
with lock:
|
||||
with open(config_file, "r") as f:
|
||||
old_raw_config = f.read()
|
||||
|
||||
@ -374,7 +571,9 @@ def config_set(request: Request, body: AppConfigSetBody):
|
||||
|
||||
# process query string parameters (takes precedence over body.config_data)
|
||||
parsed_url = urllib.parse.urlparse(str(request.url))
|
||||
query_string = urllib.parse.parse_qs(parsed_url.query, keep_blank_values=True)
|
||||
query_string = urllib.parse.parse_qs(
|
||||
parsed_url.query, keep_blank_values=True
|
||||
)
|
||||
|
||||
# Filter out empty keys but keep blank values for non-empty keys
|
||||
query_string = {k: v for k, v in query_string.items() if k}
|
||||
@ -383,11 +582,16 @@ def config_set(request: Request, body: AppConfigSetBody):
|
||||
updates = process_config_query_string(query_string)
|
||||
elif body.config_data:
|
||||
updates = flatten_config_data(body.config_data)
|
||||
# Convert None values to empty strings for deletion (e.g., when deleting masks)
|
||||
updates = {k: ("" if v is None else v) for k, v in updates.items()}
|
||||
|
||||
if not updates:
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{"success": False, "message": "No configuration data provided"}
|
||||
{
|
||||
"success": False,
|
||||
"message": "No configuration data provided",
|
||||
}
|
||||
),
|
||||
status_code=400,
|
||||
)
|
||||
@@ -425,11 +629,30 @@ def config_set(request: Request, body: AppConfigSetBody):
            if body.requires_restart == 0 or body.update_topic:
                old_config: FrigateConfig = request.app.frigate_config
                request.app.frigate_config = config
                request.app.genai_manager.update_config(config)

                if request.app.profile_manager is not None:
                    request.app.profile_manager.update_config(config)

                if request.app.stats_emitter is not None:
                    request.app.stats_emitter.config = config

                if body.update_topic:
                    if body.update_topic.startswith("config/cameras/"):
                        _, _, camera, field = body.update_topic.split("/")

                        if camera == "*":
                            # Wildcard: fan out update to all cameras
                            enum_value = CameraConfigUpdateEnum[field]
                            for camera_name in config.cameras:
                                settings = config.get_nested_object(
                                    f"config/cameras/{camera_name}/{field}"
                                )
                                request.app.config_publisher.publish_update(
                                    CameraConfigUpdateTopic(enum_value, camera_name),
                                    settings,
                                )
                        else:
                            if field == "add":
                                settings = config.cameras[camera]
                            elif field == "remove":
@@ -438,7 +661,9 @@ def config_set(request: Request, body: AppConfigSetBody):
                                settings = config.get_nested_object(body.update_topic)

                            request.app.config_publisher.publish_update(
                                CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera),
                                CameraConfigUpdateTopic(
                                    CameraConfigUpdateEnum[field], camera
                                ),
                                settings,
                            )
                    else:
@@ -459,11 +684,29 @@ def config_set(request: Request, body: AppConfigSetBody):
            ),
            status_code=200,
        )
    except Timeout:
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": "Another process is currently updating the config. Please try again in a few seconds.",
                }
            ),
            status_code=503,
        )
@router.get("/vainfo", dependencies=[Depends(allow_any_authenticated())])
|
||||
def vainfo():
|
||||
vainfo = vainfo_hwaccel()
|
||||
# Use LibvaGpuSelector to pick an appropriate libva device (if available)
|
||||
selected_gpu = ""
|
||||
try:
|
||||
selected_gpu = _gpu_selector.get_gpu_arg(FFMPEG_HWACCEL_VAAPI, 0) or ""
|
||||
except Exception:
|
||||
selected_gpu = ""
|
||||
|
||||
# If selected_gpu is empty, pass None to vainfo_hwaccel to run plain `vainfo`.
|
||||
vainfo = vainfo_hwaccel(device_name=selected_gpu or None)
|
||||
return JSONResponse(
|
||||
content={
|
||||
"return_code": vainfo.returncode,
|
||||
@ -598,6 +841,98 @@ def restart():
|
||||
)
|
||||
|
||||
|
||||
@router.post(
    "/media/sync",
    dependencies=[Depends(require_role(["admin"]))],
    summary="Start media sync job",
    description="""Start an asynchronous media sync job to find and (optionally) remove orphaned media files.
Returns 202 with job details when queued, or 409 if a job is already running.""",
)
def sync_media(body: MediaSyncBody = Body(...)):
    """Start async media sync job - remove orphaned files.

    Syncs specified media types: event snapshots, event thumbnails, review thumbnails,
    previews, exports, and/or recordings. Job runs in background; use /media/sync/current
    or /media/sync/status/{job_id} to check status.

    Args:
        body: MediaSyncBody with dry_run flag and media_types list.
            media_types can include: 'all', 'event_snapshots', 'event_thumbnails',
            'review_thumbnails', 'previews', 'exports', 'recordings'

    Returns:
        202 Accepted with job_id, or 409 Conflict if job already running.
    """
    job_id = start_media_sync_job(
        dry_run=body.dry_run, media_types=body.media_types, force=body.force
    )

    if job_id is None:
        # A job is already running
        current = get_current_media_sync_job()
        return JSONResponse(
            content={
                "error": "A media sync job is already running",
                "current_job_id": current.id if current else None,
            },
            status_code=409,
        )

    return JSONResponse(
        content={
            "job": {
                "job_type": "media_sync",
                "status": JobStatusTypesEnum.queued,
                "id": job_id,
            }
        },
        status_code=202,
    )
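To illustrate the endpoint above, here is a sketch of a `POST /media/sync` request body (typically served under `/api`), shown as YAML for readability since JSON is a subset of YAML; the selected media types are examples:

```yaml
# dry run: report orphaned snapshots and previews without deleting anything
dry_run: true
media_types:
  - event_snapshots
  - previews
force: false
```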
@router.get(
    "/media/sync/current",
    dependencies=[Depends(require_role(["admin"]))],
    summary="Get current media sync job",
    description="""Retrieve the current running media sync job, if any. Returns the job details
or null when no job is active.""",
)
def get_media_sync_current():
    """Get the current running media sync job, if any."""
    job = get_current_media_sync_job()

    if job is None:
        return JSONResponse(content={"job": None}, status_code=200)

    return JSONResponse(
        content={"job": job.to_dict()},
        status_code=200,
    )


@router.get(
    "/media/sync/status/{job_id}",
    dependencies=[Depends(require_role(["admin"]))],
    summary="Get media sync job status",
    description="""Get status and results for the specified media sync job id. Returns 200 with
job details including results, or 404 if the job is not found.""",
)
def get_media_sync_status(job_id: str):
    """Get the status of a specific media sync job."""
    job = get_media_sync_job_by_id(job_id)

    if job is None:
        return JSONResponse(
            content={"error": "Job not found"},
            status_code=404,
        )

    return JSONResponse(
        content={"job": job.to_dict()},
        status_code=200,
    )
@router.get("/labels", dependencies=[Depends(allow_any_authenticated())])
|
||||
def get_labels(camera: str = ""):
|
||||
try:
|
||||
@ -647,6 +982,12 @@ def get_sub_labels(split_joined: Optional[int] = None):
|
||||
return JSONResponse(content=sub_labels)
|
||||
|
||||
|
||||
@router.get("/audio_labels", dependencies=[Depends(allow_any_authenticated())])
|
||||
def get_audio_labels():
|
||||
labels = load_labels("/audio-labelmap.txt", prefill=521)
|
||||
return JSONResponse(content=labels)
|
||||
|
||||
|
||||
@router.get("/plus/models", dependencies=[Depends(allow_any_authenticated())])
|
||||
def plusModels(request: Request, filterByCurrentModelDetector: bool = False):
|
||||
if not request.app.frigate_config.plus_api.is_active():
|
||||
@ -732,7 +1073,12 @@ def get_recognized_license_plates(
|
||||
|
||||
|
||||
@router.get("/timeline", dependencies=[Depends(allow_any_authenticated())])
|
||||
def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = None):
|
||||
def timeline(
|
||||
camera: str = "all",
|
||||
limit: int = 100,
|
||||
source_id: Optional[str] = None,
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
clauses = []
|
||||
|
||||
selected_columns = [
|
||||
@ -754,6 +1100,9 @@ def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = N
|
||||
else:
|
||||
clauses.append((Timeline.source_id.in_(source_ids)))
|
||||
|
||||
# Enforce per-camera access control
|
||||
clauses.append((Timeline.camera << allowed_cameras))
|
||||
|
||||
if len(clauses) == 0:
|
||||
clauses.append((True))
|
||||
|
||||
@ -769,7 +1118,10 @@ def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = N
|
||||
|
||||
|
||||
@router.get("/timeline/hourly", dependencies=[Depends(allow_any_authenticated())])
|
||||
def hourly_timeline(params: AppTimelineHourlyQueryParameters = Depends()):
|
||||
def hourly_timeline(
|
||||
params: AppTimelineHourlyQueryParameters = Depends(),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
"""Get hourly summary for timeline."""
|
||||
cameras = params.cameras
|
||||
labels = params.labels
|
||||
@ -787,6 +1139,9 @@ def hourly_timeline(params: AppTimelineHourlyQueryParameters = Depends()):
|
||||
camera_list = cameras.split(",")
|
||||
clauses.append((Timeline.camera << camera_list))
|
||||
|
||||
# Enforce per-camera access control
|
||||
clauses.append((Timeline.camera << allowed_cameras))
|
||||
|
||||
if labels != "all":
|
||||
label_list = labels.split(",")
|
||||
clauses.append((Timeline.data["label"] << label_list))
|
||||
|
||||
@@ -26,12 +26,18 @@ from frigate.api.defs.request.app_body import (
    AppPutRoleBody,
)
from frigate.api.defs.tags import Tags
from frigate.config import AuthConfig, ProxyConfig
from frigate.config import AuthConfig, NetworkingConfig, ProxyConfig
from frigate.const import CONFIG_DIR, JWT_SECRET_ENV_VAR, PASSWORD_HASH_ALGORITHM
from frigate.models import User

logger = logging.getLogger(__name__)

# In-memory cache to track which clients we've logged for an anonymous access event.
# Keyed by a hashed value combining remote address + user-agent. The value is
# an expiration timestamp (float).
FIRST_LOAD_TTL_SECONDS = 60 * 60 * 24 * 7  # 7 days
_first_load_seen: dict[str, float] = {}


def require_admin_by_default():
    """
@@ -41,7 +47,7 @@ def require_admin_by_default():
    endpoints require admin access unless explicitly overridden with
    allow_public(), allow_any_authenticated(), or require_role().

    Port 5000 (internal) always has admin role set by the /auth endpoint,
    Internal port always has admin role set by the /auth endpoint,
    so this check passes automatically for internal requests.

    Certain paths are exempted from the global admin check because they must
@@ -67,7 +73,6 @@ def require_admin_by_default():
        "/stats",
        "/stats/history",
        "/config",
        "/config/raw",
        "/vainfo",
        "/nvinfo",
        "/labels",
@@ -130,7 +135,7 @@ def require_admin_by_default():
            pass

        # For all other paths, require admin role
        # Port 5000 (internal) requests have admin role set automatically
        # Internal port requests have admin role set automatically
        role = request.headers.get("remote-role")
        if role == "admin":
            return
@@ -143,6 +148,17 @@ def require_admin_by_default():
    return admin_checker


def _is_authenticated(request: Request) -> bool:
    """
    Helper to determine if a request is from an authenticated user.

    Returns True if the request has a valid authenticated user (not anonymous).
    Internal port requests are considered anonymous despite having admin role.
    """
    username = request.headers.get("remote-user")
    return username is not None and username != "anonymous"
def allow_public():
    """
    Override dependency to allow unauthenticated access to an endpoint.
@@ -171,6 +187,7 @@ def allow_any_authenticated():

    Rejects:
    - Requests with no remote-user header (did not pass through /auth endpoint)
    - External port requests with anonymous user (auth disabled, no proxy auth)

    Example:
        @router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())])
@@ -179,8 +196,14 @@ def allow_any_authenticated():
    async def auth_checker(request: Request):
        # Ensure a remote-user has been set by the /auth endpoint
        username = request.headers.get("remote-user")
        if username is None:

        # Internal port requests have admin role and should be allowed
        role = request.headers.get("remote-role")

        if role != "admin":
            if username is None or not _is_authenticated(request):
                raise HTTPException(status_code=401, detail="Authentication required")

        return

    return auth_checker
@@ -266,6 +289,15 @@ def get_remote_addr(request: Request):
    return remote_addr or "127.0.0.1"


def _cleanup_first_load_seen() -> None:
    """Cleanup expired entries in the in-memory first-load cache."""
    now = time.time()
    # Build list for removal to avoid mutating dict during iteration
    expired = [k for k, exp in _first_load_seen.items() if exp <= now]
    for k in expired:
        del _first_load_seen[k]
def get_jwt_secret() -> str:
    jwt_secret = None
    # check env var
@@ -570,12 +602,18 @@ def resolve_role(
def auth(request: Request):
    auth_config: AuthConfig = request.app.frigate_config.auth
    proxy_config: ProxyConfig = request.app.frigate_config.proxy
    networking_config: NetworkingConfig = request.app.frigate_config.networking

    success_response = Response("", status_code=202)

    # handle case where internal port is a string with ip:port
    internal_port = networking_config.listen.internal
    if type(internal_port) is str:
        internal_port = int(internal_port.split(":")[-1])

    # dont require auth if the request is on the internal port
    # this header is set by Frigate's nginx proxy, so it cant be spoofed
    if int(request.headers.get("x-server-port", default=0)) == 5000:
    if int(request.headers.get("x-server-port", default=0)) == internal_port:
        success_response.headers["remote-user"] = "anonymous"
        success_response.headers["remote-role"] = "admin"
        return success_response
@@ -720,10 +758,30 @@ def profile(request: Request):
    roles_dict = request.app.frigate_config.auth.roles
    allowed_cameras = User.get_allowed_cameras(role, roles_dict, all_camera_names)

    return JSONResponse(
    response = JSONResponse(
        content={"username": username, "role": role, "allowed_cameras": allowed_cameras}
    )

    if username == "anonymous":
        try:
            remote_addr = get_remote_addr(request)
        except Exception:
            remote_addr = (
                request.client.host if hasattr(request, "client") else "unknown"
            )

        ua = request.headers.get("user-agent", "")
        key_material = f"{remote_addr}|{ua}"
        cache_key = hashlib.sha256(key_material.encode()).hexdigest()

        _cleanup_first_load_seen()
        now = time.time()
        if cache_key not in _first_load_seen:
            _first_load_seen[cache_key] = now + FIRST_LOAD_TTL_SECONDS
            logger.info(f"Anonymous user access from {remote_addr} ua={ua[:200]}")

    return response
@router.get(
    "/logout",
@@ -837,6 +895,7 @@ def create_user(
            User.notification_tokens: [],
        }
    ).execute()
    request.app.config_publisher.publisher.publish("config/auth", None)
    return JSONResponse(content={"username": body.username})


@@ -854,6 +913,7 @@ def delete_user(request: Request, username: str):
        )

    User.delete_by_id(username)
    request.app.config_publisher.publisher.publish("config/auth", None)
    return JSONResponse(content={"success": True})


@@ -973,6 +1033,7 @@ async def update_role(
        )

    User.set_by_id(username, {User.role: body.role})
    request.app.config_publisher.publisher.publish("config/auth", None)
    return JSONResponse(content={"success": True})


@@ -986,7 +1047,16 @@ async def require_camera_access(

    current_user = await get_current_user(request)
    if isinstance(current_user, JSONResponse):
        return current_user
        detail = "Authentication required"
        try:
            error_payload = json.loads(current_user.body)
            detail = (
                error_payload.get("message") or error_payload.get("detail") or detail
            )
        except Exception:
            pass

        raise HTTPException(status_code=current_user.status_code, detail=detail)

    role = current_user["role"]
    all_camera_names = set(request.app.frigate_config.cameras.keys())
@@ -1004,6 +1074,61 @@ async def require_camera_access(
    )
def _get_stream_owner_cameras(request: Request, stream_name: str) -> set[str]:
    owner_cameras: set[str] = set()

    for camera_name, camera in request.app.frigate_config.cameras.items():
        if stream_name == camera_name:
            owner_cameras.add(camera_name)
            continue

        if stream_name in camera.live.streams.values():
            owner_cameras.add(camera_name)

    return owner_cameras


async def require_go2rtc_stream_access(
    stream_name: Optional[str] = None,
    request: Request = None,
):
    """Dependency to enforce go2rtc stream access based on owning camera access."""
    if stream_name is None:
        return

    current_user = await get_current_user(request)
    if isinstance(current_user, JSONResponse):
        detail = "Authentication required"
        try:
            error_payload = json.loads(current_user.body)
            detail = (
                error_payload.get("message") or error_payload.get("detail") or detail
            )
        except Exception:
            pass

        raise HTTPException(status_code=current_user.status_code, detail=detail)

    role = current_user["role"]
    all_camera_names = set(request.app.frigate_config.cameras.keys())
    roles_dict = request.app.frigate_config.auth.roles
    allowed_cameras = User.get_allowed_cameras(role, roles_dict, all_camera_names)

    # Admin or full access bypasses
    if role == "admin" or not roles_dict.get(role):
        return

    owner_cameras = _get_stream_owner_cameras(request, stream_name)

    if owner_cameras & set(allowed_cameras):
        return

    raise HTTPException(
        status_code=403,
        detail=f"Access denied to camera '{stream_name}'. Allowed: {allowed_cameras}",
    )


async def get_allowed_cameras_for_filter(request: Request):
    """Dependency to get allowed_cameras for filtering lists."""
    current_user = await get_current_user(request)
@@ -1,5 +1,6 @@
"""Camera apis."""

import asyncio
import json
import logging
import re
@@ -11,18 +12,27 @@ import httpx
import requests
from fastapi import APIRouter, Depends, Query, Request, Response
from fastapi.responses import JSONResponse
from filelock import FileLock, Timeout
from onvif import ONVIFCamera, ONVIFError
from ruamel.yaml import YAML
from zeep.exceptions import Fault, TransportError
from zeep.transports import AsyncTransport

from frigate.api.auth import (
    allow_any_authenticated,
    require_camera_access,
    require_go2rtc_stream_access,
    require_role,
)
from frigate.api.defs.request.app_body import CameraSetBody
from frigate.api.defs.tags import Tags
from frigate.config.config import FrigateConfig
from frigate.config import FrigateConfig
from frigate.config.camera.updater import (
    CameraConfigUpdateEnum,
    CameraConfigUpdateTopic,
)
from frigate.util.builtin import clean_camera_user_pass
from frigate.util.camera_cleanup import cleanup_camera_db, cleanup_camera_files
from frigate.util.config import find_config_file
from frigate.util.image import run_ffmpeg_snapshot
from frigate.util.services import ffprobe_stream
@@ -71,14 +81,27 @@ def go2rtc_streams():


@router.get(
    "/go2rtc/streams/{camera_name}", dependencies=[Depends(require_camera_access)]
    "/go2rtc/streams/{stream_name}",
    dependencies=[Depends(require_go2rtc_stream_access)],
)
def go2rtc_camera_stream(request: Request, camera_name: str):
def go2rtc_camera_stream(request: Request, stream_name: str):
    r = requests.get(
        f"http://127.0.0.1:1984/api/streams?src={camera_name}&video=all&audio=all&microphone"
        "http://127.0.0.1:1984/api/streams",
        params={
            "src": stream_name,
            "video": "all",
            "audio": "all",
            "microphone": "",
        },
    )
    if not r.ok:
        camera_config = request.app.frigate_config.cameras.get(stream_name)

        if camera_config is None:
            for camera_name, camera in request.app.frigate_config.cameras.items():
                if stream_name in camera.live.streams.values():
                    camera_config = request.app.frigate_config.cameras.get(camera_name)
                    break

        if camera_config and camera_config.enabled:
            logger.error("Failed to fetch streams from go2rtc")
@@ -995,3 +1018,227 @@ async def onvif_probe(
        await onvif_camera.close()
    except Exception as e:
        logger.debug(f"Error closing ONVIF camera session: {e}")
@router.delete(
    "/cameras/{camera_name}",
    dependencies=[Depends(require_role(["admin"]))],
)
async def delete_camera(
    request: Request,
    camera_name: str,
    delete_exports: bool = Query(default=False),
):
    """Delete a camera and all its associated data.

    Removes the camera from config, stops processes, and cleans up
    all database entries and media files.

    Args:
        camera_name: Name of the camera to delete
        delete_exports: Whether to also delete exports for this camera
    """
    frigate_config: FrigateConfig = request.app.frigate_config

    if camera_name not in frigate_config.cameras:
        return JSONResponse(
            content={
                "success": False,
                "message": f"Camera {camera_name} not found",
            },
            status_code=404,
        )

    old_camera_config = frigate_config.cameras[camera_name]
    config_file = find_config_file()
    lock = FileLock(f"{config_file}.lock", timeout=5)

    try:
        with lock:
            with open(config_file, "r") as f:
                old_raw_config = f.read()

            try:
                yaml = YAML()
                yaml.indent(mapping=2, sequence=4, offset=2)

                with open(config_file, "r") as f:
                    data = yaml.load(f)

                # Remove camera from config
                if "cameras" in data and camera_name in data["cameras"]:
                    del data["cameras"][camera_name]

                # Remove camera from auth roles
                auth = data.get("auth", {})
                if auth and "roles" in auth:
                    empty_roles = []
                    for role_name, cameras_list in auth["roles"].items():
                        if (
                            isinstance(cameras_list, list)
                            and camera_name in cameras_list
                        ):
                            cameras_list.remove(camera_name)
                            # Custom roles can't be empty; mark for removal
                            if not cameras_list and role_name not in (
                                "admin",
                                "viewer",
                            ):
                                empty_roles.append(role_name)
                    for role_name in empty_roles:
                        del auth["roles"][role_name]

                with open(config_file, "w") as f:
                    yaml.dump(data, f)

                with open(config_file, "r") as f:
                    new_raw_config = f.read()

                try:
                    config = FrigateConfig.parse(new_raw_config)
                except Exception:
                    with open(config_file, "w") as f:
                        f.write(old_raw_config)
                    logger.exception(
                        "Config error after removing camera %s",
                        camera_name,
                    )
                    return JSONResponse(
                        content={
                            "success": False,
                            "message": "Error parsing config after camera removal",
                        },
                        status_code=400,
                    )
            except Exception as e:
                logger.error(
                    "Error updating config to remove camera %s: %s", camera_name, e
                )
                return JSONResponse(
                    content={
                        "success": False,
                        "message": "Error updating config",
                    },
                    status_code=500,
                )

            # Update runtime config
            request.app.frigate_config = config
            request.app.genai_manager.update_config(config)

            # Publish removal to stop ffmpeg processes and clean up runtime state
            request.app.config_publisher.publish_update(
                CameraConfigUpdateTopic(CameraConfigUpdateEnum.remove, camera_name),
                old_camera_config,
            )

    except Timeout:
        return JSONResponse(
            content={
                "success": False,
                "message": "Another process is currently updating the config",
            },
            status_code=409,
        )

    # Clean up database entries
    counts, export_paths = await asyncio.to_thread(
        cleanup_camera_db, camera_name, delete_exports
    )

    # Clean up media files in background thread
    await asyncio.to_thread(
        cleanup_camera_files, camera_name, export_paths if delete_exports else None
    )

    # Best-effort go2rtc stream removal
    try:
        requests.delete(
            "http://127.0.0.1:1984/api/streams",
            params={"src": camera_name},
            timeout=5,
        )
    except Exception:
        logger.debug("Failed to remove go2rtc stream for %s", camera_name)

    return JSONResponse(
        content={
            "success": True,
            "message": f"Camera {camera_name} has been deleted",
            "cleanup": counts,
        },
        status_code=200,
    )
_SUB_COMMAND_FEATURES = {"motion_mask", "object_mask", "zone"}


@router.put(
    "/camera/{camera_name}/set/{feature}",
    dependencies=[Depends(require_role(["admin"]))],
)
@router.put(
    "/camera/{camera_name}/set/{feature}/{sub_command}",
    dependencies=[Depends(require_role(["admin"]))],
)
def camera_set(
    request: Request,
    camera_name: str,
    feature: str,
    body: CameraSetBody,
    sub_command: str | None = None,
):
    """Set a camera feature state. Use camera_name='*' to target all cameras."""
    dispatcher = request.app.dispatcher
    frigate_config: FrigateConfig = request.app.frigate_config

    if feature == "profile":
        if camera_name != "*":
            return JSONResponse(
                content={
                    "success": False,
                    "message": "Profile feature requires camera_name='*'",
                },
                status_code=400,
            )
        dispatcher._receive("profile/set", body.value)
        return JSONResponse(content={"success": True})

    if feature not in dispatcher._camera_settings_handlers:
        return JSONResponse(
            content={"success": False, "message": f"Unknown feature: {feature}"},
            status_code=400,
        )

    if sub_command and feature not in _SUB_COMMAND_FEATURES:
        return JSONResponse(
            content={
                "success": False,
                "message": f"Feature '{feature}' does not support sub-commands",
            },
            status_code=400,
        )

    if camera_name == "*":
        cameras = list(frigate_config.cameras.keys())
    elif camera_name not in frigate_config.cameras:
        return JSONResponse(
            content={
                "success": False,
                "message": f"Camera '{camera_name}' not found",
            },
            status_code=404,
        )
    else:
        cameras = [camera_name]

    for cam in cameras:
        topic = (
            f"{cam}/{feature}/{sub_command}/set"
            if sub_command
            else f"{cam}/{feature}/set"
        )
        dispatcher._receive(topic, body.value)

    return JSONResponse(content={"success": True})
frigate/api/chat.py (new file, 1220 lines; diff suppressed because it is too large)
frigate/api/debug_replay.py (new file, 176 lines)
@@ -0,0 +1,176 @@
"""Debug replay API endpoints."""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from datetime import datetime
|
||||
|
||||
from fastapi import APIRouter, Depends, Request
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from frigate.api.auth import require_role
|
||||
from frigate.api.defs.tags import Tags
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(tags=[Tags.app])
|
||||
|
||||
|
||||
class DebugReplayStartBody(BaseModel):
|
||||
"""Request body for starting a debug replay session."""
|
||||
|
||||
camera: str = Field(title="Source camera name")
|
||||
start_time: float = Field(title="Start timestamp")
|
||||
end_time: float = Field(title="End timestamp")
|
||||
|
||||
|
||||
class DebugReplayStartResponse(BaseModel):
|
||||
"""Response for starting a debug replay session."""
|
||||
|
||||
success: bool
|
||||
replay_camera: str
|
||||
|
||||
|
||||
class DebugReplayStatusResponse(BaseModel):
|
||||
"""Response for debug replay status."""
|
||||
|
||||
active: bool
|
||||
replay_camera: str | None = None
|
||||
source_camera: str | None = None
|
||||
start_time: float | None = None
|
||||
end_time: float | None = None
|
||||
live_ready: bool = False
|
||||
|
||||
|
||||
class DebugReplayStopResponse(BaseModel):
|
||||
"""Response for stopping a debug replay session."""
|
||||
|
||||
success: bool
|
||||
|
||||
|
||||
@router.post(
|
||||
"/debug_replay/start",
|
||||
response_model=DebugReplayStartResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Start debug replay",
|
||||
description="Start a debug replay session from camera recordings.",
|
||||
)
|
||||
async def start_debug_replay(request: Request, body: DebugReplayStartBody):
|
||||
"""Start a debug replay session."""
|
||||
replay_manager = request.app.replay_manager
|
||||
|
||||
if replay_manager.active:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "A replay session is already active",
|
||||
},
|
||||
status_code=409,
|
||||
)
|
||||
|
||||
try:
|
||||
replay_camera = await asyncio.to_thread(
|
||||
replay_manager.start,
|
||||
source_camera=body.camera,
|
||||
start_ts=body.start_time,
|
||||
end_ts=body.end_time,
|
||||
frigate_config=request.app.frigate_config,
|
||||
config_publisher=request.app.config_publisher,
|
||||
)
|
||||
except ValueError:
|
||||
logger.exception("Invalid parameters for debug replay start request")
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Invalid debug replay request parameters",
|
||||
},
|
||||
status_code=400,
|
||||
)
|
||||
except RuntimeError:
|
||||
logger.exception("Error while starting debug replay session")
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "An internal error occurred while starting debug replay",
|
||||
},
|
||||
status_code=500,
|
||||
)
|
||||
|
||||
return DebugReplayStartResponse(
|
||||
success=True,
|
||||
replay_camera=replay_camera,
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/debug_replay/status",
|
||||
response_model=DebugReplayStatusResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Get debug replay status",
|
||||
description="Get the status of the current debug replay session.",
|
||||
)
|
||||
def get_debug_replay_status(request: Request):
|
||||
"""Get the current replay session status."""
|
||||
replay_manager = request.app.replay_manager
|
||||
|
||||
live_ready = False
|
||||
replay_camera = replay_manager.replay_camera_name
|
||||
|
||||
if replay_manager.active and replay_camera:
|
||||
frame_processor = request.app.detected_frames_processor
|
||||
frame = frame_processor.get_current_frame(replay_camera)
|
||||
|
||||
if frame is not None:
|
||||
frame_time = frame_processor.get_current_frame_time(replay_camera)
|
||||
camera_config = request.app.frigate_config.cameras.get(replay_camera)
|
||||
retry_interval = 10
|
||||
|
||||
if camera_config is not None:
|
||||
retry_interval = float(camera_config.ffmpeg.retry_interval or 10)
|
||||
|
||||
live_ready = datetime.now().timestamp() <= frame_time + retry_interval
|
||||
|
||||
return DebugReplayStatusResponse(
|
||||
active=replay_manager.active,
|
||||
replay_camera=replay_camera,
|
||||
source_camera=replay_manager.source_camera,
|
||||
start_time=replay_manager.start_ts,
|
||||
end_time=replay_manager.end_ts,
|
||||
live_ready=live_ready,
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/debug_replay/stop",
|
||||
response_model=DebugReplayStopResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Stop debug replay",
|
||||
description="Stop the active debug replay session and clean up all artifacts.",
|
||||
)
|
||||
async def stop_debug_replay(request: Request):
|
||||
"""Stop the active replay session."""
|
||||
replay_manager = request.app.replay_manager
|
||||
|
||||
if not replay_manager.active:
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "No active replay session"},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
try:
|
||||
await asyncio.to_thread(
|
||||
replay_manager.stop,
|
||||
frigate_config=request.app.frigate_config,
|
||||
config_publisher=request.app.config_publisher,
|
||||
)
|
||||
except (ValueError, RuntimeError, OSError) as e:
|
||||
logger.error("Error stopping replay: %s", e)
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Failed to stop replay session due to an internal error.",
|
||||
},
|
||||
status_code=500,
|
||||
)
|
||||
|
||||
return DebugReplayStopResponse(success=True)
|
||||
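For reference, a sketch of the `POST /debug_replay/start` request body defined by `DebugReplayStartBody`, shown as YAML for readability (JSON on the wire); the camera name and timestamps are placeholders:

```yaml
camera: front_door        # source camera whose recordings will be replayed
start_time: 1700000000.0  # unix timestamp (float)
end_time: 1700000120.0    # unix timestamp; here a two-minute window
```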
@@ -1,8 +1,7 @@
from enum import Enum
from typing import Optional, Union
from typing import Optional

from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema


class Extension(str, Enum):
@@ -36,7 +35,7 @@ class MediaEventsSnapshotQueryParams(BaseModel):
    bbox: Optional[int] = None
    crop: Optional[int] = None
    height: Optional[int] = None
    quality: Optional[int] = 70
    quality: Optional[int] = None


class MediaMjpegFeedQueryParams(BaseModel):
@@ -48,15 +47,3 @@ class MediaMjpegFeedQueryParams(BaseModel):
    mask: Optional[int] = None
    motion: Optional[int] = None
    regions: Optional[int] = None


class MediaRecordingsSummaryQueryParams(BaseModel):
    timezone: str = "utc"
    cameras: Optional[str] = "all"


class MediaRecordingsAvailabilityQueryParams(BaseModel):
    cameras: str = "all"
    before: Union[float, SkipJsonSchema[None]] = None
    after: Union[float, SkipJsonSchema[None]] = None
    scale: int = 30
frigate/api/defs/query/recordings_query_parameters.py (new file, 21 lines)
@@ -0,0 +1,21 @@
from typing import Optional, Union

from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema


class MediaRecordingsSummaryQueryParams(BaseModel):
    timezone: str = "utc"
    cameras: Optional[str] = "all"


class MediaRecordingsAvailabilityQueryParams(BaseModel):
    cameras: str = "all"
    before: Union[float, SkipJsonSchema[None]] = None
    after: Union[float, SkipJsonSchema[None]] = None
    scale: int = 30


class RecordingsDeleteQueryParams(BaseModel):
    keep: Optional[str] = None
    cameras: Optional[str] = "all"
@@ -1,12 +1,13 @@
from typing import Any, Dict, Optional
from typing import Any, Dict, List, Optional

from pydantic import BaseModel
from pydantic import BaseModel, Field


class AppConfigSetBody(BaseModel):
    requires_restart: int = 1
    update_topic: str | None = None
    config_data: Optional[Dict[str, Any]] = None
    skip_save: bool = False


class AppPutPasswordBody(BaseModel):
@@ -27,3 +28,20 @@ class AppPostLoginBody(BaseModel):

class AppPutRoleBody(BaseModel):
    role: str


class CameraSetBody(BaseModel):
    value: str = Field(..., description="The value to set for the feature")


class MediaSyncBody(BaseModel):
    dry_run: bool = Field(
        default=True, description="If True, only report orphans without deleting them"
    )
    media_types: List[str] = Field(
        default=["all"],
        description="Types of media to sync: 'all', 'event_snapshots', 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings'",
    )
    force: bool = Field(
        default=False, description="If True, bypass safety threshold checks"
    )
frigate/api/defs/request/chat_body.py (new file, 38 lines)
@@ -0,0 +1,38 @@
"""Chat API request models."""
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class ChatMessage(BaseModel):
|
||||
"""A single message in a chat conversation."""
|
||||
|
||||
role: str = Field(
|
||||
description="Message role: 'user', 'assistant', 'system', or 'tool'"
|
||||
)
|
||||
content: str = Field(description="Message content")
|
||||
tool_call_id: Optional[str] = Field(
|
||||
default=None, description="For tool messages, the ID of the tool call"
|
||||
)
|
||||
name: Optional[str] = Field(
|
||||
default=None, description="For tool messages, the tool name"
|
||||
)
|
||||
|
||||
|
||||
class ChatCompletionRequest(BaseModel):
|
||||
"""Request for chat completion with tool calling."""
|
||||
|
||||
messages: list[ChatMessage] = Field(
|
||||
description="List of messages in the conversation"
|
||||
)
|
||||
max_tool_iterations: int = Field(
|
||||
default=5,
|
||||
ge=1,
|
||||
le=10,
|
||||
description="Maximum number of tool call iterations (default: 5)",
|
||||
)
|
||||
stream: bool = Field(
|
||||
default=False,
|
||||
description="If true, stream the final assistant response in the body as newline-delimited JSON.",
|
||||
)
|
||||
@@ -41,6 +41,7 @@ class EventsCreateBody(BaseModel):
    duration: Optional[int] = 30
    include_recording: Optional[bool] = True
    draw: Optional[dict] = {}
    pre_capture: Optional[int] = None


class EventsEndBody(BaseModel):
frigate/api/defs/request/export_case_body.py (new file, 35 lines)
@@ -0,0 +1,35 @@
from typing import Optional

from pydantic import BaseModel, Field


class ExportCaseCreateBody(BaseModel):
    """Request body for creating a new export case."""

    name: str = Field(max_length=100, description="Friendly name of the export case")
    description: Optional[str] = Field(
        default=None, description="Optional description of the export case"
    )


class ExportCaseUpdateBody(BaseModel):
    """Request body for updating an existing export case."""

    name: Optional[str] = Field(
        default=None,
        max_length=100,
        description="Updated friendly name of the export case",
    )
    description: Optional[str] = Field(
        default=None, description="Updated description of the export case"
    )


class ExportCaseAssignBody(BaseModel):
    """Request body for assigning or unassigning an export to a case."""

    export_case_id: Optional[str] = Field(
        default=None,
        max_length=30,
        description="Case ID to assign to the export, or null to unassign",
    )
@@ -3,18 +3,47 @@ from typing import Optional, Union

from pydantic import BaseModel, Field
from pydantic.json_schema import SkipJsonSchema

from frigate.record.export import (
    PlaybackFactorEnum,
    PlaybackSourceEnum,
)
from frigate.record.export import PlaybackSourceEnum


class ExportRecordingsBody(BaseModel):
    playback: PlaybackFactorEnum = Field(
        default=PlaybackFactorEnum.realtime, title="Playback factor"
    )
    source: PlaybackSourceEnum = Field(
        default=PlaybackSourceEnum.recordings, title="Playback source"
    )
    name: Optional[str] = Field(title="Friendly name", default=None, max_length=256)
    image_path: Union[str, SkipJsonSchema[None]] = None
    export_case_id: Optional[str] = Field(
        default=None,
        title="Export case ID",
        max_length=30,
        description="ID of the export case to assign this export to",
    )


class ExportRecordingsCustomBody(BaseModel):
    source: PlaybackSourceEnum = Field(
        default=PlaybackSourceEnum.recordings, title="Playback source"
    )
    name: str = Field(title="Friendly name", default=None, max_length=256)
    image_path: Union[str, SkipJsonSchema[None]] = None
    export_case_id: Optional[str] = Field(
        default=None,
        title="Export case ID",
        max_length=30,
        description="ID of the export case to assign this export to",
    )
    ffmpeg_input_args: Optional[str] = Field(
        default=None,
        title="FFmpeg input arguments",
        description="Custom FFmpeg input arguments. If not provided, defaults to timelapse input args.",
    )
    ffmpeg_output_args: Optional[str] = Field(
        default=None,
        title="FFmpeg output arguments",
        description="Custom FFmpeg output arguments. If not provided, defaults to timelapse output args.",
    )
    cpu_fallback: bool = Field(
        default=False,
        title="CPU Fallback",
        description="If true, retry export without hardware acceleration if the initial export fails.",
    )
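A request body matching ExportRecordingsBody could look like this sketch (values invented; the enum values mirror the defaults above):

    # Hypothetical JSON body for the standard export endpoint, shown as a Python dict.
    body = {
        "playback": "realtime",        # PlaybackFactorEnum
        "source": "recordings",        # PlaybackSourceEnum
        "name": "Driveway overnight",
        "export_case_id": "abc123def456",
    }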
frigate/api/defs/response/chat_response.py (new file, 54 lines)
@@ -0,0 +1,54 @@
"""Chat API response models."""
|
||||
|
||||
from typing import Any, Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class ToolCallInvocation(BaseModel):
|
||||
"""A tool call requested by the LLM (before execution)."""
|
||||
|
||||
id: str = Field(description="Unique identifier for this tool call")
|
||||
name: str = Field(description="Tool name to call")
|
||||
arguments: dict[str, Any] = Field(description="Arguments for the tool call")
|
||||
|
||||
|
||||
class ChatMessageResponse(BaseModel):
|
||||
"""A message in the chat response."""
|
||||
|
||||
role: str = Field(description="Message role")
|
||||
content: Optional[str] = Field(
|
||||
default=None, description="Message content (None if tool calls present)"
|
||||
)
|
||||
tool_calls: Optional[list[ToolCallInvocation]] = Field(
|
||||
default=None, description="Tool calls if LLM wants to call tools"
|
||||
)
|
||||
|
||||
|
||||
class ToolCall(BaseModel):
|
||||
"""A tool that was executed during the completion, with its response."""
|
||||
|
||||
name: str = Field(description="Tool name that was called")
|
||||
arguments: dict[str, Any] = Field(
|
||||
default_factory=dict, description="Arguments passed to the tool"
|
||||
)
|
||||
response: str = Field(
|
||||
default="",
|
||||
description="The response or result returned from the tool execution",
|
||||
)
|
||||
|
||||
|
||||
class ChatCompletionResponse(BaseModel):
|
||||
"""Response from chat completion."""
|
||||
|
||||
message: ChatMessageResponse = Field(description="The assistant's message")
|
||||
finish_reason: str = Field(
|
||||
description="Reason generation stopped: 'stop', 'tool_calls', 'length', 'error'"
|
||||
)
|
||||
tool_iterations: int = Field(
|
||||
default=0, description="Number of tool call iterations performed"
|
||||
)
|
||||
tool_calls: list[ToolCall] = Field(
|
||||
default_factory=list,
|
||||
description="List of tool calls that were executed during this completion",
|
||||
)
|
||||
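A minimal sketch constructing the completed-response model above, assuming pydantic v2 (which the BaseModel/Field usage suggests); all values are illustrative:

    example = ChatCompletionResponse(
        message=ChatMessageResponse(role="assistant", content="One person was seen at 3:12pm."),
        finish_reason="stop",
        tool_iterations=1,
        tool_calls=[
            ToolCall(name="search_events", arguments={"camera": "front"}, response="1 event")
        ],
    )
    print(example.model_dump_json(indent=2))  # pydantic v2 serialization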
frigate/api/defs/response/export_case_response.py (new file, 22 lines)
@@ -0,0 +1,22 @@
from typing import List, Optional

from pydantic import BaseModel, Field


class ExportCaseModel(BaseModel):
    """Model representing a single export case."""

    id: str = Field(description="Unique identifier for the export case")
    name: str = Field(description="Friendly name of the export case")
    description: Optional[str] = Field(
        default=None, description="Optional description of the export case"
    )
    created_at: float = Field(
        description="Unix timestamp when the export case was created"
    )
    updated_at: float = Field(
        description="Unix timestamp when the export case was last updated"
    )


ExportCasesResponse = List[ExportCaseModel]
@@ -15,6 +15,9 @@ class ExportModel(BaseModel):
    in_progress: bool = Field(
        description="Whether the export is currently being processed"
    )
    export_case_id: Optional[str] = Field(
        default=None, description="ID of the export case this export belongs to"
    )


class StartExportResponse(BaseModel):
@@ -3,13 +3,16 @@ from enum import Enum


class Tags(Enum):
    app = "App"
    auth = "Auth"
    camera = "Camera"
    preview = "Preview"
    chat = "Chat"
    events = "Events"
    export = "Export"
    classification = "Classification"
    logs = "Logs"
    media = "Media"
    motion_search = "Motion Search"
    notifications = "Notifications"
    preview = "Preview"
    recordings = "Recordings"
    review = "Review"
    export = "Export"
    events = "Events"
    classification = "Classification"
    auth = "Auth"
@@ -1,5 +1,6 @@
"""Event apis."""

import asyncio
import base64
import datetime
import json
@@ -12,7 +13,6 @@ from pathlib import Path
from typing import List
from urllib.parse import unquote

import cv2
import numpy as np
from fastapi import APIRouter, Request
from fastapi.params import Depends
@@ -61,7 +61,7 @@ from frigate.const import CLIPS_DIR, TRIGGER_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event, ReviewSegment, Timeline, Trigger
from frigate.track.object_processing import TrackedObject
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.file import get_event_thumbnail_bytes, load_event_snapshot_image
from frigate.util.time import get_dst_transitions, get_tz_modifiers

logger = logging.getLogger(__name__)
@@ -1081,18 +1081,18 @@ async def send_to_plus(request: Request, event_id: str, body: SubmitPlusBody = N
            content=({"success": False, "message": message}), status_code=400
        )

    # load clean.webp or clean.png (legacy)
    try:
        filename_webp = f"{event.camera}-{event.id}-clean.webp"
        filename_png = f"{event.camera}-{event.id}-clean.png"
        image, is_clean_snapshot = load_event_snapshot_image(event, clean_only=True)
    except Exception:
        logger.error(f"Unable to load clean snapshot for event: {event.id}")
        return JSONResponse(
            content=(
                {"success": False, "message": "Unable to load clean snapshot for event"}
            ),
            status_code=400,
        )

        image_path = None
        if os.path.exists(os.path.join(CLIPS_DIR, filename_webp)):
            image_path = os.path.join(CLIPS_DIR, filename_webp)
        elif os.path.exists(os.path.join(CLIPS_DIR, filename_png)):
            image_path = os.path.join(CLIPS_DIR, filename_png)

        if image_path is None:
    if not is_clean_snapshot or image is None or image.size == 0:
        logger.error(f"Unable to find clean snapshot for event: {event.id}")
        return JSONResponse(
            content=(
@@ -1104,27 +1104,10 @@ async def send_to_plus(request: Request, event_id: str, body: SubmitPlusBody = N
            status_code=400,
        )

        image = cv2.imread(image_path)
    except Exception:
        logger.error(f"Unable to load clean snapshot for event: {event.id}")
        return JSONResponse(
            content=(
                {"success": False, "message": "Unable to load clean snapshot for event"}
            ),
            status_code=400,
        )

    if image is None or image.size == 0:
        logger.error(f"Unable to load clean snapshot for event: {event.id}")
        return JSONResponse(
            content=(
                {"success": False, "message": "Unable to load clean snapshot for event"}
            ),
            status_code=400,
        )

    try:
        plus_id = request.app.frigate_config.plus_api.upload_image(image, event.camera)
        plus_id = await asyncio.to_thread(
            request.app.frigate_config.plus_api.upload_image, image, event.camera
        )
    except Exception as ex:
        logger.exception(ex)
        return JSONResponse(
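The change above moves the blocking Plus upload onto a worker thread so the event loop stays responsive. A generic sketch of the same technique (requires Python 3.9+ for asyncio.to_thread; names are invented):

    import asyncio
    import time

    def blocking_call(x: int) -> int:
        time.sleep(1)  # stands in for a slow network call
        return x * 2

    async def handler() -> int:
        # run the blocking function in the default thread pool
        return await asyncio.to_thread(blocking_call, 21)

    print(asyncio.run(handler()))  # 42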
@@ -1140,7 +1123,8 @@ async def send_to_plus(request: Request, event_id: str, body: SubmitPlusBody = N
        box = event.data["box"]

    try:
        request.app.frigate_config.plus_api.add_annotation(
        await asyncio.to_thread(
            request.app.frigate_config.plus_api.add_annotation,
            event.plus_id,
            box,
            event.label,
@@ -1230,7 +1214,8 @@ async def false_positive(request: Request, event_id: str):
        )

    try:
        request.app.frigate_config.plus_api.add_false_positive(
        await asyncio.to_thread(
            request.app.frigate_config.plus_api.add_false_positive,
            event.plus_id,
            region,
            box,
@@ -1782,6 +1767,7 @@ def create_event(
            body.duration,
            "api",
            body.draw,
            body.pre_capture,
        ),
        EventMetadataTypeEnum.manual_event_create.value,
    )
@@ -4,10 +4,10 @@ import logging
import random
import string
from pathlib import Path
from typing import List
from typing import List, Optional

import psutil
from fastapi import APIRouter, Depends, Request
from fastapi import APIRouter, Depends, Query, Request
from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filepath
from peewee import DoesNotExist
@@ -19,8 +19,20 @@ from frigate.api.auth import (
    require_camera_access,
    require_role,
)
from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
from frigate.api.defs.request.export_case_body import (
    ExportCaseAssignBody,
    ExportCaseCreateBody,
    ExportCaseUpdateBody,
)
from frigate.api.defs.request.export_recordings_body import (
    ExportRecordingsBody,
    ExportRecordingsCustomBody,
)
from frigate.api.defs.request.export_rename_body import ExportRenameBody
from frigate.api.defs.response.export_case_response import (
    ExportCaseModel,
    ExportCasesResponse,
)
from frigate.api.defs.response.export_response import (
    ExportModel,
    ExportsResponse,
@@ -29,9 +41,9 @@ from frigate.api.defs.response.export_response import (
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.const import CLIPS_DIR, EXPORT_DIR
from frigate.models import Export, Previews, Recordings
from frigate.models import Export, ExportCase, Previews, Recordings
from frigate.record.export import (
    PlaybackFactorEnum,
    DEFAULT_TIME_LAPSE_FFMPEG_ARGS,
    PlaybackSourceEnum,
    RecordingExporter,
)
@@ -52,17 +64,182 @@ router = APIRouter(tags=[Tags.export])
)
def get_exports(
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
    export_case_id: Optional[str] = None,
    cameras: Optional[str] = Query(default="all"),
    start_date: Optional[float] = None,
    end_date: Optional[float] = None,
):
    exports = (
        Export.select()
        .where(Export.camera << allowed_cameras)
        .order_by(Export.date.desc())
        .dicts()
        .iterator()
    )
    query = Export.select().where(Export.camera << allowed_cameras)

    if export_case_id is not None:
        if export_case_id == "unassigned":
            query = query.where(Export.export_case.is_null(True))
        else:
            query = query.where(Export.export_case == export_case_id)

    if cameras and cameras != "all":
        requested = set(cameras.split(","))
        filtered_cameras = list(requested.intersection(allowed_cameras))
        if not filtered_cameras:
            return JSONResponse(content=[])
        query = query.where(Export.camera << filtered_cameras)

    if start_date is not None:
        query = query.where(Export.date >= start_date)

    if end_date is not None:
        query = query.where(Export.date <= end_date)

    exports = query.order_by(Export.date.desc()).dicts().iterator()
    return JSONResponse(content=[e for e in exports])
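The filters above compose onto a single peewee query; a sketch of calling the endpoint (the base URL and /api prefix are assumptions, not shown in this diff):

    import requests

    base = "http://frigate:5000/api/exports"
    # exports that belong to no case
    requests.get(base, params={"export_case_id": "unassigned"})
    # exports for specific cameras since a given timestamp
    requests.get(base, params={"cameras": "front,back", "start_date": 1735689600})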

@router.get(
    "/cases",
    response_model=ExportCasesResponse,
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get export cases",
    description="Gets all export cases from the database.",
)
def get_export_cases():
    cases = (
        ExportCase.select().order_by(ExportCase.created_at.desc()).dicts().iterator()
    )
    return JSONResponse(content=[c for c in cases])


@router.post(
    "/cases",
    response_model=ExportCaseModel,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Create export case",
    description="Creates a new export case.",
)
def create_export_case(body: ExportCaseCreateBody):
    case = ExportCase.create(
        id="".join(random.choices(string.ascii_lowercase + string.digits, k=12)),
        name=body.name,
        description=body.description,
        created_at=Path().stat().st_mtime,
        updated_at=Path().stat().st_mtime,
    )
    return JSONResponse(content=model_to_dict(case))


@router.get(
    "/cases/{case_id}",
    response_model=ExportCaseModel,
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get a single export case",
    description="Gets a specific export case by ID.",
)
def get_export_case(case_id: str):
    try:
        case = ExportCase.get(ExportCase.id == case_id)
        return JSONResponse(content=model_to_dict(case))
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export case not found"},
            status_code=404,
        )


@router.patch(
    "/cases/{case_id}",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Update export case",
    description="Updates an existing export case.",
)
def update_export_case(case_id: str, body: ExportCaseUpdateBody):
    try:
        case = ExportCase.get(ExportCase.id == case_id)
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export case not found"},
            status_code=404,
        )

    if body.name is not None:
        case.name = body.name
    if body.description is not None:
        case.description = body.description

    case.save()

    return JSONResponse(
        content={"success": True, "message": "Successfully updated export case."}
    )


@router.delete(
    "/cases/{case_id}",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Delete export case",
    description="""Deletes an export case.\n    Exports that reference this case will have their export_case set to null.\n    """,
)
def delete_export_case(case_id: str):
    try:
        case = ExportCase.get(ExportCase.id == case_id)
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export case not found"},
            status_code=404,
        )

    # Unassign exports from this case but keep the exports themselves
    Export.update(export_case=None).where(Export.export_case == case).execute()

    case.delete_instance()

    return JSONResponse(
        content={"success": True, "message": "Successfully deleted export case."}
    )


@router.patch(
    "/export/{export_id}/case",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Assign export to case",
    description=(
        "Assigns an export to a case, or unassigns it if export_case_id is null."
    ),
)
async def assign_export_case(
    export_id: str,
    body: ExportCaseAssignBody,
    request: Request,
):
    try:
        export: Export = Export.get(Export.id == export_id)
        await require_camera_access(export.camera, request=request)
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export not found."},
            status_code=404,
        )

    if body.export_case_id is not None:
        try:
            ExportCase.get(ExportCase.id == body.export_case_id)
        except DoesNotExist:
            return JSONResponse(
                content={"success": False, "message": "Export case not found."},
                status_code=404,
            )
        export.export_case = body.export_case_id
    else:
        export.export_case = None

    export.save()

    return JSONResponse(
        content={"success": True, "message": "Successfully updated export case."}
    )


@router.post(
    "/export/{camera_name}/start/{start_time}/end/{end_time}",
    response_model=StartExportResponse,
@@ -88,11 +265,20 @@ def export_recording(
            status_code=404,
        )

    playback_factor = body.playback
    playback_source = body.source
    friendly_name = body.name
    existing_image = sanitize_filepath(body.image_path) if body.image_path else None

    export_case_id = body.export_case_id
    if export_case_id is not None:
        try:
            ExportCase.get(ExportCase.id == export_case_id)
        except DoesNotExist:
            return JSONResponse(
                content={"success": False, "message": "Export case not found"},
                status_code=404,
            )

    # Ensure that existing_image is a valid path
    if existing_image and not existing_image.startswith(CLIPS_DIR):
        return JSONResponse(
@@ -151,16 +337,12 @@ def export_recording(
        existing_image,
        int(start_time),
        int(end_time),
        (
            PlaybackFactorEnum[playback_factor]
            if playback_factor in PlaybackFactorEnum.__members__.values()
            else PlaybackFactorEnum.realtime
        ),
        (
            PlaybackSourceEnum[playback_source]
            if playback_source in PlaybackSourceEnum.__members__.values()
            else PlaybackSourceEnum.recordings
        ),
        export_case_id,
    )
    exporter.start()
    return JSONResponse(
@@ -271,6 +453,138 @@ async def export_delete(event_id: str, request: Request):
    )


@router.post(
    "/export/custom/{camera_name}/start/{start_time}/end/{end_time}",
    response_model=StartExportResponse,
    dependencies=[Depends(require_camera_access)],
    summary="Start custom recording export",
    description="""Starts an export of a recording for the specified time range using custom FFmpeg arguments.
    The export can be from recordings or preview footage. Returns the export ID if
    successful, or an error message if the camera is invalid or no recordings/previews
    are found for the time range. If ffmpeg_input_args and ffmpeg_output_args are not provided,
    defaults to timelapse export settings.""",
)
def export_recording_custom(
    request: Request,
    camera_name: str,
    start_time: float,
    end_time: float,
    body: ExportRecordingsCustomBody,
):
    if not camera_name or not request.app.frigate_config.cameras.get(camera_name):
        return JSONResponse(
            content=(
                {"success": False, "message": f"{camera_name} is not a valid camera."}
            ),
            status_code=404,
        )

    playback_source = body.source
    friendly_name = body.name
    existing_image = sanitize_filepath(body.image_path) if body.image_path else None
    ffmpeg_input_args = body.ffmpeg_input_args
    ffmpeg_output_args = body.ffmpeg_output_args
    cpu_fallback = body.cpu_fallback

    export_case_id = body.export_case_id
    if export_case_id is not None:
        try:
            ExportCase.get(ExportCase.id == export_case_id)
        except DoesNotExist:
            return JSONResponse(
                content={"success": False, "message": "Export case not found"},
                status_code=404,
            )

    # Ensure that existing_image is a valid path
    if existing_image and not existing_image.startswith(CLIPS_DIR):
        return JSONResponse(
            content=({"success": False, "message": "Invalid image path"}),
            status_code=400,
        )

    if playback_source == "recordings":
        recordings_count = (
            Recordings.select()
            .where(
                Recordings.start_time.between(start_time, end_time)
                | Recordings.end_time.between(start_time, end_time)
                | (
                    (start_time > Recordings.start_time)
                    & (end_time < Recordings.end_time)
                )
            )
            .where(Recordings.camera == camera_name)
            .count()
        )

        if recordings_count <= 0:
            return JSONResponse(
                content=(
                    {"success": False, "message": "No recordings found for time range"}
                ),
                status_code=400,
            )
    else:
        previews_count = (
            Previews.select()
            .where(
                Previews.start_time.between(start_time, end_time)
                | Previews.end_time.between(start_time, end_time)
                | ((start_time > Previews.start_time) & (end_time < Previews.end_time))
            )
            .where(Previews.camera == camera_name)
            .count()
        )

        if not is_current_hour(start_time) and previews_count <= 0:
            return JSONResponse(
                content=(
                    {"success": False, "message": "No previews found for time range"}
                ),
                status_code=400,
            )

    export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}"

    # Set default values if not provided (timelapse defaults)
    if ffmpeg_input_args is None:
        ffmpeg_input_args = ""

    if ffmpeg_output_args is None:
        ffmpeg_output_args = DEFAULT_TIME_LAPSE_FFMPEG_ARGS

    exporter = RecordingExporter(
        request.app.frigate_config,
        export_id,
        camera_name,
        friendly_name,
        existing_image,
        int(start_time),
        int(end_time),
        (
            PlaybackSourceEnum[playback_source]
            if playback_source in PlaybackSourceEnum.__members__.values()
            else PlaybackSourceEnum.recordings
        ),
        export_case_id,
        ffmpeg_input_args,
        ffmpeg_output_args,
        cpu_fallback,
    )
    exporter.start()
    return JSONResponse(
        content=(
            {
                "success": True,
                "message": "Starting export of recording.",
                "export_id": export_id,
            }
        ),
        status_code=200,
    )
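A sketch of invoking the custom export endpoint above (base URL assumed; the ffmpeg arguments shown are illustrative, not Frigate defaults):

    import requests

    requests.post(
        "http://frigate:5000/api/export/custom/front/start/1735689600/end/1735693200",
        json={
            "source": "recordings",
            "name": "Morning timelapse",
            "ffmpeg_output_args": "-vf setpts=0.04*PTS -r 30 -an",
            "cpu_fallback": True,
        },
    )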

@router.get(
    "/exports/{export_id}",
    response_model=ExportModel,

@@ -16,21 +16,29 @@ from frigate.api import app as main_app
from frigate.api import (
    auth,
    camera,
    chat,
    classification,
    debug_replay,
    event,
    export,
    media,
    motion_search,
    notification,
    preview,
    record,
    review,
)
from frigate.api.auth import get_jwt_secret, limiter, require_admin_by_default
from frigate.comms.dispatcher import Dispatcher
from frigate.comms.event_metadata_updater import (
    EventMetadataPublisher,
)
from frigate.config import FrigateConfig
from frigate.config.camera.updater import CameraConfigUpdatePublisher
from frigate.config.profile_manager import ProfileManager
from frigate.debug_replay import DebugReplayManager
from frigate.embeddings import EmbeddingsContext
from frigate.genai import GenAIClientManager
from frigate.ptz.onvif import OnvifController
from frigate.stats.emitter import StatsEmitter
from frigate.storage import StorageMaintainer
@@ -62,6 +70,9 @@ def create_fastapi_app(
    stats_emitter: StatsEmitter,
    event_metadata_updater: EventMetadataPublisher,
    config_publisher: CameraConfigUpdatePublisher,
    replay_manager: DebugReplayManager,
    dispatcher: Optional[Dispatcher] = None,
    profile_manager: Optional[ProfileManager] = None,
    enforce_default_admin: bool = True,
):
    logger.info("Starting FastAPI app")
@@ -120,6 +131,7 @@ def create_fastapi_app(
    # Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters
    app.include_router(auth.router)
    app.include_router(camera.router)
    app.include_router(chat.router)
    app.include_router(classification.router)
    app.include_router(review.router)
    app.include_router(main_app.router)
@@ -128,8 +140,12 @@ def create_fastapi_app(
    app.include_router(export.router)
    app.include_router(event.router)
    app.include_router(media.router)
    app.include_router(motion_search.router)
    app.include_router(record.router)
    app.include_router(debug_replay.router)
    # App Properties
    app.frigate_config = frigate_config
    app.genai_manager = GenAIClientManager(frigate_config)
    app.embeddings = embeddings
    app.detected_frames_processor = detected_frames_processor
    app.storage_maintainer = storage_maintainer
@@ -138,6 +154,9 @@ def create_fastapi_app(
    app.stats_emitter = stats_emitter
    app.event_metadata_updater = event_metadata_updater
    app.config_publisher = config_publisher
    app.replay_manager = replay_manager
    app.dispatcher = dispatcher
    app.profile_manager = profile_manager

    if frigate_config.auth.enabled:
        secret = get_jwt_secret()
@@ -8,9 +8,8 @@ import os
import subprocess as sp
import time
from datetime import datetime, timedelta, timezone
from functools import reduce
from pathlib import Path as FilePath
from typing import Any, List
from typing import Any
from urllib.parse import unquote

import cv2
@@ -19,41 +18,45 @@ import pytz
from fastapi import APIRouter, Depends, Path, Query, Request, Response
from fastapi.responses import FileResponse, JSONResponse, StreamingResponse
from pathvalidate import sanitize_filename
from peewee import DoesNotExist, fn, operator
from peewee import DoesNotExist, fn
from tzlocal import get_localzone_name

from frigate.api.auth import (
    allow_any_authenticated,
    get_allowed_cameras_for_filter,
    require_camera_access,
    require_role,
)
from frigate.api.defs.query.media_query_parameters import (
    Extension,
    MediaEventsSnapshotQueryParams,
    MediaLatestFrameQueryParams,
    MediaMjpegFeedQueryParams,
    MediaRecordingsAvailabilityQueryParams,
    MediaRecordingsSummaryQueryParams,
)
from frigate.api.defs.tags import Tags
from frigate.camera.state import CameraState
from frigate.config import FrigateConfig
from frigate.config.camera.snapshots import SnapshotsConfig
from frigate.const import (
    CACHE_DIR,
    CLIPS_DIR,
    INSTALL_DIR,
    MAX_SEGMENT_DURATION,
    PREVIEW_FRAME_TYPE,
    RECORD_DIR,
)
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
from frigate.output.preview import get_most_recent_preview_frame
from frigate.track.object_processing import TrackedObjectProcessor
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import get_image_from_recording
from frigate.util.time import get_dst_transitions
from frigate.util.file import (
    get_event_snapshot_bytes,
    get_event_snapshot_path,
    get_event_thumbnail_bytes,
    load_event_snapshot_image,
)
from frigate.util.image import get_image_from_recording, get_image_quality_params
from frigate.util.media import get_keyframe_before

logger = logging.getLogger(__name__)


router = APIRouter(tags=[Tags.media])


@@ -114,6 +117,24 @@ def imagestream(
    )


def _resolve_snapshot_settings(
    snapshot_config: SnapshotsConfig, params: MediaEventsSnapshotQueryParams
) -> dict[str, Any]:
    return {
        "timestamp": snapshot_config.timestamp
        if params.timestamp is None
        else bool(params.timestamp),
        "bounding_box": snapshot_config.bounding_box
        if params.bbox is None
        else bool(params.bbox),
        "crop": snapshot_config.crop if params.crop is None else bool(params.crop),
        "height": snapshot_config.height if params.height is None else params.height,
        "quality": snapshot_config.quality
        if params.quality is None
        else params.quality,
    }
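Each key falls back to the camera's configured snapshot setting only when the query parameter is omitted; an explicit parameter always wins. A quick check of that behavior (SimpleNamespace stands in for the real config and params types, so this is illustrative only):

    from types import SimpleNamespace

    cfg = SimpleNamespace(timestamp=True, bounding_box=True, crop=True, height=270, quality=70)
    params = SimpleNamespace(timestamp=None, bbox=None, crop=0, height=None, quality=None)

    settings = _resolve_snapshot_settings(cfg, params)
    assert settings["crop"] is False      # explicit ?crop=0 overrides the config
    assert settings["timestamp"] is True  # omitted param falls back to the config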
@router.get("/{camera_name}/ptz/info", dependencies=[Depends(require_camera_access)])
|
||||
async def camera_ptz_info(request: Request, camera_name: str):
|
||||
if camera_name in request.app.frigate_config.cameras:
|
||||
@ -131,7 +152,9 @@ async def camera_ptz_info(request: Request, camera_name: str):
|
||||
|
||||
|
||||
@router.get(
|
||||
"/{camera_name}/latest.{extension}", dependencies=[Depends(require_camera_access)]
|
||||
"/{camera_name}/latest.{extension}",
|
||||
dependencies=[Depends(require_camera_access)],
|
||||
description="Returns the latest frame from the specified camera in the requested format (jpg, png, webp). Falls back to preview frames if the camera is offline.",
|
||||
)
|
||||
async def latest_frame(
|
||||
request: Request,
|
||||
@ -149,14 +172,7 @@ async def latest_frame(
|
||||
"paths": params.paths,
|
||||
"regions": params.regions,
|
||||
}
|
||||
quality = params.quality
|
||||
|
||||
if extension == Extension.png:
|
||||
quality_params = None
|
||||
elif extension == Extension.webp:
|
||||
quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), quality]
|
||||
else: # jpg or jpeg
|
||||
quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
|
||||
quality_params = get_image_quality_params(extension.value, params.quality)
|
||||
|
||||
if camera_name in request.app.frigate_config.cameras:
|
||||
frame = frame_processor.get_current_frame(camera_name, draw_options)
|
||||
@ -165,9 +181,26 @@ async def latest_frame(
|
||||
or 10
|
||||
)
|
||||
|
||||
is_offline = False
|
||||
if frame is None or datetime.now().timestamp() > (
|
||||
frame_processor.get_current_frame_time(camera_name) + retry_interval
|
||||
):
|
||||
last_frame_time = frame_processor.get_current_frame_time(camera_name)
|
||||
preview_path = get_most_recent_preview_frame(
|
||||
camera_name, before=last_frame_time
|
||||
)
|
||||
|
||||
if preview_path:
|
||||
logger.debug(f"Using most recent preview frame for {camera_name}")
|
||||
frame = cv2.imread(preview_path, cv2.IMREAD_UNCHANGED)
|
||||
|
||||
if frame is not None:
|
||||
is_offline = True
|
||||
|
||||
if frame is None or not is_offline:
|
||||
logger.debug(
|
||||
f"No live or preview frame available for {camera_name}. Using error image."
|
||||
)
|
||||
if request.app.camera_error_image is None:
|
||||
error_image = glob.glob(
|
||||
os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg")
|
||||
@ -200,14 +233,18 @@ async def latest_frame(
|
||||
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
|
||||
|
||||
_, img = cv2.imencode(f".{extension.value}", frame, quality_params)
|
||||
|
||||
headers = {
|
||||
"Cache-Control": "no-store" if not params.store else "private, max-age=60",
|
||||
}
|
||||
|
||||
if is_offline:
|
||||
headers["X-Frigate-Offline"] = "true"
|
||||
|
||||
return Response(
|
||||
content=img.tobytes(),
|
||||
media_type=extension.get_mime_type(),
|
||||
headers={
|
||||
"Cache-Control": "no-store"
|
||||
if not params.store
|
||||
else "private, max-age=60",
|
||||
},
|
||||
headers=headers,
|
||||
)
|
||||
elif (
|
||||
camera_name == "birdseye"
|
||||
@ -397,333 +434,6 @@ async def submit_recording_snapshot_to_plus(
|
||||
)
|
||||
|
||||
|
||||
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
|
||||
def get_recordings_storage_usage(request: Request):
|
||||
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
|
||||
"storage"
|
||||
][RECORD_DIR]
|
||||
|
||||
if not recording_stats:
|
||||
return JSONResponse({})
|
||||
|
||||
total_mb = recording_stats["total"]
|
||||
|
||||
camera_usages: dict[str, dict] = (
|
||||
request.app.storage_maintainer.calculate_camera_usages()
|
||||
)
|
||||
|
||||
for camera_name in camera_usages.keys():
|
||||
if camera_usages.get(camera_name, {}).get("usage"):
|
||||
camera_usages[camera_name]["usage_percent"] = (
|
||||
camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
|
||||
) * 100
|
||||
|
||||
return JSONResponse(content=camera_usages)
|
||||
|
||||
|
||||
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
|
||||
def all_recordings_summary(
|
||||
request: Request,
|
||||
params: MediaRecordingsSummaryQueryParams = Depends(),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
"""Returns true/false by day indicating if recordings exist"""
|
||||
|
||||
cameras = params.cameras
|
||||
if cameras != "all":
|
||||
requested = set(unquote(cameras).split(","))
|
||||
filtered = requested.intersection(allowed_cameras)
|
||||
if not filtered:
|
||||
return JSONResponse(content={})
|
||||
camera_list = list(filtered)
|
||||
else:
|
||||
camera_list = allowed_cameras
|
||||
|
||||
time_range_query = (
|
||||
Recordings.select(
|
||||
fn.MIN(Recordings.start_time).alias("min_time"),
|
||||
fn.MAX(Recordings.start_time).alias("max_time"),
|
||||
)
|
||||
.where(Recordings.camera << camera_list)
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
min_time = time_range_query.get("min_time")
|
||||
max_time = time_range_query.get("max_time")
|
||||
|
||||
if min_time is None or max_time is None:
|
||||
return JSONResponse(content={})
|
||||
|
||||
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
|
||||
|
||||
days: dict[str, bool] = {}
|
||||
|
||||
for period_start, period_end, period_offset in dst_periods:
|
||||
hours_offset = int(period_offset / 60 / 60)
|
||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||
period_hour_modifier = f"{hours_offset} hour"
|
||||
period_minute_modifier = f"{minutes_offset} minute"
|
||||
|
||||
period_query = (
|
||||
Recordings.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Recordings.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("day")
|
||||
)
|
||||
.where(
|
||||
(Recordings.camera << camera_list)
|
||||
& (Recordings.end_time >= period_start)
|
||||
& (Recordings.start_time <= period_end)
|
||||
)
|
||||
.group_by(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Recordings.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
)
|
||||
)
|
||||
.order_by(Recordings.start_time.desc())
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
for g in period_query:
|
||||
days[g.day] = True
|
||||
|
||||
return JSONResponse(content=dict(sorted(days.items())))
|
||||
|
||||
|
||||
@router.get(
|
||||
"/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
|
||||
)
|
||||
async def recordings_summary(camera_name: str, timezone: str = "utc"):
|
||||
"""Returns hourly summary for recordings of given camera"""
|
||||
|
||||
time_range_query = (
|
||||
Recordings.select(
|
||||
fn.MIN(Recordings.start_time).alias("min_time"),
|
||||
fn.MAX(Recordings.start_time).alias("max_time"),
|
||||
)
|
||||
.where(Recordings.camera == camera_name)
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
min_time = time_range_query.get("min_time")
|
||||
max_time = time_range_query.get("max_time")
|
||||
|
||||
days: dict[str, dict] = {}
|
||||
|
||||
if min_time is None or max_time is None:
|
||||
return JSONResponse(content=list(days.values()))
|
||||
|
||||
dst_periods = get_dst_transitions(timezone, min_time, max_time)
|
||||
|
||||
for period_start, period_end, period_offset in dst_periods:
|
||||
hours_offset = int(period_offset / 60 / 60)
|
||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||
period_hour_modifier = f"{hours_offset} hour"
|
||||
period_minute_modifier = f"{minutes_offset} minute"
|
||||
|
||||
recording_groups = (
|
||||
Recordings.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d %H",
|
||||
fn.datetime(
|
||||
Recordings.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("hour"),
|
||||
fn.SUM(Recordings.duration).alias("duration"),
|
||||
fn.SUM(Recordings.motion).alias("motion"),
|
||||
fn.SUM(Recordings.objects).alias("objects"),
|
||||
)
|
||||
.where(
|
||||
(Recordings.camera == camera_name)
|
||||
& (Recordings.end_time >= period_start)
|
||||
& (Recordings.start_time <= period_end)
|
||||
)
|
||||
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
|
||||
.order_by(Recordings.start_time.desc())
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
event_groups = (
|
||||
Event.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d %H",
|
||||
fn.datetime(
|
||||
Event.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("hour"),
|
||||
fn.COUNT(Event.id).alias("count"),
|
||||
)
|
||||
.where(Event.camera == camera_name, Event.has_clip)
|
||||
.where(
|
||||
(Event.start_time >= period_start) & (Event.start_time <= period_end)
|
||||
)
|
||||
.group_by((Event.start_time + period_offset).cast("int") / 3600)
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
event_map = {g.hour: g.count for g in event_groups}
|
||||
|
||||
for recording_group in recording_groups:
|
||||
parts = recording_group.hour.split()
|
||||
hour = parts[1]
|
||||
day = parts[0]
|
||||
events_count = event_map.get(recording_group.hour, 0)
|
||||
hour_data = {
|
||||
"hour": hour,
|
||||
"events": events_count,
|
||||
"motion": recording_group.motion,
|
||||
"objects": recording_group.objects,
|
||||
"duration": round(recording_group.duration),
|
||||
}
|
||||
if day in days:
|
||||
# merge counts if already present (edge-case at DST boundary)
|
||||
days[day]["events"] += events_count or 0
|
||||
days[day]["hours"].append(hour_data)
|
||||
else:
|
||||
days[day] = {
|
||||
"events": events_count or 0,
|
||||
"hours": [hour_data],
|
||||
"day": day,
|
||||
}
|
||||
|
||||
return JSONResponse(content=list(days.values()))
|
||||
|
||||
|
||||
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
|
||||
async def recordings(
|
||||
camera_name: str,
|
||||
after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
|
||||
before: float = datetime.now().timestamp(),
|
||||
):
|
||||
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
|
||||
recordings = (
|
||||
Recordings.select(
|
||||
Recordings.id,
|
||||
Recordings.start_time,
|
||||
Recordings.end_time,
|
||||
Recordings.segment_size,
|
||||
Recordings.motion,
|
||||
Recordings.objects,
|
||||
Recordings.duration,
|
||||
)
|
||||
.where(
|
||||
Recordings.camera == camera_name,
|
||||
Recordings.end_time >= after,
|
||||
Recordings.start_time <= before,
|
||||
)
|
||||
.order_by(Recordings.start_time)
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
|
||||
return JSONResponse(content=list(recordings))
|
||||
|
||||
|
||||
@router.get(
|
||||
"/recordings/unavailable",
|
||||
response_model=list[dict],
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
)
|
||||
async def no_recordings(
|
||||
request: Request,
|
||||
params: MediaRecordingsAvailabilityQueryParams = Depends(),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
"""Get time ranges with no recordings."""
|
||||
cameras = params.cameras
|
||||
if cameras != "all":
|
||||
requested = set(unquote(cameras).split(","))
|
||||
filtered = requested.intersection(allowed_cameras)
|
||||
if not filtered:
|
||||
return JSONResponse(content=[])
|
||||
cameras = ",".join(filtered)
|
||||
else:
|
||||
cameras = allowed_cameras
|
||||
|
||||
before = params.before or datetime.datetime.now().timestamp()
|
||||
after = (
|
||||
params.after
|
||||
or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
|
||||
)
|
||||
scale = params.scale
|
||||
|
||||
clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
|
||||
if cameras != "all":
|
||||
camera_list = cameras.split(",")
|
||||
clauses.append((Recordings.camera << camera_list))
|
||||
else:
|
||||
camera_list = allowed_cameras
|
||||
|
||||
# Get recording start times
|
||||
data: list[Recordings] = (
|
||||
Recordings.select(Recordings.start_time, Recordings.end_time)
|
||||
.where(reduce(operator.and_, clauses))
|
||||
.order_by(Recordings.start_time.asc())
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
|
||||
# Convert recordings to list of (start, end) tuples
|
||||
recordings = [(r["start_time"], r["end_time"]) for r in data]
|
||||
|
||||
# Iterate through time segments and check if each has any recording
|
||||
no_recording_segments = []
|
||||
current = after
|
||||
current_gap_start = None
|
||||
|
||||
while current < before:
|
||||
segment_end = min(current + scale, before)
|
||||
|
||||
# Check if this segment overlaps with any recording
|
||||
has_recording = any(
|
||||
rec_start < segment_end and rec_end > current
|
||||
for rec_start, rec_end in recordings
|
||||
)
|
||||
|
||||
if not has_recording:
|
||||
# This segment has no recordings
|
||||
if current_gap_start is None:
|
||||
current_gap_start = current # Start a new gap
|
||||
else:
|
||||
# This segment has recordings
|
||||
if current_gap_start is not None:
|
||||
# End the current gap and append it
|
||||
no_recording_segments.append(
|
||||
{"start_time": int(current_gap_start), "end_time": int(current)}
|
||||
)
|
||||
current_gap_start = None
|
||||
|
||||
current = segment_end
|
||||
|
||||
# Append the last gap if it exists
|
||||
if current_gap_start is not None:
|
||||
no_recording_segments.append(
|
||||
{"start_time": int(current_gap_start), "end_time": int(before)}
|
||||
)
|
||||
|
||||
return JSONResponse(content=no_recording_segments)
|
||||
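The scan walks fixed-size segments and merges consecutive empty ones into gaps. A worked example of the loop above, with invented values:

    # after=0, before=300, scale=60, one recording covering 60-180:
    # segments: (0,60) empty, (60,120) covered, (120,180) covered,
    #           (180,240) empty, (240,300) empty
    # result: [{"start_time": 0, "end_time": 60},
    #          {"start_time": 180, "end_time": 300}]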
|
||||
|
||||
@router.get(
|
||||
"/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4",
|
||||
dependencies=[Depends(require_camera_access)],
|
||||
@ -900,6 +610,33 @@ async def vod_ts(
|
||||
if recording.end_time > end_ts:
|
||||
duration -= int((recording.end_time - end_ts) * 1000)
|
||||
|
||||
# nginx-vod-module pushes clipFrom forward to the next keyframe,
|
||||
# which can leave too few frames and produce an empty/unplayable
|
||||
# segment. Snap clipFrom back to the preceding keyframe so the
|
||||
# segment always starts with a decodable frame.
|
||||
if "clipFrom" in clip:
|
||||
keyframe_ms = get_keyframe_before(recording.path, clip["clipFrom"])
|
||||
if keyframe_ms is not None:
|
||||
gained = clip["clipFrom"] - keyframe_ms
|
||||
clip["clipFrom"] = keyframe_ms
|
||||
duration += gained
|
||||
logger.debug(
|
||||
"VOD: snapped clipFrom to keyframe at %sms for %s, duration now %sms",
|
||||
keyframe_ms,
|
||||
recording.path,
|
||||
duration,
|
||||
)
|
||||
else:
|
||||
# could not read keyframes, remove clipFrom to use full recording
|
||||
logger.debug(
|
||||
"VOD: no keyframe info for %s, removing clipFrom to use full recording",
|
||||
recording.path,
|
||||
)
|
||||
del clip["clipFrom"]
|
||||
duration = int(recording.duration * 1000)
|
||||
if recording.end_time > end_ts:
|
||||
duration -= int((recording.end_time - end_ts) * 1000)
|
||||
|
||||
if duration < min_duration_ms:
|
||||
# skip if the clip has no valid duration (too short to contain frames)
|
||||
logger.debug(
|
||||
@ -1037,7 +774,7 @@ async def vod_clip(
|
||||
|
||||
@router.get(
|
||||
"/events/{event_id}/snapshot.jpg",
|
||||
description="Returns a snapshot image for the specified object id. NOTE: The query params only take affect while the event is in-progress. Once the event has ended the snapshot configuration is used.",
|
||||
description="Returns a snapshot image for the specified object id.",
|
||||
)
|
||||
async def event_snapshot(
|
||||
request: Request,
|
||||
@ -1046,6 +783,7 @@ async def event_snapshot(
|
||||
):
|
||||
event_complete = False
|
||||
jpg_bytes = None
|
||||
frame_time = 0
|
||||
try:
|
||||
event = Event.get(Event.id == event_id, Event.end_time != None)
|
||||
event_complete = True
|
||||
@ -1055,11 +793,22 @@ async def event_snapshot(
|
||||
content={"success": False, "message": "Snapshot not available"},
|
||||
status_code=404,
|
||||
)
|
||||
# read snapshot from disk
|
||||
with open(
|
||||
os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"), "rb"
|
||||
) as image_file:
|
||||
jpg_bytes = image_file.read()
|
||||
snapshot_settings = _resolve_snapshot_settings(
|
||||
request.app.frigate_config.cameras[event.camera].snapshots, params
|
||||
)
|
||||
jpg_bytes, frame_time = get_event_snapshot_bytes(
|
||||
event,
|
||||
ext="jpg",
|
||||
timestamp=snapshot_settings["timestamp"],
|
||||
bounding_box=snapshot_settings["bounding_box"],
|
||||
crop=snapshot_settings["crop"],
|
||||
height=snapshot_settings["height"],
|
||||
quality=snapshot_settings["quality"],
|
||||
timestamp_style=request.app.frigate_config.cameras[
|
||||
event.camera
|
||||
].timestamp_style,
|
||||
colormap=request.app.frigate_config.model.colormap,
|
||||
)
|
||||
except DoesNotExist:
|
||||
# see if the object is currently being tracked
|
||||
try:
|
||||
@ -1070,13 +819,16 @@ async def event_snapshot(
|
||||
if event_id in camera_state.tracked_objects:
|
||||
tracked_obj = camera_state.tracked_objects.get(event_id)
|
||||
if tracked_obj is not None:
|
||||
jpg_bytes = tracked_obj.get_img_bytes(
|
||||
snapshot_settings = _resolve_snapshot_settings(
|
||||
camera_state.camera_config.snapshots, params
|
||||
)
|
||||
jpg_bytes, frame_time = tracked_obj.get_img_bytes(
|
||||
ext="jpg",
|
||||
timestamp=params.timestamp,
|
||||
bounding_box=params.bbox,
|
||||
crop=params.crop,
|
||||
height=params.height,
|
||||
quality=params.quality,
|
||||
timestamp=snapshot_settings["timestamp"],
|
||||
bounding_box=snapshot_settings["bounding_box"],
|
||||
crop=snapshot_settings["crop"],
|
||||
height=snapshot_settings["height"],
|
||||
quality=snapshot_settings["quality"],
|
||||
)
|
||||
await require_camera_access(camera_state.name, request=request)
|
||||
except Exception:
|
||||
@ -1099,6 +851,7 @@ async def event_snapshot(
|
||||
headers = {
|
||||
"Content-Type": "image/jpeg",
|
||||
"Cache-Control": "private, max-age=31536000" if event_complete else "no-store",
|
||||
"X-Frame-Time": str(frame_time),
|
||||
}
|
||||
|
||||
if params.download:
|
||||
@ -1113,7 +866,6 @@ async def event_snapshot(
|
||||
|
||||
@router.get(
|
||||
"/events/{event_id}/thumbnail.{extension}",
|
||||
dependencies=[Depends(require_camera_access)],
|
||||
)
|
||||
async def event_thumbnail(
|
||||
request: Request,
|
||||
@ -1157,11 +909,12 @@ async def event_thumbnail(
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
# android notifications prefer a 2:1 ratio
|
||||
if format == "android":
|
||||
img_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
|
||||
img = cv2.imdecode(img_as_np, flags=1)
|
||||
thumbnail = cv2.copyMakeBorder(
|
||||
|
||||
# android notifications prefer a 2:1 ratio
|
||||
if format == "android":
|
||||
img = cv2.copyMakeBorder(
|
||||
img,
|
||||
0,
|
||||
0,
|
||||
@ -1177,8 +930,8 @@ async def event_thumbnail(
|
||||
elif extension == Extension.webp:
|
||||
quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), 60]
|
||||
|
||||
_, img = cv2.imencode(f".{extension.value}", thumbnail, quality_params)
|
||||
thumbnail_bytes = img.tobytes()
|
||||
_, encoded = cv2.imencode(f".{extension.value}", img, quality_params)
|
||||
thumbnail_bytes = encoded.tobytes()
|
||||
|
||||
return Response(
|
||||
thumbnail_bytes,
|
||||
@ -1312,20 +1065,39 @@ def grid_snapshot(
|
||||
)
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/{camera_name}/region_grid", dependencies=[Depends(require_role("admin"))]
|
||||
)
|
||||
def clear_region_grid(request: Request, camera_name: str):
|
||||
"""Clear the region grid for a camera."""
|
||||
if camera_name not in request.app.frigate_config.cameras:
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Camera not found"},
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
Regions.delete().where(Regions.camera == camera_name).execute()
|
||||
return JSONResponse(
|
||||
content={"success": True, "message": "Region grid cleared"},
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/events/{event_id}/snapshot-clean.webp",
|
||||
dependencies=[Depends(require_camera_access)],
|
||||
)
|
||||
def event_snapshot_clean(request: Request, event_id: str, download: bool = False):
|
||||
async def event_snapshot_clean(request: Request, event_id: str, download: bool = False):
|
||||
webp_bytes = None
|
||||
event_complete = False
|
||||
try:
|
||||
event = Event.get(Event.id == event_id)
|
||||
event_complete = event.end_time is not None
|
||||
await require_camera_access(event.camera, request=request)
|
||||
snapshot_config = request.app.frigate_config.cameras[event.camera].snapshots
|
||||
if not (snapshot_config.enabled and event.has_snapshot):
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Snapshots and clean_copy must be enabled in the config",
|
||||
"message": "Snapshots must be enabled in the config",
|
||||
},
|
||||
status_code=404,
|
||||
)
|
||||
@ -1357,54 +1129,10 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False
|
||||
)
|
||||
if webp_bytes is None:
|
||||
try:
|
||||
# webp
|
||||
clean_snapshot_path_webp = os.path.join(
|
||||
CLIPS_DIR, f"{event.camera}-{event.id}-clean.webp"
|
||||
image_path, is_clean_snapshot = get_event_snapshot_path(
|
||||
event, clean_only=True
|
||||
)
|
||||
# png (legacy)
|
||||
clean_snapshot_path_png = os.path.join(
|
||||
CLIPS_DIR, f"{event.camera}-{event.id}-clean.png"
|
||||
)
|
||||
|
||||
if os.path.exists(clean_snapshot_path_webp):
|
||||
with open(clean_snapshot_path_webp, "rb") as image_file:
|
||||
webp_bytes = image_file.read()
|
||||
elif os.path.exists(clean_snapshot_path_png):
|
||||
# convert png to webp and save for future use
|
||||
png_image = cv2.imread(clean_snapshot_path_png, cv2.IMREAD_UNCHANGED)
|
||||
if png_image is None:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Invalid png snapshot",
|
||||
},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
ret, webp_data = cv2.imencode(
|
||||
".webp", png_image, [int(cv2.IMWRITE_WEBP_QUALITY), 60]
|
||||
)
|
||||
if not ret:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Unable to convert png to webp",
|
||||
},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
webp_bytes = webp_data.tobytes()
|
||||
|
||||
# save the converted webp for future requests
|
||||
try:
|
||||
with open(clean_snapshot_path_webp, "wb") as f:
|
||||
f.write(webp_bytes)
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Failed to save converted webp for event {event.id}: {e}"
|
||||
)
|
||||
# continue since we now have the data to return
|
||||
else:
|
||||
if not is_clean_snapshot or image_path is None:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
@ -1412,6 +1140,34 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False
|
||||
},
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
if image_path.endswith(".webp"):
|
||||
with open(image_path, "rb") as image_file:
|
||||
webp_bytes = image_file.read()
|
||||
else:
|
||||
image = load_event_snapshot_image(event, clean_only=True)[0]
|
||||
if image is None:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Unable to load clean snapshot for event",
|
||||
},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
ret, webp_data = cv2.imencode(
|
||||
".webp", image, get_image_quality_params("webp", None)
|
||||
)
|
||||
if not ret:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Unable to convert snapshot to webp",
|
||||
},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
webp_bytes = webp_data.tobytes()
|
||||
except Exception:
|
||||
logger.error(f"Unable to load clean snapshot for event: {event.id}")
|
||||
return JSONResponse(
|
||||
@ -1424,7 +1180,7 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False
|
||||
|
||||
headers = {
|
||||
"Content-Type": "image/webp",
|
||||
"Cache-Control": "private, max-age=31536000",
|
||||
"Cache-Control": "private, max-age=31536000" if event_complete else "no-cache",
|
||||
}
|
||||
|
||||
if download:
|
||||
@ -1440,7 +1196,7 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False
|
||||
|
||||
|
||||
@router.get(
|
||||
"/events/{event_id}/clip.mp4", dependencies=[Depends(require_camera_access)]
|
||||
"/events/{event_id}/clip.mp4",
|
||||
)
|
||||
async def event_clip(
|
||||
request: Request,
|
||||
@ -1454,6 +1210,8 @@ async def event_clip(
|
||||
             content={"success": False, "message": "Event not found"}, status_code=404
         )
 
+    await require_camera_access(event.camera, request=request)
+
     if not event.has_clip:
         return JSONResponse(
             content={"success": False, "message": "Clip not available"}, status_code=404
@@ -1470,9 +1228,9 @@ async def event_clip(
 
 
 @router.get(
-    "/events/{event_id}/preview.gif", dependencies=[Depends(require_camera_access)]
+    "/events/{event_id}/preview.gif",
 )
-def event_preview(request: Request, event_id: str):
+async def event_preview(request: Request, event_id: str):
     try:
         event: Event = Event.get(Event.id == event_id)
     except DoesNotExist:
@@ -1480,6 +1238,8 @@ def event_preview(request: Request, event_id: str):
             content={"success": False, "message": "Event not found"}, status_code=404
         )
 
+    await require_camera_access(event.camera, request=request)
+
     start_ts = event.start_time
     end_ts = start_ts + (
         min(event.end_time - event.start_time, 20) if event.end_time else 20
@@ -1502,6 +1262,7 @@ def preview_gif(
 ):
     if datetime.fromtimestamp(start_ts) < datetime.now().replace(minute=0, second=0):
         # has preview mp4
+        try:
             preview: Previews = (
                 Previews.select(
                     Previews.camera,
@@ -1519,8 +1280,7 @@ def preview_gif(
                 .limit(1)
                 .get()
             )
-
-        if not preview:
+        except DoesNotExist:
             return JSONResponse(
                 content={"success": False, "message": "Preview not found"},
                 status_code=404,
@@ -1570,6 +1330,13 @@ def preview_gif(
     else:
         # need to generate from existing images
         preview_dir = os.path.join(CACHE_DIR, "preview_frames")
+
+        if not os.path.isdir(preview_dir):
+            return JSONResponse(
+                content={"success": False, "message": "Preview not found"},
+                status_code=404,
+            )
+
         file_start = f"preview_{camera_name}"
         start_file = f"{file_start}-{start_ts}.{PREVIEW_FRAME_TYPE}"
         end_file = f"{file_start}-{end_ts}.{PREVIEW_FRAME_TYPE}"
@@ -1745,6 +1512,13 @@ def preview_mp4(
     else:
         # need to generate from existing images
         preview_dir = os.path.join(CACHE_DIR, "preview_frames")
+
+        if not os.path.isdir(preview_dir):
+            return JSONResponse(
+                content={"success": False, "message": "Preview not found"},
+                status_code=404,
+            )
+
         file_start = f"preview_{camera_name}"
         start_file = f"{file_start}-{start_ts}.{PREVIEW_FRAME_TYPE}"
         end_file = f"{file_start}-{end_ts}.{PREVIEW_FRAME_TYPE}"
@@ -1824,8 +1598,8 @@ def preview_mp4(
     )
 
 
-@router.get("/review/{event_id}/preview", dependencies=[Depends(require_camera_access)])
-def review_preview(
+@router.get("/review/{event_id}/preview")
+async def review_preview(
     request: Request,
     event_id: str,
     format: str = Query(default="gif", enum=["gif", "mp4"]),
@@ -1838,6 +1612,8 @@ def review_preview(
             status_code=404,
         )
 
+    await require_camera_access(review.camera, request=request)
+
     padding = 8
     start_ts = review.start_time - padding
     end_ts = (
@@ -1851,12 +1627,14 @@ def review_preview(
 
 
 @router.get(
-    "/preview/{file_name}/thumbnail.jpg", dependencies=[Depends(require_camera_access)]
+    "/preview/{file_name}/thumbnail.jpg",
+    dependencies=[Depends(allow_any_authenticated())],
 )
 @router.get(
-    "/preview/{file_name}/thumbnail.webp", dependencies=[Depends(require_camera_access)]
+    "/preview/{file_name}/thumbnail.webp",
+    dependencies=[Depends(allow_any_authenticated())],
 )
-def preview_thumbnail(file_name: str):
+async def preview_thumbnail(request: Request, file_name: str):
     """Get a thumbnail from the cached preview frames."""
     if len(file_name) > 1000:
         return JSONResponse(
@@ -1866,6 +1644,17 @@ def preview_thumbnail(file_name: str):
             status_code=403,
         )
 
+    # Extract camera name from preview filename (format: preview_{camera}-{timestamp}.ext)
+    if not file_name.startswith("preview_"):
+        return JSONResponse(
+            content={"success": False, "message": "Invalid preview filename"},
+            status_code=400,
+        )
+    # Use rsplit to handle camera names containing dashes (e.g. front-door)
+    name_part = file_name[len("preview_") :].rsplit(".", 1)[0]  # strip extension
+    camera_name = name_part.rsplit("-", 1)[0]  # split off timestamp
+    await require_camera_access(camera_name, request=request)
+
     safe_file_name_current = sanitize_filename(file_name)
     preview_dir = os.path.join(CACHE_DIR, "preview_frames")
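Editor's note: a quick sketch of the rsplit-based parsing used in the thumbnail handler above, with an illustrative dashed camera name (the filename format `preview_{camera}-{timestamp}.{ext}` is taken from the comment in the diff):

```python
# rsplit works from the right, so dashes inside the camera name survive
file_name = "preview_front-door-1700000000.webp"
name_part = file_name[len("preview_"):].rsplit(".", 1)[0]  # "front-door-1700000000"
camera_name = name_part.rsplit("-", 1)[0]                  # "front-door"
print(camera_name)
```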
frigate/api/motion_search.py (new file, 292 lines)
@@ -0,0 +1,292 @@
"""Motion search API for detecting changes within a region of interest."""

import logging
from typing import Any, List, Optional

from fastapi import APIRouter, Depends, Request
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field

from frigate.api.auth import require_camera_access
from frigate.api.defs.tags import Tags
from frigate.jobs.motion_search import (
    cancel_motion_search_job,
    get_motion_search_job,
    start_motion_search_job,
)
from frigate.types import JobStatusTypesEnum

logger = logging.getLogger(__name__)

router = APIRouter(tags=[Tags.motion_search])


class MotionSearchRequest(BaseModel):
    """Request body for motion search."""

    start_time: float = Field(description="Start timestamp for the search range")
    end_time: float = Field(description="End timestamp for the search range")
    polygon_points: List[List[float]] = Field(
        description="List of [x, y] normalized coordinates (0-1) defining the ROI polygon"
    )
    threshold: int = Field(
        default=30,
        ge=1,
        le=255,
        description="Pixel difference threshold (1-255)",
    )
    min_area: float = Field(
        default=5.0,
        ge=0.1,
        le=100.0,
        description="Minimum change area as a percentage of the ROI",
    )
    frame_skip: int = Field(
        default=5,
        ge=1,
        le=30,
        description="Process every Nth frame (1=all frames, 5=every 5th frame)",
    )
    parallel: bool = Field(
        default=False,
        description="Enable parallel scanning across segments",
    )
    max_results: int = Field(
        default=25,
        ge=1,
        le=200,
        description="Maximum number of search results to return",
    )


class MotionSearchResult(BaseModel):
    """A single search result with timestamp and change info."""

    timestamp: float = Field(description="Timestamp where change was detected")
    change_percentage: float = Field(description="Percentage of ROI area that changed")


class MotionSearchMetricsResponse(BaseModel):
    """Metrics collected during motion search execution."""

    segments_scanned: int = 0
    segments_processed: int = 0
    metadata_inactive_segments: int = 0
    heatmap_roi_skip_segments: int = 0
    fallback_full_range_segments: int = 0
    frames_decoded: int = 0
    wall_time_seconds: float = 0.0
    segments_with_errors: int = 0


class MotionSearchStartResponse(BaseModel):
    """Response when motion search job starts."""

    success: bool
    message: str
    job_id: str


class MotionSearchStatusResponse(BaseModel):
    """Response containing job status and results."""

    success: bool
    message: str
    status: str  # "queued", "running", "success", "failed", or "cancelled"
    results: Optional[List[MotionSearchResult]] = None
    total_frames_processed: Optional[int] = None
    error_message: Optional[str] = None
    metrics: Optional[MotionSearchMetricsResponse] = None
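Editor's note: a minimal sketch of how the request model above validates input; field names and defaults come from the model definitions, the values are illustrative:

```python
# Defaults fill in for omitted fields; out-of-range values raise a
# pydantic ValidationError (e.g. threshold=0 violates ge=1).
req = MotionSearchRequest(
    start_time=1700000000.0,
    end_time=1700003600.0,
    polygon_points=[[0.1, 0.1], [0.9, 0.1], [0.9, 0.9], [0.1, 0.9]],
)
assert req.threshold == 30 and req.frame_skip == 5  # defaults applied
```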
@router.post(
    "/{camera_name}/search/motion",
    response_model=MotionSearchStartResponse,
    dependencies=[Depends(require_camera_access)],
    summary="Start motion search job",
    description="""Starts an asynchronous search for significant motion changes within
    a user-defined Region of Interest (ROI) over a specified time range. Returns a job_id
    that can be used to poll for results.""",
)
async def start_motion_search(
    request: Request,
    camera_name: str,
    body: MotionSearchRequest,
):
    """Start an async motion search job."""
    config = request.app.frigate_config

    if camera_name not in config.cameras:
        return JSONResponse(
            content={"success": False, "message": f"Camera {camera_name} not found"},
            status_code=404,
        )

    # Validate polygon has at least 3 points
    if len(body.polygon_points) < 3:
        return JSONResponse(
            content={
                "success": False,
                "message": "Polygon must have at least 3 points",
            },
            status_code=400,
        )

    # Validate time range
    if body.start_time >= body.end_time:
        return JSONResponse(
            content={
                "success": False,
                "message": "Start time must be before end time",
            },
            status_code=400,
        )

    # Start the job using the jobs module
    job_id = start_motion_search_job(
        config=config,
        camera_name=camera_name,
        start_time=body.start_time,
        end_time=body.end_time,
        polygon_points=body.polygon_points,
        threshold=body.threshold,
        min_area=body.min_area,
        frame_skip=body.frame_skip,
        parallel=body.parallel,
        max_results=body.max_results,
    )

    return JSONResponse(
        content={
            "success": True,
            "message": "Search job started",
            "job_id": job_id,
        }
    )
@router.get(
    "/{camera_name}/search/motion/{job_id}",
    response_model=MotionSearchStatusResponse,
    dependencies=[Depends(require_camera_access)],
    summary="Get motion search job status",
    description="Returns the status and results (if complete) of a motion search job.",
)
async def get_motion_search_status_endpoint(
    request: Request,
    camera_name: str,
    job_id: str,
):
    """Get the status of a motion search job."""
    config = request.app.frigate_config

    if camera_name not in config.cameras:
        return JSONResponse(
            content={"success": False, "message": f"Camera {camera_name} not found"},
            status_code=404,
        )

    job = get_motion_search_job(job_id)
    if not job:
        return JSONResponse(
            content={"success": False, "message": "Job not found"},
            status_code=404,
        )

    api_status = job.status

    # Build response content
    response_content: dict[str, Any] = {
        "success": api_status != JobStatusTypesEnum.failed,
        "status": api_status,
    }

    if api_status == JobStatusTypesEnum.failed:
        response_content["message"] = job.error_message or "Search failed"
        response_content["error_message"] = job.error_message
    elif api_status == JobStatusTypesEnum.cancelled:
        response_content["message"] = "Search cancelled"
        response_content["total_frames_processed"] = job.total_frames_processed
    elif api_status == JobStatusTypesEnum.success:
        response_content["message"] = "Search complete"
        if job.results:
            response_content["results"] = job.results.get("results", [])
            response_content["total_frames_processed"] = job.results.get(
                "total_frames_processed", job.total_frames_processed
            )
        else:
            response_content["results"] = []
            response_content["total_frames_processed"] = job.total_frames_processed
    else:
        response_content["message"] = "Job processing"
        response_content["total_frames_processed"] = job.total_frames_processed
        # Include partial results if available (streaming)
        if job.results:
            response_content["results"] = job.results.get("results", [])
            response_content["total_frames_processed"] = job.results.get(
                "total_frames_processed", job.total_frames_processed
            )

    # Include metrics if available
    if job.metrics:
        response_content["metrics"] = job.metrics.to_dict()

    return JSONResponse(content=response_content)
@router.post(
    "/{camera_name}/search/motion/{job_id}/cancel",
    dependencies=[Depends(require_camera_access)],
    summary="Cancel motion search job",
    description="Cancels an active motion search job if it is still processing.",
)
async def cancel_motion_search_endpoint(
    request: Request,
    camera_name: str,
    job_id: str,
):
    """Cancel an active motion search job."""
    config = request.app.frigate_config

    if camera_name not in config.cameras:
        return JSONResponse(
            content={"success": False, "message": f"Camera {camera_name} not found"},
            status_code=404,
        )

    job = get_motion_search_job(job_id)
    if not job:
        return JSONResponse(
            content={"success": False, "message": "Job not found"},
            status_code=404,
        )

    # Check if already finished
    api_status = job.status
    if api_status not in (JobStatusTypesEnum.queued, JobStatusTypesEnum.running):
        return JSONResponse(
            content={
                "success": True,
                "message": "Job already finished",
                "status": api_status,
            }
        )

    # Request cancellation
    cancelled = cancel_motion_search_job(job_id)
    if cancelled:
        return JSONResponse(
            content={
                "success": True,
                "message": "Search cancelled",
                "status": "cancelled",
            }
        )

    return JSONResponse(
        content={
            "success": False,
            "message": "Failed to cancel job",
        },
        status_code=500,
    )
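Editor's note: a client-side sketch of the start/poll/cancel flow defined by the three endpoints above. The route paths and response keys come from the file; the base URL, camera name, and payload values are assumptions, and authentication is omitted:

```python
import time

import requests

BASE = "http://frigate.local:5000/api"  # assumed deployment URL
payload = {
    "start_time": 1700000000.0,
    "end_time": 1700003600.0,
    # normalized [x, y] ROI polygon corners
    "polygon_points": [[0.1, 0.1], [0.9, 0.1], [0.9, 0.9], [0.1, 0.9]],
    "frame_skip": 5,
}

job_id = requests.post(f"{BASE}/front_door/search/motion", json=payload).json()["job_id"]

# Poll until the job leaves the queued/running states; partial results may
# appear under "results" while the job is still processing.
while True:
    status = requests.get(f"{BASE}/front_door/search/motion/{job_id}").json()
    if status["status"] not in ("queued", "running"):
        break
    time.sleep(1)

print(status["status"], status.get("results"))
# To abort early: requests.post(f"{BASE}/front_door/search/motion/{job_id}/cancel")
```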
frigate/api/record.py (new file, 458 lines)
@@ -0,0 +1,458 @@
"""Recording APIs."""

import datetime as dt
import logging
from datetime import datetime, timedelta
from functools import reduce
from pathlib import Path
from typing import List
from urllib.parse import unquote

from fastapi import APIRouter, Depends, Request
from fastapi import Path as PathParam
from fastapi.responses import JSONResponse
from peewee import fn, operator

from frigate.api.auth import (
    allow_any_authenticated,
    get_allowed_cameras_for_filter,
    require_camera_access,
    require_role,
)
from frigate.api.defs.query.recordings_query_parameters import (
    MediaRecordingsAvailabilityQueryParams,
    MediaRecordingsSummaryQueryParams,
    RecordingsDeleteQueryParams,
)
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.const import RECORD_DIR
from frigate.models import Event, Recordings
from frigate.util.time import get_dst_transitions

logger = logging.getLogger(__name__)

router = APIRouter(tags=[Tags.recordings])


@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
def get_recordings_storage_usage(request: Request):
    recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
        "storage"
    ][RECORD_DIR]

    if not recording_stats:
        return JSONResponse({})

    total_mb = recording_stats["total"]

    camera_usages: dict[str, dict] = (
        request.app.storage_maintainer.calculate_camera_usages()
    )

    for camera_name in camera_usages.keys():
        if camera_usages.get(camera_name, {}).get("usage"):
            camera_usages[camera_name]["usage_percent"] = (
                camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
            ) * 100

    return JSONResponse(content=camera_usages)


@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
def all_recordings_summary(
    request: Request,
    params: MediaRecordingsSummaryQueryParams = Depends(),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Returns true/false by day indicating if recordings exist."""
    cameras = params.cameras
    if cameras != "all":
        requested = set(unquote(cameras).split(","))
        filtered = requested.intersection(allowed_cameras)
        if not filtered:
            return JSONResponse(content={})
        camera_list = list(filtered)
    else:
        camera_list = allowed_cameras

    time_range_query = (
        Recordings.select(
            fn.MIN(Recordings.start_time).alias("min_time"),
            fn.MAX(Recordings.start_time).alias("max_time"),
        )
        .where(Recordings.camera << camera_list)
        .dicts()
        .get()
    )

    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")

    if min_time is None or max_time is None:
        return JSONResponse(content={})

    dst_periods = get_dst_transitions(params.timezone, min_time, max_time)

    days: dict[str, bool] = {}

    for period_start, period_end, period_offset in dst_periods:
        day_expr = ((Recordings.start_time + period_offset) / 86400).cast("int")

        period_query = (
            Recordings.select(day_expr.alias("day_idx"))
            .where(
                (Recordings.camera << camera_list)
                & (Recordings.end_time >= period_start)
                & (Recordings.start_time <= period_end)
            )
            .distinct()
            .namedtuples()
        )

        for g in period_query:
            day_str = (dt.date(1970, 1, 1) + dt.timedelta(days=g.day_idx)).isoformat()
            days[day_str] = True

    return JSONResponse(content=dict(sorted(days.items())))
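Editor's note: a sketch of the day-bucketing math used in `all_recordings_summary` above. The epoch timestamp is shifted by the DST period's UTC offset, floor-divided by 86400 to get a day index, then anchored at 1970-01-01 to recover the local calendar date (values illustrative):

```python
import datetime as dt

ts = 1700000000            # recording start_time (unix seconds)
period_offset = -5 * 3600  # e.g. UTC-5 during this DST period
day_idx = int((ts + period_offset) / 86400)
day = (dt.date(1970, 1, 1) + dt.timedelta(days=day_idx)).isoformat()
print(day)  # "2023-11-14"
```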
@router.get(
    "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
    """Returns an hourly summary of recordings for the given camera."""
    time_range_query = (
        Recordings.select(
            fn.MIN(Recordings.start_time).alias("min_time"),
            fn.MAX(Recordings.start_time).alias("max_time"),
        )
        .where(Recordings.camera == camera_name)
        .dicts()
        .get()
    )

    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")

    days: dict[str, dict] = {}

    if min_time is None or max_time is None:
        return JSONResponse(content=list(days.values()))

    dst_periods = get_dst_transitions(timezone, min_time, max_time)

    for period_start, period_end, period_offset in dst_periods:
        hours_offset = int(period_offset / 60 / 60)
        minutes_offset = int(period_offset / 60 - hours_offset * 60)
        period_hour_modifier = f"{hours_offset} hour"
        period_minute_modifier = f"{minutes_offset} minute"

        recording_groups = (
            Recordings.select(
                fn.strftime(
                    "%Y-%m-%d %H",
                    fn.datetime(
                        Recordings.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("hour"),
                fn.SUM(Recordings.duration).alias("duration"),
                fn.SUM(Recordings.motion).alias("motion"),
                fn.SUM(Recordings.objects).alias("objects"),
            )
            .where(
                (Recordings.camera == camera_name)
                & (Recordings.end_time >= period_start)
                & (Recordings.start_time <= period_end)
            )
            .group_by((Recordings.start_time + period_offset).cast("int") / 3600)
            .order_by(Recordings.start_time.desc())
            .namedtuples()
        )

        event_groups = (
            Event.select(
                fn.strftime(
                    "%Y-%m-%d %H",
                    fn.datetime(
                        Event.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("hour"),
                fn.COUNT(Event.id).alias("count"),
            )
            .where(Event.camera == camera_name, Event.has_clip)
            .where(
                (Event.start_time >= period_start) & (Event.start_time <= period_end)
            )
            .group_by((Event.start_time + period_offset).cast("int") / 3600)
            .namedtuples()
        )

        event_map = {g.hour: g.count for g in event_groups}

        for recording_group in recording_groups:
            parts = recording_group.hour.split()
            hour = parts[1]
            day = parts[0]
            events_count = event_map.get(recording_group.hour, 0)
            hour_data = {
                "hour": hour,
                "events": events_count,
                "motion": recording_group.motion,
                "objects": recording_group.objects,
                "duration": round(recording_group.duration),
            }
            if day in days:
                # merge counts if already present (edge case at a DST boundary)
                days[day]["events"] += events_count or 0
                days[day]["hours"].append(hour_data)
            else:
                days[day] = {
                    "events": events_count or 0,
                    "hours": [hour_data],
                    "day": day,
                }

    return JSONResponse(content=list(days.values()))
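Editor's note: a sketch of how `period_offset` (in seconds) becomes the SQLite `datetime()` modifiers used in the queries above, for an assumed UTC+5:30 offset:

```python
period_offset = 5 * 3600 + 30 * 60  # 19800 seconds
hours_offset = int(period_offset / 60 / 60)                   # 5
minutes_offset = int(period_offset / 60 - hours_offset * 60)  # 30
print(f"{hours_offset} hour", f"{minutes_offset} minute")     # "5 hour" "30 minute"
```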
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
async def recordings(
    camera_name: str,
    after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
    before: float = datetime.now().timestamp(),
):
    """Return recordings for the given camera between the 'after' and 'before' timestamps. Defaults to the last hour if they are not provided."""
    recordings = (
        Recordings.select(
            Recordings.id,
            Recordings.start_time,
            Recordings.end_time,
            Recordings.segment_size,
            Recordings.motion,
            Recordings.objects,
            Recordings.motion_heatmap,
            Recordings.duration,
        )
        .where(
            Recordings.camera == camera_name,
            Recordings.end_time >= after,
            Recordings.start_time <= before,
        )
        .order_by(Recordings.start_time)
        .dicts()
        .iterator()
    )

    return JSONResponse(content=list(recordings))
@router.get(
    "/recordings/unavailable",
    response_model=list[dict],
    dependencies=[Depends(allow_any_authenticated())],
)
async def no_recordings(
    request: Request,
    params: MediaRecordingsAvailabilityQueryParams = Depends(),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Get time ranges with no recordings."""
    cameras = params.cameras
    if cameras != "all":
        requested = set(unquote(cameras).split(","))
        filtered = requested.intersection(allowed_cameras)
        if not filtered:
            return JSONResponse(content=[])
        camera_list = list(filtered)
    else:
        camera_list = allowed_cameras

    before = params.before or datetime.now().timestamp()
    after = params.after or (datetime.now() - timedelta(hours=1)).timestamp()
    scale = params.scale

    clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
    clauses.append((Recordings.camera << camera_list))

    # Get recording start times
    data: list[Recordings] = (
        Recordings.select(Recordings.start_time, Recordings.end_time)
        .where(reduce(operator.and_, clauses))
        .order_by(Recordings.start_time.asc())
        .dicts()
        .iterator()
    )

    # Convert recordings to a list of (start, end) tuples
    recordings = [(r["start_time"], r["end_time"]) for r in data]

    # Iterate through time segments and check if each has any recording
    no_recording_segments = []
    current = after
    current_gap_start = None

    while current < before:
        segment_end = min(current + scale, before)

        # Check if this segment overlaps with any recording
        has_recording = any(
            rec_start < segment_end and rec_end > current
            for rec_start, rec_end in recordings
        )

        if not has_recording:
            # This segment has no recordings
            if current_gap_start is None:
                current_gap_start = current  # Start a new gap
        else:
            # This segment has recordings
            if current_gap_start is not None:
                # End the current gap and append it
                no_recording_segments.append(
                    {"start_time": int(current_gap_start), "end_time": int(current)}
                )
                current_gap_start = None

        current = segment_end

    # Append the last gap if it exists
    if current_gap_start is not None:
        no_recording_segments.append(
            {"start_time": int(current_gap_start), "end_time": int(before)}
        )

    return JSONResponse(content=no_recording_segments)
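Editor's note: a toy run of the gap-walk above. Segments of `scale` seconds are checked against the recording intervals, and consecutive uncovered segments merge into one gap (data is illustrative):

```python
recordings = [(0, 50), (120, 200)]  # (start, end) pairs
after, before, scale = 0, 240, 30

gaps, current, gap_start = [], after, None
while current < before:
    seg_end = min(current + scale, before)
    covered = any(s < seg_end and e > current for s, e in recordings)
    if not covered and gap_start is None:
        gap_start = current            # start a new gap
    elif covered and gap_start is not None:
        gaps.append((gap_start, current))  # close the running gap
        gap_start = None
    current = seg_end
if gap_start is not None:
    gaps.append((gap_start, before))

print(gaps)  # [(60, 120), (210, 240)]
```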
@router.delete(
    "/recordings/start/{start}/end/{end}",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Delete recordings",
    description="""Deletes recordings within the specified time range.
    Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes.
    """,
)
async def delete_recordings(
    start: float = PathParam(..., description="Start timestamp (unix)"),
    end: float = PathParam(..., description="End timestamp (unix)"),
    params: RecordingsDeleteQueryParams = Depends(),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Delete recordings in the specified time range."""
    if start >= end:
        return JSONResponse(
            content={
                "success": False,
                "message": "Start time must be less than end time.",
            },
            status_code=400,
        )

    cameras = params.cameras

    if cameras != "all":
        requested = set(cameras.split(","))
        filtered = requested.intersection(allowed_cameras)

        if not filtered:
            return JSONResponse(
                content={
                    "success": False,
                    "message": "No valid cameras found in the request.",
                },
                status_code=400,
            )

        camera_list = list(filtered)
    else:
        camera_list = allowed_cameras

    # Parse keep parameter
    keep_set = set()

    if params.keep:
        keep_set = set(params.keep.split(","))

    # Build query to find overlapping recordings
    clauses = [
        (
            Recordings.start_time.between(start, end)
            | Recordings.end_time.between(start, end)
            | ((start > Recordings.start_time) & (end < Recordings.end_time))
        ),
        (Recordings.camera << camera_list),
    ]

    keep_clauses = []

    if "motion" in keep_set:
        keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0))

    if "object" in keep_set:
        keep_clauses.append(
            Recordings.objects.is_null(False) & (Recordings.objects > 0)
        )

    if "audio" in keep_set:
        keep_clauses.append(Recordings.dBFS.is_null(False))

    if keep_clauses:
        keep_condition = reduce(operator.or_, keep_clauses)
        clauses.append(~keep_condition)

    recordings_to_delete = (
        Recordings.select(Recordings.id, Recordings.path)
        .where(reduce(operator.and_, clauses))
        .dicts()
        .iterator()
    )

    recording_ids = []
    deleted_count = 0
    error_count = 0

    for recording in recordings_to_delete:
        recording_ids.append(recording["id"])

        try:
            Path(recording["path"]).unlink(missing_ok=True)
            deleted_count += 1
        except Exception as e:
            logger.error(f"Failed to delete recording file {recording['path']}: {e}")
            error_count += 1

    if recording_ids:
        max_deletes = 100000
        recording_ids_list = list(recording_ids)

        for i in range(0, len(recording_ids_list), max_deletes):
            Recordings.delete().where(
                Recordings.id << recording_ids_list[i : i + max_deletes]
            ).execute()

    message = f"Successfully deleted {deleted_count} recording(s)."

    if error_count > 0:
        message += f" {error_count} file deletion error(s) occurred."

    return JSONResponse(
        content={"success": True, "message": message},
        status_code=200,
    )
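Editor's note: an illustrative call to the delete endpoint above. The route and the `cameras`/`keep` parameters come from the handler; the base URL and camera name are assumptions, and admin authentication (required by the route) is omitted:

```python
import time

import requests

# Delete the last hour of front_door recordings, but keep segments that
# have motion or detected objects (the keep clauses are OR-ed, then
# negated and AND-ed into the delete query).
end = time.time()
start = end - 3600
requests.delete(
    f"http://frigate.local:5000/api/recordings/start/{start}/end/{end}",
    params={"cameras": "front_door", "keep": "motion,object"},
)
```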
@@ -33,7 +33,6 @@ from frigate.api.defs.response.review_response import (
     ReviewSummaryResponse,
 )
 from frigate.api.defs.tags import Tags
-from frigate.config import FrigateConfig
 from frigate.embeddings import EmbeddingsContext
 from frigate.models import Recordings, ReviewSegment, UserReviewStatus
 from frigate.review.types import SeverityEnum
@@ -747,9 +746,7 @@ async def set_not_reviewed(
     description="Use GenAI to summarize review items over a period of time.",
 )
 def generate_review_summary(request: Request, start_ts: float, end_ts: float):
-    config: FrigateConfig = request.app.frigate_config
-
-    if not config.genai.provider:
+    if not request.app.genai_manager.vision_client:
         return JSONResponse(
             content=(
                 {
@@ -8,7 +8,7 @@ from multiprocessing import Queue
 from multiprocessing.managers import DictProxy, SyncManager
 from multiprocessing.synchronize import Event as MpEvent
 from pathlib import Path
-from typing import Optional
+from typing import Callable, Optional
 
 import psutil
 import uvicorn
@@ -30,6 +30,7 @@ from frigate.comms.ws import WebSocketClient
 from frigate.comms.zmq_proxy import ZmqProxy
 from frigate.config.camera.updater import CameraConfigUpdatePublisher
 from frigate.config.config import FrigateConfig
+from frigate.config.profile_manager import ProfileManager
 from frigate.const import (
     CACHE_DIR,
     CLIPS_DIR,
@@ -43,10 +44,15 @@ from frigate.const import (
 )
 from frigate.data_processing.types import DataProcessorMetrics
 from frigate.db.sqlitevecq import SqliteVecQueueDatabase
+from frigate.debug_replay import (
+    DebugReplayManager,
+    cleanup_replay_cameras,
+)
 from frigate.embeddings import EmbeddingProcess, EmbeddingsContext
 from frigate.events.audio import AudioProcessor
 from frigate.events.cleanup import EventCleanup
 from frigate.events.maintainer import EventProcessor
+from frigate.jobs.motion_search import stop_all_motion_search_jobs
 from frigate.log import _stop_logging
 from frigate.models import (
     Event,
@@ -75,6 +81,7 @@ from frigate.timeline import TimelineProcessor
 from frigate.track.object_processing import TrackedObjectProcessor
 from frigate.util.builtin import empty_and_close_queue
 from frigate.util.image import UntrackedSharedMemory
+from frigate.util.process import FrigateProcess
 from frigate.util.services import set_file_limit
 from frigate.version import VERSION
 from frigate.watchdog import FrigateWatchdog
@@ -113,6 +120,7 @@ class FrigateApp:
         self.ptz_metrics: dict[str, PTZMetrics] = {}
         self.processes: dict[str, int] = {}
         self.embeddings: Optional[EmbeddingsContext] = None
+        self.profile_manager: Optional[ProfileManager] = None
         self.config = config
 
     def ensure_dirs(self) -> None:
@@ -139,6 +147,9 @@ class FrigateApp:
         else:
             logger.debug(f"Skipping directory: {d}")
 
+    def init_debug_replay_manager(self) -> None:
+        self.replay_manager = DebugReplayManager()
+
     def init_camera_metrics(self) -> None:
         # create camera_metrics
         for camera_name in self.config.cameras.keys():
@@ -341,6 +352,19 @@ class FrigateApp:
             comms,
         )
 
+    def init_profile_manager(self) -> None:
+        self.profile_manager = ProfileManager(
+            self.config, self.inter_config_updater, self.dispatcher
+        )
+        self.dispatcher.profile_manager = self.profile_manager
+
+        persisted = ProfileManager.load_persisted_profile()
+        if persisted and any(
+            persisted in cam.profiles for cam in self.config.cameras.values()
+        ):
+            logger.info("Restoring persisted profile '%s'", persisted)
+            self.profile_manager.activate_profile(persisted)
+
     def start_detectors(self) -> None:
         for name in self.config.cameras.keys():
             try:
@@ -474,6 +498,47 @@ class FrigateApp:
 
     def start_watchdog(self) -> None:
         self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
+
+        # (attribute on self, key in self.processes, factory)
+        specs: list[tuple[str, str, Callable[[], FrigateProcess]]] = [
+            (
+                "embedding_process",
+                "embeddings",
+                lambda: EmbeddingProcess(
+                    self.config, self.embeddings_metrics, self.stop_event
+                ),
+            ),
+            (
+                "recording_process",
+                "recording",
+                lambda: RecordProcess(self.config, self.stop_event),
+            ),
+            (
+                "review_segment_process",
+                "review_segment",
+                lambda: ReviewProcess(self.config, self.stop_event),
+            ),
+            (
+                "output_processor",
+                "output",
+                lambda: OutputProcess(self.config, self.stop_event),
+            ),
+        ]
+
+        for attr, key, factory in specs:
+            if not hasattr(self, attr):
+                continue
+
+            def on_restart(
+                proc: FrigateProcess, _attr: str = attr, _key: str = key
+            ) -> None:
+                setattr(self, _attr, proc)
+                self.processes[_key] = proc.pid or 0
+
+            self.frigate_watchdog.register(
+                key, getattr(self, attr), factory, on_restart
+            )
+
         self.frigate_watchdog.start()
 
     def init_auth(self) -> None:
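Editor's note: the `on_restart` callback above binds `_attr` and `_key` as default arguments for a reason worth spelling out. Python closures capture loop variables by reference, so without the defaults every callback registered in the loop would see the *last* `(attr, key)` pair. A minimal demonstration:

```python
callbacks = []
for key in ("a", "b"):
    def late():           # captures `key` by reference
        return key
    def bound(_key=key):  # default arg freezes the current value
        return _key
    callbacks.append((late, bound))

print([f() for f, _ in callbacks])  # ['b', 'b']  (late binding)
print([g() for _, g in callbacks])  # ['a', 'b']  (default-arg binding)
```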
@@ -531,6 +596,7 @@ class FrigateApp:
         set_file_limit()
 
         # Start frigate services.
+        self.init_debug_replay_manager()
         self.init_camera_metrics()
         self.init_queues()
         self.init_database()
@@ -541,9 +607,14 @@ class FrigateApp:
         self.init_embeddings_manager()
         self.bind_database()
         self.check_db_data_migrations()
+
+        # Clean up any stale replay camera artifacts (filesystem + DB)
+        cleanup_replay_cameras()
+
         self.init_inter_process_communicator()
         self.start_detectors()
         self.init_dispatcher()
+        self.init_profile_manager()
         self.init_embeddings_client()
         self.start_video_output_processor()
         self.start_ptz_autotracker()
@@ -572,6 +643,9 @@ class FrigateApp:
                 self.stats_emitter,
                 self.event_metadata_updater,
                 self.inter_config_updater,
+                self.replay_manager,
+                self.dispatcher,
+                self.profile_manager,
             ),
             host="127.0.0.1",
             port=5001,
@@ -586,6 +660,9 @@ class FrigateApp:
         # used by the docker healthcheck
         Path("/dev/shm/.frigate-is-stopping").touch()
 
+        # Cancel any running motion search jobs before setting stop_event
+        stop_all_motion_search_jobs()
+
         self.stop_event.set()
 
         # set an end_time on entries without an end_time before exiting
@@ -637,6 +714,7 @@ class FrigateApp:
         self.record_cleanup.join()
         self.stats_emitter.join()
         self.frigate_watchdog.join()
+        self.camera_maintainer.join()
         self.db.stop()
 
         # Save embeddings stats to disk
@@ -19,6 +19,8 @@ class CameraMetrics:
     process_pid: Synchronized
     capture_process_pid: Synchronized
     ffmpeg_pid: Synchronized
+    reconnects_last_hour: Synchronized
+    stalls_last_hour: Synchronized
 
     def __init__(self, manager: SyncManager):
         self.camera_fps = manager.Value("d", 0)
@@ -35,6 +37,8 @@ class CameraMetrics:
         self.process_pid = manager.Value("i", 0)
         self.capture_process_pid = manager.Value("i", 0)
         self.ffmpeg_pid = manager.Value("i", 0)
+        self.reconnects_last_hour = manager.Value("i", 0)
+        self.stalls_last_hour = manager.Value("i", 0)
 
 
 class PTZMetrics: