Compare commits

...

14 Commits

Author SHA1 Message Date
ivanshi1108
d8ab8c371e
Merge acb17a7b50 into c136e5e8bd 2025-12-04 19:19:11 +01:00
Nicolas Mowen
c136e5e8bd
Miscellaneous fixes (#21141)
* Remove source_type from API

* Don't require state classification models to select all classes

* Specifically validate provided end_time for manual events

* Remove yolov9 specification from warning

* Remove warning for coral

* clarify zone name tip

* clarify replace rules in lpr docs

* remove periods

* Add explanation for review report

* adjust HLS gap controller params

defaults to false; should help recover from hangs and stalling in tracking details videos on Chrome

* only redirect to login page once on 401

attempt to fix the iOS PWA Safari redirect storm

* Use contextual information from other cameras to inform report summary

* Formatting and prompt improvements for review summary report

* More improvements to prompt

* Remove examples

* Don't show admin action buttons on export card

* fix redirect race condition

Coordinate 401 redirect logic between ApiProvider and ProtectedRoute using a shared flag to prevent the multiple simultaneous redirects that caused UI flashing. Ensure both auth error paths check and set the redirect flag before navigating to login, eliminating the race condition where both mechanisms could trigger at once.

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-12-04 12:19:07 -06:00
shizhicheng
acb17a7b50 Format code based on the results of Python Checks
2025-12-01 04:47:39 +00:00
ivanshi1108
7933a83a42
Update docs/docs/configuration/object_detectors.md
Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2025-11-24 23:04:19 +08:00
shizhicheng
2eef58aa1d Modify the description of AXERA in the documentation. 2025-11-24 07:04:42 +00:00
ivanshi1108
6659b7cb0f
Merge branch 'dev' into AXERA-axcl 2025-11-24 10:55:09 +08:00
shizhicheng
f134796913 format code with ruff 2025-11-24 02:42:04 +00:00
shizhicheng
b4abbd7d3b Modify the document based on review suggestions 2025-11-24 02:20:40 +00:00
shizhicheng
438df7d484 The model inference time has been changed to the time displayed on the Frigate UI 2025-11-16 22:22:38 +08:00
shizhicheng
e27a94ae0b Fix logical errors caused by code formatting 2025-11-11 05:54:19 +00:00
shizhicheng
1dee548dbc Modifications to the YOLOv9 object detection model:
The model is now dynamically downloaded to the cache directory.
Post-processing is now done using Frigate's built-in `post_process_yolo`.
Configuration in the relevant documentation has been updated.
2025-11-11 05:42:28 +00:00
shizhicheng
91e17e12b7 Change the default detection model to YOLOv9 2025-11-09 13:21:17 +00:00
ivanshi1108
bb45483e9e
Modify AXERA section in hardware.md
Modify AXERA section and related content in the hardware documentation.
2025-10-28 09:54:00 +08:00
shizhicheng
7b4eaf2d10 Initial commit for AXERA AI accelerators 2025-10-24 09:03:13 +00:00
25 changed files with 1165 additions and 678 deletions


@@ -224,3 +224,29 @@ jobs:
           sources: |
             ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }}-amd64
             ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }}-rpi
+  axera_build:
+    runs-on: ubuntu-22.04
+    name: AXERA Build
+    needs:
+      - amd64_build
+      - arm64_build
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v5
+        with:
+          persist-credentials: false
+      - name: Set up QEMU and Buildx
+        id: setup
+        uses: ./.github/actions/setup
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build and push Axera build
+        uses: docker/bake-action@v6
+        with:
+          source: .
+          push: true
+          targets: axcl
+          files: docker/axcl/axcl.hcl
+          set: |
+            axcl.tags=${{ steps.setup.outputs.image-name }}-axcl
+            *.cache-from=type=gha

docker/axcl/Dockerfile Normal file

@@ -0,0 +1,55 @@
# syntax=docker/dockerfile:1.6

# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive

# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1

FROM frigate AS frigate-axcl

ARG TARGETARCH
ARG PIP_BREAK_SYSTEM_PACKAGES

# Install axengine (pyaxengine)
RUN wget https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3.rc1/axengine-0.1.3-py3-none-any.whl -O /axengine-0.1.3-py3-none-any.whl
RUN pip3 install -i https://mirrors.aliyun.com/pypi/simple/ /axengine-0.1.3-py3-none-any.whl \
    && rm /axengine-0.1.3-py3-none-any.whl

# Install axcl
RUN if [ "$TARGETARCH" = "amd64" ]; then \
        echo "Installing x86_64 version of axcl"; \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb -O /axcl.deb; \
    else \
        echo "Installing aarch64 version of axcl"; \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb -O /axcl.deb; \
    fi
RUN mkdir /unpack_axcl && \
    dpkg-deb -x /axcl.deb /unpack_axcl && \
    cp -R /unpack_axcl/usr/bin/axcl /usr/bin/ && \
    cp -R /unpack_axcl/usr/lib/axcl /usr/lib/ && \
    rm -rf /unpack_axcl /axcl.deb

# Install axcl ffmpeg
RUN mkdir -p /usr/lib/ffmpeg/axcl
RUN if [ "$TARGETARCH" = "amd64" ]; then \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffmpeg-x64 -O /usr/lib/ffmpeg/axcl/ffmpeg && \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffprobe-x64 -O /usr/lib/ffmpeg/axcl/ffprobe; \
    else \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffmpeg-aarch64 -O /usr/lib/ffmpeg/axcl/ffmpeg && \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffprobe-aarch64 -O /usr/lib/ffmpeg/axcl/ffprobe; \
    fi
RUN chmod +x /usr/lib/ffmpeg/axcl/ffmpeg /usr/lib/ffmpeg/axcl/ffprobe

# Set ldconfig path
RUN echo "/usr/lib/axcl" > /etc/ld.so.conf.d/ax.conf

# Set env
ENV PATH="$PATH:/usr/bin/axcl"
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib/axcl"
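# Run ldconfig at container start so the freshly copied AXCL libraries are registered before handing off to init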
ENTRYPOINT ["sh", "-c", "ldconfig && exec /init"]

docker/axcl/axcl.hcl Normal file

@@ -0,0 +1,13 @@
target frigate {
  dockerfile = "docker/main/Dockerfile"
  platforms  = ["linux/amd64", "linux/arm64"]
  target     = "frigate"
}

target axcl {
  dockerfile = "docker/axcl/Dockerfile"
  contexts = {
    frigate = "target:frigate",
  }
  platforms = ["linux/amd64", "linux/arm64"]
}

docker/axcl/axcl.mk Normal file

@@ -0,0 +1,15 @@
BOARDS += axcl

local-axcl: version
	docker buildx bake --file=docker/axcl/axcl.hcl axcl \
		--set axcl.tags=frigate:latest-axcl \
		--load

build-axcl: version
	docker buildx bake --file=docker/axcl/axcl.hcl axcl \
		--set axcl.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-axcl

push-axcl: build-axcl
	docker buildx bake --file=docker/axcl/axcl.hcl axcl \
		--set axcl.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-axcl \
		--push


@@ -0,0 +1,83 @@
#!/bin/bash

# Update package list and install dependencies
sudo apt-get update
sudo apt-get install -y build-essential cmake git wget pciutils kmod udev

# Check if gcc-12 is needed
current_gcc_version=$(gcc --version | head -n1 | awk '{print $NF}')
gcc_major_version=$(echo $current_gcc_version | cut -d'.' -f1)

if [[ $gcc_major_version -lt 12 ]]; then
    echo "Current GCC version ($current_gcc_version) is lower than 12, installing gcc-12..."
    sudo apt-get install -y gcc-12
    sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12
    echo "GCC-12 installed and set as default"
else
    echo "Current GCC version ($current_gcc_version) is sufficient, skipping GCC installation"
fi

# Determine architecture
arch=$(uname -m)
download_url=""

if [[ $arch == "x86_64" ]]; then
    download_url="https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb"
    deb_file="axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb"
elif [[ $arch == "aarch64" ]]; then
    download_url="https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb"
    deb_file="axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb"
else
    echo "Unsupported architecture: $arch"
    exit 1
fi

# Download AXCL driver
echo "Downloading AXCL driver for $arch..."
wget "$download_url" -O "$deb_file"
if [ $? -ne 0 ]; then
    echo "Failed to download AXCL driver"
    exit 1
fi

# Install AXCL driver
echo "Installing AXCL driver..."
sudo dpkg -i "$deb_file"
if [ $? -ne 0 ]; then
    echo "Failed to install AXCL driver, attempting to fix dependencies..."
    sudo apt-get install -f -y
    sudo dpkg -i "$deb_file"
    if [ $? -ne 0 ]; then
        echo "AXCL driver installation failed"
        exit 1
    fi
fi

# Update environment
echo "Updating environment..."
source /etc/profile

# Verify installation
echo "Verifying AXCL installation..."
if command -v axcl-smi &> /dev/null; then
    echo "AXCL driver detected, checking AI accelerator status..."
    axcl_output=$(axcl-smi 2>&1)
    axcl_exit_code=$?
    echo "$axcl_output"
    if [ $axcl_exit_code -eq 0 ]; then
        echo "AXCL driver installation completed successfully!"
    else
        echo "AXCL driver installed but no AI accelerator detected or communication failed."
        echo "Please check if the AI accelerator is properly connected and powered on."
        exit 1
    fi
else
    echo "axcl-smi command not found. AXCL driver installation may have failed."
    exit 1
fi


@@ -111,3 +111,9 @@ review:
 ## Review Reports

 Along with individual review item summaries, Generative AI provides the ability to request a report of a given time period. For example, you can get a daily report while on a vacation of any suspicious activity or other concerns that may require review.
+
+### Requesting Reports Programmatically
+
+Review reports can be requested via the [API](/integrations/api#review-summarization) by sending a POST request to `/api/review/summarize/start/{start_ts}/end/{end_ts}` with Unix timestamps.
+
+For Home Assistant users, there is a built-in service (`frigate.generate_review_summary`) that makes it easy to request review reports as part of automations or scripts. This allows you to automatically generate daily summaries, vacation reports, or custom time period reports based on your specific needs.
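For example, a report covering the last 24 hours could be requested with a short script. This is a sketch: the endpoint path comes from the docs above, while the host, port, and absence of authentication are assumptions for illustration.

```python
import time

import requests

FRIGATE_URL = "http://frigate.local:5000"  # assumed host/port

end_ts = int(time.time())
start_ts = end_ts - 24 * 60 * 60  # last 24 hours

# Ask Frigate to start generating a review report for the time window
resp = requests.post(
    f"{FRIGATE_URL}/api/review/summarize/start/{start_ts}/end/{end_ts}"
)
resp.raise_for_status()
print(resp.json())
```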


@@ -107,7 +107,7 @@ Fine-tune the LPR feature using these optional parameters at the global level of
 ### Normalization Rules

-- **`replace_rules`**: List of regex replacement rules to normalize detected plates. These rules are applied sequentially. Each rule must have a `pattern` (which can be a string or a regex, prepended by `r`) and `replacement` (a string, which also supports [backrefs](https://docs.python.org/3/library/re.html#re.sub) like `\1`). These rules are useful for dealing with common OCR issues like noise characters, separators, or confusions (e.g., 'O'→'0').
+- **`replace_rules`**: List of regex replacement rules to normalize detected plates. These rules are applied sequentially and are applied _before_ the `format` regex, if specified. Each rule must have a `pattern` (which can be a string or a regex, prepended by `r`) and `replacement` (a string, which also supports [backrefs](https://docs.python.org/3/library/re.html#re.sub) like `\1`). These rules are useful for dealing with common OCR issues like noise characters, separators, or confusions (e.g., 'O'→'0').

 These rules must be defined at the global level of your `lpr` config.
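To make the sequential behavior concrete, here is how such rules compose in plain Python `re.sub`; the rules themselves are hypothetical, and this is not Frigate's actual LPR code:

```python
import re

# Hypothetical rules in the shape the docs describe: applied in order,
# each with a pattern and a replacement (backrefs like \1 supported).
replace_rules = [
    {"pattern": r"[\s·-]", "replacement": ""},  # strip separators/noise
    {"pattern": "O", "replacement": "0"},       # common OCR confusion
    {"pattern": r"^([A-Z]{2})(\d+)$", "replacement": r"\1-\2"},
]

plate = "AB O12 3"
for rule in replace_rules:
    plate = re.sub(rule["pattern"], rule["replacement"], plate)

print(plate)  # -> AB-0123
```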


@@ -49,6 +49,11 @@ Frigate supports multiple different detectors that work on different types of ha
 - [Synaptics](#synaptics): synap models can run on Synaptics devices(e.g astra machina) with included NPUs.

+**AXERA** <CommunityBadge />
+
+- [AXEngine](#axera): axmodels can run on AXERA AI acceleration.
+
 **For Testing**

 - [CPU Detector (not recommended for actual use](#cpu-detector-not-recommended): Use a CPU to run tflite model, this is not recommended and in most cases OpenVINO can be used in CPU mode with better results.
@@ -164,12 +169,6 @@ A Tensorflow Lite is provided in the container at `/openvino-model/ssdlite_mobil
 <details>
 <summary>YOLOv9 Setup & Config</summary>

-:::warning
-
-If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
-
-:::
-
 After placing the downloaded files for the tflite model and labels in your config folder, you can use the following configuration:

 ```yaml
@@ -408,7 +407,7 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv
 :::warning

-If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
+If you are using a Frigate+ model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.

 :::
@@ -748,7 +747,7 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv
 :::warning

-If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
+If you are using a Frigate+ model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.

 :::
@@ -1482,6 +1481,41 @@ model:
   input_pixel_format: rgb/bgr # look at the model.json to figure out which to put here
 ```

+## AXERA
+
+Hardware accelerated object detection is supported on the following SoCs:
+
+- AX650N
+- AX8850N
+
+This implementation uses the [AXera Pulsar2 Toolchain](https://huggingface.co/AXERA-TECH/Pulsar2).
+
+See the [installation docs](../frigate/installation.md#axera) for information on configuring the AXEngine hardware.
+
+### Configuration
+
+When configuring the AXEngine detector, you must specify the model name.
+
+#### yolov9
+
+A yolov9 model is provided in the container at `/axmodels` and is used by this detector type by default.
+
+Use the model configuration shown below when using the axengine detector with the default axmodel:
+
+```yaml
+detectors:
+  axengine:
+    type: axengine
+
+model:
+  path: frigate-yolov9-tiny
+  model_type: yolo-generic
+  width: 320
+  height: 320
+  tensor_format: bgr
+  labelmap_path: /labelmap/coco-80.txt
+```
+
 # Models

 Some model types are not included in Frigate by default.


@@ -104,6 +104,10 @@ Frigate supports multiple different detectors that work on different types of ha
 - [Synaptics](#synaptics): synap models can run on Synaptics devices(e.g astra machina) with included NPUs to provide efficient object detection.

+**AXERA** <CommunityBadge />
+
+- [AXEngine](#axera): axera models can run on AXERA NPUs via AXEngine, delivering highly efficient object detection.
+
 :::

 ### Hailo-8
@@ -287,6 +291,14 @@ The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms fo
 | ssd mobilenet | ~ 25 ms  |
 | yolov5m       | ~ 118 ms |

+### AXERA
+
+- **AXEngine**: default model is **yolov9**
+
+| Name        | AXERA AX650N/AX8850N Inference Time |
+| ----------- | ----------------------------------- |
+| yolov9-tiny | ~ 4 ms                              |
+
 ## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)

 This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.


@@ -287,6 +287,42 @@ or add these options to your `docker run` command:
 Next, you should configure [hardware object detection](/configuration/object_detectors#synaptics) and [hardware video processing](/configuration/hardware_acceleration_video#synaptics).

+### AXERA
+
+<details>
+<summary>AXERA accelerators</summary>
+
+AXERA accelerators are available in an M.2 form factor, compatible with both Raspberry Pi and Orange Pi. This form factor has also been successfully tested on x86 platforms, making it a versatile choice for various computing environments.
+
+#### Installation
+
+Using AXERA accelerators requires the installation of the AXCL driver. We provide a convenient Linux script to complete this installation.
+
+Follow these steps for installation:
+
+1. Copy or download [this script](https://github.com/ivanshi1108/assets/releases/download/v0.16.2/user_installation.sh).
+2. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
+3. Run the script with `./user_installation.sh`
+
+#### Setup
+
+To set up Frigate, follow the default installation instructions, for example: `ghcr.io/blakeblackshear/frigate:stable`
+
+Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file:
+
+```yaml
+devices:
+  - /dev/axcl_host
+  - /dev/ax_mmb_dev
+  - /dev/msg_userdev
+```
+
+If you are using `docker run`, add these options to your command: `--device /dev/axcl_host --device /dev/ax_mmb_dev --device /dev/msg_userdev`
+
+#### Configuration
+
+Finally, configure [hardware object detection](/configuration/object_detectors#axera) to complete the setup.
+
+</details>
+
 ## Docker

 Running through Docker with Docker Compose is the recommended install method.

File diff suppressed because it is too large.


@@ -29,7 +29,6 @@ class EventsDescriptionBody(BaseModel):
 class EventsCreateBody(BaseModel):
-    source_type: Optional[str] = "api"
     sub_label: Optional[str] = None
     score: Optional[float] = 0
     duration: Optional[int] = 30


@@ -346,7 +346,7 @@ def events(
     "/events/explore",
     response_model=list[EventResponse],
     dependencies=[Depends(allow_any_authenticated())],
-    summary="Get summary of objects.",
+    summary="Get summary of objects",
     description="""Gets a summary of objects from the database.
     Returns a list of objects with a max of `limit` objects for each label.
     """,
@@ -439,7 +439,7 @@ def events_explore(
     "/event_ids",
     response_model=list[EventResponse],
     dependencies=[Depends(allow_any_authenticated())],
-    summary="Get events by ids.",
+    summary="Get events by ids",
     description="""Gets events by a list of ids.
     Returns a list of events.
     """,
@@ -473,7 +473,7 @@ async def event_ids(ids: str, request: Request):
 @router.get(
     "/events/search",
     dependencies=[Depends(allow_any_authenticated())],
-    summary="Search events.",
+    summary="Search events",
     description="""Searches for events in the database.
     Returns a list of events.
     """,
@@ -924,7 +924,7 @@ def events_summary(
     "/events/{event_id}",
     response_model=EventResponse,
     dependencies=[Depends(allow_any_authenticated())],
-    summary="Get event by id.",
+    summary="Get event by id",
     description="Gets an event by its id.",
 )
 async def event(event_id: str, request: Request):
@@ -968,7 +968,7 @@ def set_retain(event_id: str):
     "/events/{event_id}/plus",
     response_model=EventUploadPlusResponse,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Send event to Frigate+.",
+    summary="Send event to Frigate+",
     description="""Sends an event to Frigate+.
     Returns a success message or an error if the event is not found.
     """,
@@ -1207,7 +1207,7 @@ async def false_positive(request: Request, event_id: str):
     "/events/{event_id}/retain",
     response_model=GenericResponse,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Stop event from being retained indefinitely.",
+    summary="Stop event from being retained indefinitely",
     description="""Stops an event from being retained indefinitely.
     Returns a success message or an error if the event is not found.
     NOTE: This is a legacy endpoint and is not supported in the frontend.
@@ -1236,7 +1236,7 @@ async def delete_retain(event_id: str, request: Request):
     "/events/{event_id}/sub_label",
     response_model=GenericResponse,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Set event sub label.",
+    summary="Set event sub label",
     description="""Sets an event's sub label.
     Returns a success message or an error if the event is not found.
     """,
@@ -1295,7 +1295,7 @@ async def set_sub_label(
     "/events/{event_id}/recognized_license_plate",
     response_model=GenericResponse,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Set event license plate.",
+    summary="Set event license plate",
     description="""Sets an event's license plate.
     Returns a success message or an error if the event is not found.
     """,
@@ -1355,7 +1355,7 @@ async def set_plate(
     "/events/{event_id}/description",
     response_model=GenericResponse,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Set event description.",
+    summary="Set event description",
     description="""Sets an event's description.
     Returns a success message or an error if the event is not found.
     """,
@@ -1411,7 +1411,7 @@ async def set_description(
     "/events/{event_id}/description/regenerate",
     response_model=GenericResponse,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Regenerate event description.",
+    summary="Regenerate event description",
     description="""Regenerates an event's description.
     Returns a success message or an error if the event is not found.
     """,
@@ -1463,8 +1463,8 @@ async def regenerate_description(
 @router.post(
     "/description/generate",
     response_model=GenericResponse,
-    # dependencies=[Depends(require_role(["admin"]))],
-    summary="Generate description embedding.",
+    dependencies=[Depends(require_role(["admin"]))],
+    summary="Generate description embedding",
     description="""Generates an embedding for an event's description.
     Returns a success message or an error if the event is not found.
     """,
@@ -1529,7 +1529,7 @@ async def delete_single_event(event_id: str, request: Request) -> dict:
     "/events/{event_id}",
     response_model=GenericResponse,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Delete event.",
+    summary="Delete event",
     description="""Deletes an event from the database.
     Returns a success message or an error if the event is not found.
     """,
@@ -1544,7 +1544,7 @@ async def delete_event(request: Request, event_id: str):
     "/events/",
     response_model=EventMultiDeleteResponse,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Delete events.",
+    summary="Delete events",
     description="""Deletes a list of events from the database.
     Returns a success message or an error if the events are not found.
     """,
@@ -1578,7 +1578,7 @@ async def delete_events(request: Request, body: EventsDeleteBody):
     "/events/{camera_name}/{label}/create",
     response_model=EventCreateResponse,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Create manual event.",
+    summary="Create manual event",
     description="""Creates a manual event in the database.
     Returns a success message or an error if the event is not found.
     NOTES:
@@ -1620,7 +1620,7 @@ def create_event(
             body.score,
             body.sub_label,
             body.duration,
-            body.source_type,
+            "api",
             body.draw,
         ),
         EventMetadataTypeEnum.manual_event_create.value,
@@ -1642,7 +1642,7 @@ def create_event(
     "/events/{event_id}/end",
     response_model=GenericResponse,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="End manual event.",
+    summary="End manual event",
     description="""Ends a manual event.
     Returns a success message or an error if the event is not found.
     NOTE: This should only be used for manual events.
@@ -1652,10 +1652,27 @@ async def end_event(request: Request, event_id: str, body: EventsEndBody):
     try:
         event: Event = Event.get(Event.id == event_id)
         await require_camera_access(event.camera, request=request)
+
+        if body.end_time is not None and body.end_time < event.start_time:
+            return JSONResponse(
+                content=(
+                    {
+                        "success": False,
+                        "message": f"end_time ({body.end_time}) cannot be before start_time ({event.start_time}).",
+                    }
+                ),
+                status_code=400,
+            )
+
         end_time = body.end_time or datetime.datetime.now().timestamp()
         request.app.event_metadata_updater.publish(
             (event_id, end_time), EventMetadataTypeEnum.manual_event_end.value
         )
+    except DoesNotExist:
+        return JSONResponse(
+            content=({"success": False, "message": f"Event {event_id} not found."}),
+            status_code=404,
+        )
     except Exception:
         return JSONResponse(
             content=(
@@ -1674,7 +1691,7 @@ async def end_event(request: Request, event_id: str, body: EventsEndBody):
     "/trigger/embedding",
     response_model=dict,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Create trigger embedding.",
+    summary="Create trigger embedding",
     description="""Creates a trigger embedding for a specific trigger.
     Returns a success message or an error if the trigger is not found.
     """,
@@ -1832,7 +1849,7 @@ def create_trigger_embedding(
     "/trigger/embedding/{camera_name}/{name}",
     response_model=dict,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Update trigger embedding.",
+    summary="Update trigger embedding",
     description="""Updates a trigger embedding for a specific trigger.
     Returns a success message or an error if the trigger is not found.
     """,
@@ -1997,7 +2014,7 @@ def update_trigger_embedding(
     "/trigger/embedding/{camera_name}/{name}",
     response_model=dict,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Delete trigger embedding.",
+    summary="Delete trigger embedding",
     description="""Deletes a trigger embedding for a specific trigger.
     Returns a success message or an error if the trigger is not found.
     """,
@@ -2071,7 +2088,7 @@ def delete_trigger_embedding(
     "/triggers/status/{camera_name}",
     response_model=dict,
     dependencies=[Depends(require_role(["admin"]))],
-    summary="Get triggers status.",
+    summary="Get triggers status",
     description="""Gets the status of all triggers for a specific camera.
     Returns a success message or an error if the camera is not found.
     """,


@@ -209,10 +209,22 @@ class ReviewDescriptionProcessor(PostProcessorApi):
         logger.debug(
             f"Found GenAI Review Summary request for {start_ts} to {end_ts}"
         )
-        items: list[dict[str, Any]] = [
-            r["data"]["metadata"]
+        # Query all review segments with camera and time information
+        segments: list[dict[str, Any]] = [
+            {
+                "camera": r["camera"].replace("_", " ").title(),
+                "start_time": r["start_time"],
+                "end_time": r["end_time"],
+                "metadata": r["data"]["metadata"],
+            }
             for r in (
-                ReviewSegment.select(ReviewSegment.data)
+                ReviewSegment.select(
+                    ReviewSegment.camera,
+                    ReviewSegment.start_time,
+                    ReviewSegment.end_time,
+                    ReviewSegment.data,
+                )
                 .where(
                     (ReviewSegment.data["metadata"].is_null(False))
                     & (ReviewSegment.start_time < end_ts)
@@ -224,21 +236,66 @@ class ReviewDescriptionProcessor(PostProcessorApi):
             )
         ]

-        if len(items) == 0:
+        if len(segments) == 0:
             logger.debug("No review items with metadata found during time period")
-            return "No activity was found during this time."
+            return "No activity was found during this time period."

-        important_items = list(
-            filter(
-                lambda item: item.get("potential_threat_level", 0) > 0
-                or item.get("other_concerns"),
-                items,
-            )
-        )
+        # Identify primary items (important items that need review)
+        primary_segments = [
+            seg
+            for seg in segments
+            if seg["metadata"].get("potential_threat_level", 0) > 0
+            or seg["metadata"].get("other_concerns")
+        ]

-        if not important_items:
+        if not primary_segments:
             return "No concerns were found during this time period."

+        # For each primary segment, find overlapping contextual items from other cameras
+        all_items_for_summary = []
+        for primary_seg in primary_segments:
+            # Add the primary item with marker
+            primary_item = copy.deepcopy(primary_seg["metadata"])
+            primary_item["_is_primary"] = True
+            primary_item["_camera"] = primary_seg["camera"]
+            all_items_for_summary.append(primary_item)
+
+            # Find overlapping contextual items from other cameras
+            primary_start = primary_seg["start_time"]
+            primary_end = primary_seg["end_time"]
+            primary_camera = primary_seg["camera"]
+
+            for seg in segments:
+                seg_camera = seg["camera"]
+                if seg_camera == primary_camera:
+                    continue
+                if seg in primary_segments:
+                    continue
+
+                seg_start = seg["start_time"]
+                seg_end = seg["end_time"]
+                if seg_start < primary_end and primary_start < seg_end:
+                    contextual_item = copy.deepcopy(seg["metadata"])
+                    contextual_item["_is_primary"] = False
+                    contextual_item["_camera"] = seg_camera
+                    contextual_item["_related_to_camera"] = primary_camera
+
+                    if not any(
+                        item.get("_camera") == seg_camera
+                        and item.get("time") == contextual_item.get("time")
+                        for item in all_items_for_summary
+                    ):
+                        all_items_for_summary.append(contextual_item)
+
+        logger.debug(
+            f"Summary includes {len(primary_segments)} primary items and "
+            f"{len(all_items_for_summary) - len(primary_segments)} contextual items"
+        )
+
         if self.config.review.genai.debug_save_thumbnails:
             Path(
                 os.path.join(CLIPS_DIR, "genai-requests", f"{start_ts}-{end_ts}")
@@ -247,7 +304,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
         return self.genai_client.generate_review_summary(
             start_ts,
             end_ts,
-            important_items,
+            all_items_for_summary,
             self.config.review.genai.debug_save_thumbnails,
         )
     else:
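The contextual matching above relies on the standard open-interval overlap test: two time ranges overlap exactly when each one starts before the other ends. A minimal illustration (hypothetical helper, not part of the diff):

```python
def overlaps(a_start: float, a_end: float, b_start: float, b_end: float) -> bool:
    # Mirrors `seg_start < primary_end and primary_start < seg_end` above
    return a_start < b_end and b_start < a_end

assert overlaps(10.0, 20.0, 15.0, 25.0)      # partial overlap
assert overlaps(10.0, 20.0, 12.0, 18.0)      # containment
assert not overlaps(10.0, 20.0, 20.0, 30.0)  # merely touching endpoints
```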


@@ -0,0 +1,86 @@
import logging
import os.path
import re
import urllib.request
from typing import Literal

import axengine as axe

from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
from frigate.util.model import post_process_yolo

logger = logging.getLogger(__name__)

DETECTOR_KEY = "axengine"

supported_models = {
    ModelTypeEnum.yologeneric: "frigate-yolov9-.*$",
}

model_cache_dir = os.path.join(MODEL_CACHE_DIR, "axengine_cache/")


class AxengineDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]


class Axengine(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, config: AxengineDetectorConfig):
        logger.info("__init__ axengine")
        super().__init__(config)
        self.height = config.model.height
        self.width = config.model.width
        model_path = config.model.path or "frigate-yolov9-tiny"
        model_props = self.parse_model_input(model_path)
        self.session = axe.InferenceSession(model_props["path"])

    def __del__(self):
        pass

    def parse_model_input(self, model_path):
        model_props = {}
        model_props["preset"] = True

        model_matched = False
        for model_type, pattern in supported_models.items():
            if re.match(pattern, model_path):
                model_matched = True
                model_props["model_type"] = model_type

        if model_matched:
            model_props["filename"] = model_path + ".axmodel"
            model_props["path"] = model_cache_dir + model_props["filename"]
            if not os.path.isfile(model_props["path"]):
                self.download_model(model_props["filename"])
        else:
            supported_models_str = ", ".join(model[1:-1] for model in supported_models)
            raise Exception(
                f"Model {model_path} is unsupported. Provide your own model or choose one of the following: {supported_models_str}"
            )

        return model_props

    def download_model(self, filename):
        if not os.path.isdir(model_cache_dir):
            os.mkdir(model_cache_dir)

        GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
        urllib.request.urlretrieve(
            f"{GITHUB_ENDPOINT}/ivanshi1108/assets/releases/download/v0.16.2/{filename}",
            model_cache_dir + filename,
        )

    def detect_raw(self, tensor_input):
        results = None
        results = self.session.run(None, {"images": tensor_input})

        if self.detector_config.model.model_type == ModelTypeEnum.yologeneric:
            return post_process_yolo(results, self.width, self.height)
        else:
            raise ValueError(
                f'Model type "{self.detector_config.model.model_type}" is currently not supported.'
            )
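For orientation, here is a rough sketch of the contract this detector fulfills. The standalone construction, the config kwargs, and the input layout are assumptions for illustration; in practice Frigate's plugin framework instantiates the detector from the `detectors:` config shown in the docs above.

```python
import numpy as np

# Hypothetical standalone use of the Axengine class above; Frigate normally
# builds the config from YAML, so these kwargs are assumed.
config = AxengineDetectorConfig(
    type="axengine",
    model={
        "path": "frigate-yolov9-tiny",
        "model_type": "yolo-generic",
        "width": 320,
        "height": 320,
    },
)
detector = Axengine(config)

# Frigate detectors receive a frame-sized tensor and return a fixed-size
# (20, 6) array of [class_id, score, y_min, x_min, y_max, x_max] rows.
frame = np.zeros((1, 320, 320, 3), dtype=np.uint8)
detections = detector.detect_raw(frame)
print(detections.shape)
```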


@@ -185,44 +185,66 @@ Each line represents a detection state, not necessarily unique individuals. Pare
     timeline_summary_prompt = f"""
 You are a security officer.
 Time range: {time_range}.
-Input: JSON list with "title", "scene", "confidence", "potential_threat_level" (1-2), "other_concerns".
+Input: JSON list with "title", "scene", "confidence", "potential_threat_level" (0-2), "other_concerns", "_is_primary", "_camera".
 Task: Write a concise, human-presentable security report in markdown format.
+CRITICAL - Understanding Primary vs Contextual Items:
+- Items with "_is_primary": true are events that REQUIRE REVIEW and MUST be included in the report
+- Items with "_is_primary": false are additional context from other camera perspectives that overlap in time
+- **DO NOT create separate bullet points or sections for contextual items**
+- **ONLY use contextual items to enrich and inform the description of primary items**
+- The "_camera" field indicates which camera captured each event
+- **When a contextual item provides relevant background, you MUST incorporate it directly into the primary event's bullet point**
+- Contextual information often explains or de-escalates seemingly suspicious primary events
 Rules for the report:
 - Title & overview
   - Start with:
     # Security Summary - {time_range}
   - Write a 1-2 sentence situational overview capturing the general pattern of the period.
+  - Keep the overview high-level; specific details will be in the event bullets below.
 - Event details
-  - Present events in chronological order as a bullet list.
-  - **If multiple events occur within the same minute or overlapping time range, COMBINE them into a single bullet.**
-  - Summarize the distinct activities as sub-points under the shared timestamp.
-  - If no timestamp is given, preserve order but label as Time not specified.
+  - **ONLY create bullet points for PRIMARY items (_is_primary: true)**
+  - **Do NOT create sections or bullets for events that don't exist**
+  - Do NOT create separate bullets for contextual items
+  - Present primary events in chronological order as a bullet list.
+  - **CRITICAL: When contextual items overlap with a primary event, you MUST weave that information directly into the same bullet point**
+  - Format: **[Timestamp]** - [Description incorporating any contextual information]. [Camera info]. (threat level: X)
+  - If contextual information provides an explanation (e.g., delivery truck person is likely delivery driver), reflect this understanding in your description and potentially adjust the perceived threat level
+  - If multiple PRIMARY events occur within the same minute, combine them into a single bullet with sub-points.
   - Use bold timestamps for clarity.
-  - Group bullets under subheadings when multiple events fall into the same category (e.g., Vehicle Activity, Porch Activity, Unusual Behavior).
+  - Camera format: "Camera: [camera name]" or mention contextual cameras inline when relevant
+  - Group bullets under subheadings ONLY when you have actual PRIMARY events to list (e.g., Porch Activity, Unusual Behavior).
 - Threat levels
-  - Always show the threat level for each event using these labels:
+  - Show the threat level for PRIMARY events using these labels:
     - Threat level 0: "Normal"
     - Threat level 1: "Needs review"
     - Threat level 2: "Security concern"
   - Format as (threat level: Normal), (threat level: Needs review), or (threat level: Security concern).
-  - If multiple events at the same time share the same threat level, only state it once.
+  - **When contextual items clearly explain a primary event (e.g., delivery truck explains person at door), you should describe it as normal activity and note the explanation**
+  - **Your description and tone should reflect the fuller understanding provided by contextual information**
+  - Example: Primary event says "unidentified person with face covering" but context shows a delivery truck → describe as "delivery person (truck visible on Front Driveway Cam)" rather than emphasizing suspicious elements
+  - The stored threat level remains as originally classified, but your narrative should reflect the contextual understanding
+  - If multiple PRIMARY events at the same time share the same threat level, only state it once.
 - Final assessment
   - End with a Final Assessment section.
-  - If all events are threat level 0:
+  - If all primary events are threat level 0 or explained by contextual items:
    Final assessment: Only normal residential activity observed during this period.
   - If threat level 1 events are present:
    Final assessment: Some activity requires review but no security concerns identified.
   - If threat level 2 events are present, clearly summarize them as Security concerns requiring immediate attention.
+  - Keep this section brief - do not repeat details from the event descriptions above.
 - Conciseness
   - Do not repeat benign clothing/appearance details unless they distinguish individuals.
   - Summarize similar routine events instead of restating full scene descriptions.
+  - When incorporating contextual information, do so briefly and naturally within the primary event description.
+  - Avoid lengthy explanatory notes - integrate context seamlessly into the narrative.
 """

     for item in segments:


@@ -177,7 +177,7 @@
     "generateSuccess": "Successfully generated sample images",
     "missingStatesWarning": {
       "title": "Missing State Examples",
-      "description": "You haven't selected examples for all states. The model will not be trained until all states have images. After continuing, use the Recent Classifications view to classify images for the missing states, then train the model."
+      "description": "It's recommended to select examples for all states for best results. You can continue without selecting all states, but the model will not be trained until all states have images. After continuing, use the Recent Classifications view to classify images for the missing states, then train the model."
     }
   }
 }


@@ -500,7 +500,7 @@
     "name": {
       "title": "Name",
       "inputPlaceHolder": "Enter a name…",
-      "tips": "Name must be at least 2 characters, must have at least one letter, and must not be the name of a camera or another zone."
+      "tips": "Name must be at least 2 characters, must have at least one letter, and must not be the name of a camera or another zone on this camera."
     },
     "inertia": {
       "title": "Inertia",


@@ -0,0 +1,12 @@
// Module-level flag to prevent multiple simultaneous redirects
// (eg, when multiple SWR queries fail with 401 at once, or when
// both ApiProvider and ProtectedRoute try to redirect)
let _isRedirectingToLogin = false;

export function isRedirectingToLogin(): boolean {
  return _isRedirectingToLogin;
}

export function setRedirectingToLogin(value: boolean): void {
  _isRedirectingToLogin = value;
}


@@ -3,6 +3,7 @@ import { SWRConfig } from "swr";
 import { WsProvider } from "./ws";
 import axios from "axios";
 import { ReactNode } from "react";
+import { isRedirectingToLogin, setRedirectingToLogin } from "./auth-redirect";

 axios.defaults.baseURL = `${baseUrl}api/`;
@@ -31,7 +32,8 @@ export function ApiProvider({ children, options }: ApiProviderType) {
         ) {
           // redirect to the login page if not already there
           const loginPage = error.response.headers.get("location") ?? "login";
-          if (window.location.href !== loginPage) {
+          if (window.location.href !== loginPage && !isRedirectingToLogin()) {
+            setRedirectingToLogin(true);
             window.location.href = loginPage;
           }
         }


@@ -1,7 +1,11 @@
-import { useContext } from "react";
+import { useContext, useEffect } from "react";
 import { Navigate, Outlet } from "react-router-dom";
 import { AuthContext } from "@/context/auth-context";
 import ActivityIndicator from "../indicators/activity-indicator";
+import {
+  isRedirectingToLogin,
+  setRedirectingToLogin,
+} from "@/api/auth-redirect";

 export default function ProtectedRoute({
   requiredRoles,
@@ -10,6 +14,20 @@ export default function ProtectedRoute({
 }) {
   const { auth } = useContext(AuthContext);

+  // Redirect to login page when not authenticated
+  // don't use <Navigate> because we need a full page load to reset state
+  useEffect(() => {
+    if (
+      !auth.isLoading &&
+      auth.isAuthenticated &&
+      !auth.user &&
+      !isRedirectingToLogin()
+    ) {
+      setRedirectingToLogin(true);
+      window.location.href = "/login";
+    }
+  }, [auth.isLoading, auth.isAuthenticated, auth.user]);
+
   if (auth.isLoading) {
     return (
       <ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
@@ -23,7 +41,9 @@ export default function ProtectedRoute({
   // Authenticated mode (8971): require login
   if (!auth.user) {
-    return <Navigate to="/login" replace />;
+    return (
+      <ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
+    );
   }

   // If role is null (shouldnt happen if isAuthenticated, but type safety), fallback


@@ -23,6 +23,7 @@ import { useTranslation } from "react-i18next";
 import { ImageShadowOverlay } from "../overlay/ImageShadowOverlay";
 import BlurredIconButton from "../button/BlurredIconButton";
 import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip";
+import { useIsAdmin } from "@/hooks/use-is-admin";

 type ExportProps = {
   className: string;
@@ -40,6 +41,7 @@ export default function ExportCard({
   onDelete,
 }: ExportProps) {
   const { t } = useTranslation(["views/exports"]);
+  const isAdmin = useIsAdmin();
   const [hovered, setHovered] = useState(false);
   const [loading, setLoading] = useState(
     exportedRecording.thumb_path.length > 0,
@@ -195,7 +197,7 @@ export default function ExportCard({
             </Tooltip>
           </a>
         )}
-        {!exportedRecording.in_progress && (
+        {isAdmin && !exportedRecording.in_progress && (
           <Tooltip>
             <TooltipTrigger asChild>
               <BlurredIconButton
@@ -212,21 +214,23 @@ export default function ExportCard({
             <TooltipContent>{t("tooltip.editName")}</TooltipContent>
           </Tooltip>
         )}
-        <Tooltip>
-          <TooltipTrigger asChild>
-            <BlurredIconButton
-              onClick={() =>
-                onDelete({
-                  file: exportedRecording.id,
-                  exportName: exportedRecording.name,
-                })
-              }
-            >
-              <LuTrash className="size-4 fill-destructive text-destructive hover:text-white" />
-            </BlurredIconButton>
-          </TooltipTrigger>
-          <TooltipContent>{t("tooltip.deleteExport")}</TooltipContent>
-        </Tooltip>
+        {isAdmin && (
+          <Tooltip>
+            <TooltipTrigger asChild>
+              <BlurredIconButton
+                onClick={() =>
+                  onDelete({
+                    file: exportedRecording.id,
+                    exportName: exportedRecording.name,
+                  })
+                }
+              >
+                <LuTrash className="size-4 fill-destructive text-destructive hover:text-white" />
+              </BlurredIconButton>
+            </TooltipTrigger>
+            <TooltipContent>{t("tooltip.deleteExport")}</TooltipContent>
+          </Tooltip>
+        )}
       </div>
     </div>


@@ -407,30 +407,6 @@ export default function Step3ChooseExamples({
     return allClasses.every((className) => statesWithExamples.has(className));
   }, [step1Data.modelType, allClasses, statesWithExamples]);

-  // For state models on the last class, require all images to be classified
-  // But allow proceeding even if not all states have examples (with warning)
-  const canProceed = useMemo(() => {
-    if (step1Data.modelType === "state" && isLastClass) {
-      // Check if all 24 images will be classified after current selections are applied
-      const totalImages = unknownImages.slice(0, 24).length;
-      // Count images that will be classified (either already classified or currently selected)
-      const allImages = unknownImages.slice(0, 24);
-      const willBeClassified = allImages.filter((img) => {
-        return imageClassifications[img] || selectedImages.has(img);
-      }).length;
-      return willBeClassified >= totalImages;
-    }
-    return true;
-  }, [
-    step1Data.modelType,
-    isLastClass,
-    unknownImages,
-    imageClassifications,
-    selectedImages,
-  ]);
-
   const hasUnclassifiedImages = useMemo(() => {
     if (!unknownImages) return false;
     const allImages = unknownImages.slice(0, 24);
@@ -594,9 +570,7 @@ export default function Step3ChooseExamples({
             }
             variant="select"
             className="flex items-center justify-center gap-2 sm:flex-1"
-            disabled={
-              !hasGenerated || isGenerating || isProcessing || !canProceed
-            }
+            disabled={!hasGenerated || isGenerating || isProcessing}
           >
             {isProcessing && <ActivityIndicator className="size-4" />}
             {t("button.continue", { ns: "common" })}


@@ -559,6 +559,7 @@ export function TrackingDetails({
           isDetailMode={true}
           camera={event.camera}
           currentTimeOverride={currentTime}
+          enableGapControllerRecovery={true}
         />
         {isVideoLoading && (
           <ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />


@@ -5,7 +5,7 @@ import {
   useRef,
   useState,
 } from "react";
-import Hls from "hls.js";
+import Hls, { HlsConfig } from "hls.js";
 import { isDesktop, isMobile } from "react-device-detect";
 import { TransformComponent, TransformWrapper } from "react-zoom-pan-pinch";
 import VideoControls from "./VideoControls";
@@ -57,6 +57,7 @@ type HlsVideoPlayerProps = {
   isDetailMode?: boolean;
   camera?: string;
   currentTimeOverride?: number;
+  enableGapControllerRecovery?: boolean;
 };

 export default function HlsVideoPlayer({
@@ -81,6 +82,7 @@ export default function HlsVideoPlayer({
   isDetailMode = false,
   camera,
   currentTimeOverride,
+  enableGapControllerRecovery = false,
 }: HlsVideoPlayerProps) {
   const { t } = useTranslation("components/player");
   const { data: config } = useSWR<FrigateConfig>("config");
@@ -170,11 +172,23 @@ export default function HlsVideoPlayer({
       return;
     }

-    hlsRef.current = new Hls({
+    // Base HLS configuration
+    const baseConfig: Partial<HlsConfig> = {
       maxBufferLength: 10,
       maxBufferSize: 20 * 1000 * 1000,
       startPosition: currentSource.startPosition,
-    });
+    };
+
+    const hlsConfig = { ...baseConfig };
+
+    if (enableGapControllerRecovery) {
+      hlsConfig.highBufferWatchdogPeriod = 1; // Check for stalls every 1 second (default: 3)
+      hlsConfig.nudgeOffset = 0.2; // Nudge playhead forward 0.2s when stalled (default: 0.1)
+      hlsConfig.nudgeMaxRetry = 5; // Try up to 5 nudges before giving up (default: 3)
+      hlsConfig.maxBufferHole = 0.5; // Tolerate up to 0.5s gaps between fragments (default: 0.1)
+    }
+
+    hlsRef.current = new Hls(hlsConfig);
     hlsRef.current.attachMedia(videoRef.current);
     hlsRef.current.loadSource(currentSource.playlist);
     videoRef.current.playbackRate = currentPlaybackRate;
@@ -187,7 +201,13 @@ export default function HlsVideoPlayer({
         hlsRef.current.destroy();
       }
     };
-  }, [videoRef, hlsRef, useHlsCompat, currentSource]);
+  }, [
+    videoRef,
+    hlsRef,
+    useHlsCompat,
+    currentSource,
+    enableGapControllerRecovery,
+  ]);

   // state handling