Compare commits


13 Commits

Author SHA1 Message Date
ivanshi1108
99aaf4d3ba
Merge 7933a83a42 into 1b57fb15a7 2025-11-27 23:44:46 +08:00
Nicolas Mowen
1b57fb15a7
Miscellaneous Fixes (#21063)
* Fix history management failing when updating URL

* Handle case where user doesn't have images that represent all states

If a user has classified all images but some states still have no examples, we show a warning that they can still proceed, but the model won't be trained until there is at least one image for every state.

* Still create all classes

We still need to create all classes even if the user didn't assign images to them.

* fix camera group access for non-admin users

Changes from the previous PR wrongly included users with the standard viewer role (but excluded custom viewer roles).

* Adjust threat level interaction to be less strict

* use base path when fetching go2rtc data

* show config error message when starting in safe mode

* fix genai migration

* fix genai

* Fix genai migration

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-11-27 07:58:35 -06:00
ivanshi1108
7933a83a42
Update docs/docs/configuration/object_detectors.md
Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2025-11-24 23:04:19 +08:00
shizhicheng
2eef58aa1d Modify the description of AXERA in the documentation. 2025-11-24 07:04:42 +00:00
ivanshi1108
6659b7cb0f
Merge branch 'dev' into AXERA-axcl 2025-11-24 10:55:09 +08:00
shizhicheng
f134796913 format code with ruff 2025-11-24 02:42:04 +00:00
shizhicheng
b4abbd7d3b Modify the document based on review suggestions 2025-11-24 02:20:40 +00:00
shizhicheng
438df7d484 The model inference time has been updated to match the time displayed in the Frigate UI 2025-11-16 22:22:38 +08:00
shizhicheng
e27a94ae0b Fix logical errors caused by code formatting 2025-11-11 05:54:19 +00:00
shizhicheng
1dee548dbc Modifications to the YOLOv9 object detection model:
The model is now dynamically downloaded to the cache directory.
Post-processing is now done using Frigate's built-in `post_process_yolo`.
Configuration in the relevant documentation has been updated.
2025-11-11 05:42:28 +00:00
shizhicheng
91e17e12b7 Change the default detection model to YOLOv9 2025-11-09 13:21:17 +00:00
ivanshi1108
bb45483e9e
Modify AXERA section in hardware.md
Modify the AXERA section and related content in the hardware documentation.
2025-10-28 09:54:00 +08:00
shizhicheng
7b4eaf2d10 Initial commit for AXERA AI accelerators 2025-10-24 09:03:13 +00:00
26 changed files with 717 additions and 99 deletions

@@ -225,3 +225,29 @@ jobs:
sources: |
ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }}-amd64
ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }}-rpi
axera_build:
runs-on: ubuntu-22.04
name: AXERA Build
needs:
- amd64_build
- arm64_build
steps:
- name: Check out code
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Set up QEMU and Buildx
id: setup
uses: ./.github/actions/setup
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Axera build
uses: docker/bake-action@v6
with:
source: .
push: true
targets: axcl
files: docker/axcl/axcl.hcl
set: |
axcl.tags=${{ steps.setup.outputs.image-name }}-axcl
*.cache-from=type=gha

docker/axcl/Dockerfile (new file)

@@ -0,0 +1,55 @@
# syntax=docker/dockerfile:1.6
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1
FROM frigate AS frigate-axcl
ARG TARGETARCH
ARG PIP_BREAK_SYSTEM_PACKAGES
# Install pyaxengine
RUN wget https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3.rc1/axengine-0.1.3-py3-none-any.whl -O /axengine-0.1.3-py3-none-any.whl
RUN pip3 install -i https://mirrors.aliyun.com/pypi/simple/ /axengine-0.1.3-py3-none-any.whl \
&& rm /axengine-0.1.3-py3-none-any.whl
# Install axcl
RUN if [ "$TARGETARCH" = "amd64" ]; then \
echo "Installing x86_64 version of axcl"; \
wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb -O /axcl.deb; \
else \
echo "Installing aarch64 version of axcl"; \
wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb -O /axcl.deb; \
fi
RUN mkdir /unpack_axcl && \
dpkg-deb -x /axcl.deb /unpack_axcl && \
cp -R /unpack_axcl/usr/bin/axcl /usr/bin/ && \
cp -R /unpack_axcl/usr/lib/axcl /usr/lib/ && \
rm -rf /unpack_axcl /axcl.deb
# Install axcl ffmpeg
RUN mkdir -p /usr/lib/ffmpeg/axcl
RUN if [ "$TARGETARCH" = "amd64" ]; then \
wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffmpeg-x64 -O /usr/lib/ffmpeg/axcl/ffmpeg && \
wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffprobe-x64 -O /usr/lib/ffmpeg/axcl/ffprobe; \
else \
wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffmpeg-aarch64 -O /usr/lib/ffmpeg/axcl/ffmpeg && \
wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffprobe-aarch64 -O /usr/lib/ffmpeg/axcl/ffprobe; \
fi
RUN chmod +x /usr/lib/ffmpeg/axcl/ffmpeg /usr/lib/ffmpeg/axcl/ffprobe
# Set ldconfig path
RUN echo "/usr/lib/axcl" > /etc/ld.so.conf.d/ax.conf
# Set env
ENV PATH="$PATH:/usr/bin/axcl"
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib/axcl"
ENTRYPOINT ["sh", "-c", "ldconfig && exec /init"]

docker/axcl/axcl.hcl (new file)

@@ -0,0 +1,13 @@
target frigate {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/amd64", "linux/arm64"]
target = "frigate"
}
target axcl {
dockerfile = "docker/axcl/Dockerfile"
contexts = {
frigate = "target:frigate",
}
platforms = ["linux/amd64", "linux/arm64"]
}

docker/axcl/axcl.mk (new file)

@@ -0,0 +1,15 @@
BOARDS += axcl
local-axcl: version
docker buildx bake --file=docker/axcl/axcl.hcl axcl \
--set axcl.tags=frigate:latest-axcl \
--load
build-axcl: version
docker buildx bake --file=docker/axcl/axcl.hcl axcl \
--set axcl.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-axcl
push-axcl: build-axcl
docker buildx bake --file=docker/axcl/axcl.hcl axcl \
--set axcl.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-axcl \
--push

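Assuming Frigate's top-level Makefile picks up these per-board fragments (the `BOARDS += axcl` line suggests it does, but that inclusion is not shown in this diff), a local image can then be built and loaded with:

```bash
# builds frigate:latest-axcl via docker buildx bake and loads it into the local Docker daemon
make local-axcl
```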
@@ -0,0 +1,83 @@
#!/bin/bash
# Update package list and install dependencies
sudo apt-get update
sudo apt-get install -y build-essential cmake git wget pciutils kmod udev
# Check if gcc-12 is needed
current_gcc_version=$(gcc --version | head -n1 | awk '{print $NF}')
gcc_major_version=$(echo $current_gcc_version | cut -d'.' -f1)
if [[ $gcc_major_version -lt 12 ]]; then
echo "Current GCC version ($current_gcc_version) is lower than 12, installing gcc-12..."
sudo apt-get install -y gcc-12
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12
echo "GCC-12 installed and set as default"
else
echo "Current GCC version ($current_gcc_version) is sufficient, skipping GCC installation"
fi
# Determine architecture
arch=$(uname -m)
download_url=""
if [[ $arch == "x86_64" ]]; then
download_url="https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb"
deb_file="axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb"
elif [[ $arch == "aarch64" ]]; then
download_url="https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb"
deb_file="axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb"
else
echo "Unsupported architecture: $arch"
exit 1
fi
# Download AXCL driver
echo "Downloading AXCL driver for $arch..."
wget "$download_url" -O "$deb_file"
if [ $? -ne 0 ]; then
echo "Failed to download AXCL driver"
exit 1
fi
# Install AXCL driver
echo "Installing AXCL driver..."
sudo dpkg -i "$deb_file"
if [ $? -ne 0 ]; then
echo "Failed to install AXCL driver, attempting to fix dependencies..."
sudo apt-get install -f -y
sudo dpkg -i "$deb_file"
if [ $? -ne 0 ]; then
echo "AXCL driver installation failed"
exit 1
fi
fi
# Update environment
echo "Updating environment..."
source /etc/profile
# Verify installation
echo "Verifying AXCL installation..."
if command -v axcl-smi &> /dev/null; then
echo "AXCL driver detected, checking AI accelerator status..."
axcl_output=$(axcl-smi 2>&1)
axcl_exit_code=$?
echo "$axcl_output"
if [ $axcl_exit_code -eq 0 ]; then
echo "AXCL driver installation completed successfully!"
else
echo "AXCL driver installed but no AI accelerator detected or communication failed."
echo "Please check if the AI accelerator is properly connected and powered on."
exit 1
fi
else
echo "axcl-smi command not found. AXCL driver installation may have failed."
exit 1
fi

@@ -49,6 +49,11 @@ Frigate supports multiple different detectors that work on different types of ha
- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs.
**AXERA** <CommunityBadge />
- [AXEngine](#axera): axmodels can run on AXERA AI accelerators.
**For Testing**
- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run a TFLite model. This is not recommended; in most cases OpenVINO can be used in CPU mode with better results.
@@ -1438,6 +1443,41 @@ model:
input_pixel_format: rgb/bgr # look at the model.json to figure out which to put here
```
## AXERA
Hardware-accelerated object detection is supported on the following SoCs:
- AX650N
- AX8850N
This implementation uses the [AXera Pulsar2 Toolchain](https://huggingface.co/AXERA-TECH/Pulsar2).
See the [installation docs](../frigate/installation.md#axera) for information on configuring the AXEngine hardware.
### Configuration
When configuring the AXEngine detector, you have to specify the model name.
#### yolov9
A yolov9 model is downloaded to the model cache automatically and used by this detector type by default.
Use the model configuration shown below when using the axengine detector with the default axmodel:
```yaml
detectors:
axengine:
type: axengine
model:
path: frigate-yolov9-tiny
model_type: yolo-generic
width: 320
height: 320
tensor_format: bgr
labelmap_path: /labelmap/coco-80.txt
```
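Judging from the detector implementation added in this PR, any `path` matching `frigate-yolov9-*` is accepted; if the corresponding `.axmodel` is not already in the model cache, it is downloaded on first start, so the name must resolve to a published release asset. As a sketch with a hypothetical variant name:

```yaml
model:
  path: frigate-yolov9-s # hypothetical variant; must match frigate-yolov9-*
```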
# Models
Some model types are not included in Frigate by default.

@@ -104,6 +104,10 @@ Frigate supports multiple different detectors that work on different types of ha
- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs to provide efficient object detection.
**AXERA** <CommunityBadge />
- [AXEngine](#axera): axera models can run on AXERA NPUs via AXEngine, delivering highly efficient object detection.
:::
### Hailo-8
@@ -287,6 +291,14 @@ The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms fo
| ssd mobilenet | ~ 25 ms |
| yolov5m | ~ 118 ms |
### AXERA
- **AXEngine**: the default model is **yolov9**
| Name | AXERA AX650N/AX8850N Inference Time |
| ---------------- | ----------------------------------- |
| yolov9-tiny | ~ 4 ms |
## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)
This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.
@@ -307,4 +319,4 @@ Basically - When you increase the resolution and/or the frame rate of the stream
YES! The Coral does not help with decoding video streams.
Decompressing video streams takes a significant amount of CPU power. Video compression uses key frames (also known as I-frames) to send a full frame in the video stream. The following frames only include the difference from the key frame, and the CPU has to compile each frame by merging the differences with the key frame. [More detailed explanation](https://support.video.ibm.com/hc/en-us/articles/18106203580316-Keyframes-InterFrame-Video-Compression). Higher resolutions and frame rates mean more processing power is needed to decode the video stream, so try and set them on the camera to avoid unnecessary decoding work.

@@ -287,6 +287,42 @@ or add these options to your `docker run` command:
Next, you should configure [hardware object detection](/configuration/object_detectors#synaptics) and [hardware video processing](/configuration/hardware_acceleration_video#synaptics).
### AXERA
<details>
<summary>AXERA accelerators</summary>
AXERA accelerators are available in an M.2 form factor, compatible with both Raspberry Pi and Orange Pi. This form factor has also been successfully tested on x86 platforms, making it a versatile choice for various computing environments.
#### Installation
Using AXERA accelerators requires the installation of the AXCL driver. We provide a convenient Linux script to complete this installation.
Follow these steps for installation:
1. Copy or download [this script](https://github.com/ivanshi1108/assets/releases/download/v0.16.2/user_installation.sh).
2. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
3. Run the script with `./user_installation.sh`
#### Setup
To set up Frigate, follow the default installation instructions using, for example, the `ghcr.io/blakeblackshear/frigate:stable` image.
Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file:
```yaml
devices:
- /dev/axcl_host
- /dev/ax_mmb_dev
- /dev/msg_userdev
```
If you are using `docker run`, add these options to your command: `--device /dev/axcl_host --device /dev/ax_mmb_dev --device /dev/msg_userdev`.
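Putting the pieces together, a minimal `docker-compose.yml` sketch might look like the following; the volume paths are assumptions to adjust for your deployment:

```yaml
services:
  frigate:
    container_name: frigate
    image: ghcr.io/blakeblackshear/frigate:stable
    restart: unless-stopped
    devices:
      # AXCL device nodes exposed by the AXERA driver
      - /dev/axcl_host
      - /dev/ax_mmb_dev
      - /dev/msg_userdev
    volumes:
      - ./config:/config
      - ./storage:/media/frigate
```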
#### Configuration
Finally, configure [hardware object detection](/configuration/object_detectors#axera) to complete the setup.
</details>
## Docker
Running through Docker with Docker Compose is the recommended install method.

@@ -870,6 +870,46 @@ def categorize_classification_image(request: Request, name: str, body: dict = No
)
@router.post(
"/classification/{name}/dataset/{category}/create",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Create an empty classification category folder",
description="""Creates an empty folder for a classification category.
This is used to create folders for categories that don't have images yet.
Returns a success message or an error if the name is invalid.""",
)
def create_classification_category(request: Request, name: str, category: str):
config: FrigateConfig = request.app.frigate_config
if name not in config.classification.custom:
return JSONResponse(
content=(
{
"success": False,
"message": f"{name} is not a known classification model.",
}
),
status_code=404,
)
category_folder = os.path.join(
CLIPS_DIR, sanitize_filename(name), "dataset", sanitize_filename(category)
)
os.makedirs(category_folder, exist_ok=True)
return JSONResponse(
content=(
{
"success": True,
"message": f"Successfully created category folder: {category}",
}
),
status_code=200,
)
@router.post(
"/classification/{name}/train/delete",
response_model=GenericResponse,

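As a usage sketch, the new endpoint can be exercised directly. The host, port, model name, and category below are hypothetical, the `/api` prefix assumes Frigate's default API mount, and the route requires an admin session:

```bash
# create an empty dataset folder for the "open" category of the "door_state" model
curl -X POST "http://localhost:5000/api/classification/door_state/dataset/open/create"
```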
@@ -375,7 +375,19 @@ class WebPushClient(Communicator):
ended = state == "end" or state == "genai"
if state == "genai" and payload["after"]["data"]["metadata"]:
title = payload["after"]["data"]["metadata"]["title"]
base_title = payload["after"]["data"]["metadata"]["title"]
threat_level = payload["after"]["data"]["metadata"].get(
"potential_threat_level", 0
)
# Add prefix for threat levels 1 and 2
if threat_level == 1:
title = f"Needs Review: {base_title}"
elif threat_level == 2:
title = f"Security Concern: {base_title}"
else:
title = base_title
message = payload["after"]["data"]["metadata"]["scene"]
else:
title = f"{titlecase(', '.join(sorted_objects).replace('_', ' '))}{' was' if state == 'end' else ''} detected in {titlecase(', '.join(payload['after']['data']['zones']).replace('_', ' '))}"

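As an illustrative example: a GenAI metadata title of "Person at front door" is delivered unchanged at threat level 0, as "Needs Review: Person at front door" at level 1, and as "Security Concern: Person at front door" at level 2.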
@@ -0,0 +1,87 @@
import logging
import os.path
import re
import urllib.request
from typing import Literal
import axengine as axe
from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
from frigate.util.model import post_process_yolo
logger = logging.getLogger(__name__)
DETECTOR_KEY = "axengine"
supported_models = {
ModelTypeEnum.yologeneric: "frigate-yolov9-.*$",
}
model_cache_dir = os.path.join(MODEL_CACHE_DIR, "axengine_cache/")
class AxengineDetectorConfig(BaseDetectorConfig):
type: Literal[DETECTOR_KEY]
class Axengine(DetectionApi):
type_key = DETECTOR_KEY
def __init__(self, config: AxengineDetectorConfig):
logger.info("__init__ axengine")
super().__init__(config)
self.height = config.model.height
self.width = config.model.width
model_path = config.model.path or "frigate-yolov9-tiny"
model_props = self.parse_model_input(model_path)
self.session = axe.InferenceSession(model_props["path"])
def __del__(self):
pass
def parse_model_input(self, model_path):
model_props = {}
model_props["preset"] = True
model_matched = False
for model_type, pattern in supported_models.items():
if re.match(pattern, model_path):
model_matched = True
model_props["model_type"] = model_type
if model_matched:
model_props["filename"] = model_path + ".axmodel"
model_props["path"] = model_cache_dir + model_props["filename"]
if not os.path.isfile(model_props["path"]):
self.download_model(model_props["filename"])
else:
supported_models_str = ", ".join(
pattern.rstrip("$") for pattern in supported_models.values()
)
raise Exception(
f"Model {model_path} is unsupported. Provide your own model or choose one of the following: {supported_models_str}"
)
return model_props
def download_model(self, filename):
if not os.path.isdir(model_cache_dir):
os.mkdir(model_cache_dir)
GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
urllib.request.urlretrieve(
f"{GITHUB_ENDPOINT}/ivanshi1108/assets/releases/download/v0.16.2/{filename}",
model_cache_dir + filename,
)
def detect_raw(self, tensor_input):
results = self.session.run(None, {"images": tensor_input})
if self.detector_config.model.model_type == ModelTypeEnum.yologeneric:
return post_process_yolo(results, self.width, self.height)
else:
raise ValueError(
f'Model type "{self.detector_config.model.model_type}" is currently not supported.'
)

@@ -205,14 +205,20 @@ Rules for the report:
- Group bullets under subheadings when multiple events fall into the same category (e.g., Vehicle Activity, Porch Activity, Unusual Behavior).
- Threat levels
- Always show (threat level: X) for each event.
- Always show the threat level for each event using these labels:
- Threat level 0: "Normal"
- Threat level 1: "Needs review"
- Threat level 2: "Security concern"
- Format as (threat level: Normal), (threat level: Needs review), or (threat level: Security concern).
- If multiple events at the same time share the same threat level, only state it once.
- Final assessment
- End with a Final Assessment section.
- If all events are threat level 1 with no escalation:
- If all events are threat level 0:
Final assessment: Only normal residential activity observed during this period.
- If threat level 2+ events are present, clearly summarize them as Potential concerns requiring review.
- If threat level 1 events are present:
Final assessment: Some activity requires review but no security concerns identified.
- If threat level 2 events are present, clearly summarize them as Security concerns requiring immediate attention.
- Conciseness
- Do not repeat benign clothing/appearance details unless they distinguish individuals.

@@ -348,7 +348,7 @@ def migrate_016_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]
def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]:
"""Handle migrating frigate config to 0.16-0"""
"""Handle migrating frigate config to 0.17-0"""
new_config = config.copy()
# migrate global to new recording configuration
@@ -380,7 +380,7 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]
if global_genai:
new_genai_config = {}
new_object_config = config.get("objects", {})
new_object_config = new_config.get("objects", {})
new_object_config["genai"] = {}
for key in global_genai.keys():
@@ -389,7 +389,8 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]
else:
new_object_config["genai"][key] = global_genai[key]
config["genai"] = new_genai_config
new_config["genai"] = new_genai_config
new_config["objects"] = new_object_config
for name, camera in config.get("cameras", {}).items():
camera_config: dict[str, dict[str, Any]] = camera.copy()
@@ -415,8 +416,9 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]
camera_genai = camera_config.get("genai", {})
if camera_genai:
new_object_config = config.get("objects", {})
new_object_config["genai"] = camera_genai
camera_object_config = camera_config.get("objects", {})
camera_object_config["genai"] = camera_genai
camera_config["objects"] = camera_object_config
del camera_config["genai"]
new_config["cameras"][name] = camera_config

@@ -166,6 +166,7 @@
"noImages": "No sample images generated",
"classifying": "Classifying & Training...",
"trainingStarted": "Training started successfully",
"modelCreated": "Model created successfully. Use the Recent Classifications view to add images for missing states, then train the model.",
"errors": {
"noCameras": "No cameras configured",
"noObjectLabel": "No object label selected",
@@ -173,7 +174,11 @@
"generationFailed": "Generation failed. Please try again.",
"classifyFailed": "Failed to classify images: {{error}}"
},
"generateSuccess": "Successfully generated sample images"
"generateSuccess": "Successfully generated sample images",
"missingStatesWarning": {
"title": "Missing State Examples",
"description": "You haven't selected examples for all states. The model will not be trained until all states have images. After continuing, use the Recent Classifications view to classify images for the missing states, then train the model."
}
}
}
}

@@ -54,6 +54,7 @@
"selected_other": "{{count}} selected",
"camera": "Camera",
"detected": "detected",
"suspiciousActivity": "Suspicious Activity",
"threateningActivity": "Threatening Activity"
"normalActivity": "Normal",
"needsReview": "Needs review",
"securityConcern": "Security concern"
}

@@ -10,12 +10,8 @@ import useSWR from "swr";
import { baseUrl } from "@/api/baseUrl";
import { isMobile } from "react-device-detect";
import { cn } from "@/lib/utils";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip";
import { TooltipPortal } from "@radix-ui/react-tooltip";
import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert";
import { IoIosWarning } from "react-icons/io";
export type Step3FormData = {
examplesGenerated: boolean;
@@ -145,20 +141,67 @@ export default function Step3ChooseExamples({
);
await Promise.all(categorizePromises);
// Step 3: Kick off training
await axios.post(`/classification/${step1Data.modelName}/train`);
// Step 2.5: Create empty folders for classes that don't have any images
// This ensures all classes are available in the dataset view later
const classesWithImages = new Set(
Object.values(classifications).filter((c) => c && c !== "none"),
);
const emptyFolderPromises = step1Data.classes
.filter((className) => !classesWithImages.has(className))
.map((className) =>
axios.post(
`/classification/${step1Data.modelName}/dataset/${className}/create`,
),
);
await Promise.all(emptyFolderPromises);
toast.success(t("wizard.step3.trainingStarted"), {
closeButton: true,
});
setIsTraining(true);
// Step 3: Determine if we should train
// For state models, we need ALL states to have examples
// For object models, we need at least 2 classes with images
const allStatesHaveExamplesForTraining =
step1Data.modelType !== "state" ||
step1Data.classes.every((className) =>
classesWithImages.has(className),
);
const shouldTrain =
allStatesHaveExamplesForTraining && classesWithImages.size >= 2;
// Step 4: Kick off training only if we have enough classes with images
if (shouldTrain) {
await axios.post(`/classification/${step1Data.modelName}/train`);
toast.success(t("wizard.step3.trainingStarted"), {
closeButton: true,
});
setIsTraining(true);
} else {
// Don't train - not all states have examples
toast.success(t("wizard.step3.modelCreated"), {
closeButton: true,
});
setIsTraining(false);
onClose();
}
},
[step1Data, step2Data, t],
[step1Data, step2Data, t, onClose],
);
const handleContinueClassification = useCallback(async () => {
// Mark selected images with current class
const newClassifications = { ...imageClassifications };
// Handle user going back and de-selecting images
const imagesToCheck = unknownImages.slice(0, 24);
imagesToCheck.forEach((imageName) => {
if (
newClassifications[imageName] === currentClass &&
!selectedImages.has(imageName)
) {
delete newClassifications[imageName];
}
});
// Then, add all currently selected images to the current class
selectedImages.forEach((imageName) => {
newClassifications[imageName] = currentClass;
});
@@ -329,8 +372,43 @@ export default function Step3ChooseExamples({
return unclassifiedImages.length === 0;
}, [unclassifiedImages]);
// For state models on the last class, require all images to be classified
const isLastClass = currentClassIndex === allClasses.length - 1;
const statesWithExamples = useMemo(() => {
if (step1Data.modelType !== "state") return new Set<string>();
const states = new Set<string>();
const allImages = unknownImages.slice(0, 24);
// Check which states have at least one image classified
allImages.forEach((img) => {
let className: string | undefined;
if (selectedImages.has(img)) {
className = currentClass;
} else {
className = imageClassifications[img];
}
if (className && allClasses.includes(className)) {
states.add(className);
}
});
return states;
}, [
step1Data.modelType,
unknownImages,
imageClassifications,
selectedImages,
currentClass,
allClasses,
]);
const allStatesHaveExamples = useMemo(() => {
if (step1Data.modelType !== "state") return true;
return allClasses.every((className) => statesWithExamples.has(className));
}, [step1Data.modelType, allClasses, statesWithExamples]);
// For state models on the last class, require all images to be classified
// But allow proceeding even if not all states have examples (with warning)
const canProceed = useMemo(() => {
if (step1Data.modelType === "state" && isLastClass) {
// Check if all 24 images will be classified after current selections are applied
@@ -353,6 +431,28 @@ export default function Step3ChooseExamples({
selectedImages,
]);
const hasUnclassifiedImages = useMemo(() => {
if (!unknownImages) return false;
const allImages = unknownImages.slice(0, 24);
return allImages.some((img) => !imageClassifications[img]);
}, [unknownImages, imageClassifications]);
const showMissingStatesWarning = useMemo(() => {
return (
step1Data.modelType === "state" &&
isLastClass &&
!allStatesHaveExamples &&
!hasUnclassifiedImages &&
hasGenerated
);
}, [
step1Data.modelType,
isLastClass,
allStatesHaveExamples,
hasUnclassifiedImages,
hasGenerated,
]);
const handleBack = useCallback(() => {
if (currentClassIndex > 0) {
const previousClass = allClasses[currentClassIndex - 1];
@@ -399,6 +499,17 @@ export default function Step3ChooseExamples({
</div>
) : hasGenerated ? (
<div className="flex flex-col gap-4">
{showMissingStatesWarning && (
<Alert variant="destructive">
<IoIosWarning className="size-5" />
<AlertTitle>
{t("wizard.step3.missingStatesWarning.title")}
</AlertTitle>
<AlertDescription>
{t("wizard.step3.missingStatesWarning.description")}
</AlertDescription>
</Alert>
)}
{!allImagesClassified && (
<div className="text-center">
<h3 className="text-lg font-medium">
@@ -474,35 +585,22 @@ export default function Step3ChooseExamples({
<Button type="button" onClick={handleBack} className="sm:flex-1">
{t("button.back", { ns: "common" })}
</Button>
<Tooltip>
<TooltipTrigger asChild>
<Button
type="button"
onClick={
allImagesClassified
? handleContinue
: handleContinueClassification
}
variant="select"
className="flex items-center justify-center gap-2 sm:flex-1"
disabled={
!hasGenerated || isGenerating || isProcessing || !canProceed
}
>
{isProcessing && <ActivityIndicator className="size-4" />}
{t("button.continue", { ns: "common" })}
</Button>
</TooltipTrigger>
{!canProceed && (
<TooltipPortal>
<TooltipContent>
{t("wizard.step3.allImagesRequired", {
count: unclassifiedImages.length,
})}
</TooltipContent>
</TooltipPortal>
)}
</Tooltip>
<Button
type="button"
onClick={
allImagesClassified
? handleContinue
: handleContinueClassification
}
variant="select"
className="flex items-center justify-center gap-2 sm:flex-1"
disabled={
!hasGenerated || isGenerating || isProcessing || !canProceed
}
>
{isProcessing && <ActivityIndicator className="size-4" />}
{t("button.continue", { ns: "common" })}
</Button>
</div>
)}
</div>

@@ -78,7 +78,7 @@ import { useStreamingSettings } from "@/context/streaming-settings-provider";
import { Trans, useTranslation } from "react-i18next";
import { CameraNameLabel } from "../camera/FriendlyNameLabel";
import { useAllowedCameras } from "@/hooks/use-allowed-cameras";
import { useIsCustomRole } from "@/hooks/use-is-custom-role";
import { useIsAdmin } from "@/hooks/use-is-admin";
type CameraGroupSelectorProps = {
className?: string;
@@ -88,7 +88,7 @@ export function CameraGroupSelector({ className }: CameraGroupSelectorProps) {
const { t } = useTranslation(["components/camera"]);
const { data: config } = useSWR<FrigateConfig>("config");
const allowedCameras = useAllowedCameras();
const isCustomRole = useIsCustomRole();
const isAdmin = useIsAdmin();
// tooltip
@@ -124,7 +124,7 @@ export function CameraGroupSelector({ className }: CameraGroupSelectorProps) {
const allGroups = Object.entries(config.camera_groups);
// If custom role, filter out groups where user has no accessible cameras
if (isCustomRole) {
if (!isAdmin) {
return allGroups
.filter(([, groupConfig]) => {
// Check if user has access to at least one camera in this group
@@ -136,7 +136,7 @@ export function CameraGroupSelector({ className }: CameraGroupSelectorProps) {
}
return allGroups.sort((a, b) => a[1].order - b[1].order);
}, [config, allowedCameras, isCustomRole]);
}, [config, allowedCameras, isAdmin]);
// add group
@@ -153,7 +153,7 @@ export function CameraGroupSelector({ className }: CameraGroupSelectorProps) {
activeGroup={group}
setGroup={setGroup}
deleteGroup={deleteGroup}
isCustomRole={isCustomRole}
isAdmin={isAdmin}
/>
<Scroller className={`${isMobile ? "whitespace-nowrap" : ""}`}>
<div
@@ -221,7 +221,7 @@ export function CameraGroupSelector({ className }: CameraGroupSelectorProps) {
);
})}
{!isCustomRole && (
{isAdmin && (
<Button
className="bg-secondary text-muted-foreground"
aria-label={t("group.add")}
@@ -245,7 +245,7 @@ type NewGroupDialogProps = {
activeGroup?: string;
setGroup: (value: string | undefined, replace?: boolean | undefined) => void;
deleteGroup: () => void;
isCustomRole?: boolean;
isAdmin?: boolean;
};
function NewGroupDialog({
open,
@@ -254,7 +254,7 @@ function NewGroupDialog({
activeGroup,
setGroup,
deleteGroup,
isCustomRole,
isAdmin,
}: NewGroupDialogProps) {
const { t } = useTranslation(["components/camera"]);
const { mutate: updateConfig } = useSWR<FrigateConfig>("config");
@@ -390,7 +390,7 @@ function NewGroupDialog({
>
<Title>{t("group.label")}</Title>
<Description className="sr-only">{t("group.edit")}</Description>
{!isCustomRole && (
{isAdmin && (
<div
className={cn(
"absolute",
@@ -422,7 +422,7 @@ function NewGroupDialog({
group={group}
onDeleteGroup={() => onDeleteGroup(group[0])}
onEditGroup={() => onEditGroup(group)}
isReadOnly={isCustomRole}
isReadOnly={!isAdmin}
/>
))}
</div>
@@ -677,7 +677,7 @@ export function CameraGroupEdit({
);
const allowedCameras = useAllowedCameras();
const isCustomRole = useIsCustomRole();
const isAdmin = useIsAdmin();
const [openCamera, setOpenCamera] = useState<string | null>();
@@ -867,7 +867,7 @@ export function CameraGroupEdit({
<FormMessage />
{[
...(birdseyeConfig?.enabled &&
(!isCustomRole || "birdseye" in allowedCameras)
(isAdmin || "birdseye" in allowedCameras)
? ["birdseye"]
: []),
...Object.keys(config?.cameras ?? {})

@@ -1,7 +1,11 @@
import { Dialog, DialogContent, DialogTrigger } from "@/components/ui/dialog";
import { Drawer, DrawerContent, DrawerTrigger } from "@/components/ui/drawer";
import { cn } from "@/lib/utils";
import { ReviewSegment, ThreatLevel } from "@/types/review";
import {
ReviewSegment,
ThreatLevel,
THREAT_LEVEL_LABELS,
} from "@/types/review";
import { useEffect, useMemo, useState } from "react";
import { isDesktop } from "react-device-detect";
import { useTranslation } from "react-i18next";
@@ -55,13 +59,22 @@ export function GenAISummaryDialog({
}
let concerns = "";
switch (aiAnalysis.potential_threat_level) {
case ThreatLevel.SUSPICIOUS:
concerns = `${t("suspiciousActivity", { ns: "views/events" })}\n`;
break;
case ThreatLevel.DANGER:
concerns = `${t("threateningActivity", { ns: "views/events" })}\n`;
break;
const threatLevel = aiAnalysis.potential_threat_level ?? 0;
if (threatLevel > 0) {
let label = "";
switch (threatLevel) {
case ThreatLevel.NEEDS_REVIEW:
label = t("needsReview", { ns: "views/events" });
break;
case ThreatLevel.SECURITY_CONCERN:
label = t("securityConcern", { ns: "views/events" });
break;
default:
label = THREAT_LEVEL_LABELS[threatLevel as ThreatLevel] || "Unknown";
}
concerns = `${label}\n`;
}
(aiAnalysis.other_concerns ?? []).forEach((c) => {

@@ -1,7 +1,11 @@
import React, { useCallback, useEffect, useMemo, useState } from "react";
import { useApiHost } from "@/api";
import { isCurrentHour } from "@/utils/dateUtil";
import { ReviewSegment } from "@/types/review";
import {
ReviewSegment,
ThreatLevel,
THREAT_LEVEL_LABELS,
} from "@/types/review";
import { getIconForLabel } from "@/utils/iconUtil";
import TimeAgo from "../dynamic/TimeAgo";
import useSWR from "swr";
@@ -44,7 +48,7 @@ export default function PreviewThumbnailPlayer({
onClick,
onTimeUpdate,
}: PreviewPlayerProps) {
const { t } = useTranslation(["components/player"]);
const { t } = useTranslation(["components/player", "views/events"]);
const apiHost = useApiHost();
const { data: config } = useSWR<FrigateConfig>("config");
const [imgRef, imgLoaded, onImgLoad] = useImageLoaded();
@@ -319,11 +323,21 @@ export default function PreviewThumbnailPlayer({
</TooltipTrigger>
</div>
<TooltipContent className="smart-capitalize">
{review.data.metadata.potential_threat_level == 1 ? (
<>{t("suspiciousActivity", { ns: "views/events" })}</>
) : (
<>{t("threateningActivity", { ns: "views/events" })}</>
)}
{(() => {
const threatLevel =
review.data.metadata.potential_threat_level ?? 0;
switch (threatLevel) {
case ThreatLevel.NEEDS_REVIEW:
return t("needsReview", { ns: "views/events" });
case ThreatLevel.SECURITY_CONCERN:
return t("securityConcern", { ns: "views/events" });
default:
return (
THREAT_LEVEL_LABELS[threatLevel as ThreatLevel] ||
"Unknown"
);
}
})()}
</TooltipContent>
</Tooltip>
)}

@@ -1,3 +1,4 @@
import { baseUrl } from "@/api/baseUrl";
import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
import { useCallback, useEffect, useState, useMemo } from "react";
import useSWR from "swr";
@@ -41,9 +42,12 @@ export default function useCameraLiveMode(
const metadataPromises = streamNames.map(async (streamName) => {
try {
const response = await fetch(`/api/go2rtc/streams/${streamName}`, {
priority: "low",
});
const response = await fetch(
`${baseUrl}api/go2rtc/streams/${streamName}`,
{
priority: "low",
},
);
if (response.ok) {
const data = await response.json();

@@ -17,6 +17,7 @@ export function useHistoryBack({
}: UseHistoryBackOptions): void {
const historyPushedRef = React.useRef(false);
const closedByBackRef = React.useRef(false);
const urlWhenOpenedRef = React.useRef<string | null>(null);
// Keep onClose in a ref to avoid effect re-runs that cause multiple history pushes
const onCloseRef = React.useRef(onClose);
@@ -30,6 +31,9 @@ export function useHistoryBack({
if (open) {
// Only push history state if we haven't already (prevents duplicates in strict mode)
if (!historyPushedRef.current) {
// Store the current URL (pathname + search, without hash) before pushing history state
urlWhenOpenedRef.current =
window.location.pathname + window.location.search;
window.history.pushState({ overlayOpen: true }, "");
historyPushedRef.current = true;
}
@@ -37,6 +41,7 @@ export function useHistoryBack({
const handlePopState = () => {
closedByBackRef.current = true;
historyPushedRef.current = false;
urlWhenOpenedRef.current = null;
onCloseRef.current();
};
@@ -48,10 +53,22 @@ export function useHistoryBack({
} else {
// Overlay is closing - clean up history if we pushed and it wasn't via back button
if (historyPushedRef.current && !closedByBackRef.current) {
window.history.back();
const currentUrl = window.location.pathname + window.location.search;
const urlWhenOpened = urlWhenOpenedRef.current;
// If the URL has changed (e.g., filters were applied via search params),
// don't go back as it would undo the filter update.
// The history entry we pushed will remain, but that's acceptable compared
// to losing the user's filter changes.
if (!urlWhenOpened || currentUrl === urlWhenOpened) {
// URL hasn't changed, safe to go back and remove our history entry
window.history.back();
}
// If URL changed, we skip history.back() to preserve the filter updates
}
historyPushedRef.current = false;
closedByBackRef.current = false;
urlWhenOpenedRef.current = null;
}
}, [enabled, open]);
}

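A hypothetical wiring sketch for this hook (the component and the import path are illustrative, not part of the PR):

```tsx
import { useState } from "react";
import { useHistoryBack } from "@/hooks/use-history-back"; // assumed path

function FilterOverlay() {
  const [open, setOpen] = useState(false);

  // Close the overlay when the user presses the browser back button, and
  // clean up the pushed history entry on normal dismissal, unless the URL
  // changed while the overlay was open (e.g. filters updated search params).
  useHistoryBack({
    open,
    onClose: () => setOpen(false),
    enabled: true,
  });

  return null; // render the overlay UI here
}
```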
@@ -49,6 +49,7 @@ function ConfigEditor() {
const [restartDialogOpen, setRestartDialogOpen] = useState(false);
const { send: sendRestart } = useRestart();
const initialValidationRef = useRef(false);
const onHandleSaveConfig = useCallback(
async (save_option: SaveOptions): Promise<void> => {
@@ -171,6 +172,33 @@ function ConfigEditor() {
};
}, [rawConfig, apiHost, systemTheme, theme, onHandleSaveConfig]);
// when in safe mode, attempt to validate the existing (invalid) config immediately
// so that the user sees the validation errors without needing to press save
useEffect(() => {
if (
config?.safe_mode &&
rawConfig &&
!initialValidationRef.current &&
!error
) {
initialValidationRef.current = true;
axios
.post(`config/save?save_option=saveonly`, rawConfig, {
headers: { "Content-Type": "text/plain" },
})
.then(() => {
// if this succeeds while in safe mode, we won't force any UI change
})
.catch((e: AxiosError<ApiErrorResponse>) => {
const errorMessage =
e.response?.data?.message ||
e.response?.data?.detail ||
"Unknown error";
setError(errorMessage);
});
}
}, [config?.safe_mode, rawConfig, error]);
// monitoring state
const [hasChanges, setHasChanges] = useState(false);

@@ -14,12 +14,12 @@ import { useTranslation } from "react-i18next";
import { useEffect, useMemo, useRef } from "react";
import useSWR from "swr";
import { useAllowedCameras } from "@/hooks/use-allowed-cameras";
import { useIsCustomRole } from "@/hooks/use-is-custom-role";
import { useIsAdmin } from "@/hooks/use-is-admin";
function Live() {
const { t } = useTranslation(["views/live"]);
const { data: config } = useSWR<FrigateConfig>("config");
const isCustomRole = useIsCustomRole();
const isAdmin = useIsAdmin();
// selection
@@ -94,7 +94,7 @@ function Live() {
const includesBirdseye = useMemo(() => {
// Restricted users should never have access to birdseye
if (isCustomRole) {
if (!isAdmin) {
return false;
}
@@ -109,7 +109,7 @@ function Live() {
} else {
return false;
}
}, [config, cameraGroup, isCustomRole]);
}, [config, cameraGroup, isAdmin]);
const cameras = useMemo(() => {
if (!config) {

@@ -87,6 +87,13 @@ export type ZoomLevel = {
};
export enum ThreatLevel {
SUSPICIOUS = 1,
DANGER = 2,
NORMAL = 0,
NEEDS_REVIEW = 1,
SECURITY_CONCERN = 2,
}
export const THREAT_LEVEL_LABELS: Record<ThreatLevel, string> = {
[ThreatLevel.NORMAL]: "Normal",
[ThreatLevel.NEEDS_REVIEW]: "Needs review",
[ThreatLevel.SECURITY_CONCERN]: "Security concern",
};

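A quick lookup sketch showing how the renamed enum members and the label map pair up:

```ts
const level = ThreatLevel.NEEDS_REVIEW;
console.log(THREAT_LEVEL_LABELS[level]); // "Needs review"
```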
@@ -1,3 +1,4 @@
import { baseUrl } from "@/api/baseUrl";
import { generateFixedHash, isValidId } from "./stringUtil";
/**
@@ -52,9 +53,12 @@ export async function detectReolinkCamera(
password,
});
const response = await fetch(`/api/reolink/detect?${params.toString()}`, {
method: "GET",
});
const response = await fetch(
`${baseUrl}api/reolink/detect?${params.toString()}`,
{
method: "GET",
},
);
if (!response.ok) {
return null;

@@ -54,7 +54,7 @@ import { useTranslation } from "react-i18next";
import { EmptyCard } from "@/components/card/EmptyCard";
import { BsFillCameraVideoOffFill } from "react-icons/bs";
import { AuthContext } from "@/context/auth-context";
import { useIsCustomRole } from "@/hooks/use-is-custom-role";
import { useIsAdmin } from "@/hooks/use-is-admin";
type LiveDashboardViewProps = {
cameras: CameraConfig[];
@@ -661,10 +661,10 @@ export default function LiveDashboardView({
function NoCameraView() {
const { t } = useTranslation(["views/live"]);
const { auth } = useContext(AuthContext);
const isCustomRole = useIsCustomRole();
const isAdmin = useIsAdmin();
// Check if this is a restricted user with no cameras in this group
const isRestricted = isCustomRole && auth.isAuthenticated;
const isRestricted = !isAdmin && auth.isAuthenticated;
return (
<div className="flex size-full items-center justify-center">