Mirror of https://github.com/blakeblackshear/frigate.git
Synced 2025-12-08 14:25:41 +03:00

Compare commits: 14 commits (aa7f714992...a4d9c31fb6)
Commits:
a4d9c31fb6
acb17a7b50
97b29d177a
7933a83a42
2eef58aa1d
6659b7cb0f
f134796913
b4abbd7d3b
438df7d484
e27a94ae0b
1dee548dbc
91e17e12b7
bb45483e9e
7b4eaf2d10
.github/workflows/ci.yml (vendored): 26 changes

@@ -225,3 +225,29 @@ jobs:
           sources: |
             ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }}-amd64
             ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }}-rpi
+  axera_build:
+    runs-on: ubuntu-22.04
+    name: AXERA Build
+    needs:
+      - amd64_build
+      - arm64_build
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v5
+        with:
+          persist-credentials: false
+      - name: Set up QEMU and Buildx
+        id: setup
+        uses: ./.github/actions/setup
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build and push Axera build
+        uses: docker/bake-action@v6
+        with:
+          source: .
+          push: true
+          targets: axcl
+          files: docker/axcl/axcl.hcl
+          set: |
+            axcl.tags=${{ steps.setup.outputs.image-name }}-axcl
+            *.cache-from=type=gha
docker/axcl/Dockerfile (new file, 55 lines)

@@ -0,0 +1,55 @@
# syntax=docker/dockerfile:1.6

# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive

# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1


FROM frigate AS frigate-axcl
ARG TARGETARCH
ARG PIP_BREAK_SYSTEM_PACKAGES

# Install pyaxengine
RUN wget https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3.rc1/axengine-0.1.3-py3-none-any.whl -O /axengine-0.1.3-py3-none-any.whl
RUN pip3 install -i https://mirrors.aliyun.com/pypi/simple/ /axengine-0.1.3-py3-none-any.whl \
    && rm /axengine-0.1.3-py3-none-any.whl

# Install axcl
RUN if [ "$TARGETARCH" = "amd64" ]; then \
        echo "Installing x86_64 version of axcl"; \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb -O /axcl.deb; \
    else \
        echo "Installing aarch64 version of axcl"; \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb -O /axcl.deb; \
    fi

RUN mkdir /unpack_axcl && \
    dpkg-deb -x /axcl.deb /unpack_axcl && \
    cp -R /unpack_axcl/usr/bin/axcl /usr/bin/ && \
    cp -R /unpack_axcl/usr/lib/axcl /usr/lib/ && \
    rm -rf /unpack_axcl /axcl.deb

# Install axcl ffmpeg
RUN mkdir -p /usr/lib/ffmpeg/axcl

RUN if [ "$TARGETARCH" = "amd64" ]; then \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffmpeg-x64 -O /usr/lib/ffmpeg/axcl/ffmpeg && \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffprobe-x64 -O /usr/lib/ffmpeg/axcl/ffprobe; \
    else \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffmpeg-aarch64 -O /usr/lib/ffmpeg/axcl/ffmpeg && \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffprobe-aarch64 -O /usr/lib/ffmpeg/axcl/ffprobe; \
    fi

RUN chmod +x /usr/lib/ffmpeg/axcl/ffmpeg /usr/lib/ffmpeg/axcl/ffprobe

# Set ldconfig path
RUN echo "/usr/lib/axcl" > /etc/ld.so.conf.d/ax.conf

# Set env
ENV PATH="$PATH:/usr/bin/axcl"
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib/axcl"

ENTRYPOINT ["sh", "-c", "ldconfig && exec /init"]
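The ENTRYPOINT above runs `ldconfig` before handing control to Frigate's `/init`, so the AXCL libraries registered via `/etc/ld.so.conf.d/ax.conf` are resolvable when Frigate starts. A quick sanity check from a running container (the container name `frigate` is illustrative, not from the PR):

```bash
# Should list libraries under /usr/lib/axcl if the image is wired up correctly.
docker exec frigate ldconfig -p | grep axcl
```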
docker/axcl/axcl.hcl (new file, 13 lines)

@@ -0,0 +1,13 @@
target frigate {
  dockerfile = "docker/main/Dockerfile"
  platforms  = ["linux/amd64", "linux/arm64"]
  target     = "frigate"
}

target axcl {
  dockerfile = "docker/axcl/Dockerfile"
  contexts = {
    frigate = "target:frigate",
  }
  platforms = ["linux/amd64", "linux/arm64"]
}
docker/axcl/axcl.mk (new file, 15 lines)

@@ -0,0 +1,15 @@
BOARDS += axcl

local-axcl: version
	docker buildx bake --file=docker/axcl/axcl.hcl axcl \
		--set axcl.tags=frigate:latest-axcl \
		--load

build-axcl: version
	docker buildx bake --file=docker/axcl/axcl.hcl axcl \
		--set axcl.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-axcl

push-axcl: build-axcl
	docker buildx bake --file=docker/axcl/axcl.hcl axcl \
		--set axcl.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-axcl \
		--push
docker/axcl/user_installation.sh (new executable file, 83 lines)

@@ -0,0 +1,83 @@
#!/bin/bash

# Update package list and install dependencies
sudo apt-get update
sudo apt-get install -y build-essential cmake git wget pciutils kmod udev

# Check if gcc-12 is needed
current_gcc_version=$(gcc --version | head -n1 | awk '{print $NF}')
gcc_major_version=$(echo $current_gcc_version | cut -d'.' -f1)

if [[ $gcc_major_version -lt 12 ]]; then
    echo "Current GCC version ($current_gcc_version) is lower than 12, installing gcc-12..."
    sudo apt-get install -y gcc-12
    sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12
    echo "GCC-12 installed and set as default"
else
    echo "Current GCC version ($current_gcc_version) is sufficient, skipping GCC installation"
fi

# Determine architecture
arch=$(uname -m)
download_url=""

if [[ $arch == "x86_64" ]]; then
    download_url="https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb"
    deb_file="axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb"
elif [[ $arch == "aarch64" ]]; then
    download_url="https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb"
    deb_file="axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb"
else
    echo "Unsupported architecture: $arch"
    exit 1
fi

# Download AXCL driver
echo "Downloading AXCL driver for $arch..."
wget "$download_url" -O "$deb_file"

if [ $? -ne 0 ]; then
    echo "Failed to download AXCL driver"
    exit 1
fi

# Install AXCL driver
echo "Installing AXCL driver..."
sudo dpkg -i "$deb_file"

if [ $? -ne 0 ]; then
    echo "Failed to install AXCL driver, attempting to fix dependencies..."
    sudo apt-get install -f -y
    sudo dpkg -i "$deb_file"

    if [ $? -ne 0 ]; then
        echo "AXCL driver installation failed"
        exit 1
    fi
fi

# Update environment
echo "Updating environment..."
source /etc/profile

# Verify installation
echo "Verifying AXCL installation..."
if command -v axcl-smi &> /dev/null; then
    echo "AXCL driver detected, checking AI accelerator status..."

    axcl_output=$(axcl-smi 2>&1)
    axcl_exit_code=$?

    echo "$axcl_output"

    if [ $axcl_exit_code -eq 0 ]; then
        echo "AXCL driver installation completed successfully!"
    else
        echo "AXCL driver installed but no AI accelerator detected or communication failed."
        echo "Please check if the AI accelerator is properly connected and powered on."
        exit 1
    fi
else
    echo "axcl-smi command not found. AXCL driver installation may have failed."
    exit 1
fi
@@ -157,3 +157,19 @@ Only one `speech` event may be transcribed at a time. Frigate does not automatic
 :::
 
 Recorded `speech` events will always use a `whisper` model, regardless of the `model_size` config setting. Without a supported Nvidia GPU, generating transcriptions for longer `speech` events may take a fair amount of time, so be patient.
+
+#### FAQ
+
+1. Why doesn't Frigate automatically transcribe all `speech` events?
+
+Frigate does not implement a queue mechanism for speech transcription, and adding one is not trivial. A proper queue would need backpressure, prioritization, memory/disk buffering, retry logic, crash recovery, and safeguards to prevent unbounded growth when events outpace processing. That’s a significant amount of complexity for a feature that, in most real-world environments, would mostly just churn through low-value noise.
+
+Because transcription is **serialized (one event at a time)** and speech events can be generated far faster than they can be processed, an auto-transcribe toggle would very quickly create an ever-growing backlog and degrade core functionality. For the amount of engineering and risk involved, it adds **very little practical value** for the majority of deployments, which are often on low-powered, edge hardware.
+
+If you hear speech that’s actually important and worth saving/indexing for the future, **just press the transcribe button in Explore** on that specific `speech` event - that keeps things explicit, reliable, and under your control.
+
+2. Why don't you save live transcription text and use that for `speech` events?
+
+There’s no guarantee that a `speech` event is even created from the exact audio that went through the transcription model. Live transcription and `speech` event creation are **separate, asynchronous processes**. Even when both are correctly configured, trying to align the **precise start and end time of a speech event** with whatever audio the model happened to be processing at that moment is unreliable.
+
+Automatically persisting that data would often result in **misaligned, partial, or irrelevant transcripts**, while still incurring all of the CPU, storage, and privacy costs of transcription. That’s why Frigate treats transcription as an **explicit, user-initiated action** rather than an automatic side-effect of every `speech` event.
@@ -49,6 +49,11 @@ Frigate supports multiple different detectors that work on different types of ha
 
 - [Synaptics](#synaptics): synap models can run on Synaptics devices(e.g astra machina) with included NPUs.
 
+**AXERA** <CommunityBadge />
+
+- [AXEngine](#axera): axmodels can run on AXERA AI acceleration.
+
+
 **For Testing**
 
 - [CPU Detector (not recommended for actual use](#cpu-detector-not-recommended): Use a CPU to run tflite model, this is not recommended and in most cases OpenVINO can be used in CPU mode with better results.
@@ -1438,6 +1443,41 @@ model:
   input_pixel_format: rgb/bgr # look at the model.json to figure out which to put here
 ```
 
+## AXERA
+
+Hardware accelerated object detection is supported on the following SoCs:
+
+- AX650N
+- AX8850N
+
+This implementation uses the [AXera Pulsar2 Toolchain](https://huggingface.co/AXERA-TECH/Pulsar2).
+
+See the [installation docs](../frigate/installation.md#axera) for information on configuring the AXEngine hardware.
+
+### Configuration
+
+When configuring the AXEngine detector, you have to specify the model name.
+
+#### yolov9
+
+A yolov9 model is provided in the container at /axmodels and is used by this detector type by default.
+
+Use the model configuration shown below when using the axengine detector with the default axmodel:
+
+```yaml
+detectors:
+  axengine:
+    type: axengine
+
+model:
+  path: frigate-yolov9-tiny
+  model_type: yolo-generic
+  width: 320
+  height: 320
+  tensor_format: bgr
+  labelmap_path: /labelmap/coco-80.txt
+```
+
 # Models
 
 Some model types are not included in Frigate by default.
@@ -104,6 +104,10 @@ Frigate supports multiple different detectors that work on different types of ha
 
 - [Synaptics](#synaptics): synap models can run on Synaptics devices(e.g astra machina) with included NPUs to provide efficient object detection.
 
+**AXERA** <CommunityBadge />
+
+- [AXEngine](#axera): axera models can run on AXERA NPUs via AXEngine, delivering highly efficient object detection.
+
 :::
 
 ### Hailo-8

@@ -287,6 +291,14 @@ The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms fo
 | ssd mobilenet | ~ 25 ms  |
 | yolov5m       | ~ 118 ms |
 
+### AXERA
+
+- **AXEngine**: Default model is **yolov9**
+
+| Name        | AXERA AX650N/AX8850N Inference Time |
+| ----------- | ----------------------------------- |
+| yolov9-tiny | ~ 4 ms                              |
+
 ## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)
 
 This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.
@@ -287,6 +287,42 @@ or add these options to your `docker run` command:
 
 Next, you should configure [hardware object detection](/configuration/object_detectors#synaptics) and [hardware video processing](/configuration/hardware_acceleration_video#synaptics).
 
+### AXERA
+
+<details>
+<summary>AXERA accelerators</summary>
+AXERA accelerators are available in an M.2 form factor, compatible with both Raspberry Pi and Orange Pi. This form factor has also been successfully tested on x86 platforms, making it a versatile choice for various computing environments.
+
+#### Installation
+
+Using AXERA accelerators requires the installation of the AXCL driver. We provide a convenient Linux script to complete this installation.
+
+Follow these steps for installation:
+
+1. Copy or download [this script](https://github.com/ivanshi1108/assets/releases/download/v0.16.2/user_installation.sh).
+2. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
+3. Run the script with `./user_installation.sh`
+
+#### Setup
+
+To set up Frigate, follow the default installation instructions, for example: `ghcr.io/blakeblackshear/frigate:stable`
+
+Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file:
+
+```yaml
+devices:
+  - /dev/axcl_host
+  - /dev/ax_mmb_dev
+  - /dev/msg_userdev
+```
+
+If you are using `docker run`, add this option to your command `--device /dev/axcl_host --device /dev/ax_mmb_dev --device /dev/msg_userdev`
+
+#### Configuration
+
+Finally, configure [hardware object detection](/configuration/object_detectors#axera) to complete the setup.
+</details>
+
 ## Docker
 
 Running through Docker with Docker Compose is the recommended install method.
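Putting the device flags from the docs above together, a minimal `docker run` sketch (the container name is illustrative; volume mounts, ports, and shared-memory settings should follow the standard install docs and are omitted here):

```bash
# Pass the three AXCL device nodes through to the container.
docker run -d \
  --name frigate \
  --device /dev/axcl_host \
  --device /dev/ax_mmb_dev \
  --device /dev/msg_userdev \
  ghcr.io/blakeblackshear/frigate:stable
```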
@@ -99,6 +99,42 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         if self.inference_speed:
             self.inference_speed.update(duration)
 
+    def _should_save_image(
+        self, camera: str, detected_state: str, score: float = 1.0
+    ) -> bool:
+        """
+        Determine if we should save the image for training.
+        Save when:
+        - State is changing or being verified (regardless of score)
+        - Score is less than 100% (even if state matches, useful for training)
+        Don't save when:
+        - State is stable (matches current_state) AND score is 100%
+        """
+        if camera not in self.state_history:
+            # First detection for this camera, save it
+            return True
+
+        verification = self.state_history[camera]
+        current_state = verification.get("current_state")
+        pending_state = verification.get("pending_state")
+
+        # Save if there's a pending state change being verified
+        if pending_state is not None:
+            return True
+
+        # Save if the detected state differs from the current verified state
+        # (state is changing)
+        if current_state is not None and detected_state != current_state:
+            return True
+
+        # If score is less than 100%, save even if state matches
+        # (useful for training to improve confidence)
+        if score < 1.0:
+            return True
+
+        # Don't save if state is stable (detected_state == current_state) AND score is 100%
+        return False
+
     def verify_state_change(self, camera: str, detected_state: str) -> str | None:
         """
         Verify state change requires 3 consecutive identical states before publishing.

@@ -212,6 +248,8 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
             return
 
         if self.interpreter is None:
+            # When interpreter is None, always save (score is 0.0, which is < 1.0)
+            if self._should_save_image(camera, "unknown", 0.0):
                 write_classification_attempt(
                     self.train_dir,
                     cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),

@@ -236,12 +274,15 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         score = round(probs[best_id], 2)
         self.__update_metrics(datetime.datetime.now().timestamp() - now)
 
+        detected_state = self.labelmap[best_id]
+
+        if self._should_save_image(camera, detected_state, score):
             write_classification_attempt(
                 self.train_dir,
                 cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
                 "none-none",
                 now,
-                self.labelmap[best_id],
+                detected_state,
                 score,
             )

@@ -251,7 +292,6 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
             )
             return
 
-        detected_state = self.labelmap[best_id]
         verified_state = self.verify_state_change(camera, detected_state)
 
         if verified_state is not None:
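The save decision above reduces to a small predicate. A standalone restatement for illustration only (the state-history lookup is inlined into explicit parameters; this is not code from the PR):

```python
def should_save(
    first_seen: bool,
    pending_state: str | None,
    current_state: str | None,
    detected_state: str,
    score: float,
) -> bool:
    # Mirrors _should_save_image: save on first sighting, during
    # verification, on a state change, or when confidence is below 100%.
    if first_seen:
        return True
    if pending_state is not None:
        return True
    if current_state is not None and detected_state != current_state:
        return True
    if score < 1.0:
        return True
    # A stable state at full confidence is the only case that is skipped.
    return False

assert should_save(False, None, "open", "open", 1.0) is False
assert should_save(False, None, "open", "closed", 1.0) is True
assert should_save(False, None, "open", "open", 0.85) is True
```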
frigate/detectors/plugins/axengine.py (new file, 86 lines)

@@ -0,0 +1,86 @@
import logging
import os.path
import re
import urllib.request
from typing import Literal

import axengine as axe

from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
from frigate.util.model import post_process_yolo

logger = logging.getLogger(__name__)

DETECTOR_KEY = "axengine"

supported_models = {
    ModelTypeEnum.yologeneric: "frigate-yolov9-.*$",
}

model_cache_dir = os.path.join(MODEL_CACHE_DIR, "axengine_cache/")


class AxengineDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]


class Axengine(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, config: AxengineDetectorConfig):
        logger.info("__init__ axengine")
        super().__init__(config)
        self.height = config.model.height
        self.width = config.model.width
        model_path = config.model.path or "frigate-yolov9-tiny"
        model_props = self.parse_model_input(model_path)
        self.session = axe.InferenceSession(model_props["path"])

    def __del__(self):
        pass

    def parse_model_input(self, model_path):
        model_props = {}
        model_props["preset"] = True

        model_matched = False

        for model_type, pattern in supported_models.items():
            if re.match(pattern, model_path):
                model_matched = True
                model_props["model_type"] = model_type

        if model_matched:
            model_props["filename"] = model_path + ".axmodel"
            model_props["path"] = model_cache_dir + model_props["filename"]

            if not os.path.isfile(model_props["path"]):
                self.download_model(model_props["filename"])
        else:
            supported_models_str = ", ".join(model[1:-1] for model in supported_models)
            raise Exception(
                f"Model {model_path} is unsupported. Provide your own model or choose one of the following: {supported_models_str}"
            )
        return model_props

    def download_model(self, filename):
        if not os.path.isdir(model_cache_dir):
            os.mkdir(model_cache_dir)

        GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
        urllib.request.urlretrieve(
            f"{GITHUB_ENDPOINT}/ivanshi1108/assets/releases/download/v0.16.2/{filename}",
            model_cache_dir + filename,
        )

    def detect_raw(self, tensor_input):
        results = None
        results = self.session.run(None, {"images": tensor_input})
        if self.detector_config.model.model_type == ModelTypeEnum.yologeneric:
            return post_process_yolo(results, self.width, self.height)
        else:
            raise ValueError(
                f'Model type "{self.detector_config.model.model_type}" is currently not supported.'
            )
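For reference, a minimal standalone sketch of what `detect_raw` does at inference time, assuming pyaxengine's onnxruntime-style `InferenceSession` API as used above. The cache path, input dtype, and tensor layout here are illustrative assumptions; Frigate resolves the real path from config and hands in an already preprocessed tensor:

```python
import numpy as np
import axengine as axe

# Assumed cache location; the detector builds this from MODEL_CACHE_DIR.
session = axe.InferenceSession(
    "/config/model_cache/axengine_cache/frigate-yolov9-tiny.axmodel"
)

# Dummy 320x320 BGR frame; dtype and layout are illustrative assumptions.
tensor_input = np.zeros((1, 320, 320, 3), dtype=np.uint8)

# Same call shape as Axengine.detect_raw(): input bound to the "images" name.
results = session.run(None, {"images": tensor_input})
print([r.shape for r in results])
```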
@@ -190,7 +190,11 @@ class OnvifController:
         ptz: ONVIFService = await onvif.create_ptz_service()
         self.cams[camera_name]["ptz"] = ptz
 
-        imaging: ONVIFService = await onvif.create_imaging_service()
+        try:
+            imaging: ONVIFService = await onvif.create_imaging_service()
+        except (Fault, ONVIFError, TransportError, Exception) as e:
+            logger.debug(f"Imaging service not supported for {camera_name}: {e}")
+            imaging = None
         self.cams[camera_name]["imaging"] = imaging
         try:
             video_sources = await media.GetVideoSources()

@@ -381,7 +385,10 @@ class OnvifController:
                 f"Disabling autotracking zooming for {camera_name}: Absolute zoom not supported. Exception: {e}"
             )
 
-        if self.cams[camera_name]["video_source_token"] is not None:
+        if (
+            self.cams[camera_name]["video_source_token"] is not None
+            and imaging is not None
+        ):
             try:
                 imaging_capabilities = await imaging.GetImagingSettings(
                     {"VideoSourceToken": self.cams[camera_name]["video_source_token"]}

@@ -421,6 +428,7 @@ class OnvifController:
         if (
             "focus" in self.cams[camera_name]["features"]
             and self.cams[camera_name]["video_source_token"]
+            and self.cams[camera_name]["imaging"] is not None
         ):
             try:
                 stop_request = self.cams[camera_name]["imaging"].create_type("Stop")

@@ -648,6 +656,7 @@ class OnvifController:
         if (
             "focus" not in self.cams[camera_name]["features"]
             or not self.cams[camera_name]["video_source_token"]
+            or self.cams[camera_name]["imaging"] is None
         ):
             logger.error(f"{camera_name} does not support ONVIF continuous focus.")
             return
@@ -124,6 +124,7 @@ def capture_frames(
         config_subscriber.check_for_updates()
         return config.enabled
 
+    try:
         while not stop_event.is_set():
             if not get_enabled_state():
                 logger.debug(f"Stopping capture thread for disabled {config.name}")

@@ -141,7 +142,9 @@ def capture_frames(
             if stop_event.is_set():
                 break
 
-            logger.error(f"{config.name}: Unable to read frames from ffmpeg process.")
+            logger.error(
+                f"{config.name}: Unable to read frames from ffmpeg process."
+            )
 
             if ffmpeg_process.poll() is not None:
                 logger.error(

@@ -163,6 +166,8 @@ def capture_frames(
                 skipped_eps.update()
 
             frame_index = 0 if frame_index == shm_frame_count - 1 else frame_index + 1
+    finally:
+        config_subscriber.stop()
 
 
 class CameraWatchdog(threading.Thread):

@@ -234,6 +239,16 @@ class CameraWatchdog(threading.Thread):
         else:
             self.ffmpeg_detect_process.wait()
 
+        # Wait for old capture thread to fully exit before starting a new one
+        if self.capture_thread is not None and self.capture_thread.is_alive():
+            self.logger.info("Waiting for capture thread to exit...")
+            self.capture_thread.join(timeout=5)
+
+            if self.capture_thread.is_alive():
+                self.logger.warning(
+                    f"Capture thread for {self.config.name} did not exit in time"
+                )
+
         self.logger.error(
             "The following ffmpeg logs include the last 100 lines prior to exit."
         )
@@ -37,7 +37,7 @@ import { useForm } from "react-hook-form";
 import { useTranslation } from "react-i18next";
 import { LuPlus, LuX } from "react-icons/lu";
 import { toast } from "sonner";
-import useSWR from "swr";
+import useSWR, { mutate } from "swr";
 import { z } from "zod";
 
 type ClassificationModelEditDialogProps = {

@@ -240,24 +240,72 @@ export default function ClassificationModelEditDialog({
           position: "top-center",
         });
       } else {
-        // State model - update classes
-        // Note: For state models, updating classes requires renaming categories
-        // which is handled through the dataset API, not the config API
-        // We'll need to implement this by calling the rename endpoint for each class
-        // For now, we just show a message that this requires retraining
+        const stateData = data as StateFormData;
+        const newClasses = stateData.classes.filter(
+          (c) => c.trim().length > 0,
+        );
+        const oldClasses = dataset?.categories
+          ? Object.keys(dataset.categories).filter((key) => key !== "none")
+          : [];
+
+        const renameMap = new Map<string, string>();
+        const maxLength = Math.max(oldClasses.length, newClasses.length);
+
+        for (let i = 0; i < maxLength; i++) {
+          const oldClass = oldClasses[i];
+          const newClass = newClasses[i];
+
+          if (oldClass && newClass && oldClass !== newClass) {
+            renameMap.set(oldClass, newClass);
+          }
+        }
+
+        const renamePromises = Array.from(renameMap.entries()).map(
+          async ([oldName, newName]) => {
+            try {
+              await axios.put(
+                `/classification/${model.name}/dataset/${oldName}/rename`,
+                {
+                  new_category: newName,
+                },
+              );
+            } catch (err) {
+              const error = err as {
+                response?: { data?: { message?: string; detail?: string } };
+              };
+              const errorMessage =
+                error.response?.data?.message ||
+                error.response?.data?.detail ||
+                "Unknown error";
+              throw new Error(
+                `Failed to rename ${oldName} to ${newName}: ${errorMessage}`,
+              );
+            }
+          },
+        );
+
+        if (renamePromises.length > 0) {
+          await Promise.all(renamePromises);
+          await mutate(`classification/${model.name}/dataset`);
+          toast.success(t("toast.success.updatedModel"), {
+            position: "top-center",
+          });
+        } else {
           toast.info(t("edit.stateClassesInfo"), {
             position: "top-center",
          });
         }
+      }
 
       onSuccess();
       onClose();
     } catch (err) {
       const error = err as {
         response?: { data?: { message?: string; detail?: string } };
+        message?: string;
       };
       const errorMessage =
+        error.message ||
         error.response?.data?.message ||
         error.response?.data?.detail ||
         "Unknown error";

@@ -268,7 +316,7 @@ export default function ClassificationModelEditDialog({
         setIsSaving(false);
       }
     },
-    [isObjectModel, model, t, onSuccess, onClose],
+    [isObjectModel, model, dataset, t, onSuccess, onClose],
   );
 
   const handleCancel = useCallback(() => {
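For reference, the dataset rename endpoint the dialog now calls, as a hedged curl sketch (the host, port, model name, and category names are illustrative; the web UI issues the same request via axios under its `/api` base path, which is an assumption here):

```bash
curl -X PUT \
  "http://frigate.local:5000/api/classification/my_model/dataset/old_class/rename" \
  -H "Content-Type: application/json" \
  -d '{"new_category": "new_class"}'
```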
@@ -48,6 +48,7 @@ import { useTranslation } from "react-i18next";
 import { useDateLocale } from "@/hooks/use-date-locale";
 import { useIsAdmin } from "@/hooks/use-is-admin";
 import { CameraNameLabel } from "../camera/FriendlyNameLabel";
+import { LiveStreamMetadata } from "@/types/live";
 
 type LiveContextMenuProps = {
   className?: string;

@@ -68,6 +69,7 @@ type LiveContextMenuProps = {
   resetPreferredLiveMode: () => void;
   config?: FrigateConfig;
   children?: ReactNode;
+  streamMetadata?: { [key: string]: LiveStreamMetadata };
 };
 export default function LiveContextMenu({
   className,

@@ -88,6 +90,7 @@ export default function LiveContextMenu({
   resetPreferredLiveMode,
   config,
   children,
+  streamMetadata,
 }: LiveContextMenuProps) {
   const { t } = useTranslation("views/live");
   const [showSettings, setShowSettings] = useState(false);

@@ -558,6 +561,7 @@ export default function LiveContextMenu({
           setGroupStreamingSettings={setGroupStreamingSettings}
           setIsDialogOpen={setShowSettings}
           onSave={onSave}
+          streamMetadata={streamMetadata}
         />
       </Dialog>
     </div>
@@ -38,6 +38,7 @@ import { useCameraFriendlyName } from "@/hooks/use-camera-friendly-name";
 type CameraStreamingDialogProps = {
   camera: string;
   groupStreamingSettings: GroupStreamingSettings;
+  streamMetadata?: { [key: string]: LiveStreamMetadata };
   setGroupStreamingSettings: React.Dispatch<
     React.SetStateAction<GroupStreamingSettings>
   >;

@@ -48,6 +49,7 @@ type CameraStreamingDialogProps = {
 export function CameraStreamingDialog({
   camera,
   groupStreamingSettings,
+  streamMetadata,
   setGroupStreamingSettings,
   setIsDialogOpen,
   onSave,

@@ -76,12 +78,7 @@ export function CameraStreamingDialog({
     [config, streamName],
   );
 
-  const { data: cameraMetadata } = useSWR<LiveStreamMetadata>(
-    isRestreamed ? `go2rtc/streams/${streamName}` : null,
-    {
-      revalidateOnFocus: false,
-    },
-  );
+  const cameraMetadata = streamName ? streamMetadata?.[streamName] : undefined;
 
   const supportsAudioOutput = useMemo(() => {
     if (!cameraMetadata) {
@@ -1,8 +1,8 @@
-import { baseUrl } from "@/api/baseUrl";
 import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";
 import { useCallback, useEffect, useState, useMemo } from "react";
 import useSWR from "swr";
-import { LivePlayerMode, LiveStreamMetadata } from "@/types/live";
+import { LivePlayerMode } from "@/types/live";
+import useDeferredStreamMetadata from "./use-deferred-stream-metadata";
 
 export default function useCameraLiveMode(
   cameras: CameraConfig[],

@@ -11,9 +11,9 @@ export default function useCameraLiveMode(
 ) {
   const { data: config } = useSWR<FrigateConfig>("config");
 
-  // Get comma-separated list of restreamed stream names for SWR key
-  const restreamedStreamsKey = useMemo(() => {
-    if (!cameras || !config) return null;
+  // Compute which streams need metadata (restreamed streams only)
+  const restreamedStreamNames = useMemo(() => {
+    if (!cameras || !config) return [];
 
     const streamNames = new Set<string>();
     cameras.forEach((camera) => {

@@ -32,56 +32,13 @@ export default function useCameraLiveMode(
       }
     });
 
-    return streamNames.size > 0
-      ? Array.from(streamNames).sort().join(",")
-      : null;
+    return Array.from(streamNames);
   }, [cameras, config, activeStreams]);
 
-  const streamsFetcher = useCallback(async (key: string) => {
-    const streamNames = key.split(",");
-
-    const metadataPromises = streamNames.map(async (streamName) => {
-      try {
-        const response = await fetch(
-          `${baseUrl}api/go2rtc/streams/${streamName}`,
-          {
-            priority: "low",
-          },
-        );
-
-        if (response.ok) {
-          const data = await response.json();
-          return { streamName, data };
-        }
-        return { streamName, data: null };
-      } catch (error) {
-        // eslint-disable-next-line no-console
-        console.error(`Failed to fetch metadata for ${streamName}:`, error);
-        return { streamName, data: null };
-      }
-    });
-
-    const results = await Promise.allSettled(metadataPromises);
-
-    const metadata: { [key: string]: LiveStreamMetadata } = {};
-    results.forEach((result) => {
-      if (result.status === "fulfilled" && result.value.data) {
-        metadata[result.value.streamName] = result.value.data;
-      }
-    });
-
-    return metadata;
-  }, []);
-
-  const { data: allStreamMetadata = {} } = useSWR<{
-    [key: string]: LiveStreamMetadata;
-  }>(restreamedStreamsKey, streamsFetcher, {
-    revalidateOnFocus: false,
-    revalidateOnReconnect: false,
-    revalidateIfStale: false,
-    dedupingInterval: 60000,
-  });
-
+  // Fetch stream metadata with deferred loading (doesn't block initial render)
+  const streamMetadata = useDeferredStreamMetadata(restreamedStreamNames);
+
+  // Compute live mode states
   const [preferredLiveModes, setPreferredLiveModes] = useState<{
     [key: string]: LivePlayerMode;
   }>({});

@@ -122,10 +79,10 @@ export default function useCameraLiveMode(
         newPreferredLiveModes[camera.name] = isRestreamed ? "mse" : "jsmpeg";
       }
 
-      // check each stream for audio support
+      // Check each stream for audio support
       if (isRestreamed) {
         Object.values(camera.live.streams).forEach((streamName) => {
-          const metadata = allStreamMetadata?.[streamName];
+          const metadata = streamMetadata[streamName];
           newSupportsAudioOutputStates[streamName] = {
             supportsAudio: metadata
               ? metadata.producers.find(

@@ -150,7 +107,7 @@ export default function useCameraLiveMode(
     setPreferredLiveModes(newPreferredLiveModes);
     setIsRestreamedStates(newIsRestreamedStates);
     setSupportsAudioOutputStates(newSupportsAudioOutputStates);
-  }, [cameras, config, windowVisible, allStreamMetadata]);
+  }, [cameras, config, windowVisible, streamMetadata]);
 
   const resetPreferredLiveMode = useCallback(
     (cameraName: string) => {

@@ -180,5 +137,6 @@ export default function useCameraLiveMode(
     resetPreferredLiveMode,
     isRestreamedStates,
     supportsAudioOutputStates,
+    streamMetadata,
   };
 }
web/src/hooks/use-deferred-stream-metadata.ts (new file, 90 lines)

@@ -0,0 +1,90 @@
import { baseUrl } from "@/api/baseUrl";
import { useCallback, useEffect, useState, useMemo } from "react";
import useSWR from "swr";
import { LiveStreamMetadata } from "@/types/live";

const FETCH_TIMEOUT_MS = 10000;
const DEFER_DELAY_MS = 2000;

/**
 * Hook that fetches go2rtc stream metadata with deferred loading.
 *
 * Metadata fetching is delayed to prevent blocking initial page load
 * and camera image requests.
 *
 * @param streamNames - Array of stream names to fetch metadata for
 * @returns Object containing stream metadata keyed by stream name
 */
export default function useDeferredStreamMetadata(streamNames: string[]) {
  const [fetchEnabled, setFetchEnabled] = useState(false);

  useEffect(() => {
    const timeoutId = setTimeout(() => {
      setFetchEnabled(true);
    }, DEFER_DELAY_MS);

    return () => clearTimeout(timeoutId);
  }, []);

  const swrKey = useMemo(() => {
    if (!fetchEnabled || streamNames.length === 0) return null;
    // Use spread to avoid mutating the original array
    return `deferred-streams:${[...streamNames].sort().join(",")}`;
  }, [fetchEnabled, streamNames]);

  const fetcher = useCallback(async (key: string) => {
    // Extract stream names from key (remove prefix)
    const names = key.replace("deferred-streams:", "").split(",");

    const promises = names.map(async (streamName) => {
      const controller = new AbortController();
      const timeoutId = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS);

      try {
        const response = await fetch(
          `${baseUrl}api/go2rtc/streams/${streamName}`,
          {
            priority: "low",
            signal: controller.signal,
          },
        );
        clearTimeout(timeoutId);

        if (response.ok) {
          const data = await response.json();
          return { streamName, data };
        }
        return { streamName, data: null };
      } catch (error) {
        clearTimeout(timeoutId);
        if ((error as Error).name !== "AbortError") {
          // eslint-disable-next-line no-console
          console.error(`Failed to fetch metadata for ${streamName}:`, error);
        }
        return { streamName, data: null };
      }
    });

    const results = await Promise.allSettled(promises);

    const metadata: { [key: string]: LiveStreamMetadata } = {};
    results.forEach((result) => {
      if (result.status === "fulfilled" && result.value.data) {
        metadata[result.value.streamName] = result.value.data;
      }
    });

    return metadata;
  }, []);

  const { data: metadata = {} } = useSWR<{
    [key: string]: LiveStreamMetadata;
  }>(swrKey, fetcher, {
    revalidateOnFocus: false,
    revalidateOnReconnect: false,
    revalidateIfStale: false,
    dedupingInterval: 60000,
  });

  return metadata;
}
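A hypothetical usage sketch of the new hook (the component and stream name are illustrative, not part of this PR); it shows the deferred contract: the hook returns an empty map until `DEFER_DELAY_MS` elapses and the fetch resolves:

```tsx
import useDeferredStreamMetadata from "@/hooks/use-deferred-stream-metadata";

function StreamInfo({ streamName }: { streamName: string }) {
  // {} until the deferred fetch completes, then keyed by stream name.
  const metadata = useDeferredStreamMetadata([streamName]);
  const info = metadata[streamName];

  return <span>{info ? `${info.producers.length} producer(s)` : "loading…"}</span>;
}
```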
@@ -24,6 +24,7 @@ import "react-resizable/css/styles.css";
 import {
   AudioState,
   LivePlayerMode,
+  LiveStreamMetadata,
   StatsState,
   VolumeState,
 } from "@/types/live";

@@ -47,7 +48,6 @@ import {
   TooltipContent,
 } from "@/components/ui/tooltip";
 import { Toaster } from "@/components/ui/sonner";
-import useCameraLiveMode from "@/hooks/use-camera-live-mode";
 import LiveContextMenu from "@/components/menu/LiveContextMenu";
 import { useStreamingSettings } from "@/context/streaming-settings-provider";
 import { useTranslation } from "react-i18next";

@@ -65,6 +65,16 @@ type DraggableGridLayoutProps = {
   setIsEditMode: React.Dispatch<React.SetStateAction<boolean>>;
   fullscreen: boolean;
   toggleFullscreen: () => void;
+  preferredLiveModes: { [key: string]: LivePlayerMode };
+  setPreferredLiveModes: React.Dispatch<
+    React.SetStateAction<{ [key: string]: LivePlayerMode }>
+  >;
+  resetPreferredLiveMode: (cameraName: string) => void;
+  isRestreamedStates: { [key: string]: boolean };
+  supportsAudioOutputStates: {
+    [key: string]: { supportsAudio: boolean; cameraName: string };
+  };
+  streamMetadata: { [key: string]: LiveStreamMetadata };
 };
 export default function DraggableGridLayout({
   cameras,

@@ -79,6 +89,12 @@ export default function DraggableGridLayout({
   setIsEditMode,
   fullscreen,
   toggleFullscreen,
+  preferredLiveModes,
+  setPreferredLiveModes,
+  resetPreferredLiveMode,
+  isRestreamedStates,
+  supportsAudioOutputStates,
+  streamMetadata,
 }: DraggableGridLayoutProps) {
   const { t } = useTranslation(["views/live"]);
   const { data: config } = useSWR<FrigateConfig>("config");

@@ -98,33 +114,6 @@ export default function DraggableGridLayout({
     }
   }, [allGroupsStreamingSettings, cameraGroup]);
 
-  const activeStreams = useMemo(() => {
-    const streams: { [cameraName: string]: string } = {};
-    cameras.forEach((camera) => {
-      const availableStreams = camera.live.streams || {};
-      const streamNameFromSettings =
-        currentGroupStreamingSettings?.[camera.name]?.streamName || "";
-      const streamExists =
-        streamNameFromSettings &&
-        Object.values(availableStreams).includes(streamNameFromSettings);
-
-      const streamName = streamExists
-        ? streamNameFromSettings
-        : Object.values(availableStreams)[0] || "";
-
-      streams[camera.name] = streamName;
-    });
-    return streams;
-  }, [cameras, currentGroupStreamingSettings]);
-
-  const {
-    preferredLiveModes,
-    setPreferredLiveModes,
-    resetPreferredLiveMode,
-    isRestreamedStates,
-    supportsAudioOutputStates,
-  } = useCameraLiveMode(cameras, windowVisible, activeStreams);
-
   // grid layout
 
   const ResponsiveGridLayout = useMemo(() => WidthProvider(Responsive), []);

@@ -624,6 +613,7 @@ export default function DraggableGridLayout({
             resetPreferredLiveMode(camera.name)
           }
           config={config}
+          streamMetadata={streamMetadata}
         >
           <LivePlayer
             key={camera.name}

@@ -838,6 +828,7 @@ type GridLiveContextMenuProps = {
   unmuteAll: () => void;
   resetPreferredLiveMode: () => void;
   config?: FrigateConfig;
+  streamMetadata?: { [key: string]: LiveStreamMetadata };
 };
 
 const GridLiveContextMenu = React.forwardRef<

@@ -868,6 +859,7 @@ const GridLiveContextMenu = React.forwardRef<
     unmuteAll,
     resetPreferredLiveMode,
     config,
+    streamMetadata,
     ...props
   },
   ref,

@@ -899,6 +891,7 @@ const GridLiveContextMenu = React.forwardRef<
       unmuteAll={unmuteAll}
      resetPreferredLiveMode={resetPreferredLiveMode}
       config={config}
+      streamMetadata={streamMetadata}
     >
       {children}
     </LiveContextMenu>
@@ -265,6 +265,7 @@ export default function LiveDashboardView({
     resetPreferredLiveMode,
     isRestreamedStates,
     supportsAudioOutputStates,
+    streamMetadata,
   } = useCameraLiveMode(cameras, windowVisible, activeStreams);
 
   const birdseyeConfig = useMemo(() => config?.birdseye, [config]);

@@ -650,6 +651,12 @@ export default function LiveDashboardView({
           setIsEditMode={setIsEditMode}
           fullscreen={fullscreen}
           toggleFullscreen={toggleFullscreen}
+          preferredLiveModes={preferredLiveModes}
+          setPreferredLiveModes={setPreferredLiveModes}
+          resetPreferredLiveMode={resetPreferredLiveMode}
+          isRestreamedStates={isRestreamedStates}
+          supportsAudioOutputStates={supportsAudioOutputStates}
+          streamMetadata={streamMetadata}
         />
       )}
     </>
@@ -478,8 +478,7 @@ export default function AuthenticationView({
               <TableCell className="text-right">
                 <TooltipProvider>
                   <div className="flex items-center justify-end gap-2">
-                    {user.username !== "admin" &&
-                      user.username !== "viewer" && (
+                    {user.username !== "admin" && (
                       <Tooltip>
                         <TooltipTrigger asChild>
                           <Button