Compare commits


8 Commits

Author SHA1 Message Date
ivanshi1108
f818fdaa3d
Merge e27a94ae0b into de066d0062 2025-11-12 09:57:29 +08:00
GuoQing Liu
de066d0062
Fix i18n (#20857)
* fix: fix the missing i18n key

* fix: fix the count variable in trackedObject i18n keys

* fix: fix missing i18n for audio labels on some pages

* fix: add missing variable from 6214d52

* fix: add more missing i18n

* fix: add missing menu key
2025-11-11 17:23:30 -06:00
Nicolas Mowen
f1a05d0f9b
Miscellaneous fixes (#20875)
* Improve stream fetching logic

* Reduce need to revalidate stream info

* fix frigate+ frame submission

* add UI setting to configure jsmpeg fallback timeout

* hide settings dropdown when fullscreen

* Fix arcface running on OpenVINO

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
2025-11-11 17:00:54 -06:00
shizhicheng
e27a94ae0b Fix logical errors caused by code formatting 2025-11-11 05:54:19 +00:00
shizhicheng
1dee548dbc Modifications to the YOLOv9 object detection model:
The model is now dynamically downloaded to the cache directory.
Post-processing is now done using Frigate's built-in `post_process_yolo`.
Configuration in the relevant documentation has been updated.
2025-11-11 05:42:28 +00:00
shizhicheng
91e17e12b7 Change the default detection model to YOLOv9 2025-11-09 13:21:17 +00:00
ivanshi1108
bb45483e9e
Modify AXERA section in hardware.md
Modify the AXERA section and related content in the hardware documentation.
2025-10-28 09:54:00 +08:00
shizhicheng
7b4eaf2d10 Initial commit for AXERA AI accelerators 2025-10-24 09:03:13 +00:00
27 changed files with 862 additions and 307 deletions

View File

@ -225,3 +225,29 @@ jobs:
          sources: |
            ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }}-amd64
            ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }}-rpi
  axera_build:
    runs-on: ubuntu-22.04
    name: AXERA Build
    needs:
      - amd64_build
      - arm64_build
    steps:
      - name: Check out code
        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push Axera build
        uses: docker/bake-action@v6
        with:
          source: .
          push: true
          targets: axcl
          files: docker/axcl/axcl.hcl
          set: |
            axcl.tags=${{ steps.setup.outputs.image-name }}-axcl
            *.cache-from=type=gha

docker/axcl/Dockerfile (new file, 55 lines)
View File

@ -0,0 +1,55 @@
# syntax=docker/dockerfile:1.6

# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive

# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1

FROM frigate AS frigate-axcl

ARG TARGETARCH
ARG PIP_BREAK_SYSTEM_PACKAGES

# Install axpyengine
RUN wget https://github.com/AXERA-TECH/pyaxengine/releases/download/0.1.3.rc1/axengine-0.1.3-py3-none-any.whl -O /axengine-0.1.3-py3-none-any.whl
RUN pip3 install -i https://mirrors.aliyun.com/pypi/simple/ /axengine-0.1.3-py3-none-any.whl \
    && rm /axengine-0.1.3-py3-none-any.whl

# Install axcl
RUN if [ "$TARGETARCH" = "amd64" ]; then \
        echo "Installing x86_64 version of axcl"; \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb -O /axcl.deb; \
    else \
        echo "Installing aarch64 version of axcl"; \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb -O /axcl.deb; \
    fi

RUN mkdir /unpack_axcl && \
    dpkg-deb -x /axcl.deb /unpack_axcl && \
    cp -R /unpack_axcl/usr/bin/axcl /usr/bin/ && \
    cp -R /unpack_axcl/usr/lib/axcl /usr/lib/ && \
    rm -rf /unpack_axcl /axcl.deb

# Install axcl ffmpeg
RUN mkdir -p /usr/lib/ffmpeg/axcl
RUN if [ "$TARGETARCH" = "amd64" ]; then \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffmpeg-x64 -O /usr/lib/ffmpeg/axcl/ffmpeg && \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffprobe-x64 -O /usr/lib/ffmpeg/axcl/ffprobe; \
    else \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffmpeg-aarch64 -O /usr/lib/ffmpeg/axcl/ffmpeg && \
        wget https://github.com/ivanshi1108/assets/releases/download/v0.16.2/ffprobe-aarch64 -O /usr/lib/ffmpeg/axcl/ffprobe; \
    fi
RUN chmod +x /usr/lib/ffmpeg/axcl/ffmpeg /usr/lib/ffmpeg/axcl/ffprobe

# Set ldconfig path
RUN echo "/usr/lib/axcl" > /etc/ld.so.conf.d/ax.conf

# Set env
ENV PATH="$PATH:/usr/bin/axcl"
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib/axcl"

ENTRYPOINT ["sh", "-c", "ldconfig && exec /init"]

docker/axcl/axcl.hcl (new file, 13 lines)
View File

@ -0,0 +1,13 @@
target "frigate" {
  dockerfile = "docker/main/Dockerfile"
  platforms  = ["linux/amd64", "linux/arm64"]
  target     = "frigate"
}

target "axcl" {
  dockerfile = "docker/axcl/Dockerfile"
  contexts = {
    frigate = "target:frigate",
  }
  platforms = ["linux/amd64", "linux/arm64"]
}

docker/axcl/axcl.mk (new file, 15 lines)
View File

@ -0,0 +1,15 @@
BOARDS += axcl

local-axcl: version
	docker buildx bake --file=docker/axcl/axcl.hcl axcl \
		--set axcl.tags=frigate:latest-axcl \
		--load

build-axcl: version
	docker buildx bake --file=docker/axcl/axcl.hcl axcl \
		--set axcl.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-axcl

push-axcl: build-axcl
	docker buildx bake --file=docker/axcl/axcl.hcl axcl \
		--set axcl.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-axcl \
		--push

View File

@ -0,0 +1,83 @@
#!/bin/bash

# Update package list and install dependencies
sudo apt-get update
sudo apt-get install -y build-essential cmake git wget pciutils kmod udev

# Check if gcc-12 is needed
current_gcc_version=$(gcc --version | head -n1 | awk '{print $NF}')
gcc_major_version=$(echo $current_gcc_version | cut -d'.' -f1)

if [[ $gcc_major_version -lt 12 ]]; then
    echo "Current GCC version ($current_gcc_version) is lower than 12, installing gcc-12..."
    sudo apt-get install -y gcc-12
    sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 12
    echo "GCC-12 installed and set as default"
else
    echo "Current GCC version ($current_gcc_version) is sufficient, skipping GCC installation"
fi

# Determine architecture
arch=$(uname -m)
download_url=""

if [[ $arch == "x86_64" ]]; then
    download_url="https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb"
    deb_file="axcl_host_x86_64_V3.6.5_20250908154509_NO4973.deb"
elif [[ $arch == "aarch64" ]]; then
    download_url="https://github.com/ivanshi1108/assets/releases/download/v0.16.2/axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb"
    deb_file="axcl_host_aarch64_V3.6.5_20250908154509_NO4973.deb"
else
    echo "Unsupported architecture: $arch"
    exit 1
fi

# Download AXCL driver
echo "Downloading AXCL driver for $arch..."
wget "$download_url" -O "$deb_file"
if [ $? -ne 0 ]; then
    echo "Failed to download AXCL driver"
    exit 1
fi

# Install AXCL driver
echo "Installing AXCL driver..."
sudo dpkg -i "$deb_file"
if [ $? -ne 0 ]; then
    echo "Failed to install AXCL driver, attempting to fix dependencies..."
    sudo apt-get install -f -y
    sudo dpkg -i "$deb_file"
    if [ $? -ne 0 ]; then
        echo "AXCL driver installation failed"
        exit 1
    fi
fi

# Update environment
echo "Updating environment..."
source /etc/profile

# Verify installation
echo "Verifying AXCL installation..."
if command -v axcl-smi &> /dev/null; then
    echo "AXCL driver detected, checking AI accelerator status..."
    axcl_output=$(axcl-smi 2>&1)
    axcl_exit_code=$?
    echo "$axcl_output"

    if [ $axcl_exit_code -eq 0 ]; then
        echo "AXCL driver installation completed successfully!"
    else
        echo "AXCL driver installed but no AI accelerator detected or communication failed."
        echo "Please check if the AI accelerator is properly connected and powered on."
        exit 1
    fi
else
    echo "axcl-smi command not found. AXCL driver installation may have failed."
    exit 1
fi

View File

@ -47,6 +47,11 @@ Frigate supports multiple different detectors that work on different types of ha
- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs.
**AXERA**
- [AXEngine](#axera): axmodels can run on AXERA AI accelerators.
**For Testing**
- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run a tflite model. This is not recommended; in most cases OpenVINO can be used in CPU mode with better results.
@ -1169,6 +1174,41 @@ model: # required
labelmap_path: /labelmap/coco-80.txt # required
```
## AXERA
Hardware accelerated object detection is supported on the following SoCs:
- AX650N
- AX8850N
This implementation uses the [AXera Pulsar2 Toolchain](https://huggingface.co/AXERA-TECH/Pulsar2).
See the [installation docs](../frigate/installation.md#axera) for information on configuring the AXEngine hardware.
### Configuration
When configuring the AXEngine detector, you must specify the model name.
#### yolov9
A yolov9 model is provided in the container at /axmodels and is used by this detector type by default.
Use the model configuration shown below when using the axengine detector with the default axmodel:
```yaml
detectors: # required
axengine: # required
type: axengine # required
model: # required
path: frigate-yolov9-tiny # required
model_type: yolo-generic # required
width: 320 # required
height: 320 # required
tensor_format: bgr # required
labelmap_path: /labelmap/coco-80.txt # required
```
## Rockchip platform
Hardware accelerated object detection is supported on the following SoCs:

View File

@ -110,6 +110,14 @@ Frigate supports multiple different detectors that work on different types of ha
| ssd mobilenet | ~ 25 ms |
| yolov5m | ~ 118 ms |
### AXERA
- **AXEngine**: default model is **yolov9**
| Name | AXERA AX650N/AX8850N Inference Time |
| ---------------- | ----------------------------------- |
| yolov9-tiny | ~ 1.012 ms |
### Hailo-8
Frigate supports both the Hailo-8 and Hailo-8L AI Acceleration Modules on compatible hardware platforms, including the Raspberry Pi 5 with the PCIe hat from the AI kit. The Hailo detector integration in Frigate automatically identifies your hardware type and selects the appropriate default model when a custom model isn't provided.

View File

@ -287,6 +287,40 @@ or add these options to your `docker run` command:
Next, you should configure [hardware object detection](/configuration/object_detectors#synaptics) and [hardware video processing](/configuration/hardware_acceleration_video#synaptics).
### AXERA
AXERA accelerators are available in an M.2 form factor, compatible with both Raspberry Pi and Orange Pi. This form factor has also been successfully tested on x86 platforms, making it a versatile choice for various computing environments.
#### Installation
Using AXERA accelerators requires the installation of the AXCL driver. We provide a convenient Linux script to complete this installation.
Follow these steps for installation:
1. Copy or download [this script](https://github.com/ivanshi1108/assets/releases/download/v0.16.2/user_installation.sh).
2. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`.
3. Run the script with `./user_installation.sh`.
#### Setup
To set up Frigate, follow the default installation instructions using the standard image, for example `ghcr.io/blakeblackshear/frigate:stable`.
Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file:
```yaml
devices:
- /dev/axcl_host
- /dev/ax_mmb_dev
- /dev/msg_userdev
```
If you are using `docker run`, add these options to your command: `--device /dev/axcl_host --device /dev/ax_mmb_dev --device /dev/msg_userdev`
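Putting the above together, a minimal `docker-compose.yml` sketch for an AXERA host might look like the following (the service layout and volume paths are illustrative assumptions, not part of this change):

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable
    restart: unless-stopped
    devices:
      # AXCL device nodes created by the driver installed above
      - /dev/axcl_host
      - /dev/ax_mmb_dev
      - /dev/msg_userdev
    volumes:
      - ./config:/config # illustrative config path
      - ./storage:/media/frigate # illustrative media path
```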
#### Configuration
Finally, configure [hardware object detection](/configuration/object_detectors#axera) to complete the setup.
## Docker
Running through Docker with Docker Compose is the recommended install method.

View File

@ -255,6 +255,7 @@ class OpenVINOModelRunner(BaseModelRunner):
    def __init__(self, model_path: str, device: str, model_type: str, **kwargs):
        self.model_path = model_path
        self.device = device
        self.model_type = model_type

        if device == "NPU" and not OpenVINOModelRunner.is_model_npu_supported(
            model_type
@ -341,6 +342,13 @@ class OpenVINOModelRunner(BaseModelRunner):
        # Lock prevents concurrent access to infer_request
        # Needed for JinaV2: genai thread (text) + embeddings thread (vision)
        with self._inference_lock:
            from frigate.embeddings.types import EnrichmentModelTypeEnum

            if self.model_type in [EnrichmentModelTypeEnum.arcface.value]:
                # For face recognition models, create a fresh infer_request
                # for each inference to avoid state pollution that causes incorrect results.
                self.infer_request = self.compiled_model.create_infer_request()

            # Handle single input case for backward compatibility
            if (
                len(inputs) == 1

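The arcface change above reflects a general OpenVINO pattern: when inference requests can carry state between calls, creating a fresh request per inference avoids polluted results. A minimal standalone sketch of that pattern, with a placeholder model and device (not Frigate's actual values):

```python
import openvino as ov

core = ov.Core()
compiled = core.compile_model("arcface.onnx", "CPU")  # placeholder model/device

def run_inference(inputs):
    # A fresh infer_request per call, so no internal state is reused
    # between inferences.
    request = compiled.create_infer_request()
    request.infer(inputs)
    return request.get_output_tensor(0).data
```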
View File

@ -0,0 +1,92 @@
import logging
import os.path
import re
import urllib.request
from typing import Literal

import cv2
import numpy as np
from pydantic import Field

from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
from frigate.util.model import post_process_yolo

import axengine as axe
from axengine import axclrt_provider_name, axengine_provider_name

logger = logging.getLogger(__name__)

DETECTOR_KEY = "axengine"

supported_models = {
    ModelTypeEnum.yologeneric: "frigate-yolov9-.*$",
}

model_cache_dir = os.path.join(MODEL_CACHE_DIR, "axengine_cache/")


class AxengineDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]


class Axengine(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, config: AxengineDetectorConfig):
        logger.info("__init__ axengine")
        super().__init__(config)
        self.height = config.model.height
        self.width = config.model.width

        model_path = config.model.path or "frigate-yolov9-tiny"
        model_props = self.parse_model_input(model_path)
        self.session = axe.InferenceSession(model_props["path"])

    def __del__(self):
        pass

    def parse_model_input(self, model_path):
        model_props = {}
        model_props["preset"] = True

        model_matched = False
        for model_type, pattern in supported_models.items():
            if re.match(pattern, model_path):
                model_matched = True
                model_props["model_type"] = model_type

        if model_matched:
            model_props["filename"] = model_path + ".axmodel"
            model_props["path"] = model_cache_dir + model_props["filename"]

            if not os.path.isfile(model_props["path"]):
                self.download_model(model_props["filename"])
        else:
            supported_models_str = ", ".join(supported_models.values())
            raise Exception(
                f"Model {model_path} is unsupported. Provide your own model or choose one of the following: {supported_models_str}"
            )

        return model_props

    def download_model(self, filename):
        if not os.path.isdir(model_cache_dir):
            os.mkdir(model_cache_dir)

        GITHUB_ENDPOINT = os.environ.get("GITHUB_ENDPOINT", "https://github.com")
        urllib.request.urlretrieve(
            f"{GITHUB_ENDPOINT}/ivanshi1108/assets/releases/download/v0.16.2/{filename}",
            model_cache_dir + filename,
        )

    def detect_raw(self, tensor_input):
        results = self.session.run(None, {"images": tensor_input})

        if self.detector_config.model.model_type == ModelTypeEnum.yologeneric:
            return post_process_yolo(results, self.width, self.height)
        else:
            raise ValueError(
                f'Model type "{self.detector_config.model.model_type}" is currently not supported.'
            )
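To make the caching behavior above concrete, here is a small standalone sketch of how a preset model name resolves to a cached `.axmodel` path (the `MODEL_CACHE_DIR` value here is an assumption for illustration; Frigate defines its own constant):

```python
import os
import re

MODEL_CACHE_DIR = "/config/model_cache"  # assumed value for illustration
model_cache_dir = os.path.join(MODEL_CACHE_DIR, "axengine_cache/")
pattern = "frigate-yolov9-.*$"  # pattern from supported_models above

for name in ("frigate-yolov9-tiny", "my-custom-model"):
    if re.match(pattern, name):
        # Matched presets are downloaded once, then reused from the cache.
        print(f"{name} -> {model_cache_dir}{name}.axmodel")
    else:
        print(f"{name} -> unsupported preset; supply your own model")
```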

View File

@ -72,7 +72,10 @@
"formattedTimestampFilename": {
"12hour": "MM-dd-yy-h-mm-ss-a",
"24hour": "MM-dd-yy-HH-mm-ss"
}
},
"inProgress": "In progress",
"invalidStartTime": "Invalid start time",
"invalidEndTime": "Invalid end time"
},
"unit": {
"speed": {
@ -144,7 +147,8 @@
"unselect": "Unselect",
"export": "Export",
"deleteNow": "Delete Now",
"next": "Next"
"next": "Next",
"continue": "Continue"
},
"menu": {
"system": "System",
@ -237,6 +241,7 @@
"export": "Export",
"uiPlayground": "UI Playground",
"faceLibrary": "Face Library",
"classification": "Classification",
"user": {
"title": "User",
"account": "Account",

View File

@ -24,8 +24,8 @@
"label": "Detail",
"noDataFound": "No detail data to review",
"aria": "Toggle detail view",
"trackedObject_one": "object",
"trackedObject_other": "objects",
"trackedObject_one": "{{count}} object",
"trackedObject_other": "{{count}} objects",
"noObjectDetailData": "No object detail data available.",
"settings": "Detail View Settings",
"alwaysExpandActive": {

View File

@ -35,7 +35,7 @@
"snapshot": "snapshot",
"thumbnail": "thumbnail",
"video": "video",
"object_lifecycle": "object lifecycle"
"tracking_details": "tracking details"
},
"trackingDetails": {
"title": "Tracking Details",

View File

@ -8,7 +8,7 @@
"masksAndZones": "Mask and Zone Editor - Frigate",
"motionTuner": "Motion Tuner - Frigate",
"object": "Debug - Frigate",
"general": "General Settings - Frigate",
"general": "UI Settings - Frigate",
"frigatePlus": "Frigate+ Settings - Frigate",
"notifications": "Notification Settings - Frigate"
},
@ -37,7 +37,7 @@
"noCamera": "No Camera"
},
"general": {
"title": "General Settings",
"title": "UI Settings",
"liveDashboard": {
"title": "Live Dashboard",
"automaticLiveView": {
@ -51,6 +51,10 @@
"displayCameraNames": {
"label": "Always Show Camera Names",
"desc": "Always show the camera names in a chip in the multi-camera live view dashboard."
},
"liveFallbackTimeout": {
"label": "Live Player Fallback Timeout",
"desc": "When a camera's high quality live stream is unavailable, fall back to low bandwidth mode after this many seconds. Default: 3."
}
},
"storedLayouts": {

View File

@ -454,6 +454,24 @@ export function GeneralFilterContent({
onClose,
}: GeneralFilterContentProps) {
const { t } = useTranslation(["components/filter", "views/events"]);
const { data: config } = useSWR<FrigateConfig>("config", {
revalidateOnFocus: false,
});
const allAudioListenLabels = useMemo<string[]>(() => {
if (!config) {
return [];
}
const labels = new Set<string>();
Object.values(config.cameras).forEach((camera) => {
if (camera?.audio?.enabled) {
camera.audio.listen.forEach((label) => {
labels.add(label);
});
}
});
return [...labels].sort();
}, [config]);
return (
<>
<div className="scrollbar-container h-auto max-h-[80dvh] overflow-y-auto overflow-x-hidden">
@ -504,7 +522,10 @@ export function GeneralFilterContent({
{allLabels.map((item) => (
<FilterSwitch
key={item}
label={getTranslatedLabel(item)}
label={getTranslatedLabel(
item,
allAudioListenLabels.includes(item) ? "audio" : "object",
)}
isChecked={filter.labels?.includes(item) ?? false}
onCheckedChange={(isChecked) => {
if (isChecked) {

View File

@ -81,6 +81,43 @@ export default function InputWithTags({
revalidateOnFocus: false,
});
const allAudioListenLabels = useMemo<Set<string>>(() => {
if (!config) {
return new Set<string>();
}
const labels = new Set<string>();
Object.values(config.cameras).forEach((camera) => {
if (camera?.audio?.enabled) {
camera.audio.listen.forEach((label) => {
labels.add(label);
});
}
});
return labels;
}, [config]);
const translatedAudioLabelMap = useMemo<Map<string, string>>(() => {
const map = new Map<string, string>();
if (!config) return map;
allAudioListenLabels.forEach((label) => {
// getTranslatedLabel likely depends on i18n internally; including `lang`
// in deps ensures this map is rebuilt when language changes
map.set(label, getTranslatedLabel(label, "audio"));
});
return map;
}, [allAudioListenLabels, config]);
function resolveLabel(value: string) {
const mapped = translatedAudioLabelMap.get(value);
if (mapped) return mapped;
return getTranslatedLabel(
value,
allAudioListenLabels.has(value) ? "audio" : "object",
);
}
const [inputValue, setInputValue] = useState(search || "");
const [currentFilterType, setCurrentFilterType] = useState<FilterType | null>(
null,
@ -421,7 +458,8 @@ export default function InputWithTags({
? t("button.yes", { ns: "common" })
: t("button.no", { ns: "common" });
} else if (filterType === "labels") {
return getTranslatedLabel(String(filterValues));
const value = String(filterValues);
return resolveLabel(value);
} else if (filterType === "search_type") {
return t("filter.searchType." + String(filterValues));
} else {
@ -828,7 +866,7 @@ export default function InputWithTags({
>
{t("filter.label." + filterType)}:{" "}
{filterType === "labels" ? (
getTranslatedLabel(value)
resolveLabel(value)
) : filterType === "cameras" ? (
<CameraNameLabel camera={value} />
) : filterType === "zones" ? (

View File

@ -1155,7 +1155,7 @@ function ObjectDetailsTab({
</div>
<div className="flex flex-row items-center gap-2 text-sm smart-capitalize">
{getIconForLabel(search.label, "size-4 text-primary")}
{getTranslatedLabel(search.label)}
{getTranslatedLabel(search.label, search.data.type)}
{search.sub_label && ` (${search.sub_label})`}
{isAdmin && search.end_time && (
<Tooltip>
@ -1394,7 +1394,9 @@ function ObjectDetailsTab({
{state == "submitted" && (
<div className="flex flex-row items-center justify-center gap-2">
<FaCheckCircle className="size-4 text-success" />
{t("explore.plus.review.state.submitted")}
{t("explore.plus.review.state.submitted", {
ns: "components/dialog",
})}
</div>
)}
</div>

View File

@ -343,6 +343,10 @@ export function TrackingDetails({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [displayedRecordTime]);
const onUploadFrameToPlus = useCallback(() => {
return axios.post(`/${event.camera}/plus/${currentTime}`);
}, [event.camera, currentTime]);
if (!config) {
return <ActivityIndicator />;
}
@ -388,6 +392,7 @@ export function TrackingDetails({
frigateControls={true}
onTimeUpdate={handleTimeUpdate}
onSeekToTime={handleSeekToTime}
onUploadFrame={onUploadFrameToPlus}
isDetailMode={true}
camera={event.camera}
currentTimeOverride={currentTime}

View File

@ -1,4 +1,5 @@
import { baseUrl } from "@/api/baseUrl";
import { usePersistence } from "@/hooks/use-persistence";
import {
LivePlayerError,
PlayerStatsType,
@ -71,6 +72,8 @@ function MSEPlayer({
const [errorCount, setErrorCount] = useState<number>(0);
const totalBytesLoaded = useRef(0);
const [fallbackTimeout] = usePersistence<number>("liveFallbackTimeout", 3);
const videoRef = useRef<HTMLVideoElement>(null);
const wsRef = useRef<WebSocket | null>(null);
const reconnectTIDRef = useRef<number | null>(null);
@ -475,7 +478,10 @@ function MSEPlayer({
setBufferTimeout(undefined);
}
const timeoutDuration = bufferTime == 0 ? 5000 : 3000;
const timeoutDuration =
bufferTime == 0
? (fallbackTimeout ?? 3) * 2 * 1000
: (fallbackTimeout ?? 3) * 1000;
setBufferTimeout(
setTimeout(() => {
if (
@ -500,6 +506,7 @@ function MSEPlayer({
onError,
onPlaying,
playbackEnabled,
fallbackTimeout,
]);
useEffect(() => {

View File

@ -349,7 +349,7 @@ function ReviewGroup({
? fetchedEvents.length
: (review.data.objects ?? []).length;
return `${objectCount} ${t("detail.trackedObject", { count: objectCount })}`;
return `${t("detail.trackedObject", { count: objectCount })}`;
}, [review, t, fetchedEvents]);
const reviewDuration = useMemo(
@ -478,7 +478,7 @@ function ReviewGroup({
<div className="rounded-full bg-muted-foreground p-1">
{getIconForLabel(audioLabel, "size-3 text-white")}
</div>
<span>{getTranslatedLabel(audioLabel)}</span>
<span>{getTranslatedLabel(audioLabel, "audio")}</span>
</div>
</div>
))}
@ -513,7 +513,8 @@ function EventList({
const isSelected = selectedObjectIds.includes(event.id);
const label = event.sub_label || getTranslatedLabel(event.label);
const label =
event.sub_label || getTranslatedLabel(event.label, event.data.type);
const handleObjectSelect = (event: Event | undefined) => {
if (event) {

View File

@ -6,6 +6,7 @@ import { LivePlayerMode, LiveStreamMetadata } from "@/types/live";
export default function useCameraLiveMode(
cameras: CameraConfig[],
windowVisible: boolean,
activeStreams?: { [cameraName: string]: string },
) {
const { data: config } = useSWR<FrigateConfig>("config");
@ -20,16 +21,20 @@ export default function useCameraLiveMode(
);
if (isRestreamed) {
Object.values(camera.live.streams).forEach((streamName) => {
streamNames.add(streamName);
});
if (activeStreams && activeStreams[camera.name]) {
streamNames.add(activeStreams[camera.name]);
} else {
Object.values(camera.live.streams).forEach((streamName) => {
streamNames.add(streamName);
});
}
}
});
return streamNames.size > 0
? Array.from(streamNames).sort().join(",")
: null;
}, [cameras, config]);
}, [cameras, config, activeStreams]);
const streamsFetcher = useCallback(async (key: string) => {
const streamNames = key.split(",");
@ -68,7 +73,9 @@ export default function useCameraLiveMode(
[key: string]: LiveStreamMetadata;
}>(restreamedStreamsKey, streamsFetcher, {
revalidateOnFocus: false,
dedupingInterval: 10000,
revalidateOnReconnect: false,
revalidateIfStale: false,
dedupingInterval: 60000,
});
const [preferredLiveModes, setPreferredLiveModes] = useState<{

View File

@ -244,12 +244,12 @@ export const getDurationFromTimestamps = (
abbreviated: boolean = false,
): string => {
if (isNaN(start_time)) {
return "Invalid start time";
return i18n.t("time.invalidStartTime", { ns: "common" });
}
let duration = "In Progress";
let duration = i18n.t("time.inProgress", { ns: "common" });
if (end_time !== null) {
if (isNaN(end_time)) {
return "Invalid end time";
return i18n.t("time.invalidEndTime", { ns: "common" });
}
const start = fromUnixTime(start_time);
const end = fromUnixTime(end_time);

View File

@ -86,14 +86,6 @@ export default function DraggableGridLayout({
// preferred live modes per camera
const {
preferredLiveModes,
setPreferredLiveModes,
resetPreferredLiveMode,
isRestreamedStates,
supportsAudioOutputStates,
} = useCameraLiveMode(cameras, windowVisible);
const [globalAutoLive] = usePersistence("autoLiveView", true);
const [displayCameraNames] = usePersistence("displayCameraNames", false);
@ -106,6 +98,33 @@ export default function DraggableGridLayout({
}
}, [allGroupsStreamingSettings, cameraGroup]);
const activeStreams = useMemo(() => {
const streams: { [cameraName: string]: string } = {};
cameras.forEach((camera) => {
const availableStreams = camera.live.streams || {};
const streamNameFromSettings =
currentGroupStreamingSettings?.[camera.name]?.streamName || "";
const streamExists =
streamNameFromSettings &&
Object.values(availableStreams).includes(streamNameFromSettings);
const streamName = streamExists
? streamNameFromSettings
: Object.values(availableStreams)[0] || "";
streams[camera.name] = streamName;
});
return streams;
}, [cameras, currentGroupStreamingSettings]);
const {
preferredLiveModes,
setPreferredLiveModes,
resetPreferredLiveMode,
isRestreamedStates,
supportsAudioOutputStates,
} = useCameraLiveMode(cameras, windowVisible, activeStreams);
// grid layout
const ResponsiveGridLayout = useMemo(() => WidthProvider(Responsive), []);

View File

@ -162,6 +162,9 @@ export default function LiveCameraView({
isRestreamed ? `go2rtc/streams/${streamName}` : null,
{
revalidateOnFocus: false,
revalidateOnReconnect: false,
revalidateIfStale: false,
dedupingInterval: 60000,
},
);
@ -1027,294 +1030,298 @@ function FrigateCameraFeatures({
disabled={!cameraEnabled || debug || isSnapshotLoading}
loading={isSnapshotLoading}
/>
<DropdownMenu modal={false}>
<DropdownMenuTrigger>
<div
className={cn(
"flex flex-col items-center justify-center rounded-lg bg-secondary p-2 text-secondary-foreground md:p-0",
)}
>
<FaCog
className={`text-secondary-foreground" size-5 md:m-[6px]`}
/>
</div>
</DropdownMenuTrigger>
<DropdownMenuContent className="max-w-96">
<div className="flex flex-col gap-5 p-4">
{!isRestreamed && (
<div className="flex flex-col gap-2">
<Label>
{t("streaming.label", { ns: "components/dialog" })}
</Label>
<div className="flex flex-row items-center gap-1 text-sm text-muted-foreground">
<LuX className="size-4 text-danger" />
<div>
{t("streaming.restreaming.disabled", {
ns: "components/dialog",
})}
</div>
<Popover>
<PopoverTrigger asChild>
<div className="cursor-pointer p-0">
<LuInfo className="size-4" />
<span className="sr-only">
{t("button.info", { ns: "common" })}
</span>
</div>
</PopoverTrigger>
<PopoverContent className="w-80 text-xs">
{t("streaming.restreaming.desc.title", {
{!fullscreen && (
<DropdownMenu modal={false}>
<DropdownMenuTrigger>
<div
className={cn(
"flex flex-col items-center justify-center rounded-lg bg-secondary p-2 text-secondary-foreground md:p-0",
)}
>
<FaCog
className={`text-secondary-foreground" size-5 md:m-[6px]`}
/>
</div>
</DropdownMenuTrigger>
<DropdownMenuContent className="max-w-96">
<div className="flex flex-col gap-5 p-4">
{!isRestreamed && (
<div className="flex flex-col gap-2">
<Label>
{t("streaming.label", { ns: "components/dialog" })}
</Label>
<div className="flex flex-row items-center gap-1 text-sm text-muted-foreground">
<LuX className="size-4 text-danger" />
<div>
{t("streaming.restreaming.disabled", {
ns: "components/dialog",
})}
<div className="mt-2 flex items-center text-primary">
<Link
to={getLocaleDocUrl("configuration/live")}
target="_blank"
rel="noopener noreferrer"
className="inline"
>
{t("readTheDocumentation", { ns: "common" })}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
</PopoverContent>
</Popover>
</div>
</div>
)}
{isRestreamed &&
Object.values(camera.live.streams).length > 0 && (
<div className="flex flex-col gap-1">
<Label htmlFor="streaming-method">
{t("stream.title")}
</Label>
<Select
value={streamName}
disabled={debug}
onValueChange={(value) => {
setStreamName?.(value);
}}
>
<SelectTrigger className="w-full">
<SelectValue>
{Object.keys(camera.live.streams).find(
(key) => camera.live.streams[key] === streamName,
)}
</SelectValue>
</SelectTrigger>
<SelectContent>
<SelectGroup>
{Object.entries(camera.live.streams).map(
([stream, name]) => (
<SelectItem
key={stream}
className="cursor-pointer"
value={name}
>
{stream}
</SelectItem>
),
)}
</SelectGroup>
</SelectContent>
</Select>
{debug && (
<div className="flex flex-row items-center gap-1 text-sm text-muted-foreground">
<>
<LuX className="size-8 text-danger" />
<div>{t("stream.debug.picker")}</div>
</>
</div>
)}
{preferredLiveMode != "jsmpeg" &&
!debug &&
isRestreamed && (
<div className="flex flex-row items-center gap-1 text-sm text-muted-foreground">
{supportsAudioOutput ? (
<>
<LuCheck className="size-4 text-success" />
<div>{t("stream.audio.available")}</div>
</>
) : (
<>
<LuX className="size-4 text-danger" />
<div>{t("stream.audio.unavailable")}</div>
<Popover>
<PopoverTrigger asChild>
<div className="cursor-pointer p-0">
<LuInfo className="size-4" />
<span className="sr-only">
{t("button.info", { ns: "common" })}
</span>
</div>
</PopoverTrigger>
<PopoverContent className="w-80 text-xs">
{t("stream.audio.tips.title")}
<div className="mt-2 flex items-center text-primary">
<Link
to={getLocaleDocUrl("configuration/live")}
target="_blank"
rel="noopener noreferrer"
className="inline"
>
{t("readTheDocumentation", {
ns: "common",
})}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
</PopoverContent>
</Popover>
</>
)}
</div>
)}
{preferredLiveMode != "jsmpeg" &&
!debug &&
isRestreamed &&
supportsAudioOutput && (
<div className="flex flex-row items-center gap-1 text-sm text-muted-foreground">
{supports2WayTalk ? (
<>
<LuCheck className="size-4 text-success" />
<div>{t("stream.twoWayTalk.available")}</div>
</>
) : (
<>
<LuX className="size-4 text-danger" />
<div>{t("stream.twoWayTalk.unavailable")}</div>
<Popover>
<PopoverTrigger asChild>
<div className="cursor-pointer p-0">
<LuInfo className="size-4" />
<span className="sr-only">
{t("button.info", { ns: "common" })}
</span>
</div>
</PopoverTrigger>
<PopoverContent className="w-80 text-xs">
{t("stream.twoWayTalk.tips")}
<div className="mt-2 flex items-center text-primary">
<Link
to={getLocaleDocUrl(
"configuration/live/#webrtc-extra-configuration",
)}
target="_blank"
rel="noopener noreferrer"
className="inline"
>
{t("readTheDocumentation", {
ns: "common",
})}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
</PopoverContent>
</Popover>
</>
)}
</div>
)}
{preferredLiveMode == "jsmpeg" &&
!debug &&
isRestreamed && (
<div className="flex flex-col items-center gap-3">
<div className="flex flex-row items-center gap-2">
<IoIosWarning className="mr-1 size-8 text-danger" />
<p className="text-sm">
{t("stream.lowBandwidth.tips")}
</p>
<Popover>
<PopoverTrigger asChild>
<div className="cursor-pointer p-0">
<LuInfo className="size-4" />
<span className="sr-only">
{t("button.info", { ns: "common" })}
</span>
</div>
<Button
className={`flex items-center gap-2.5 rounded-lg`}
aria-label={t("stream.lowBandwidth.resetStream")}
variant="outline"
size="sm"
onClick={() => setLowBandwidth(false)}
>
<MdOutlineRestartAlt className="size-5 text-primary-variant" />
<div className="text-primary-variant">
{t("stream.lowBandwidth.resetStream")}
</div>
</Button>
</div>
)}
</PopoverTrigger>
<PopoverContent className="w-80 text-xs">
{t("streaming.restreaming.desc.title", {
ns: "components/dialog",
})}
<div className="mt-2 flex items-center text-primary">
<Link
to={getLocaleDocUrl("configuration/live")}
target="_blank"
rel="noopener noreferrer"
className="inline"
>
{t("readTheDocumentation", { ns: "common" })}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
</PopoverContent>
</Popover>
</div>
</div>
)}
{isRestreamed &&
Object.values(camera.live.streams).length > 0 && (
<div className="flex flex-col gap-1">
<Label htmlFor="streaming-method">
{t("stream.title")}
</Label>
<Select
value={streamName}
disabled={debug}
onValueChange={(value) => {
setStreamName?.(value);
}}
>
<SelectTrigger className="w-full">
<SelectValue>
{Object.keys(camera.live.streams).find(
(key) => camera.live.streams[key] === streamName,
)}
</SelectValue>
</SelectTrigger>
<SelectContent>
<SelectGroup>
{Object.entries(camera.live.streams).map(
([stream, name]) => (
<SelectItem
key={stream}
className="cursor-pointer"
value={name}
>
{stream}
</SelectItem>
),
)}
</SelectGroup>
</SelectContent>
</Select>
{debug && (
<div className="flex flex-row items-center gap-1 text-sm text-muted-foreground">
<>
<LuX className="size-8 text-danger" />
<div>{t("stream.debug.picker")}</div>
</>
</div>
)}
{preferredLiveMode != "jsmpeg" &&
!debug &&
isRestreamed && (
<div className="flex flex-row items-center gap-1 text-sm text-muted-foreground">
{supportsAudioOutput ? (
<>
<LuCheck className="size-4 text-success" />
<div>{t("stream.audio.available")}</div>
</>
) : (
<>
<LuX className="size-4 text-danger" />
<div>{t("stream.audio.unavailable")}</div>
<Popover>
<PopoverTrigger asChild>
<div className="cursor-pointer p-0">
<LuInfo className="size-4" />
<span className="sr-only">
{t("button.info", { ns: "common" })}
</span>
</div>
</PopoverTrigger>
<PopoverContent className="w-80 text-xs">
{t("stream.audio.tips.title")}
<div className="mt-2 flex items-center text-primary">
<Link
to={getLocaleDocUrl(
"configuration/live",
)}
target="_blank"
rel="noopener noreferrer"
className="inline"
>
{t("readTheDocumentation", {
ns: "common",
})}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
</PopoverContent>
</Popover>
</>
)}
</div>
)}
{preferredLiveMode != "jsmpeg" &&
!debug &&
isRestreamed &&
supportsAudioOutput && (
<div className="flex flex-row items-center gap-1 text-sm text-muted-foreground">
{supports2WayTalk ? (
<>
<LuCheck className="size-4 text-success" />
<div>{t("stream.twoWayTalk.available")}</div>
</>
) : (
<>
<LuX className="size-4 text-danger" />
<div>{t("stream.twoWayTalk.unavailable")}</div>
<Popover>
<PopoverTrigger asChild>
<div className="cursor-pointer p-0">
<LuInfo className="size-4" />
<span className="sr-only">
{t("button.info", { ns: "common" })}
</span>
</div>
</PopoverTrigger>
<PopoverContent className="w-80 text-xs">
{t("stream.twoWayTalk.tips")}
<div className="mt-2 flex items-center text-primary">
<Link
to={getLocaleDocUrl(
"configuration/live/#webrtc-extra-configuration",
)}
target="_blank"
rel="noopener noreferrer"
className="inline"
>
{t("readTheDocumentation", {
ns: "common",
})}
<LuExternalLink className="ml-2 inline-flex size-3" />
</Link>
</div>
</PopoverContent>
</Popover>
</>
)}
</div>
)}
{preferredLiveMode == "jsmpeg" &&
!debug &&
isRestreamed && (
<div className="flex flex-col items-center gap-3">
<div className="flex flex-row items-center gap-2">
<IoIosWarning className="mr-1 size-8 text-danger" />
<p className="text-sm">
{t("stream.lowBandwidth.tips")}
</p>
</div>
<Button
className={`flex items-center gap-2.5 rounded-lg`}
aria-label={t("stream.lowBandwidth.resetStream")}
variant="outline"
size="sm"
onClick={() => setLowBandwidth(false)}
>
<MdOutlineRestartAlt className="size-5 text-primary-variant" />
<div className="text-primary-variant">
{t("stream.lowBandwidth.resetStream")}
</div>
</Button>
</div>
)}
</div>
)}
{isRestreamed && (
<div className="flex flex-col gap-1">
<div className="flex items-center justify-between">
<Label
className="mx-0 cursor-pointer text-primary"
htmlFor="backgroundplay"
>
{t("stream.playInBackground.label")}
</Label>
<Switch
className="ml-1"
id="backgroundplay"
disabled={debug}
checked={playInBackground}
onCheckedChange={(checked) =>
setPlayInBackground(checked)
}
/>
</div>
<p className="text-sm text-muted-foreground">
{t("stream.playInBackground.tips")}
</p>
</div>
)}
{isRestreamed && (
<div className="flex flex-col gap-1">
<div className="flex items-center justify-between">
<Label
className="mx-0 cursor-pointer text-primary"
htmlFor="backgroundplay"
htmlFor="showstats"
>
{t("stream.playInBackground.label")}
{t("streaming.showStats.label", {
ns: "components/dialog",
})}
</Label>
<Switch
className="ml-1"
id="backgroundplay"
id="showstats"
disabled={debug}
checked={playInBackground}
onCheckedChange={(checked) =>
setPlayInBackground(checked)
}
checked={showStats}
onCheckedChange={(checked) => setShowStats(checked)}
/>
</div>
<p className="text-sm text-muted-foreground">
{t("stream.playInBackground.tips")}
{t("streaming.showStats.desc", {
ns: "components/dialog",
})}
</p>
</div>
)}
<div className="flex flex-col gap-1">
<div className="flex items-center justify-between">
<Label
className="mx-0 cursor-pointer text-primary"
htmlFor="showstats"
>
{t("streaming.showStats.label", {
ns: "components/dialog",
})}
</Label>
<Switch
className="ml-1"
id="showstats"
disabled={debug}
checked={showStats}
onCheckedChange={(checked) => setShowStats(checked)}
/>
</div>
<p className="text-sm text-muted-foreground">
{t("streaming.showStats.desc", {
ns: "components/dialog",
})}
</p>
</div>
<div className="flex flex-col gap-1">
<div className="flex items-center justify-between">
<Label
className="mx-0 cursor-pointer text-primary"
htmlFor="debug"
>
{t("streaming.debugView", {
ns: "components/dialog",
})}
</Label>
<Switch
className="ml-1"
id="debug"
checked={debug}
onCheckedChange={(checked) => setDebug(checked)}
/>
<div className="flex flex-col gap-1">
<div className="flex items-center justify-between">
<Label
className="mx-0 cursor-pointer text-primary"
htmlFor="debug"
>
{t("streaming.debugView", {
ns: "components/dialog",
})}
</Label>
<Switch
className="ml-1"
id="debug"
checked={debug}
onCheckedChange={(checked) => setDebug(checked)}
/>
</div>
</div>
</div>
</div>
</DropdownMenuContent>
</DropdownMenu>
</DropdownMenuContent>
</DropdownMenu>
)}
</>
);
}

View File

@ -202,14 +202,6 @@ export default function LiveDashboardView({
};
}, []);
const {
preferredLiveModes,
setPreferredLiveModes,
resetPreferredLiveMode,
isRestreamedStates,
supportsAudioOutputStates,
} = useCameraLiveMode(cameras, windowVisible);
const [globalAutoLive] = usePersistence("autoLiveView", true);
const [displayCameraNames] = usePersistence("displayCameraNames", false);
@ -239,6 +231,33 @@ export default function LiveDashboardView({
[visibleCameraObserver.current],
);
const activeStreams = useMemo(() => {
const streams: { [cameraName: string]: string } = {};
cameras.forEach((camera) => {
const availableStreams = camera.live.streams || {};
const streamNameFromSettings =
currentGroupStreamingSettings?.[camera.name]?.streamName || "";
const streamExists =
streamNameFromSettings &&
Object.values(availableStreams).includes(streamNameFromSettings);
const streamName = streamExists
? streamNameFromSettings
: Object.values(availableStreams)[0] || "";
streams[camera.name] = streamName;
});
return streams;
}, [cameras, currentGroupStreamingSettings]);
const {
preferredLiveModes,
setPreferredLiveModes,
resetPreferredLiveMode,
isRestreamedStates,
supportsAudioOutputStates,
} = useCameraLiveMode(cameras, windowVisible, activeStreams);
const birdseyeConfig = useMemo(() => config?.birdseye, [config]);
const handleError = useCallback(

View File

@ -649,7 +649,7 @@ export function RecordingView({
value="detail"
aria-label="Detail Stream"
>
<div className="">Detail</div>
<div className="">{t("detail.label")}</div>
</ToggleGroupItem>
</ToggleGroup>
) : (

View File

@ -99,6 +99,10 @@ export default function UiSettingsView() {
const [playbackRate, setPlaybackRate] = usePersistence("playbackRate", 1);
const [weekStartsOn, setWeekStartsOn] = usePersistence("weekStartsOn", 0);
const [alertVideos, setAlertVideos] = usePersistence("alertVideos", true);
const [fallbackTimeout, setFallbackTimeout] = usePersistence(
"liveFallbackTimeout",
3,
);
return (
<>
@ -161,6 +165,48 @@ export default function UiSettingsView() {
<p>{t("general.liveDashboard.displayCameraNames.desc")}</p>
</div>
</div>
<div className="space-y-3">
<div className="flex flex-row items-center justify-start gap-2">
<Label
className="cursor-pointer"
htmlFor="live-fallback-timeout"
>
{t("general.liveDashboard.liveFallbackTimeout.label")}
</Label>
</div>
<div className="my-2 max-w-5xl text-sm text-muted-foreground">
<p>{t("general.liveDashboard.liveFallbackTimeout.desc")}</p>
</div>
<Select
value={fallbackTimeout?.toString()}
onValueChange={(value) => setFallbackTimeout(parseInt(value))}
>
<SelectTrigger className="w-36">
{t("time.second", {
ns: "common",
time: fallbackTimeout,
count: fallbackTimeout,
})}
</SelectTrigger>
<SelectContent>
<SelectGroup>
{[1, 2, 3, 4, 5, 6, 7, 8, 9, 10].map((timeout) => (
<SelectItem
key={timeout}
className="cursor-pointer"
value={timeout.toString()}
>
{t("time.second", {
ns: "common",
time: timeout,
count: timeout,
})}
</SelectItem>
))}
</SelectGroup>
</SelectContent>
</Select>
</div>
</div>
<div className="my-3 flex w-full flex-col space-y-6">