Merge branch 'dev' of https://github.com/blakeblackshear/frigate into migrate-db-path

This commit is contained in:
Felipe Santos 2023-04-23 13:13:15 -03:00
commit b5ab2134a6
102 changed files with 4562 additions and 3449 deletions

View File

@ -1,6 +1,6 @@
name: EdgeTpu Support Request
description: Support for setting up EdgeTPU in Frigate
title: "[EdgeTPU Support]: "
name: Detector Support Request
description: Support for setting up object detector in Frigate (Coral, OpenVINO, TensorRT, etc.)
title: "[Detector Support]: "
labels: ["support", "triage"]
assignees: []
body:

View File

@ -19,6 +19,15 @@ jobs:
runs-on: ubuntu-latest
name: Image Build
steps:
- name: Remove unnecessary files
run: |
sudo rm -rf /usr/share/dotnet
sudo rm -rf /usr/local/lib/android
sudo rm -rf /opt/ghc
- id: lowercaseRepo
uses: ASzc/change-string-case-action@v5
with:
string: ${{ github.repository }}
- name: Check out code
uses: actions/checkout@v3
- name: Set up QEMU
@ -36,23 +45,23 @@ jobs:
- name: Create short sha
run: echo "SHORT_SHA=${GITHUB_SHA::7}" >> $GITHUB_ENV
- name: Build and push
uses: docker/build-push-action@v3
uses: docker/build-push-action@v4
with:
context: .
push: true
platforms: linux/amd64,linux/arm64,linux/arm/v7
platforms: linux/amd64,linux/arm64
target: frigate
tags: |
ghcr.io/blakeblackshear/frigate:${{ github.ref_name }}-${{ env.SHORT_SHA }}
ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Build and push TensorRT
uses: docker/build-push-action@v3
uses: docker/build-push-action@v4
with:
context: .
push: true
platforms: linux/amd64
target: frigate-tensorrt
tags: |
ghcr.io/blakeblackshear/frigate:${{ github.ref_name }}-${{ env.SHORT_SHA }}-tensorrt
ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }}-tensorrt
cache-from: type=gha

View File

@ -16,7 +16,9 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Enable auto-merge for Dependabot PRs
if: steps.metadata.outputs.dependency-type == 'direct:development' && (steps.metadata.outputs.update-type == 'version-update:semver-minor' || steps.metadata.outputs.update-type == 'version-update:semver-patch')
run: gh pr merge --auto --squash "$PR_URL"
run: |
gh pr review --approve "$PR_URL"
gh pr merge --auto --squash "$PR_URL"
env:
PR_URL: ${{ github.event.pull_request.html_url }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/maintain_cache.yml
View File

@ -0,0 +1,45 @@
name: Maintain Cache
on:
schedule:
- cron: "13 0 * * 0,4"
env:
PYTHON_VERSION: 3.9
jobs:
multi_arch_build:
runs-on: ubuntu-latest
name: Image Build
steps:
- name: Remove unnecessary files
run: |
sudo rm -rf /usr/share/dotnet
sudo rm -rf /usr/local/lib/android
sudo rm -rf /opt/ghc
- id: lowercaseRepo
uses: ASzc/change-string-case-action@v5
with:
string: ${{ github.repository }}
- name: Check out code
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to the Container registry
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create version file
run: make version
- name: Build and push
uses: docker/build-push-action@v4
with:
context: .
push: false
platforms: linux/amd64,linux/arm64
target: frigate
cache-from: type=gha

View File

@ -27,7 +27,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
FROM wget AS go2rtc
ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin
RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.0.1/go2rtc_linux_${TARGETARCH}" \
RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.2.0/go2rtc_linux_${TARGETARCH}" \
&& chmod +x go2rtc
@ -133,11 +133,6 @@ RUN apt-get -qq update \
RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
&& python3 get-pip.py "pip"
RUN if [ "${TARGETARCH}" = "arm" ]; \
then echo "[global]" > /etc/pip.conf \
&& echo "extra-index-url=https://www.piwheels.org/simple" >> /etc/pip.conf; \
fi
COPY requirements.txt /requirements.txt
RUN pip3 install -r requirements.txt
@ -207,6 +202,10 @@ FROM deps AS devcontainer
# But start a fake service for simulating the logs
COPY docker/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run
# Create symbolic link to the frigate source code, as go2rtc's create_config.sh uses it
RUN mkdir -p /opt/frigate \
&& ln -svf /workspace/frigate/frigate /opt/frigate/frigate
# Install Node 16
RUN apt-get update \
&& apt-get install wget -y \

View File

@ -1,7 +1,7 @@
default_target: local
COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
VERSION = 0.12.0
VERSION = 0.13.0
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
CURRENT_UID := $(shell id -u)
CURRENT_GID := $(shell id -g)
@ -22,14 +22,11 @@ amd64:
arm64:
docker buildx build --platform linux/arm64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
armv7:
docker buildx build --platform linux/arm/v7 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
build: version amd64 arm64 armv7
docker buildx build --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
build: version amd64 arm64
docker buildx build --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) .
push: build
docker buildx build --push --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) .
docker buildx build --push --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) .
docker buildx build --push --platform linux/amd64 --target=frigate-tensorrt --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt .
run: local

View File

@ -37,15 +37,6 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
rm -rf btbn-ffmpeg.tar.xz /usr/lib/btbn-ffmpeg/doc /usr/lib/btbn-ffmpeg/bin/ffplay
fi
# ffmpeg -> arm32
if [[ "${TARGETARCH}" == "arm" ]]; then
# add raspberry pi repo
gpg --no-default-keyring --keyring /usr/share/keyrings/raspbian.gpg --keyserver keyserver.ubuntu.com --recv-keys 9165938D90FDDD2E
echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] http://raspbian.raspberrypi.org/raspbian/ bullseye main contrib non-free rpi" | tee /etc/apt/sources.list.d/raspi.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg
fi
# ffmpeg -> arm64
if [[ "${TARGETARCH}" == "arm64" ]]; then
# add raspberry pi repo
@ -64,6 +55,9 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
apt-get -qq install --no-install-recommends --no-install-suggests -y \
intel-opencl-icd \
mesa-va-drivers libva-drm2 intel-media-va-driver-non-free i965-va-driver libmfx1 radeontop intel-gpu-tools
# something about this dependency requires it to be installed in a separate call rather than in the line above
apt-get -qq install --no-install-recommends --no-install-suggests -y \
i965-va-driver-shaders
rm -f /etc/apt/sources.list.d/debian-testing.list
fi
@ -72,16 +66,6 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
libva-drm2 mesa-va-drivers
fi
# not sure why 32bit arm requires all these
if [[ "${TARGETARCH}" == "arm" ]]; then
apt-get -qq install --no-install-recommends --no-install-suggests -y \
libgtk-3-dev \
libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \
gfortran openexr libatlas-base-dev libtbb-dev libdc1394-22-dev libopenexr-dev \
libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev
fi
apt-get purge gnupg apt-transport-https wget xz-utils -y
apt-get clean autoclean -y
apt-get autoremove --purge -y

View File

@ -2,12 +2,10 @@
set -euxo pipefail
s6_version="3.1.2.1"
s6_version="3.1.4.1"
if [[ "${TARGETARCH}" == "amd64" ]]; then
s6_arch="x86_64"
elif [[ "${TARGETARCH}" == "arm" ]]; then
s6_arch="armhf"
elif [[ "${TARGETARCH}" == "arm64" ]]; then
s6_arch="aarch64"
fi

View File

@ -4,6 +4,8 @@
set -o errexit -o nounset -o pipefail
# Logs should be sent to stdout so that s6 can collect them
declare exit_code_container
exit_code_container=$(cat /run/s6-linux-init-container-results/exitcode)
readonly exit_code_container
@ -11,20 +13,16 @@ readonly exit_code_service="${1}"
readonly exit_code_signal="${2}"
readonly service="Frigate"
echo "[INFO] Service ${service} exited with code ${exit_code_service} (by signal ${exit_code_signal})" >&2
echo "[INFO] Service ${service} exited with code ${exit_code_service} (by signal ${exit_code_signal})"
if [[ "${exit_code_service}" -eq 256 ]]; then
if [[ "${exit_code_container}" -eq 0 ]]; then
echo $((128 + exit_code_signal)) > /run/s6-linux-init-container-results/exitcode
echo $((128 + exit_code_signal)) >/run/s6-linux-init-container-results/exitcode
fi
elif [[ "${exit_code_service}" -ne 0 ]]; then
if [[ "${exit_code_container}" -eq 0 ]]; then
echo "${exit_code_service}" > /run/s6-linux-init-container-results/exitcode
echo "${exit_code_service}" >/run/s6-linux-init-container-results/exitcode
fi
else
# Exit code 0 is expected when Frigate is restarted by the user. In this case,
# we create a signal for the go2rtc finish script to tolerate the restart.
touch /dev/shm/restarting-frigate
fi
exec /run/s6/basedir/bin/halt

View File

@ -4,6 +4,8 @@
set -o errexit -o nounset -o pipefail
# Logs should be sent to stdout so that s6 can collect them
# Tell S6-Overlay not to restart this service
s6-svc -O .
@ -14,7 +16,7 @@ function migrate_db_path() {
if [[ -f "${config_file_yaml}" ]]; then
config_file="${config_file_yaml}"
elif [[ ! -f "${config_file}" ]]; then
echo "[ERROR] Frigate config file not found" >&2
echo "[ERROR] Frigate config file not found"
return 1
fi
unset config_file_yaml
@ -29,23 +31,23 @@ function migrate_db_path() {
if [[ -f "${previous_db_path}" ]]; then
if mountpoint --quiet "${new_db_dir}"; then
# /config is a mount point, move the db
echo "[INFO] Moving db from '${previous_db_path}' to the '${new_db_dir}' dir..." >&2
echo "[INFO] Moving db from '${previous_db_path}' to the '${new_db_dir}' dir..."
# Move all files that start with frigate.db to the new directory
mv -vf "${previous_db_path}"* "${new_db_dir}"
else
echo "[ERROR] Trying to migrate the db path from '${previous_db_path}' to the '${new_db_dir}' dir, but '${new_db_dir}' is not a mountpoint, please mount the '${new_db_dir}' dir" >&2
echo "[ERROR] Trying to migrate the db path from '${previous_db_path}' to the '${new_db_dir}' dir, but '${new_db_dir}' is not a mountpoint, please mount the '${new_db_dir}' dir"
return 1
fi
fi
fi
}
echo "[INFO] Preparing Frigate..." >&2
echo "[INFO] Preparing Frigate..."
migrate_db_path
echo "[INFO] Starting Frigate..." >&2
echo "[INFO] Starting Frigate..."
cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate" >&2
cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"
# Replace the bash process with the Frigate process, redirecting stderr to stdout
exec 2>&1

View File

@ -0,0 +1,12 @@
#!/command/with-contenv bash
# shellcheck shell=bash
set -o errexit -o nounset -o pipefail
# Logs should be sent to stdout so that s6 can collect them
readonly exit_code_service="${1}"
readonly exit_code_signal="${2}"
readonly service="go2rtc-healthcheck"
echo "[INFO] The ${service} service exited with code ${exit_code_service} (by signal ${exit_code_signal})"

View File

@ -0,0 +1 @@
go2rtc-log

View File

@ -0,0 +1,22 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Start the go2rtc-healthcheck service
set -o errexit -o nounset -o pipefail
# Logs should be sent to stdout so that s6 can collect them
# Give some additional time for go2rtc to start before we start pinging
sleep 10s
echo "[INFO] Starting go2rtc healthcheck service..."
while sleep 30s; do
# Check if the service is running
if ! curl --connect-timeout 10 --fail --silent --show-error --output /dev/null http://127.0.0.1:1984/api/streams 2>&1; then
echo "[ERROR] The go2rtc service is not responding to ping, restarting..."
# We can also use -r instead of -t to send kill signal rather than term
s6-svc -t /var/run/service/go2rtc 2>&1
# Give some additional time for go2rtc to restart before we start pinging again
sleep 10s
fi
done

View File

@ -0,0 +1 @@
longrun

View File

@ -1 +1,2 @@
go2rtc
go2rtc-healthcheck

View File

@ -1,32 +1,12 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Take down the S6 supervision tree when the service exits
set -o errexit -o nounset -o pipefail
declare exit_code_container
exit_code_container=$(cat /run/s6-linux-init-container-results/exitcode)
readonly exit_code_container
# Logs should be sent to stdout so that s6 can collect them
readonly exit_code_service="${1}"
readonly exit_code_signal="${2}"
readonly service="go2rtc"
echo "[INFO] Service ${service} exited with code ${exit_code_service} (by signal ${exit_code_signal})" >&2
if [[ "${exit_code_service}" -eq 256 ]]; then
if [[ "${exit_code_container}" -eq 0 ]]; then
echo $((128 + exit_code_signal)) > /run/s6-linux-init-container-results/exitcode
fi
elif [[ "${exit_code_service}" -ne 0 ]]; then
if [[ "${exit_code_container}" -eq 0 ]]; then
echo "${exit_code_service}" > /run/s6-linux-init-container-results/exitcode
fi
else
# go2rtc is not supposed to exit, so even when it exits with 0 we make the
# container exit with 1. We only tolerate it when Frigate is restarting.
if [[ "${exit_code_container}" -eq 0 && ! -f /dev/shm/restarting-frigate ]]; then
echo "1" > /run/s6-linux-init-container-results/exitcode
fi
fi
exec /run/s6/basedir/bin/halt
echo "[INFO] The ${service} service exited with code ${exit_code_service} (by signal ${exit_code_signal})"

View File

@ -4,8 +4,7 @@
set -o errexit -o nounset -o pipefail
# Tell S6-Overlay not to restart this service
s6-svc -O .
# Logs should be sent to stdout so that s6 can collect them
function get_ip_and_port_from_supervisor() {
local ip_address
@ -19,9 +18,9 @@ function get_ip_and_port_from_supervisor() {
jq --exit-status --raw-output '.data.ipv4.address[0]'
) && [[ "${ip_address}" =~ ${ip_regex} ]]; then
ip_address="${BASH_REMATCH[1]}"
echo "[INFO] Got IP address from supervisor: ${ip_address}" >&2
echo "[INFO] Got IP address from supervisor: ${ip_address}"
else
echo "[WARN] Failed to get IP address from supervisor" >&2
echo "[WARN] Failed to get IP address from supervisor"
return 0
fi
@ -35,26 +34,37 @@ function get_ip_and_port_from_supervisor() {
jq --exit-status --raw-output '.data.network["8555/tcp"]'
) && [[ "${webrtc_port}" =~ ${port_regex} ]]; then
webrtc_port="${BASH_REMATCH[1]}"
echo "[INFO] Got WebRTC port from supervisor: ${ip_address}" >&2
echo "[INFO] Got WebRTC port from supervisor: ${webrtc_port}"
else
echo "[WARN] Failed to get WebRTC port from supervisor" >&2
echo "[WARN] Failed to get WebRTC port from supervisor"
return 0
fi
export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}"
}
echo "[INFO] Preparing go2rtc config..." >&2
if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then
echo "[INFO] Preparing go2rtc config..."
if [[ -n "${SUPERVISOR_TOKEN:-}" ]]; then
# Running as a Home Assistant add-on, infer the IP address and port
get_ip_and_port_from_supervisor
if [[ -n "${SUPERVISOR_TOKEN:-}" ]]; then
# Running as a Home Assistant add-on, infer the IP address and port
get_ip_and_port_from_supervisor
fi
python3 /usr/local/go2rtc/create_config.py
fi
raw_config=$(python3 /usr/local/go2rtc/create_config.py)
readonly config_path="/config"
echo "[INFO] Starting go2rtc..." >&2
if [[ -x "${config_path}/go2rtc" ]]; then
readonly binary_path="${config_path}/go2rtc"
echo "[WARN] Using go2rtc binary from '${binary_path}' instead of the embedded one"
else
readonly binary_path="/usr/local/go2rtc/bin/go2rtc"
fi
echo "[INFO] Starting go2rtc..."
# Replace the bash process with the go2rtc process, redirecting stderr to stdout
exec 2>&1
exec go2rtc -config="${raw_config}"
exec "${binary_path}" -config=/dev/shm/go2rtc.yaml

View File

@ -4,6 +4,8 @@
set -o errexit -o nounset -o pipefail
# Logs should be sent to stdout so that s6 can collect them
declare exit_code_container
exit_code_container=$(cat /run/s6-linux-init-container-results/exitcode)
readonly exit_code_container
@ -11,18 +13,18 @@ readonly exit_code_service="${1}"
readonly exit_code_signal="${2}"
readonly service="NGINX"
echo "[INFO] Service ${service} exited with code ${exit_code_service} (by signal ${exit_code_signal})" >&2
echo "[INFO] Service ${service} exited with code ${exit_code_service} (by signal ${exit_code_signal})"
if [[ "${exit_code_service}" -eq 256 ]]; then
if [[ "${exit_code_container}" -eq 0 ]]; then
echo $((128 + exit_code_signal)) > /run/s6-linux-init-container-results/exitcode
echo $((128 + exit_code_signal)) >/run/s6-linux-init-container-results/exitcode
fi
if [[ "${exit_code_signal}" -eq 15 ]]; then
exec /run/s6/basedir/bin/halt
fi
elif [[ "${exit_code_service}" -ne 0 ]]; then
if [[ "${exit_code_container}" -eq 0 ]]; then
echo "${exit_code_service}" > /run/s6-linux-init-container-results/exitcode
echo "${exit_code_service}" >/run/s6-linux-init-container-results/exitcode
fi
exec /run/s6/basedir/bin/halt
fi

View File

@ -4,7 +4,9 @@
set -o errexit -o nounset -o pipefail
echo "[INFO] Starting NGINX..." >&2
# Logs should be sent to stdout so that s6 can collect them
echo "[INFO] Starting NGINX..."
# Replace the bash process with the NGINX process, redirecting stderr to stdout
exec 2>&1

View File

@ -5,8 +5,13 @@ import os
import sys
import yaml
sys.path.insert(0, "/opt/frigate")
from frigate.const import BIRDSEYE_PIPE, BTBN_PATH
from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode
sys.path.remove("/opt/frigate")
BTBN_PATH = "/usr/lib/btbn-ffmpeg"
FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
@ -19,12 +24,18 @@ with open(config_file) as f:
raw_config = f.read()
if config_file.endswith((".yaml", ".yml")):
config = yaml.safe_load(raw_config)
config: dict[str, any] = yaml.safe_load(raw_config)
elif config_file.endswith(".json"):
config = json.loads(raw_config)
config: dict[str, any] = json.loads(raw_config)
go2rtc_config: dict[str, any] = config.get("go2rtc", {})
# Need to enable CORS for go2rtc so the frigate integration / card work automatically
if go2rtc_config.get("api") is None:
go2rtc_config["api"] = {"origin": "*"}
elif go2rtc_config["api"].get("origin") is None:
go2rtc_config["api"]["origin"] = "*"
# we want to ensure that logs are easy to read
if go2rtc_config.get("log") is None:
go2rtc_config["log"] = {"format": "text"}
@ -34,7 +45,9 @@ elif go2rtc_config["log"].get("format") is None:
if not go2rtc_config.get("webrtc", {}).get("candidates", []):
default_candidates = []
# use internal candidate if it was discovered when running through the add-on
internal_candidate = os.environ.get("FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL", None)
internal_candidate = os.environ.get(
"FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL", None
)
if internal_candidate is not None:
default_candidates.append(internal_candidate)
# should set default stun server so webrtc can work
@ -42,7 +55,17 @@ if not go2rtc_config.get("webrtc", {}).get("candidates", []):
go2rtc_config["webrtc"] = {"candidates": default_candidates}
else:
print("[INFO] Not injecting WebRTC candidates into go2rtc config as it has been set manually", file=sys.stderr)
print(
"[INFO] Not injecting WebRTC candidates into go2rtc config as it has been set manually",
)
# sets default RTSP response to be equivalent to ?video=h264,h265&audio=aac
# this means user does not need to specify audio codec when using restream
# as source for frigate and the integration supports HLS playback
if go2rtc_config.get("rtsp") is None:
go2rtc_config["rtsp"] = {"default_query": "mp4"}
elif go2rtc_config["rtsp"].get("default_query") is None:
go2rtc_config["rtsp"]["default_query"] = "mp4"
# need to replace ffmpeg command when using ffmpeg4
if not os.path.exists(BTBN_PATH):
@ -54,14 +77,30 @@ if not os.path.exists(BTBN_PATH):
go2rtc_config["ffmpeg"][
"rtsp"
] = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
for name in go2rtc_config.get("streams", {}):
stream = go2rtc_config["streams"][name]
if isinstance(stream, str):
go2rtc_config["streams"][name] = go2rtc_config["streams"][name].format(**FRIGATE_ENV_VARS)
go2rtc_config["streams"][name] = go2rtc_config["streams"][name].format(
**FRIGATE_ENV_VARS
)
elif isinstance(stream, list):
for i, stream in enumerate(stream):
go2rtc_config["streams"][name][i] = stream.format(**FRIGATE_ENV_VARS)
print(json.dumps(go2rtc_config))
# add birdseye restream stream if enabled
if config.get("birdseye", {}).get("restream", False):
birdseye: dict[str, any] = config.get("birdseye")
input = f"-f rawvideo -pix_fmt yuv420p -video_size {birdseye.get('width', 1280)}x{birdseye.get('height', 720)} -r 10 -i {BIRDSEYE_PIPE}"
ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(config.get('ffmpeg', {}).get('hwaccel_args'), input, '-rtsp_transport tcp -f rtsp {output}')}"
if go2rtc_config.get("streams"):
go2rtc_config["streams"]["birdseye"] = ffmpeg_cmd
else:
go2rtc_config["streams"] = {"birdseye": ffmpeg_cmd}
# Write go2rtc_config to /dev/shm/go2rtc.yaml
with open("/dev/shm/go2rtc.yaml", "w") as f:
yaml.dump(go2rtc_config, f)

View File

@ -16,7 +16,7 @@ Note that mjpeg cameras require encoding the video into h264 for recording, and
```yaml
go2rtc:
streams:
mjpeg_cam: ffmpeg:{your_mjpeg_stream_url}#video=h264#hardware # <- use hardware acceleration to create an h264 stream usable for other components.
mjpeg_cam: "ffmpeg:{your_mjpeg_stream_url}#video=h264#hardware" # <- use hardware acceleration to create an h264 stream usable for other components.
cameras:
...
@ -110,7 +110,7 @@ go2rtc:
streams:
reolink:
- http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password
- ffmpeg:reolink#audio=opus
- "ffmpeg:reolink#audio=opus"
reolink_sub:
- http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password
@ -126,19 +126,28 @@ cameras:
input_args: preset-rtsp-restream
roles:
- detect
detect:
width: 896
height: 672
fps: 7
```
### Unifi Protect Cameras
In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record and rtmp.
Unifi protect cameras require the rtspx stream to be used with go2rtc.
To utilize a Unifi protect camera, modify the rtsps link to begin with rtspx.
Additionally, remove the "?enableSrtp" from the end of the Unifi link.
```yaml
go2rtc:
streams:
front:
- rtspx://192.168.1.1:7441/abcdefghijk
```
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#source-rtsp)
In the Unifi 2.0 update, Unifi Protect cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record and rtmp if used directly with Unifi Protect.
```yaml
ffmpeg:
output_args:
record: preset-record-ubiquiti
rtmp: preset-rtmp-ubiquiti
rtmp: preset-rtmp-ubiquiti # recommend using go2rtc instead
```

View File

@ -101,7 +101,7 @@ The OpenVINO device to be used is specified using the `"device"` attribute accor
OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. A supported Intel platform is required to use the `GPU` device with OpenVINO. The `MYRIAD` device may be run on any platform, including Arm devices. For detailed system requirements, see [OpenVINO System Requirements](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/system-requirements.html)
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector.
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.
```yaml
detectors:
@ -119,6 +119,25 @@ model:
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
```
This detector also supports some YOLO variants: YOLOX, YOLOv5, and YOLOv8 specifically. Other YOLO variants are not officially supported/tested. Frigate does not come with any yolo models preloaded, so you will need to supply your own models. This detector has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. You can follow [these instructions](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny#download-a-model-and-convert-it-into-openvino-ir-format) to retrieve the OpenVINO-compatible `yolox_tiny` model. Make sure that the model input dimensions match the `width` and `height` parameters, and `model_type` is set accordingly. See [Full Configuration Reference](/configuration/index.md#full-configuration-reference) for a list of possible `model_type` options. Below is an example of how `yolox_tiny` can be used in Frigate:
```yaml
detectors:
ov:
type: openvino
device: AUTO
model:
path: /path/to/yolox_tiny.xml
model:
width: 416
height: 416
input_tensor: nchw
input_pixel_format: bgr
model_type: yolox
labelmap_path: /path/to/coco_80cl.txt
```
### Intel NCS2 VPU and Myriad X Setup
Intel produces a neural net inference acceleration chip called Myriad X. This chip was sold in their Neural Compute Stick 2 (NCS2), which has been discontinued. If intending to use the MYRIAD device for acceleration, additional setup is required to pass through the USB device. The host needs a udev rule installed to handle the NCS2 device.
@ -212,6 +231,10 @@ yolov4x-mish-320
yolov4x-mish-640
yolov7-tiny-288
yolov7-tiny-416
yolov7-640
yolov7-320
yolov7x-640
yolov7x-320
```
### Configuration Parameters
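For reference, a minimal TensorRT detector configuration along the lines described in this section might look like the sketch below; the model path, dimensions, and pixel format are illustrative and must match the model you generated.

```yaml
detectors:
  tensorrt:
    type: tensorrt
    device: 0 # which Nvidia GPU to use (assumption: single-GPU host)

model:
  path: /trt-models/yolov7-tiny-416.trt # illustrative path to a generated model
  input_tensor: nchw
  input_pixel_format: rgb
  width: 416
  height: 416
```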

View File

@ -28,16 +28,17 @@ Input args presets help make the config more readable and handle use cases for d
See [the camera specific docs](/configuration/camera_specific.md) for more info on non-standard cameras and recommendations for using them in Frigate.
| Preset | Usage | Other Notes |
| ------------------------- | ------------------------- | --------------------------------------------------- |
| preset-http-jpeg-generic | HTTP Live Jpeg | Recommend restreaming live jpeg instead |
| preset-http-mjpeg-generic | HTTP Mjpeg Stream | Recommend restreaming mjpeg stream instead |
| preset-http-reolink | Reolink HTTP-FLV Stream | Only for reolink http, not when restreaming as rtsp |
| preset-rtmp-generic | RTMP Stream | |
| preset-rtsp-generic | RTSP Stream | This is the default when nothing is specified |
| preset-rtsp-restream | RTSP Stream from restream | Use when using rtsp restream as source |
| preset-rtsp-udp | RTSP Stream via UDP | Use when camera is UDP only |
| preset-rtsp-blue-iris | Blue Iris RTSP Stream | Use when consuming a stream from Blue Iris |
| Preset | Usage | Other Notes |
| -------------------------------- | ------------------------- | ------------------------------------------------------------------------------------------------ |
| preset-http-jpeg-generic | HTTP Live Jpeg | Recommend restreaming live jpeg instead |
| preset-http-mjpeg-generic | HTTP Mjpeg Stream | Recommend restreaming mjpeg stream instead |
| preset-http-reolink | Reolink HTTP-FLV Stream | Only for reolink http, not when restreaming as rtsp |
| preset-rtmp-generic | RTMP Stream | |
| preset-rtsp-generic | RTSP Stream | This is the default when nothing is specified |
| preset-rtsp-restream | RTSP Stream from restream | Use for rtsp restream as source for frigate |
| preset-rtsp-restream-low-latency | RTSP Stream from restream | Use for rtsp restream as source for frigate to lower latency, may cause issues with some cameras |
| preset-rtsp-udp | RTSP Stream via UDP | Use when camera is UDP only |
| preset-rtsp-blue-iris | Blue Iris RTSP Stream | Use when consuming a stream from Blue Iris |
:::caution
@ -46,21 +47,22 @@ It is important to be mindful of input args when using restream because you can
:::
```yaml
go2rtc:
streams:
reolink_cam: http://192.168.0.139/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=admin&password=password
cameras:
reolink_cam:
ffmpeg:
inputs:
- path: http://192.168.0.139/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=admin&password={FRIGATE_CAM_PASSWORD}
- path: http://192.168.0.139/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=admin&password=password
input_args: preset-http-reolink
roles:
- detect
- path: rtsp://192.168.0.10:8554/garage
- path: rtsp://127.0.0.1:8554/reolink_cam
input_args: preset-rtsp-generic
roles:
- record
- path: http://192.168.0.139/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=admin&password={FRIGATE_CAM_PASSWORD}
roles:
- restream
```
### Output Args Presets
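As a hedged sketch, output args presets are applied per role in the `ffmpeg` section; `preset-record-generic-audio-copy` is used elsewhere in these docs to copy audio into recordings without re-encoding.

```yaml
ffmpeg:
  output_args:
    # copy both video and audio streams into recordings without re-encoding
    record: preset-record-generic-audio-copy
```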

View File

@ -15,23 +15,39 @@ ffmpeg:
hwaccel_args: preset-rpi-64-h264
```
### Intel-based CPUs (<10th Generation) via Quicksync
### Intel-based CPUs (<10th Generation) via VAAPI
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. VAAPI is recommended for all generations of Intel-based CPUs if QSV does not work.
```yaml
ffmpeg:
hwaccel_args: preset-vaapi
```
**NOTICE**: With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
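For Docker Compose users, a minimal sketch of passing that override (the service name is illustrative):

```yaml
services:
  frigate:
    environment:
      - LIBVA_DRIVER_NAME=i965 # force the older i965 VAAPI driver, e.g. on a J4125
```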
### Intel-based CPUs (>=10th Generation) via Quicksync
QSV must be set specifically based on the video encoding of the stream.
#### H.264 streams
```yaml
ffmpeg:
hwaccel_args: preset-intel-qsv-h264
```
#### H.265 streams
```yaml
ffmpeg:
hwaccel_args: preset-intel-qsv-h265
```
### AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
**Note:** You also need to set `LIBVA_DRIVER_NAME=radeonsi` as an environment variable on the container.
```yaml

View File

@ -3,7 +3,7 @@ id: index
title: Configuration File
---
For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`) and named `frigate.yaml`.
For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`). It can be named `frigate.yaml` or `frigate.yml`, but if both files exist `frigate.yaml` will be preferred and `frigate.yml` will be ignored.
For all other installation types, the config file should be mapped to `/config/config.yml` inside the container.
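For example, a hedged Docker Compose sketch of that mapping (the host path is illustrative):

```yaml
services:
  frigate:
    volumes:
      - ./config.yml:/config/config.yml # map the host config file into the container
```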
@ -19,7 +19,6 @@ cameras:
- path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
roles:
- detect
- restream
detect:
width: 1280
height: 720
@ -37,6 +36,25 @@ It is not recommended to copy this full configuration file. Only specify values
:::
**Note:** The following values will be replaced at runtime by using environment variables
- `{FRIGATE_MQTT_USER}`
- `{FRIGATE_MQTT_PASSWORD}`
- `{FRIGATE_RTSP_USER}`
- `{FRIGATE_RTSP_PASSWORD}`
for example:
```yaml
mqtt:
user: "{FRIGATE_MQTT_USER}"
password: "{FRIGATE_MQTT_PASSWORD}"
```
```yaml
- path: rtsp://{FRIGATE_RTSP_USER}:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:8554/unicast
```
```yaml
mqtt:
# Optional: Enable mqtt server (default: shown below)
@ -105,6 +123,9 @@ model:
# Optional: Object detection model input tensor format
# Valid values are nhwc or nchw (default: shown below)
input_tensor: nhwc
# Optional: Object detection model type, currently only used with the OpenVINO detector
# Valid values are ssd, yolox, yolov5, or yolov8 (default: shown below)
model_type: ssd
# Optional: Label name modifications. These are merged into the standard labelmap.
labelmap:
2: vehicle
@ -146,7 +167,7 @@ birdseye:
# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets
ffmpeg:
# Optional: global ffmpeg args (default: shown below)
global_args: -hide_banner -loglevel warning
global_args: -hide_banner -loglevel warning -threads 2
# Optional: global hwaccel args (default: shown below)
# NOTE: See hardware acceleration docs for your specific device
hwaccel_args: []
@ -155,7 +176,7 @@ ffmpeg:
# Optional: global output args
output_args:
# Optional: output args for detect streams (default: shown below)
detect: -f rawvideo -pix_fmt yuv420p
detect: -threads 2 -f rawvideo -pix_fmt yuv420p
# Optional: output args for record streams (default: shown below)
record: preset-record-generic
# Optional: output args for rtmp streams (default: shown below)
@ -172,7 +193,6 @@ detect:
# NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera.
fps: 5
# Optional: enables detection for the camera (default: True)
# This value can be set via MQTT and will be updated in startup based on retained value
enabled: True
# Optional: Number of frames without a detection before Frigate considers an object to be gone. (default: 5x the frame rate)
max_disappeared: 25
@ -271,11 +291,6 @@ record:
# Optional: Enable recording (default: shown below)
# WARNING: If recording is disabled in the config, turning it on via
# the UI or MQTT later will have no effect.
# WARNING: Frigate does not currently support limiting recordings based
# on available disk space automatically. If using recordings,
# you must specify retention settings for a number of days that
# will fit within the available disk space of your drive or Frigate
# will crash.
enabled: False
# Optional: Number of minutes to wait between cleanup runs (default: shown below)
# This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
@ -325,7 +340,6 @@ record:
# NOTE: Can be overridden at the camera level
snapshots:
# Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below)
# This value can be set via MQTT and will be updated in startup based on retained value
enabled: False
# Optional: save a clean PNG copy of the snapshot image (default: shown below)
clean_copy: True
@ -355,7 +369,7 @@ rtmp:
enabled: False
# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.0.1)
# Uses https://github.com/AlexxIT/go2rtc (v1.2.0)
go2rtc:
# Optional: jsmpeg stream configuration for WebUI
@ -410,12 +424,12 @@ cameras:
# Required: the path to the stream
# NOTE: path may include environment variables, which must begin with 'FRIGATE_' and be referenced in {}
- path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
# Required: list of roles for this stream. valid values are: detect,record,restream,rtmp
# NOTICE: In addition to assigning the record, restream, and rtmp roles,
# Required: list of roles for this stream. valid values are: detect,record,rtmp
# NOTICE: In addition to assigning the record and rtmp roles,
# they must also be enabled in the camera config.
roles:
- detect
- restream
- record
- rtmp
# Optional: stream specific global args (default: inherit)
# global_args:
@ -488,9 +502,32 @@ ui:
# Optional: Set the default live mode for cameras in the UI (default: shown below)
live_mode: mse
# Optional: Set a timezone to use in the UI (default: use browser local time)
timezone: None
# timezone: America/Denver
# Optional: Use an experimental recordings / camera view UI (default: shown below)
experimental_ui: False
use_experimental: False
# Optional: Set the time format used.
# Options are browser, 12hour, or 24hour (default: shown below)
time_format: browser
# Optional: Set the date style for a specified length.
# Options are: full, long, medium, short
# Examples:
# short: 2/11/23
# medium: Feb 11, 2023
# full: Saturday, February 11, 2023
# (default: shown below).
date_style: short
# Optional: Set the time style for a specified length.
# Options are: full, long, medium, short
# Examples:
# short: 8:14 PM
# medium: 8:15:22 PM
# full: 8:15:22 PM Mountain Standard Time
# (default: shown below).
time_style: medium
# Optional: Ability to manually override the date / time styling to use strftime format
# https://www.gnu.org/software/libc/manual/html_node/Formatting-Calendar-Time.html
# possible values are shown above (default: not set)
strftime_fmt: "%Y/%m/%d %H:%M"
# Optional: Telemetry configuration
telemetry:

View File

@ -3,17 +3,17 @@ id: live
title: Live View
---
Frigate has different live view options, some of which require [restream](restream.md) to be enabled.
Frigate has different live view options, some of which require the bundled `go2rtc` to be configured as shown in the [step by step guide](/guides/configuring_go2rtc).
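For orientation, a minimal go2rtc stream definition of the kind that guide produces (the camera URL is illustrative):

```yaml
go2rtc:
  streams:
    rtsp_cam:
      - rtsp://192.168.1.5:554/live0 # camera stream with H.264 video
```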
## Live View Options
Live view options can be selected while viewing the live stream. The options are:
| Source | Latency | Frame Rate | Resolution | Audio | Requires Restream | Other Limitations |
| ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | ----------------- | -------------------------------------------- |
| jsmpeg | low | same as `detect -> fps`, capped at 10 | same as detect | no | no | none |
| mse | low | native | native | yes (depends on audio codec) | yes | not supported on iOS, Firefox is h.264 only |
| webrtc | lowest | native | native | yes (depends on audio codec) | yes | requires extra config, doesn't support h.265 |
| Source | Latency | Frame Rate | Resolution | Audio | Requires go2rtc | Other Limitations |
| ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | --------------- | -------------------------------------------- |
| jsmpeg | low | same as `detect -> fps`, capped at 10 | same as detect | no | no | none |
| mse | low | native | native | yes (depends on audio codec) | yes | not supported on iOS, Firefox is h.264 only |
| webrtc | lowest | native | native | yes (depends on audio codec) | yes | requires extra config, doesn't support h.265 |
### Audio Support
@ -24,9 +24,10 @@ go2rtc:
streams:
rtsp_cam: # <- for RTSP streams
- rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio
- ffmpeg:rtsp_cam#audio=opus # <- copy of the stream which transcodes audio to the missing codec (usually will be opus)
- "ffmpeg:rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus)
http_cam: # <- for http streams
- "ffmpeg:http://192.168.50.155/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=user&password=password#video=copy#audio=copy#audio=opus" # <- http streams must use ffmpeg to set all types
- http://192.168.50.155/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=user&password=password # <- stream which supports video & aac audio
- "ffmpeg:http_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus)
```
### Setting Stream For Live UI
@ -36,12 +37,12 @@ There may be some cameras that you would prefer to use the sub stream for live v
```yaml
go2rtc:
streams:
rtsp_cam:
- rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio. This is only supported for rtsp streams, http must use ffmpeg
- ffmpeg:rtsp_cam#audio=opus # <- copy of the stream which transcodes audio to opus
rtsp_cam:
- rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio.
- "ffmpeg:rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to opus
rtsp_cam_sub:
- rtsp://192.168.1.5:554/substream # <- stream which supports video & aac audio. This is only supported for rtsp streams, http must use ffmpeg
- ffmpeg:rtsp_cam_sub#audio=opus # <- copy of the stream which transcodes audio to opus
- rtsp://192.168.1.5:554/substream # <- stream which supports video & aac audio.
- "ffmpeg:rtsp_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus
cameras:
test_cam:
@ -49,16 +50,16 @@ cameras:
output_args:
record: preset-record-generic-audio-copy
inputs:
- path: rtsp://127.0.0.1:8554/test_cam?video=copy&audio=aac # <--- the name here must match the name of the camera in restream
- path: rtsp://127.0.0.1:8554/test_cam # <--- the name here must match the name of the camera in restream
input_args: preset-rtsp-restream
roles:
- record
- path: rtsp://127.0.0.1:8554/test_cam_sub?video=copy # <--- the name here must match the name of the camera_sub in restream
- path: rtsp://127.0.0.1:8554/test_cam_sub # <--- the name here must match the name of the camera_sub in restream
input_args: preset-rtsp-restream
roles:
- detect
live:
stream_name: test_cam_sub
stream_name: rtsp_cam_sub
```
### WebRTC extra configuration:
@ -68,15 +69,15 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req
- For external access, over the internet, setup your router to forward port `8555` to port `8555` on the Frigate device, for both TCP and UDP.
- For internal/local access, unless you are running through the add-on, you will also need to set the WebRTC candidates list in the go2rtc config. For example, if `192.168.1.10` is the local IP of the device running Frigate:
```yaml title="/config/frigate.yaml"
go2rtc:
streams:
test_cam: ...
webrtc:
candidates:
- 192.168.1.10:8555
- stun:8555
```
```yaml title="/config/frigate.yaml"
go2rtc:
streams:
test_cam: ...
webrtc:
candidates:
- 192.168.1.10:8555
- stun:8555
```
:::tip
@ -100,4 +101,4 @@ If you are having difficulties getting WebRTC to work and you are running Frigat
:::
See https://github.com/AlexxIT/go2rtc#module-webrtc for more information about this.
See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#module-webrtc) for more information about this.

View File

@ -3,17 +3,38 @@ id: restream
title: Restream
---
### RTSP
## RTSP
Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` key in the Frigate config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc#configuration) for more advanced configurations and features.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.2.0) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` key in the Frigate config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#configuration) for more advanced configurations and features.
#### Birdseye Restream
:::note
You can access the go2rtc webUI at `http://frigate_ip:5000/live/webrtc` which can be helpful to debug as well as provide useful information about your camera streams.
:::
### Birdseye Restream
Birdseye RTSP restream can be enabled at `birdseye -> restream` and accessed at `rtsp://<frigate_host>:8554/birdseye`. Enabling the restream will cause birdseye to run 24/7 which may increase CPU usage somewhat.
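A minimal sketch of enabling it:

```yaml
birdseye:
  enabled: True
  restream: True # exposes rtsp://<frigate_host>:8554/birdseye
```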
### RTMP (Deprecated)
### Securing Restream With Authentication
The go2rtc restream can be secured with RTSP-based username/password authentication. Ex:
```yaml
go2rtc:
rtsp:
username: "admin"
password: "pass"
streams:
...
```
**NOTE:** This does not apply to localhost requests; there is no need to provide credentials when using the restream as a source for frigate cameras.
## RTMP (Deprecated)
In previous Frigate versions RTMP was used for re-streaming. However, RTMP has disadvantages, including incompatibility with H.265, high bitrates, and certain audio codecs. RTMP is deprecated and it is recommended to move to the new restream role.
@ -30,10 +51,10 @@ go2rtc:
streams:
rtsp_cam: # <- for RTSP streams
- rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio
- ffmpeg:rtsp_cam#audio=opus # <- copy of the stream which transcodes audio to the missing codec (usually will be opus)
- "ffmpeg:rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus)
http_cam: # <- for other streams
- http://192.168.50.155/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=user&password=password # <- stream which supports video & aac audio
- ffmpeg:http_cam#audio=opus # <- copy of the stream which transcodes audio to the missing codec (usually will be opus)
- "ffmpeg:http_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus)
cameras:
rtsp_cam:
@ -41,7 +62,7 @@ cameras:
output_args:
record: preset-record-generic-audio-copy
inputs:
- path: rtsp://127.0.0.1:8554/rtsp_cam?video=copy&audio=aac # <--- the name here must match the name of the camera in restream
- path: rtsp://127.0.0.1:8554/rtsp_cam # <--- the name here must match the name of the camera in restream
input_args: preset-rtsp-restream
roles:
- record
@ -51,7 +72,7 @@ cameras:
output_args:
record: preset-record-generic-audio-copy
inputs:
- path: rtsp://127.0.0.1:8554/http_cam?video=copy&audio=aac # <--- the name here must match the name of the camera in restream
- path: rtsp://127.0.0.1:8554/http_cam # <--- the name here must match the name of the camera in restream
input_args: preset-rtsp-restream
roles:
- record
@ -67,16 +88,16 @@ go2rtc:
streams:
rtsp_cam:
- rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio. This is only supported for rtsp streams, http must use ffmpeg
- ffmpeg:rtsp_cam#audio=opus # <- copy of the stream which transcodes audio to opus
- "ffmpeg:rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to opus
rtsp_cam_sub:
- rtsp://192.168.1.5:554/substream # <- stream which supports video & aac audio. This is only supported for rtsp streams, http must use ffmpeg
- ffmpeg:rtsp_cam_sub#audio=opus # <- copy of the stream which transcodes audio to opus
- "ffmpeg:rtsp_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus
http_cam:
- http://192.168.50.155/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=user&password=password # <- stream which supports video & aac audio. This is only supported for rtsp streams, http must use ffmpeg
- ffmpeg:http_cam#audio=opus # <- copy of the stream which transcodes audio to opus
- "ffmpeg:http_cam#audio=opus" # <- copy of the stream which transcodes audio to opus
http_cam_sub:
- http://192.168.50.155/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=user&password=password # <- stream which supports video & aac audio. This is only supported for rtsp streams, http must use ffmpeg
- ffmpeg:http_cam_sub#audio=opus # <- copy of the stream which transcodes audio to opus
- "ffmpeg:http_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus
cameras:
rtsp_cam:
@ -84,11 +105,11 @@ cameras:
output_args:
record: preset-record-generic-audio-copy
inputs:
- path: rtsp://127.0.0.1:8554/rtsp_cam?video=copy&audio=aac # <--- the name here must match the name of the camera in restream
- path: rtsp://127.0.0.1:8554/rtsp_cam # <--- the name here must match the name of the camera in restream
input_args: preset-rtsp-restream
roles:
- record
- path: rtsp://127.0.0.1:8554/rtsp_cam_sub?video=copy&audio=aac # <--- the name here must match the name of the camera_sub in restream
- path: rtsp://127.0.0.1:8554/rtsp_cam_sub # <--- the name here must match the name of the camera_sub in restream
input_args: preset-rtsp-restream
roles:
- detect
@ -97,11 +118,11 @@ cameras:
output_args:
record: preset-record-generic-audio-copy
inputs:
- path: rtsp://127.0.0.1:8554/http_cam?video=copy&audio=aac # <--- the name here must match the name of the camera in restream
- path: rtsp://127.0.0.1:8554/http_cam # <--- the name here must match the name of the camera in restream
input_args: preset-rtsp-restream
roles:
- record
- path: rtsp://127.0.0.1:8554/http_cam_sub?video=copy&audio=aac # <--- the name here must match the name of the camera_sub in restream
- path: rtsp://127.0.0.1:8554/http_cam_sub # <--- the name here must match the name of the camera_sub in restream
input_args: preset-rtsp-restream
roles:
- detect
@ -109,7 +130,7 @@ cameras:
## Advanced Restream Configurations
The [exec](https://github.com/AlexxIT/go2rtc#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
NOTE: The output will need to be passed with two curly braces `{{output}}`
@ -118,15 +139,3 @@ go2rtc:
streams:
stream1: exec:ffmpeg -hide_banner -re -stream_loop -1 -i /media/BigBuckBunny.mp4 -c copy -rtsp_transport tcp -f rtsp {{output}}
```
## Go2rtc Exec
Go2rtc offers the ability to [run a full command with exec](https://github.com/AlexxIT/go2rtc#source-exec) and calls for `{output}` at the end of the stream. Due to frigate's handling of templates, the output will need to be passed as `{{output}}`.
ex:
```yaml
go2rtc:
streams:
test: exec:ffmpeg -hide_banner -re -stream_loop -1 -i /media/BigBuckBunny.mp4 -c copy -rtsp_transport tcp -f rtsp {{output}}
```

View File

@ -11,6 +11,24 @@ During testing, enable the Zones option for the debug feed so you can adjust as
To create a zone, follow [the steps for a "Motion mask"](masks.md), but use the section of the web UI for creating a zone instead.
### Restricting events to specific zones
Often you will only want events to be created when an object enters areas of interest. This is done using zones along with setting required_zones. Let's say you only want to be notified when an object enters your entire_yard zone; the config would be:
```yaml
camera:
record:
events:
required_zones:
- entire_yard
snapshots:
required_zones:
- entire_yard
zones:
entire_yard:
coordinates: ...
```
### Restricting zones to specific objects
Sometimes you want to limit a zone to specific object types to have more granular control of when events/snapshots are saved. The following example will limit one zone to person objects and the other to cars.
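A sketch of such a configuration, using illustrative zone names and the docs' placeholder coordinates:

```yaml
cameras:
  front_yard_cam:
    zones:
      walkway:
        coordinates: ...
        objects:
          - person
      street:
        coordinates: ...
        objects:
          - car
```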

View File

@ -3,7 +3,7 @@ id: camera_setup
title: Camera setup
---
Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but far less compatibility. Safari and Edge are the only browsers able to play H.265. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. There are three different goals that you want to tune your stream configurations around.
Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but less compatibility. Chrome 108+, Safari and Edge are the only browsers able to play H.265 and only support a limited number of H.265 profiles. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. There are three different goals that you want to tune your stream configurations around.
- **Detection**: This is the only stream that Frigate will decode for processing. Also, this is the stream where snapshots will be generated from. The resolution for detection should be tuned for the size of the objects you want to detect. See [Choosing a detect resolution](#choosing-a-detect-resolution) for more details. The recommended frame rate is 5fps, but may need to be higher for very fast moving objects. Higher resolutions and frame rates will drive higher CPU usage on your server.
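A minimal sketch of tuning the detect stream accordingly (values are illustrative for a 720p sub-stream):

```yaml
detect:
  width: 1280
  height: 720
  fps: 5 # recommended detection frame rate
```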

View File

@ -23,15 +23,11 @@ I may earn a small commission for my endorsement, recommendation, testimonial, o
My current favorite is the Minisforum GK41 because of the dual NICs that allow you to set up a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral.
| Name | Coral Inference Speed | Coral Compatibility | Notes |
| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------- |
| Odyssey X86 Blue J4125 (<a href="https://amzn.to/3oH4BKi" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) (<a href="https://www.seeedstudio.com/Frigate-NVR-with-Odyssey-Blue-and-Coral-USB-Accelerator.html?utm_source=Frigate" target="_blank" rel="nofollow noopener sponsored">SeeedStudio</a>) | 9-10ms | M.2 B+M, USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
| Minisforum GK41 (<a href="https://amzn.to/3ptnb8D" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 9-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
| Beelink GK55 (<a href="https://amzn.to/35E79BC" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 9-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
| Intel NUC (<a href="https://amzn.to/3psFlHi" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 8-10ms | USB | Overkill for most, but great performance. Can handle many cameras at 5fps depending on typical amounts of motion. Requires extra parts. |
| BMAX B2 Plus (<a href="https://amzn.to/3a6TBh8" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 10-12ms | USB | Good balance of performance and cost. Also capable of running many other services at the same time as Frigate. |
| Atomic Pi (<a href="https://amzn.to/2YjpY9m" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 16ms | USB | Good option for a dedicated low power board with a small number of cameras. Can leverage Intel QuickSync for stream decoding. |
| Raspberry Pi 4 (64bit) (<a href="https://amzn.to/2YhSGHH" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 10-15ms | USB | Can handle a small number of cameras. |
| Name | Coral Inference Speed | Coral Compatibility | Notes |
| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------- |
| Odyssey X86 Blue J4125 (<a href="https://amzn.to/3oH4BKi" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) (<a href="https://www.seeedstudio.com/Frigate-NVR-with-Odyssey-Blue-and-Coral-USB-Accelerator.html?utm_source=Frigate" target="_blank" rel="nofollow noopener sponsored">SeeedStudio</a>) | 9-10ms | M.2 B+M, USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
| Minisforum GK41 (<a href="https://amzn.to/3ptnb8D" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 9-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
| Intel NUC (<a href="https://amzn.to/3psFlHi" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 8-10ms | USB | Overkill for most, but great performance. Can handle many cameras at 5fps depending on typical amounts of motion. Requires extra parts. |
## Detectors
@ -50,19 +46,29 @@ A single Coral can handle many cameras and will be sufficient for the majority o
### OpenVINO
The OpenVINO detector type is able to run on:
- 6th Gen Intel Platforms and newer that have an iGPU
- x86 & Arm32/64 hosts with VPU Hardware (ex: Intel NCS2)
- x86 & Arm64 hosts with VPU Hardware (ex: Intel NCS2)
More information is available [in the detector docs](/configuration/detectors#openvino-detector)
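For reference, here is a minimal detector config sketch using the OpenVINO model bundled in the Frigate container; the device name and paths are illustrative and may differ for your setup:

```yaml
detectors:
  ov:
    type: openvino
    device: AUTO # or CPU / GPU / MYRIAD
    model:
      path: /openvino-model/ssdlite_mobilenet_v2.xml

model:
  width: 300
  height: 300
  input_tensor: nhwc
  input_pixel_format: bgr
  labelmap_path: /openvino-model/coco_91cl_bkgr.txt
```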
Inference speeds vary greatly depending on the CPU, GPU, or VPU used; some known examples are below:
| Name | Inference Speed | Notes |
| ------------------- | --------------- | --------------------------------------------------------------------- |
| Intel Celeron J4105 | ~ 25 ms | Inference speeds on CPU were ~ 150 ms |
| Intel Celeron N4020 | 50 - 200 ms | Inference speeds on CPU were ~ 800 ms, greatly depends on other loads |
| Intel NCS2 VPU | 60 - 65 ms | May vary based on host device |
| Intel i5 1135G7 | 10 - 15 ms | |
| Name | Inference Speed | Notes |
| -------------------- | --------------- | --------------------------------------------------------------------- |
| Intel NCS2 VPU | 60 - 65 ms | May vary based on host device |
| Intel Celeron J4105 | ~ 25 ms | Inference speeds on CPU were 150 - 200 ms |
| Intel Celeron N3060 | 130 - 150 ms | Inference speeds on CPU were ~ 550 ms |
| Intel Celeron N3205U | ~ 120 ms | Inference speeds on CPU were ~ 380 ms |
| Intel Celeron N4020 | 50 - 200 ms | Inference speeds on CPU were ~ 800 ms, greatly depends on other loads |
| Intel i3 6100T | 15 - 35 ms | Inference speeds on CPU were 60 - 120 ms |
| Intel i3 8100 | ~ 15 ms | Inference speeds on CPU were ~ 65 ms |
| Intel i5 4590 | ~ 20 ms | Inference speeds on CPU were ~ 230 ms |
| Intel i5 6500 | ~ 15 ms | Inference speeds on CPU were ~ 150 ms |
| Intel i5 7200u | 15 - 25 ms | Inference speeds on CPU were ~ 150 ms |
| Intel i5 7500 | ~ 15 ms | Inference speeds on CPU were ~ 260 ms |
| Intel i5 1135G7 | 10 - 15 ms | |
| Intel i5 12600K | ~ 15 ms | Inference speeds on CPU were ~ 35 ms |
### TensorRT
@ -71,10 +77,15 @@ The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which
Inference speeds will vary greatly depending on the GPU and the model used.
`tiny` variants are faster than the equivalent non-tiny model; some known examples are below:
| Name | Model | Inference Speed |
| -------- | --------------- | --------------- |
| RTX 3050 | yolov4-tiny-416 | ~ 5 ms |
| RTX 3050 | yolov7-tiny-416 | ~ 6 ms |
| Name | Inference Speed |
| --------------- | --------------- |
| GTX 1060 6GB | ~ 7 ms |
| GTX 1070 | ~ 6 ms |
| GTX 1660 SUPER | ~ 4 ms |
| RTX 3050 | 5 - 7 ms |
| RTX 3070 Mobile | ~ 5 ms |
| Quadro P400 2GB | 20 - 25 ms |
| Quadro P2000 | ~ 12 ms |
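As a rough sketch, a TensorRT detector is configured along these lines. This assumes a `yolov7-tiny-416` model has already been generated as described in the TensorRT detector docs; the model path and device index are illustrative:

```yaml
detectors:
  tensorrt:
    type: tensorrt
    device: 0 # index of the GPU to use

model:
  path: /trt-models/yolov7-tiny-416.trt
  input_tensor: nchw
  input_pixel_format: rgb
  width: 416
  height: 416
```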
## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)
View File
@ -137,7 +137,10 @@ docker run -d \
:::caution
Due to limitations in Home Assistant Operating System, utilizing external storage for recordings or snapshots requires [modifying udev rules manually](https://community.home-assistant.io/t/solved-mount-usb-drive-in-hassio-to-be-used-on-the-media-folder-with-udev-customization/258406/46).
There are important limitations in Home Assistant Operating System to be aware of:
- Utilizing external storage for recordings or snapshots requires [modifying udev rules manually](https://community.home-assistant.io/t/solved-mount-usb-drive-in-hassio-to-be-used-on-the-media-folder-with-udev-customization/258406/46).
- AMD GPUs are not supported because HA OS does not include the mesa driver.
- Nvidia GPUs are not supported because addons do not support the nvidia runtime.
:::
@ -168,6 +171,13 @@ There are several versions of the addon available:
## Home Assistant Supervised
:::caution
There are important limitations in Home Assistant Supervised to be aware of:
- Nvidia GPUs are not supported because addons do not support the nvidia runtime.
:::
:::tip
If possible, it is recommended to run Frigate standalone in Docker and use [Frigate's Proxy Addon](https://github.com/blakeblackshear/frigate-hass-addons/blob/main/frigate_proxy/README.md).
View File
@ -0,0 +1,77 @@
---
id: configuring_go2rtc
title: Configuring go2rtc
---
Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features:
- WebRTC or MSE for live viewing with higher resolutions and frame rates than the jsmpeg stream, which is limited to the detect stream
- RTSP (instead of RTMP) relay for use with Home Assistant or other consumers to reduce the number of connections to your camera streams
# Set up a go2rtc stream
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#module-streams), not just rtsp.
```yaml
go2rtc:
streams:
back:
- rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
```
The easiest live view to get working is MSE. After adding this to the config, restart Frigate and try to watch the live stream by selecting MSE in the dropdown after clicking on the camera.
### What if my video doesn't play?
If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log.

If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#codecs-madness) in the go2rtc documentation.

If you are not able to switch your camera settings from H265 to H264, or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration:
```yaml
go2rtc:
streams:
back:
- rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
- "ffmpeg:back#video=h264"
```
Some camera streams may need to use the ffmpeg module in go2rtc. This has the downside of slower startup times, but is compatible with more stream types.
```yaml
go2rtc:
streams:
back:
- ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
```
If you can see the video but do not have audio, this is most likely because your camera's audio stream is not AAC. If possible, update your camera's audio settings to AAC. If your cameras do not support AAC audio, you will need to tell go2rtc to re-encode the audio to AAC on demand if you want audio. This will use additional CPU and add some latency. To add AAC audio on demand, you can update your go2rtc config as follows:
```yaml
go2rtc:
streams:
back:
- rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
- "ffmpeg:back#audio=aac"
```
If you need to convert **both** the audio and video streams, you can use the following:
```yaml
go2rtc:
streams:
back:
- rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
- "ffmpeg:back#video=h264#audio=aac"
```
When using the ffmpeg module, you would add AAC audio like this:
```yaml
go2rtc:
streams:
back:
- "ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2#video=copy#audio=copy#audio=aac"
```
## Next steps
1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera).
1. You may also prefer to [set up WebRTC](/configuration/live#webrtc-extra-configuration) for slightly lower latency than MSE. Note that WebRTC only supports h264 and specific audio formats.
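For the first step, here is a sketch of a camera pulling both roles from the local go2rtc RTSP relay. It assumes the go2rtc stream is named `back` and that go2rtc serves RTSP on port 8554 inside the container:

```yaml
go2rtc:
  streams:
    back:
      - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2

cameras:
  back:
    ffmpeg:
      inputs:
        - path: rtsp://127.0.0.1:8554/back # pull from the restream instead of the camera
          input_args: preset-rtsp-restream
          roles:
            - record
            - detect
```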
View File
@ -1,10 +0,0 @@
---
id: events_setup
title: Setting Up Events
---
[Snapshots](../configuration/snapshots.md) and/or [Recordings](../configuration/record.md) must be enabled for events to be created for detected objects.
## Limiting Events to Areas of Interest
The best way to limit events to areas of interest is to use [zones](../configuration/zones.md) along with `required_zones` for events and snapshots to only have events created in areas of interest.
View File
@ -1,79 +1,32 @@
---
id: getting_started
title: Creating a config file
title: Getting started
---
This guide walks through the steps to build a configuration file for Frigate. It assumes that you already have an environment setup as described in [Installation](../frigate/installation.md). You should also configure your cameras according to the [camera setup guide](/guides/camera_setup)
This guide walks through the steps to build a configuration file for Frigate. It assumes that you already have an environment setup as described in [Installation](../frigate/installation.md). You should also configure your cameras according to the [camera setup guide](/frigate/camera_setup). Pay particular attention to the section on choosing a detect resolution.
### Step 1: Configure the MQTT server (Optional)
### Step 1: Add a detect stream
Use of a functioning MQTT server is optional for Frigate, but required for the Home Assistant integration. Start by adding the mqtt section at the top level in your config:
First we will add the detect stream for the camera:
If using mqtt:
```yaml
mqtt:
host: <ip of your mqtt server>
```
If not using mqtt:
```yaml
mqtt:
enabled: False
```
If using the Mosquitto Addon in Home Assistant, a username and password is required. For example:
```yaml
mqtt:
host: <ip of your mqtt server>
user: <username>
password: <password>
```
Frigate supports many configuration options for mqtt. See the [configuration reference](../configuration/index.md#full-configuration-reference) for more info.
### Step 2: Configure detectors
By default, Frigate will use a single CPU detector. If you have a USB Coral, you will need to add a detectors section to your config.
```yaml
mqtt:
host: <ip of your mqtt server>
detectors:
coral:
type: edgetpu
device: usb
```
More details on available detectors can be found [here](../configuration/detectors.md).
### Step 3: Add a minimal camera configuration
Now let's add the first camera:
```yaml
mqtt:
host: <ip of your mqtt server>
detectors:
coral:
type: edgetpu
device: usb
cameras:
camera_1: # <------ Name the camera
ffmpeg:
inputs:
- path: rtsp://10.0.10.10:554/rtsp # <----- Update for your camera
- path: rtsp://10.0.10.10:554/rtsp # <----- The stream you want to use for detection
roles:
- detect
detect:
enabled: False # <---- disable detection until you have a working camera feed
width: 1280 # <---- update for your camera's resolution
height: 720 # <---- update for your camera's resolution
```
### Step 4: Start Frigate
### Step 2: Start Frigate
At this point you should be able to start Frigate and see the video feed in the UI.
@ -81,41 +34,48 @@ If you get an error image from the camera, this means ffmpeg was not able to get
FFmpeg arguments for other types of cameras can be found [here](../configuration/camera_specific.md).
### Step 5: Configure hardware acceleration (optional)
### Step 3: Configure hardware acceleration (recommended)
Now that you have a working camera configuration, you want to set up hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration.md) config reference for examples applicable to your hardware.
In order to best evaluate the performance impact of hardware acceleration, it is recommended to temporarily disable detection.
Here is an example configuration with hardware acceleration configured for Intel processors with an integrated GPU using the [preset](../configuration/ffmpeg_presets.md):
```yaml
mqtt: ...
detectors: ...
cameras:
camera_1:
ffmpeg: ...
detect:
enabled: False
...
```
Here is an example configuration with hardware acceleration configured:
```yaml
mqtt: ...
detectors: ...
cameras:
camera_1:
ffmpeg:
inputs: ...
hwaccel_args: -c:v h264_v4l2m2m
hwaccel_args: preset-vaapi
detect: ...
```
### Step 6: Setup motion masks
### Step 4: Configure detectors
By default, Frigate will use a single CPU detector. If you have a USB Coral, you will need to add a detectors section to your config.
```yaml
mqtt: ...
detectors: # <---- add detectors
coral:
type: edgetpu
device: usb
cameras:
camera_1:
ffmpeg: ...
detect:
enabled: True # <---- turn on detection
...
```
More details on available detectors can be found [here](../configuration/detectors.md).
Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they will need to be added according to the [configuration file reference](../configuration/index.md#full-configuration-reference).
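For example, a hypothetical sketch that adds two more tracked objects (each label must exist in your model's labelmap):

```yaml
objects:
  track:
    - person
    - car
    - dog
```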
### Step 5: Setup motion masks
Now that you have optimized your configuration for decoding the video stream, you will want to determine where to implement motion masks. To do this, navigate to the camera in the UI, select "Debug" at the top, and enable "Motion boxes" in the options below the video feed. Watch for areas that continuously trigger unwanted motion to be detected. Common areas to mask include camera timestamps and trees that frequently blow in the wind. The goal is to avoid wasting object detection cycles looking at these areas.
@ -131,7 +91,7 @@ Your configuration should look similar to this now.
```yaml
mqtt:
host: mqtt.local
enabled: False
detectors:
coral:
@ -153,9 +113,13 @@ cameras:
- 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432
```
### Step 7: Enable recording (optional)
### Step 6: Enable recording and/or snapshots
To enable recording video, add the `record` role to a stream and enable it in the config.
In order to see Events in the Frigate UI, either snapshots or record will need to be enabled.
#### Record
To enable recording video, add the `record` role to a stream and enable it in the config. If record is disabled in the config, turning it on via the UI will not have any effect.
```yaml
mqtt: ...
@ -169,7 +133,7 @@ cameras:
- path: rtsp://10.0.10.10:554/rtsp
roles:
- detect
- path: rtsp://10.0.10.10:554/high_res_stream # <----- Add high res stream
- path: rtsp://10.0.10.10:554/high_res_stream # <----- Add stream you want to record from
roles:
- record
detect: ...
@ -182,9 +146,9 @@ If you don't have separate streams for detect and record, you would just add the
By default, Frigate will retain video of all events for 10 days. The full set of options for recording can be found [here](../configuration/index.md#full-configuration-reference).
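As a sketch, the default described above corresponds to config along these lines (see the configuration reference for the full set of fields):

```yaml
record:
  enabled: True
  events:
    retain:
      default: 10 # days to keep recordings of events
```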
### Step 8: Enable snapshots (optional)
#### Snapshots
To enable snapshots of your events, just enable it in the config.
To enable snapshots of your events, just enable it in the config. Snapshots are taken from the detect stream because it is the only stream decoded.
```yaml
mqtt: ...
@ -201,3 +165,10 @@ cameras:
```
By default, Frigate will retain snapshots of all events for 10 days. The full set of options for snapshots can be found [here](../configuration/index.md#full-configuration-reference).
### Step 7: Follow-up guides
Now that you have a working install, you can use the following guides for additional features:
1. [Configuring go2rtc](configuring_go2rtc) - Additional live view options and RTSP relay
2. [Home Assistant Integration](../integrations/home-assistant.md) - Integrate with Home Assistant
View File
@ -1,6 +1,6 @@
---
id: reverse_proxy
title: Setting up a Reverse Proxy
title: Setting up a reverse proxy
---
This guide outlines the basic configuration steps needed to expose your Frigate UI to the internet.
@ -8,6 +8,7 @@ A common way of accomplishing this is to use a reverse proxy webserver between y
A reverse proxy accepts HTTP requests from the public internet and redirects them transparently to internal webserver(s) on your network.
The suggested steps are:
- **Configure** a 'proxy' HTTP webserver (such as [Apache2](https://httpd.apache.org/docs/current/) or [NPM](https://github.com/NginxProxyManager/nginx-proxy-manager)) and only expose ports 80/443 from this webserver to the internet
- **Encrypt** content from the proxy webserver by installing SSL (such as with [Let's Encrypt](https://letsencrypt.org/)). Note that SSL is then not required on your Frigate webserver as the proxy encrypts all requests for you
- **Restrict** access to your Frigate instance at the proxy using, for example, password authentication
@ -31,6 +32,7 @@ On Debian Apache2 the configuration file will be named along the lines of `/etc/
Make life easier for yourself by presenting your Frigate interface as a DNS sub-domain rather than as a sub-folder of your main domain.
Here we access Frigate via https://cctv.mydomain.co.uk
```xml
<VirtualHost *:443>
ServerName cctv.mydomain.co.uk
@ -38,7 +40,7 @@ Here we access Frigate via https://cctv.mydomain.co.uk
ProxyPreserveHost On
ProxyPass "/" "http://frigatepi.local:5000/"
ProxyPassReverse "/" "http://frigatepi.local:5000/"
ProxyPass /ws ws://frigatepi.local:5000/ws
ProxyPassReverse /ws ws://frigatepi.local:5000/ws
View File
@ -168,6 +168,16 @@ Events from the database. Accepts the following query string parameters:
| `include_thumbnails` | int | Include thumbnails in the response (0 or 1) |
| `in_progress` | int | Limit to events in progress (0 or 1) |
### `GET /api/timeline`
Timeline of key moments for one or more events from the database. Accepts the following query string parameters:
| param | Type | Description |
| -------------------- | ---- | --------------------------------------------- |
| `camera` | str | Name of camera |
| `source_id` | str | ID of tracked object |
| `limit` | int | Limit the number of events returned |
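For example, `GET /api/timeline?camera=back&source_id=<event id>` would return the timeline entries for a single tracked object on the `back` camera.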
### `GET /api/events/summary`
Returns summary data for events in the database. Used by the Home Assistant integration.
@ -233,6 +243,10 @@ Accepts the following query string parameters, but they are only applied when an
Returns the snapshot image from the latest event for the given camera and label combo. Using `any` as the label will return the latest thumbnail regardless of type.
### `GET /api/<camera_name>/recording/<frame_time>/snapshot.png`
Returns the snapshot image from the specific point in that camera's recordings.
### `GET /clips/<camera>-<id>.jpg`
JPG snapshot for the given camera and event id.
View File
@ -16,6 +16,8 @@ See the [MQTT integration
documentation](https://www.home-assistant.io/integrations/mqtt/) for more
details.
In addition, MQTT must be enabled in your Frigate configuration file and Frigate must be connected to the same MQTT server as Home Assistant for many of the entities created by the integration to function.
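A minimal sketch of the Frigate side of that requirement (the host value is a placeholder):

```yaml
mqtt:
  enabled: True
  host: <ip of the broker Home Assistant uses>
```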
### Integration installation
Available via HACS as a default repository. To install:
@ -30,7 +32,7 @@ Home Assistant > HACS > Integrations > "Explore & Add Integrations" > Frigate
- Then add/configure the integration:
```
Home Assistant > Configuration > Integrations > Add Integration > Frigate
Home Assistant > Settings > Devices & Services > Add Integration > Frigate
```
Note: You will also need
@ -64,13 +66,13 @@ Home Assistant > Configuration > Integrations > Frigate > Options
| Option | Description |
| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| RTSP URL Template | A [jinja2](https://jinja.palletsprojects.com/) template that is used to override the standard RTMP stream URL (e.g. for use with reverse proxies). This option is only shown to users who have [advanced mode](https://www.home-assistant.io/blog/2019/07/17/release-96/#advanced-mode) enabled. See [RTSP streams](#streams) below. |
| RTSP URL Template | A [jinja2](https://jinja.palletsprojects.com/) template that is used to override the standard RTSP stream URL (e.g. for use with reverse proxies). This option is only shown to users who have [advanced mode](https://www.home-assistant.io/blog/2019/07/17/release-96/#advanced-mode) enabled. See [RTSP streams](#streams) below. |
## Entities Provided
| Platform | Description |
| --------------- | --------------------------------------------------------------------------------- |
| `camera` | Live camera stream (requires RTMP), camera for image of the last detected object. |
| `camera` | Live camera stream (requires RTSP), camera for image of the last detected object. |
| `sensor` | States to monitor Frigate performance, object counts for all zones and cameras. |
| `switch` | Switch entities to toggle detection, recordings and snapshots. |
| `binary_sensor` | A "motion" binary sensor entity per camera/zone/object. |
View File
@ -39,6 +39,12 @@ You cannot use the `environment_vars` section of your configuration file to set
Once your API key is configured, you can submit examples directly from the events page in Frigate using the `SEND TO FRIGATE+` button.
:::note
Snapshots must be enabled to be able to submit examples to Frigate+.
:::
![Send To Plus](/img/send-to-plus.png)
### Annotate and verify
View File
@ -17,16 +17,18 @@ ffmpeg:
record: preset-record-generic-audio-aac
```
### I can't view events or recordings in the Web UI.
Ensure your cameras send h264 encoded video, or [transcode them](/configuration/restream.md).
You can open `chrome://media-internals/` in another tab and then try to playback, the media internals page will give information about why playback is failing.
### My mjpeg stream or snapshots look green and crazy
This almost always means that the width/height defined for your camera are not correct. Double check the resolution with VLC or another player. Also make sure you don't have the width and height values backwards.
![mismatched-resolution](/img/mismatched-resolution-min.jpg)
### I can't view events or recordings in the Web UI.
Ensure your cameras send h264 encoded video, or [transcode them](/configuration/restream.md).
### "[mov,mp4,m4a,3gp,3g2,mj2 @ 0x5639eeb6e140] moov atom not found"
These messages in the logs are expected in certain situations. Frigate checks the integrity of the recordings before storing them. Occasionally these cached files will be invalid and are cleaned up automatically.
docs/package-lock.json generated
File diff suppressed because it is too large
View File
@ -14,12 +14,12 @@
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "^2.2.0",
"@docusaurus/preset-classic": "^2.2.0",
"@docusaurus/core": "^2.4.0",
"@docusaurus/preset-classic": "^2.4.0",
"@mdx-js/react": "^1.6.22",
"clsx": "^1.2.1",
"raw-loader": "^4.0.2",
"prism-react-renderer": "^1.3.5",
"raw-loader": "^4.0.2",
"react": "^17.0.2",
"react-dom": "^17.0.2"
},
@ -36,8 +36,8 @@
]
},
"devDependencies": {
"@types/react": "^17.0.0",
"@docusaurus/module-type-aliases": "2.2.0"
"@docusaurus/module-type-aliases": "^2.4.0",
"@types/react": "^17.0.0"
},
"engines": {
"node": ">=16.14"
View File
@ -4,11 +4,11 @@ module.exports = {
"frigate/index",
"frigate/hardware",
"frigate/installation",
"frigate/camera_setup",
],
Guides: [
"guides/camera_setup",
"guides/getting_started",
"guides/events_setup",
"guides/configuring_go2rtc",
"guides/false_positives",
"guides/ha_notifications",
"guides/stationary_objects",
View File
@ -3,6 +3,7 @@ import multiprocessing as mp
from multiprocessing.queues import Queue
from multiprocessing.synchronize import Event as MpEvent
import os
import shutil
import signal
import sys
from typing import Optional
@ -22,14 +23,14 @@ from frigate.object_detection import ObjectDetectProcess
from frigate.events import EventCleanup, EventProcessor
from frigate.http import create_app
from frigate.log import log_process, root_configurer
from frigate.models import Event, Recordings
from frigate.models import Event, Recordings, Timeline
from frigate.object_processing import TrackedObjectProcessor
from frigate.output import output_frames
from frigate.plus import PlusApi
from frigate.record import RecordingCleanup, RecordingMaintainer
from frigate.restream import RestreamApi
from frigate.stats import StatsEmitter, stats_init
from frigate.storage import StorageMaintainer
from frigate.timeline import TimelineProcessor
from frigate.version import VERSION
from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog
@ -116,6 +117,9 @@ class FrigateApp:
if not "werkzeug" in self.config.logger.logs:
logging.getLogger("werkzeug").setLevel("ERROR")
if not "ws4py" in self.config.logger.logs:
logging.getLogger("ws4py").setLevel("ERROR")
def init_queues(self) -> None:
# Queues for clip processing
self.event_queue: Queue = mp.Queue()
@ -132,6 +136,9 @@ class FrigateApp:
# Queue for recordings info
self.recordings_info_queue: Queue = mp.Queue()
# Queue for timeline events
self.timeline_queue: Queue = mp.Queue()
def init_database(self) -> None:
# Migrate DB location
old_db_path = DEFAULT_DB_PATH
@ -151,7 +158,7 @@ class FrigateApp:
migrate_db.close()
self.db = SqliteQueueDatabase(self.config.database.path)
models = [Event, Recordings]
models = [Event, Recordings, Timeline]
self.db.bind(models)
def init_stats(self) -> None:
@ -169,18 +176,13 @@ class FrigateApp:
self.plus_api,
)
def init_restream(self) -> None:
self.restream = RestreamApi(self.config)
self.restream.add_cameras()
def init_dispatcher(self) -> None:
comms: list[Communicator] = []
if self.config.mqtt.enabled:
comms.append(MqttClient(self.config))
self.ws_client = WebSocketClient(self.config)
comms.append(self.ws_client)
comms.append(WebSocketClient(self.config))
self.dispatcher = Dispatcher(self.config, self.camera_metrics, comms)
def start_detectors(self) -> None:
@ -288,12 +290,19 @@ class FrigateApp:
capture_process.start()
logger.info(f"Capture process started for {name}: {capture_process.pid}")
def start_timeline_processor(self) -> None:
self.timeline_processor = TimelineProcessor(
self.config, self.timeline_queue, self.stop_event
)
self.timeline_processor.start()
def start_event_processor(self) -> None:
self.event_processor = EventProcessor(
self.config,
self.camera_metrics,
self.event_queue,
self.event_processed_queue,
self.timeline_queue,
self.stop_event,
)
self.event_processor.start()
@ -329,6 +338,22 @@ class FrigateApp:
self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
self.frigate_watchdog.start()
def check_shm(self) -> None:
available_shm = round(shutil.disk_usage("/dev/shm").total / 1000000, 1)
min_req_shm = 30
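# Rough estimate: each camera buffers about 9 YUV420 frames in /dev/shm
# (width * height * 1.5 bytes per frame) plus a small fixed overhead,
# converted to MiB below.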
for _, camera in self.config.cameras.items():
min_req_shm += round(
(camera.detect.width * camera.detect.height * 1.5 * 9 + 270480)
/ 1048576,
1,
)
if available_shm < min_req_shm:
logger.warning(
f"The current SHM size of {available_shm}MB is too small, recommend increasing it to at least {min_req_shm}MB."
)
def start(self) -> None:
self.init_logger()
logger.info(f"Starting Frigate ({VERSION})")
@ -362,7 +387,6 @@ class FrigateApp:
print(e)
self.log_process.terminate()
sys.exit(1)
self.init_restream()
self.start_detectors()
self.start_video_output_processor()
self.start_detected_frames_processor()
@ -371,13 +395,14 @@ class FrigateApp:
self.start_storage_maintainer()
self.init_stats()
self.init_web_server()
self.start_timeline_processor()
self.start_event_processor()
self.start_event_cleanup()
self.start_recording_maintainer()
self.start_recording_cleanup()
self.start_stats_emitter()
self.start_watchdog()
# self.zeroconf = broadcast_zeroconf(self.config.mqtt.client_id)
self.check_shm()
def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
self.stop()
@ -396,7 +421,17 @@ class FrigateApp:
logger.info(f"Stopping...")
self.stop_event.set()
self.ws_client.stop()
for detector in self.detectors.values():
detector.stop()
# Empty the detection queue and set the events for all requests
while not self.detection_queue.empty():
connection_id = self.detection_queue.get(timeout=1)
self.detection_out_events[connection_id].set()
self.detection_queue.close()
self.detection_queue.join_thread()
self.dispatcher.stop()
self.detected_frames_processor.join()
self.event_processor.join()
self.event_cleanup.join()
@ -406,10 +441,20 @@ class FrigateApp:
self.frigate_watchdog.join()
self.db.stop()
for detector in self.detectors.values():
detector.stop()
while len(self.detection_shms) > 0:
shm = self.detection_shms.pop()
shm.close()
shm.unlink()
for queue in [
self.event_queue,
self.event_processed_queue,
self.video_output_queue,
self.detected_frames_queue,
self.recordings_info_queue,
self.log_queue,
]:
while not queue.empty():
queue.get_nowait()
queue.close()
queue.join_thread()
View File
@ -27,6 +27,11 @@ class Communicator(ABC):
"""Pass receiver so communicators can pass commands."""
pass
@abstractmethod
def stop(self) -> None:
"""Stop the communicator."""
pass
class Dispatcher:
"""Handle communication between Frigate and communicators."""
@ -72,6 +77,10 @@ class Dispatcher:
for comm in self.comms:
comm.publish(topic, payload, retain)
def stop(self) -> None:
for comm in self.comms:
comm.stop()
def _on_detect_command(self, camera_name: str, payload: str) -> None:
"""Callback for detect topic."""
detect_settings = self.config.cameras[camera_name].detect
View File
@ -35,6 +35,9 @@ class MqttClient(Communicator): # type: ignore[misc]
f"{self.mqtt_config.topic_prefix}/{topic}", payload, retain=retain
)
def stop(self) -> None:
self.client.disconnect()
def _set_initial_topics(self) -> None:
"""Set initial state topics."""
for camera_name, camera in self.config.cameras.items():
View File
@ -95,3 +95,4 @@ class WebSocketClient(Communicator): # type: ignore[misc]
self.websocket_server.manager.join()
self.websocket_server.shutdown()
self.websocket_thread.join()
logger.info("Exiting websocket client...")
View File
@ -66,12 +66,37 @@ class LiveModeEnum(str, Enum):
webrtc = "webrtc"
class TimeFormatEnum(str, Enum):
browser = "browser"
hours12 = "12hour"
hours24 = "24hour"
class DateTimeStyleEnum(str, Enum):
full = "full"
long = "long"
medium = "medium"
short = "short"
class UIConfig(FrigateBaseModel):
live_mode: LiveModeEnum = Field(
default=LiveModeEnum.mse, title="Default Live Mode."
)
timezone: Optional[str] = Field(title="Override UI timezone.")
use_experimental: bool = Field(default=False, title="Experimental UI")
time_format: TimeFormatEnum = Field(
default=TimeFormatEnum.browser, title="Override UI time format."
)
date_style: DateTimeStyleEnum = Field(
default=DateTimeStyleEnum.short, title="Override UI dateStyle."
)
time_style: DateTimeStyleEnum = Field(
default=DateTimeStyleEnum.medium, title="Override UI timeStyle."
)
strftime_fmt: Optional[str] = Field(
default=None, title="Override date and time format using strftime syntax."
)
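# For reference, these UI fields map to user config along these lines
# (a hypothetical sketch; the values are the enum members defined above):
#
#   ui:
#     time_format: 24hour # browser, 12hour, or 24hour
#     date_style: short # full, long, medium, or short
#     time_style: medium
#     strftime_fmt: "%Y/%m/%d %H:%M" # optional; overrides the two styles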
class TelemetryConfig(FrigateBaseModel):
@ -139,8 +164,6 @@ class RecordConfig(FrigateBaseModel):
default=60,
title="Number of minutes to wait between cleanup runs.",
)
# deprecated - to be removed in a future version
retain_days: Optional[float] = Field(title="Recording retention period in days.")
retain: RecordRetainConfig = Field(
default_factory=RecordRetainConfig, title="Record retention settings."
)
@ -370,9 +393,18 @@ class BirdseyeCameraConfig(BaseModel):
)
FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning"]
# Note: Setting threads to less than 2 caused several issues with recording segments
# https://github.com/blakeblackshear/frigate/issues/5659
FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning", "-threads", "2"]
FFMPEG_INPUT_ARGS_DEFAULT = "preset-rtsp-generic"
DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "rawvideo", "-pix_fmt", "yuv420p"]
DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
"-threads",
"2",
"-f",
"rawvideo",
"-pix_fmt",
"yuv420p",
]
RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-rtmp-generic"
RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic"
@ -749,16 +781,6 @@ def verify_valid_live_stream_name(
)
def verify_old_retain_config(camera_config: CameraConfig) -> None:
"""Leave log if old retain_days is used."""
if not camera_config.record.retain_days is None:
logger.warning(
"The 'retain_days' config option has been DEPRECATED and will be removed in a future version. Please use the 'days' setting under 'retain'"
)
if camera_config.record.retain.days == 0:
camera_config.record.retain.days = camera_config.record.retain_days
def verify_recording_retention(camera_config: CameraConfig) -> None:
"""Verify that recording retention modes are ranked correctly."""
rank_map = {
@ -965,7 +987,6 @@ class FrigateConfig(FrigateBaseModel):
verify_config_roles(camera_config)
verify_valid_live_stream_name(config, camera_config)
verify_old_retain_config(camera_config)
verify_recording_retention(camera_config)
verify_recording_segments_setup_with_reasonable_time(camera_config)
verify_zone_objects_are_tracked(camera_config)
View File
@ -23,6 +23,13 @@ class InputTensorEnum(str, Enum):
nhwc = "nhwc"
class ModelTypeEnum(str, Enum):
ssd = "ssd"
yolox = "yolox"
yolov5 = "yolov5"
yolov8 = "yolov8"
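# Selected via `model_type` in the model config, e.g. (hypothetical path):
#
#   model:
#     path: /path/to/yolox-model.xml
#     model_type: yolox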
class ModelConfig(BaseModel):
path: Optional[str] = Field(title="Custom Object detection model path.")
labelmap_path: Optional[str] = Field(title="Label map for custom object detector.")
@ -37,6 +44,9 @@ class ModelConfig(BaseModel):
input_pixel_format: PixelFormatEnum = Field(
default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format"
)
model_type: ModelTypeEnum = Field(
default=ModelTypeEnum.ssd, title="Object Detection Model Type"
)
_merged_labelmap: Optional[Dict[int, str]] = PrivateAttr()
_colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr()
View File
@ -13,12 +13,19 @@ from .detector_config import BaseDetectorConfig
logger = logging.getLogger(__name__)
plugin_modules = [
importlib.import_module(name)
for finder, name, ispkg in pkgutil.iter_modules(
plugins.__path__, plugins.__name__ + "."
)
]
_included_modules = pkgutil.iter_modules(plugins.__path__, plugins.__name__ + ".")
plugin_modules = []
for _, name, _ in _included_modules:
try:
# currently openvino may fail when importing
# on an arm device with 64 KiB page size.
plugin_modules.append(importlib.import_module(name))
except ImportError as e:
logger.error(f"Error importing detector runtime: {e}")
api_types = {det.type_key: det for det in DetectionApi.__subclasses__()}
View File
@ -5,7 +5,11 @@ from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from typing import Literal
from pydantic import Extra, Field
import tflite_runtime.interpreter as tflite
try:
from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError:
from tensorflow.lite.python.interpreter import Interpreter
logger = logging.getLogger(__name__)
@ -22,7 +26,7 @@ class CpuTfl(DetectionApi):
type_key = DETECTOR_KEY
def __init__(self, detector_config: CpuDetectorConfig):
self.interpreter = tflite.Interpreter(
self.interpreter = Interpreter(
model_path=detector_config.model.path or "/cpu_model.tflite",
num_threads=detector_config.num_threads or 3,
)
View File
@ -5,8 +5,11 @@ from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from typing import Literal
from pydantic import Extra, Field
import tflite_runtime.interpreter as tflite
from tflite_runtime.interpreter import load_delegate
try:
from tflite_runtime.interpreter import Interpreter, load_delegate
except ModuleNotFoundError:
from tensorflow.lite.python.interpreter import Interpreter, load_delegate
logger = logging.getLogger(__name__)
@ -33,7 +36,7 @@ class EdgeTpuTfl(DetectionApi):
logger.info(f"Attempting to load TPU as {device_config['device']}")
edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
logger.info("TPU found")
self.interpreter = tflite.Interpreter(
self.interpreter = Interpreter(
model_path=detector_config.model.path or "/edgetpu_model.tflite",
experimental_delegates=[edge_tpu_delegate],
)
View File
@ -3,7 +3,7 @@ import numpy as np
import openvino.runtime as ov
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
from typing import Literal
from pydantic import Extra, Field
@ -24,12 +24,18 @@ class OvDetector(DetectionApi):
def __init__(self, detector_config: OvDetectorConfig):
self.ov_core = ov.Core()
self.ov_model = self.ov_core.read_model(detector_config.model.path)
self.ov_model_type = detector_config.model.model_type
self.h = detector_config.model.height
self.w = detector_config.model.width
self.interpreter = self.ov_core.compile_model(
model=self.ov_model, device_name=detector_config.device
)
logger.info(f"Model Input Shape: {self.interpreter.input(0).shape}")
self.output_indexes = 0
while True:
try:
tensor_shape = self.interpreter.output(self.output_indexes).shape
@ -38,29 +44,131 @@ class OvDetector(DetectionApi):
except:
logger.info(f"Model has {self.output_indexes} Output Tensors")
break
if self.ov_model_type == ModelTypeEnum.yolox:
self.num_classes = tensor_shape[2] - 5
logger.info(f"YOLOX model has {self.num_classes} classes")
self.set_strides_grids()
def set_strides_grids(self):
grids = []
expanded_strides = []
strides = [8, 16, 32]
hsizes = [self.h // stride for stride in strides]
wsizes = [self.w // stride for stride in strides]
for hsize, wsize, stride in zip(hsizes, wsizes, strides):
xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
grids.append(grid)
shape = grid.shape[:2]
expanded_strides.append(np.full((*shape, 1), stride))
self.grids = np.concatenate(grids, 1)
self.expanded_strides = np.concatenate(expanded_strides, 1)
## Takes in class ID, confidence score, and array of [x, y, w, h] that describes detection position,
## returns an array that's easily passable back to Frigate.
def process_yolo(self, class_id, conf, pos):
return [
class_id, # class ID
conf, # confidence score
(pos[1] - (pos[3] / 2)) / self.h, # y_min
(pos[0] - (pos[2] / 2)) / self.w, # x_min
(pos[1] + (pos[3] / 2)) / self.h, # y_max
(pos[0] + (pos[2] / 2)) / self.w, # x_max
]
def detect_raw(self, tensor_input):
infer_request = self.interpreter.create_infer_request()
infer_request.infer([tensor_input])
results = infer_request.get_output_tensor()
if self.ov_model_type == ModelTypeEnum.ssd:
results = infer_request.get_output_tensor()
detections = np.zeros((20, 6), np.float32)
i = 0
for object_detected in results.data[0, 0, :]:
if object_detected[0] != -1:
logger.debug(object_detected)
if object_detected[2] < 0.1 or i == 20:
break
detections[i] = [
object_detected[1], # Label ID
float(object_detected[2]), # Confidence
object_detected[4], # y_min
object_detected[3], # x_min
object_detected[6], # y_max
object_detected[5], # x_max
]
i += 1
detections = np.zeros((20, 6), np.float32)
i = 0
for object_detected in results.data[0, 0, :]:
if object_detected[0] != -1:
logger.debug(object_detected)
if object_detected[2] < 0.1 or i == 20:
break
detections[i] = [
object_detected[1], # Label ID
float(object_detected[2]), # Confidence
object_detected[4], # y_min
object_detected[3], # x_min
object_detected[6], # y_max
object_detected[5], # x_max
]
i += 1
return detections
elif self.ov_model_type == ModelTypeEnum.yolox:
out_tensor = infer_request.get_output_tensor()
# [x, y, h, w, box_score, class_no_1, ..., class_no_80],
results = out_tensor.data
results[..., :2] = (results[..., :2] + self.grids) * self.expanded_strides
results[..., 2:4] = np.exp(results[..., 2:4]) * self.expanded_strides
image_pred = results[0, ...]
return detections
class_conf = np.max(
image_pred[:, 5 : 5 + self.num_classes], axis=1, keepdims=True
)
class_pred = np.argmax(image_pred[:, 5 : 5 + self.num_classes], axis=1)
class_pred = np.expand_dims(class_pred, axis=1)
conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= 0.3).squeeze()
# Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
dets = np.concatenate((image_pred[:, :5], class_conf, class_pred), axis=1)
dets = dets[conf_mask]
ordered = dets[dets[:, 5].argsort()[::-1]][:20]
detections = np.zeros((20, 6), np.float32)
for i, object_detected in enumerate(ordered):
detections[i] = self.process_yolo(
object_detected[6], object_detected[5], object_detected[:4]
)
return detections
elif self.ov_model_type == ModelTypeEnum.yolov8:
out_tensor = infer_request.get_output_tensor()
results = out_tensor.data[0]
output_data = np.transpose(results)
scores = np.max(output_data[:, 4:], axis=1)
if len(scores) == 0:
return np.zeros((20, 6), np.float32)
scores = np.expand_dims(scores, axis=1)
# add scores to the last column
dets = np.concatenate((output_data, scores), axis=1)
# filter out lines with scores below threshold
dets = dets[dets[:, -1] > 0.5, :]
# limit to top 20 scores, descending order
ordered = dets[dets[:, -1].argsort()[::-1]][:20]
detections = np.zeros((20, 6), np.float32)
for i, object_detected in enumerate(ordered):
detections[i] = self.process_yolo(
np.argmax(object_detected[4:-1]),
object_detected[-1],
object_detected[:4],
)
return detections
elif self.ov_model_type == ModelTypeEnum.yolov5:
out_tensor = infer_request.get_output_tensor()
output_data = out_tensor.data[0]
# filter out lines with scores below threshold
conf_mask = (output_data[:, 4] >= 0.5).squeeze()
output_data = output_data[conf_mask]
# limit to top 20 scores, descending order
ordered = output_data[output_data[:, 4].argsort()[::-1]][:20]
detections = np.zeros((20, 6), np.float32)
for i, object_detected in enumerate(ordered):
detections[i] = self.process_yolo(
np.argmax(object_detected[5:]),
object_detected[4],
object_detected[:4],
)
return detections
View File
@ -3,14 +3,14 @@ import logging
import os
import queue
import threading
import time
from pathlib import Path
from peewee import fn
from frigate.config import EventsConfig, FrigateConfig, RecordConfig
from frigate.config import EventsConfig, FrigateConfig
from frigate.const import CLIPS_DIR
from frigate.models import Event
from frigate.timeline import TimelineSourceEnum
from frigate.types import CameraMetricsTypes
from multiprocessing.queues import Queue
@ -48,6 +48,7 @@ class EventProcessor(threading.Thread):
camera_processes: dict[str, CameraMetricsTypes],
event_queue: Queue,
event_processed_queue: Queue,
timeline_queue: Queue,
stop_event: MpEvent,
):
threading.Thread.__init__(self)
@ -56,6 +57,7 @@ class EventProcessor(threading.Thread):
self.camera_processes = camera_processes
self.event_queue = event_queue
self.event_processed_queue = event_processed_queue
self.timeline_queue = timeline_queue
self.events_in_process: Dict[str, Event] = {}
self.stop_event = stop_event
@ -67,12 +69,22 @@ class EventProcessor(threading.Thread):
while not self.stop_event.is_set():
try:
event_type, camera, event_data = self.event_queue.get(timeout=10)
event_type, camera, event_data = self.event_queue.get(timeout=1)
except queue.Empty:
continue
logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
self.timeline_queue.put(
(
camera,
TimelineSourceEnum.tracked_object,
event_type,
self.events_in_process.get(event_data["id"]),
event_data,
)
)
event_config: EventsConfig = self.config.cameras[camera].record.events
if event_type == "start":
View File
@ -1,14 +1,52 @@
"""Handles inserting and maintaining ffmpeg presets."""
import logging
import os
from typing import Any
from frigate.version import VERSION
from frigate.const import BTBN_PATH
from frigate.util import vainfo_hwaccel
logger = logging.getLogger(__name__)
class LibvaGpuSelector:
"Automatically selects the correct libva GPU."
_selected_gpu = None
def get_selected_gpu(self) -> str:
"""Get selected libva GPU."""
if not os.path.exists("/dev/dri"):
return ""
if self._selected_gpu:
return self._selected_gpu
devices = list(filter(lambda d: d.startswith("render"), os.listdir("/dev/dri")))
if len(devices) < 2:
self._selected_gpu = "/dev/dri/renderD128"
return self._selected_gpu
for device in devices:
check = vainfo_hwaccel(device_name=device)
logger.debug(f"{device} return vainfo status code: {check.returncode}")
if check.returncode == 0:
self._selected_gpu = f"/dev/dri/{device}"
return self._selected_gpu
return ""
TIMEOUT_PARAM = "-timeout" if os.path.exists(BTBN_PATH) else "-stimeout"
_gpu_selector = LibvaGpuSelector()
_user_agent_args = [
"-user_agent",
f"FFmpeg Frigate/{VERSION}",
@ -23,7 +61,7 @@ PRESETS_HW_ACCEL_DECODE = {
"-hwaccel",
"vaapi",
"-hwaccel_device",
"/dev/dri/renderD128",
_gpu_selector.get_selected_gpu(),
"-hwaccel_output_format",
"vaapi",
],
@ -31,7 +69,7 @@ PRESETS_HW_ACCEL_DECODE = {
"-hwaccel",
"qsv",
"-qsv_device",
"/dev/dri/renderD128",
_gpu_selector.get_selected_gpu(),
"-hwaccel_output_format",
"qsv",
"-c:v",
@ -43,7 +81,7 @@ PRESETS_HW_ACCEL_DECODE = {
"-hwaccel",
"qsv",
"-qsv_device",
"/dev/dri/renderD128",
_gpu_selector.get_selected_gpu(),
"-hwaccel_output_format",
"qsv",
"-c:v",
@ -54,47 +92,36 @@ PRESETS_HW_ACCEL_DECODE = {
"cuda",
"-hwaccel_output_format",
"cuda",
"-extra_hw_frames",
"2",
"-c:v",
"h264_cuvid",
],
"preset-nvidia-h265": [
"-hwaccel",
"cuda",
"-hwaccel_output_format",
"cuda",
"-extra_hw_frames",
"2",
"-c:v",
"hevc_cuvid",
],
"preset-nvidia-mjpeg": [
"-hwaccel",
"cuda",
"-hwaccel_output_format",
"cuda",
"-extra_hw_frames",
"2",
"-c:v",
"mjpeg_cuvid",
],
}
PRESETS_HW_ACCEL_SCALE = {
"preset-rpi-32-h264": "-r {0} -s {1}x{2} -f rawvideo -pix_fmt yuv420p",
"preset-rpi-64-h264": "-r {0} -s {1}x{2} -f rawvideo -pix_fmt yuv420p",
"preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p -f rawvideo",
"preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p -f rawvideo",
"preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p -f rawvideo",
"preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p -f rawvideo",
"preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p -f rawvideo",
"preset-rpi-32-h264": "-r {0} -s {1}x{2}",
"preset-rpi-64-h264": "-r {0} -s {1}x{2}",
"preset-vaapi": "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=yuv420p",
"preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-nvidia-h264": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"preset-nvidia-h265": "-r {0} -vf fps={0},scale_cuda=w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
"default": "-r {0} -s {1}x{2}",
}
PRESETS_HW_ACCEL_ENCODE = {
"preset-rpi-32-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m -g 50 -bf 0 {1}",
"preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m -g 50 -bf 0 {1}",
"preset-rpi-32-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
"preset-rpi-64-h264": "ffmpeg -hide_banner {0} -c:v h264_v4l2m2m {1}",
"preset-vaapi": "ffmpeg -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {2} {0} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {1}",
"preset-intel-qsv-h264": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
"preset-intel-qsv-h265": "ffmpeg -hide_banner {0} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {1}",
"preset-nvidia-h264": "ffmpeg -hide_banner {0} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {1}",
@ -127,7 +154,9 @@ def parse_preset_hardware_acceleration_scale(
scale = PRESETS_HW_ACCEL_SCALE.get(arg, "")
if scale:
return scale.format(fps, width, height).split(" ")
scale = scale.format(fps, width, height).split(" ")
scale.extend(detect_args)
return scale
else:
scale = scale.format(fps, width, height).split(" ")
scale.extend(detect_args)
@ -142,6 +171,7 @@ def parse_preset_hardware_acceleration_encode(arg: Any, input: str, output: str)
return PRESETS_HW_ACCEL_ENCODE.get(arg, PRESETS_HW_ACCEL_ENCODE["default"]).format(
input,
output,
_gpu_selector.get_selected_gpu(),
)
@ -231,6 +261,13 @@ PRESETS_INPUT = {
"1",
],
"preset-rtsp-restream": _user_agent_args
+ [
"-rtsp_transport",
"tcp",
TIMEOUT_PARAM,
"5000000",
],
"preset-rtsp-restream-low-latency": _user_agent_args
+ [
"-rtsp_transport",
"tcp",
View File
@ -33,7 +33,7 @@ from playhouse.shortcuts import model_to_dict
from frigate.config import FrigateConfig
from frigate.const import CLIPS_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
from frigate.models import Event, Recordings
from frigate.models import Event, Recordings, Timeline
from frigate.object_processing import TrackedObject
from frigate.stats import stats_snapshot
from frigate.util import (
@ -111,6 +111,7 @@ def events_summary():
Event.select(
Event.camera,
Event.label,
Event.sub_label,
fn.strftime(
"%Y-%m-%d",
fn.datetime(
@ -124,6 +125,7 @@ def events_summary():
.group_by(
Event.camera,
Event.label,
Event.sub_label,
fn.strftime(
"%Y-%m-%d",
fn.datetime(
@ -184,6 +186,18 @@ def send_to_plus(id):
logger.error(message)
return make_response(jsonify({"success": False, "message": message}), 404)
if event.end_time is None:
logger.error(f"Unable to load clean png for in-progress event: {event.id}")
return make_response(
jsonify(
{
"success": False,
"message": "Unable to load clean png for in-progress event",
}
),
400,
)
if event.plus_id:
message = "Already submitted to plus"
logger.error(message)
@ -202,6 +216,15 @@ def send_to_plus(id):
400,
)
if image is None or image.size == 0:
logger.error(f"Unable to load clean png for event: {event.id}")
return make_response(
jsonify(
{"success": False, "message": "Unable to load clean png for event"}
),
400,
)
try:
plus_id = current_app.plus_api.upload_image(image, event.camera)
except Exception as ex:
@ -301,7 +324,9 @@ def get_sub_labels():
sub_labels.remove(None)
if split_joined:
for label in sub_labels:
original_labels = sub_labels.copy()
for label in original_labels:
if "," in label:
sub_labels.remove(label)
parts = label.split(",")
@ -310,6 +335,7 @@ def get_sub_labels():
if not (part.strip()) in sub_labels:
sub_labels.append(part.strip())
sub_labels.sort()
return jsonify(sub_labels)
@ -388,6 +414,42 @@ def event_thumbnail(id, max_cache_age=2592000):
return response
@bp.route("/timeline")
def timeline():
camera = request.args.get("camera", "all")
source_id = request.args.get("source_id", type=str)
limit = request.args.get("limit", 100)
clauses = []
selected_columns = [
Timeline.timestamp,
Timeline.camera,
Timeline.source,
Timeline.source_id,
Timeline.class_type,
Timeline.data,
]
if camera != "all":
clauses.append((Timeline.camera == camera))
if source_id:
clauses.append((Timeline.source_id == source_id))
if len(clauses) == 0:
clauses.append((True))
timeline = (
Timeline.select(*selected_columns)
.where(reduce(operator.and_, clauses))
.order_by(Timeline.timestamp.asc())
.limit(limit)
)
return jsonify([model_to_dict(t) for t in timeline])
@bp.route("/<camera_name>/<label>/best.jpg")
@bp.route("/<camera_name>/<label>/thumbnail.jpg")
def label_thumbnail(camera_name, label):
@ -617,7 +679,13 @@ def events():
sub_label_clauses.append((Event.sub_label.is_null()))
for label in filtered_sub_labels:
sub_label_clauses.append((Event.sub_label.cast("text") % f"*{label}*"))
sub_label_clauses.append(
(Event.sub_label.cast("text") == label)
) # include exact matches
# include this label when part of a list
sub_label_clauses.append((Event.sub_label.cast("text") % f"*{label},*"))
sub_label_clauses.append((Event.sub_label.cast("text") % f"*, {label}*"))
sub_label_clause = reduce(operator.or_, sub_label_clauses)
clauses.append((sub_label_clause))
@ -892,6 +960,53 @@ def latest_frame(camera_name):
return "Camera named {} not found".format(camera_name), 404
@bp.route("/<camera_name>/recordings/<frame_time>/snapshot.png")
def get_snapshot_from_recording(camera_name: str, frame_time: str):
if camera_name not in current_app.frigate_config.cameras:
return "Camera named {} not found".format(camera_name), 404
frame_time = float(frame_time)
recording_query = (
Recordings.select()
.where(
((frame_time > Recordings.start_time) & (frame_time < Recordings.end_time))
)
.where(Recordings.camera == camera_name)
)
try:
recording: Recordings = recording_query.get()
time_in_segment = frame_time - recording.start_time
ffmpeg_cmd = [
"ffmpeg",
"-hide_banner",
"-loglevel",
"warning",
"-ss",
f"00:00:{time_in_segment}",
"-i",
recording.path,
"-frames:v",
"1",
"-c:v",
"png",
"-f",
"image2pipe",
"-",
]
process = sp.run(
ffmpeg_cmd,
capture_output=True,
)
response = make_response(process.stdout)
response.headers["Content-Type"] = "image/png"
return response
except DoesNotExist:
return "Recording not found for {} at {}".format(camera_name, frame_time), 404
@bp.route("/recordings/storage", methods=["GET"])
def get_recordings_storage_usage():
recording_stats = stats_snapshot(
@ -1275,12 +1390,12 @@ def ffprobe():
output.append(
{
"return_code": ffprobe.returncode,
"stderr": json.loads(ffprobe.stderr.decode("unicode_escape").strip())
if ffprobe.stderr.decode()
else {},
"stderr": ffprobe.stderr.decode("unicode_escape").strip()
if ffprobe.returncode != 0
else "",
"stdout": json.loads(ffprobe.stdout.decode("unicode_escape").strip())
if ffprobe.stdout.decode()
else {},
if ffprobe.returncode == 0
else "",
}
)
View File
@ -2,11 +2,16 @@
import logging
import threading
import os
import signal
import queue
import multiprocessing as mp
from multiprocessing.queues import Queue
from logging import handlers
from typing import Optional
from types import FrameType
from setproctitle import setproctitle
from typing import Deque
from typing import Deque, Optional
from types import FrameType
from collections import deque
from frigate.util import clean_camera_user_pass
@ -34,10 +39,21 @@ def log_process(log_queue: Queue) -> None:
threading.current_thread().name = f"logger"
setproctitle("frigate.logger")
listener_configurer()
stop_event = mp.Event()
def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
while True:
try:
record = log_queue.get(timeout=5)
record = log_queue.get(timeout=1)
except (queue.Empty, KeyboardInterrupt):
if stop_event.is_set():
break
continue
logger = logging.getLogger(record.name)
logger.handle(record)
View File
@ -32,6 +32,15 @@ class Event(Model): # type: ignore[misc]
plus_id = CharField(max_length=30)
class Timeline(Model): # type: ignore[misc]
timestamp = DateTimeField()
camera = CharField(index=True, max_length=20)
source = CharField(index=True, max_length=20) # ex: tracked object, audio, external
source_id = CharField(index=True, max_length=30)
class_type = CharField(max_length=50) # ex: entered_zone, audio_heard
data = JSONField() # ex: tracked object id, region, box, etc.
class Recordings(Model): # type: ignore[misc]
id = CharField(null=False, primary_key=True, max_length=30)
camera = CharField(index=True, max_length=20)
View File
@ -20,8 +20,8 @@ class MotionDetector:
config.frame_height,
config.frame_height * frame_shape[1] // frame_shape[0],
)
self.avg_frame = np.zeros(self.motion_frame_size, np.float)
self.avg_delta = np.zeros(self.motion_frame_size, np.float)
self.avg_frame = np.zeros(self.motion_frame_size, np.float32)
self.avg_delta = np.zeros(self.motion_frame_size, np.float32)
self.motion_frame_count = 0
self.frame_counter = 0
resized_mask = cv2.resize(

View File

@ -59,6 +59,3 @@ ignore_errors = false
[mypy-frigate.watchdog]
ignore_errors = false
disallow_untyped_calls = false
[mypy-frigate.zeroconf]
ignore_errors = false

View File

@ -88,6 +88,7 @@ def run_detector(
stop_event = mp.Event()
def receiveSignal(signalNumber, frame):
logger.info("Signal to exit detection process...")
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
@ -104,7 +105,7 @@ def run_detector(
while not stop_event.is_set():
try:
connection_id = detection_queue.get(timeout=5)
connection_id = detection_queue.get(timeout=1)
except queue.Empty:
continue
input_frame = frame_manager.get(
@ -125,6 +126,8 @@ def run_detector(
avg_speed.value = (avg_speed.value * 9 + duration) / 10
logger.info("Exited detection process...")
class ObjectDetectProcess:
def __init__(
@ -144,6 +147,9 @@ class ObjectDetectProcess:
self.start_or_restart()
def stop(self):
# if the process has already exited on its own, just return
if self.detect_process and self.detect_process.exitcode:
return
self.detect_process.terminate()
logging.info("Waiting for detection process to exit gracefully...")
self.detect_process.join(timeout=30)
@ -151,6 +157,7 @@ class ObjectDetectProcess:
logging.info("Detection process didnt exit. Force killing...")
self.detect_process.kill()
self.detect_process.join()
logging.info("Detection process has exited...")
def start_or_restart(self):
self.detection_start.value = 0.0
@ -173,12 +180,13 @@ class ObjectDetectProcess:
class RemoteObjectDetector:
def __init__(self, name, labels, detection_queue, event, model_config):
def __init__(self, name, labels, detection_queue, event, model_config, stop_event):
self.labels = labels
self.name = name
self.fps = EventsPerSecond()
self.detection_queue = detection_queue
self.event = event
self.stop_event = stop_event
self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
self.np_shm = np.ndarray(
(1, model_config.height, model_config.width, 3),
@ -193,11 +201,14 @@ class RemoteObjectDetector:
def detect(self, tensor_input, threshold=0.4):
detections = []
if self.stop_event.is_set():
return detections
# copy input to shared memory
self.np_shm[:] = tensor_input[:]
self.event.clear()
self.detection_queue.put(self.name)
result = self.event.wait(timeout=10.0)
result = self.event.wait(timeout=5.0)
# if it timed out
if result is None:

View File

@ -901,7 +901,7 @@ class TrackedObjectProcessor(threading.Thread):
current_tracked_objects,
motion_boxes,
regions,
) = self.tracked_objects_queue.get(True, 10)
) = self.tracked_objects_queue.get(True, 1)
except queue.Empty:
continue

View File

@ -38,16 +38,10 @@ class FFMpegConverter:
quality: int,
birdseye_rtsp: bool = False,
):
if birdseye_rtsp:
if os.path.exists(BIRDSEYE_PIPE):
os.remove(BIRDSEYE_PIPE)
self.bd_pipe = None
os.mkfifo(BIRDSEYE_PIPE, mode=0o777)
stdin = os.open(BIRDSEYE_PIPE, os.O_RDONLY | os.O_NONBLOCK)
self.bd_pipe = os.open(BIRDSEYE_PIPE, os.O_WRONLY)
os.close(stdin)
else:
self.bd_pipe = None
if birdseye_rtsp:
self.recreate_birdseye_pipe()
ffmpeg_cmd = [
"ffmpeg",
@ -80,14 +74,36 @@ class FFMpegConverter:
start_new_session=True,
)
def recreate_birdseye_pipe(self) -> None:
if self.bd_pipe:
os.close(self.bd_pipe)
if os.path.exists(BIRDSEYE_PIPE):
os.remove(BIRDSEYE_PIPE)
os.mkfifo(BIRDSEYE_PIPE, mode=0o777)
stdin = os.open(BIRDSEYE_PIPE, os.O_RDONLY | os.O_NONBLOCK)
self.bd_pipe = os.open(BIRDSEYE_PIPE, os.O_WRONLY)
os.close(stdin)
self.reading_birdseye = False
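The throwaway non-blocking read handle is the key trick here: opening a FIFO for writing normally blocks until a reader attaches, so a temporary O_NONBLOCK read end lets the write end open immediately. A standalone sketch with a hypothetical path:

import os

path = "/tmp/example_pipe"  # hypothetical; Frigate uses BIRDSEYE_PIPE
if os.path.exists(path):
    os.remove(path)
os.mkfifo(path, mode=0o777)

# open a temporary non-blocking read end so the write-end open cannot block
rd = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
wr = os.open(path, os.O_WRONLY)
os.close(rd)  # the writer stays usable; real readers can attach later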
def write(self, b) -> None:
self.process.stdin.write(b)
if self.bd_pipe:
try:
os.write(self.bd_pipe, b)
self.reading_birdseye = True
except BrokenPipeError:
# catch error when no one is listening
if self.reading_birdseye:
# we know the pipe was being read from and now it is not
# so we should recreate the pipe to ensure no partially-read
# frames exist
logger.debug(
"Recreating the birdseye pipe because it was read from and now is not"
)
self.recreate_birdseye_pipe()
return
def read(self, length):
@ -109,14 +125,15 @@ class FFMpegConverter:
class BroadcastThread(threading.Thread):
def __init__(self, camera, converter, websocket_server):
def __init__(self, camera, converter, websocket_server, stop_event):
super(BroadcastThread, self).__init__()
self.camera = camera
self.converter = converter
self.websocket_server = websocket_server
self.stop_event = stop_event
def run(self):
while True:
while not self.stop_event.is_set():
buf = self.converter.read(65536)
if buf:
manager = self.websocket_server.manager
@ -312,7 +329,6 @@ class BirdsEyeFrameManager:
# update each position in the layout
for position, camera in enumerate(self.camera_layout, start=0):
# if this camera was removed, replace it or clear it
if camera in removed_cameras:
# if replacing this camera with a newly added one
@ -426,7 +442,7 @@ def output_frames(config: FrigateConfig, video_output_queue):
cam_config.live.quality,
)
broadcasters[camera] = BroadcastThread(
camera, converters[camera], websocket_server
camera, converters[camera], websocket_server, stop_event
)
if config.birdseye.enabled:
@ -439,7 +455,7 @@ def output_frames(config: FrigateConfig, video_output_queue):
config.birdseye.restream,
)
broadcasters["birdseye"] = BroadcastThread(
"birdseye", converters["birdseye"], websocket_server
"birdseye", converters["birdseye"], websocket_server, stop_event
)
websocket_thread.start()
@ -463,7 +479,7 @@ def output_frames(config: FrigateConfig, video_output_queue):
current_tracked_objects,
motion_boxes,
regions,
) = video_output_queue.get(True, 10)
) = video_output_queue.get(True, 1)
except queue.Empty:
continue

View File

@ -100,25 +100,17 @@ class RecordingMaintainer(threading.Thread):
for camera in grouped_recordings.keys():
segment_count = len(grouped_recordings[camera])
if segment_count > keep_count:
####
# Need to find a way to tell if these are aging out based on retention settings or if the system is overloaded.
####
# logger.warning(
# f"Too many recording segments in cache for {camera}. Keeping the {keep_count} most recent segments out of {segment_count}, discarding the rest..."
# )
logger.warning(
f"Unable to keep up with recording segments in cache for {camera}. Keeping the {keep_count} most recent segments out of {segment_count} and discarding the rest..."
)
to_remove = grouped_recordings[camera][:-keep_count]
for f in to_remove:
cache_path = f["cache_path"]
####
# Need to find a way to tell if these are aging out based on retention settings or if the system is overloaded.
####
# logger.warning(f"Discarding a recording segment: {cache_path}")
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]
for camera, recordings in grouped_recordings.items():
# clear out all the recording info for old frames
while (
len(self.recordings_info[camera]) > 0
@ -178,10 +170,12 @@ class RecordingMaintainer(threading.Thread):
else:
if duration == -1:
logger.warning(
f"Failed to probe corrupt segment {f}: {p.returncode} - {p.stderr}"
f"Failed to probe corrupt segment {cache_path}: {p.returncode} - {p.stderr}"
)
logger.warning(f"Discarding a corrupt recording segment: {f}")
logger.warning(
f"Discarding a corrupt recording segment: {cache_path}"
)
Path(cache_path).unlink(missing_ok=True)
continue
@ -227,6 +221,19 @@ class RecordingMaintainer(threading.Thread):
cache_path,
record_mode,
)
# if the segment doesn't overlap with an event, drop it once it ends
# more than the configured pre_capture before the most recent frame
else:
pre_capture = self.config.cameras[
camera
].record.events.pre_capture
most_recently_processed_frame_time = self.recordings_info[
camera
][-1][0]
retain_cutoff = most_recently_processed_frame_time - pre_capture
if end_time.timestamp() < retain_cutoff:
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
# else retain days includes this segment
else:
record_mode = self.config.cameras[camera].record.retain.mode
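A worked example of the new cutoff (numbers are hypothetical): with pre_capture = 5 s and the most recent processed frame at t = 1000.0, any cached segment that ended before t = 995.0 can no longer overlap a future event and is deleted:

pre_capture = 5
most_recently_processed_frame_time = 1000.0
retain_cutoff = most_recently_processed_frame_time - pre_capture  # 995.0

segment_end = 990.0
if segment_end < retain_cutoff:
    pass  # unlink the cached segment and drop its cached end time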
@ -411,6 +418,10 @@ class RecordingCleanup(threading.Thread):
logger.debug(f"Checking tmp clip {p}.")
if p.stat().st_mtime < (datetime.datetime.now().timestamp() - 60 * 1):
logger.debug("Deleting tmp clip.")
# empty contents of file before unlinking https://github.com/blakeblackshear/frigate/issues/4769
with open(p, "w"):
pass
p.unlink(missing_ok=True)
def expire_recordings(self):

View File

@ -1,33 +0,0 @@
"""Controls go2rtc restream."""
import logging
import requests
from frigate.config import FrigateConfig
from frigate.const import BIRDSEYE_PIPE
from frigate.ffmpeg_presets import (
parse_preset_hardware_acceleration_encode,
)
logger = logging.getLogger(__name__)
class RestreamApi:
"""Control go2rtc relay API."""
def __init__(self, config: FrigateConfig) -> None:
self.config: FrigateConfig = config
def add_cameras(self) -> None:
"""Add cameras to go2rtc."""
self.relays: dict[str, str] = {}
if self.config.birdseye.restream:
self.relays[
"birdseye"
] = f"exec:{parse_preset_hardware_acceleration_encode(self.config.ffmpeg.hwaccel_args, f'-f rawvideo -pix_fmt yuv420p -video_size {self.config.birdseye.width}x{self.config.birdseye.height} -r 10 -i {BIRDSEYE_PIPE}', '-rtsp_transport tcp -f rtsp {output}')}"
for name, path in self.relays.items():
params = {"src": path, "name": name}
requests.put("http://127.0.0.1:1984/api/streams", params=params)

View File

@ -23,7 +23,6 @@ logger = logging.getLogger(__name__)
def get_latest_version(config: FrigateConfig) -> str:
if not config.telemetry.version_check:
return "disabled"
@ -53,6 +52,7 @@ def stats_init(
"detectors": detectors,
"started": int(time.time()),
"latest_frigate_version": get_latest_version(config),
"last_updated": int(time.time()),
}
return stats_tracking
@ -244,6 +244,7 @@ def stats_snapshot(
"latest_version": stats_tracking["latest_frigate_version"],
"storage": {},
"temperatures": get_temperatures(),
"last_updated": int(time.time()),
}
for path in [RECORD_DIR, CLIPS_DIR, CACHE_DIR, "/dev/shm"]:
@ -281,8 +282,10 @@ class StatsEmitter(threading.Thread):
def run(self) -> None:
time.sleep(10)
while not self.stop_event.wait(self.config.mqtt.stats_interval):
logger.debug("Starting stats collection")
stats = stats_snapshot(
self.config, self.stats_tracking, self.hwaccel_errors
)
self.dispatcher.publish("stats", json.dumps(stats), retain=False)
logger.info(f"Exiting watchdog...")
logger.debug("Finished stats collection")
logger.info(f"Exiting stats emitter...")

View File

@ -179,7 +179,6 @@ class StorageMaintainer(threading.Thread):
def run(self):
"""Check every 5 minutes if storage needs to be cleaned up."""
while not self.stop_event.wait(300):
if not self.camera_storage_stats or True in [
r["needs_refresh"] for r in self.camera_storage_stats.values()
]:

View File

@ -36,3 +36,14 @@ class TestUserPassCleanup(unittest.TestCase):
"""Test that no change is made to path with no special characters."""
escaped = escape_special_characters(self.rtsp_with_pass)
assert escaped == self.rtsp_with_pass
class TestUserPassMasking(unittest.TestCase):
def setUp(self) -> None:
self.rtsp_log_message = "Did you mean file:rtsp://user:password@192.168.1.3:554"
def test_rtsp_in_log_message(self):
"""Test that the rtsp url in a log message is espaced."""
escaped = clean_camera_user_pass(self.rtsp_log_message)
print(f"The escaped is {escaped}")
assert escaped == "Did you mean file:rtsp://*:*@192.168.1.3:554"

View File

@ -673,7 +673,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].detect.max_disappeared == 5 * 5
def test_motion_frame_height_wont_go_below_120(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -702,7 +701,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].motion.frame_height == 50
def test_motion_contour_area_dynamic(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -731,7 +729,6 @@ class TestConfig(unittest.TestCase):
assert round(runtime_config.cameras["back"].motion.contour_area) == 30
def test_merge_labelmap(self):
config = {
"mqtt": {"host": "mqtt"},
"model": {"labelmap": {7: "truck"}},
@ -761,7 +758,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.model.merged_labelmap[7] == "truck"
def test_default_labelmap_empty(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -790,7 +786,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.model.merged_labelmap[0] == "person"
def test_default_labelmap(self):
config = {
"mqtt": {"host": "mqtt"},
"model": {"width": 320, "height": 320},
@ -820,7 +815,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.model.merged_labelmap[0] == "person"
def test_fails_on_invalid_role(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -849,7 +843,6 @@ class TestConfig(unittest.TestCase):
self.assertRaises(ValidationError, lambda: FrigateConfig(**config))
def test_fails_on_missing_role(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -880,7 +873,6 @@ class TestConfig(unittest.TestCase):
self.assertRaises(ValueError, lambda: frigate_config.runtime_config)
def test_works_on_missing_role_multiple_cams(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -929,7 +921,6 @@ class TestConfig(unittest.TestCase):
runtime_config = frigate_config.runtime_config
def test_global_detect(self):
config = {
"mqtt": {"host": "mqtt"},
"detect": {"max_disappeared": 1},
@ -959,7 +950,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].detect.height == 1080
def test_default_detect(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -983,7 +973,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].detect.height == 720
def test_global_detect_merge(self):
config = {
"mqtt": {"host": "mqtt"},
"detect": {"max_disappeared": 1, "height": 720},
@ -1014,7 +1003,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].detect.width == 1920
def test_global_snapshots(self):
config = {
"mqtt": {"host": "mqtt"},
"snapshots": {"enabled": True},
@ -1042,7 +1030,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].snapshots.height == 100
def test_default_snapshots(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -1066,7 +1053,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].snapshots.quality == 70
def test_global_snapshots_merge(self):
config = {
"mqtt": {"host": "mqtt"},
"snapshots": {"bounding_box": False, "height": 300},
@ -1096,7 +1082,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].snapshots.enabled
def test_global_rtmp_disabled(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -1119,7 +1104,6 @@ class TestConfig(unittest.TestCase):
assert not runtime_config.cameras["back"].rtmp.enabled
def test_default_not_rtmp(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -1142,7 +1126,6 @@ class TestConfig(unittest.TestCase):
assert not runtime_config.cameras["back"].rtmp.enabled
def test_global_rtmp_merge(self):
config = {
"mqtt": {"host": "mqtt"},
"rtmp": {"enabled": False},
@ -1169,7 +1152,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].rtmp.enabled
def test_global_rtmp_default(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -1196,7 +1178,6 @@ class TestConfig(unittest.TestCase):
assert not runtime_config.cameras["back"].rtmp.enabled
def test_global_jsmpeg(self):
config = {
"mqtt": {"host": "mqtt"},
"live": {"quality": 4},
@ -1220,7 +1201,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].live.quality == 4
def test_default_live(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -1243,7 +1223,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].live.quality == 8
def test_global_live_merge(self):
config = {
"mqtt": {"host": "mqtt"},
"live": {"quality": 4, "height": 480},
@ -1271,7 +1250,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].live.height == 480
def test_global_timestamp_style(self):
config = {
"mqtt": {"host": "mqtt"},
"timestamp_style": {"position": "bl"},
@ -1295,7 +1273,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].timestamp_style.position == "bl"
def test_default_timestamp_style(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
@ -1318,7 +1295,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].timestamp_style.position == "tl"
def test_global_timestamp_style_merge(self):
config = {
"mqtt": {"host": "mqtt"},
"rtmp": {"enabled": False},
@ -1345,7 +1321,6 @@ class TestConfig(unittest.TestCase):
assert runtime_config.cameras["back"].timestamp_style.thickness == 4
def test_allow_retain_to_be_a_decimal(self):
config = {
"mqtt": {"host": "mqtt"},
"snapshots": {"retain": {"default": 1.5}},

140
frigate/timeline.py Normal file
View File

@ -0,0 +1,140 @@
"""Record events for object, audio, etc. detections."""
import logging
import threading
import queue
from enum import Enum
from typing import Any
from frigate.config import FrigateConfig
from frigate.models import Timeline
from multiprocessing.queues import Queue
from multiprocessing.synchronize import Event as MpEvent
logger = logging.getLogger(__name__)
class TimelineSourceEnum(str, Enum):
# api = "api"
# audio = "audio"
tracked_object = "tracked_object"
class TimelineProcessor(threading.Thread):
"""Handle timeline queue and update DB."""
def __init__(
self,
config: FrigateConfig,
queue: Queue,
stop_event: MpEvent,
) -> None:
threading.Thread.__init__(self)
self.name = "timeline_processor"
self.config = config
self.queue = queue
self.stop_event = stop_event
def run(self) -> None:
while not self.stop_event.is_set():
try:
(
camera,
input_type,
event_type,
prev_event_data,
event_data,
) = self.queue.get(timeout=1)
except queue.Empty:
continue
if input_type == TimelineSourceEnum.tracked_object:
self.handle_object_detection(
camera, event_type, prev_event_data, event_data
)
def handle_object_detection(
self,
camera: str,
event_type: str,
prev_event_data: dict[Any, Any],
event_data: dict[Any, Any],
) -> None:
"""Handle object detection."""
camera_config = self.config.cameras[camera]
if event_type == "start":
Timeline.insert(
timestamp=event_data["frame_time"],
camera=camera,
source="tracked_object",
source_id=event_data["id"],
class_type="visible",
data={
"box": [
event_data["box"][0] / camera_config.detect.width,
event_data["box"][1] / camera_config.detect.height,
event_data["box"][2] / camera_config.detect.width,
event_data["box"][3] / camera_config.detect.height,
],
"label": event_data["label"],
"region": [
event_data["region"][0] / camera_config.detect.width,
event_data["region"][1] / camera_config.detect.height,
event_data["region"][2] / camera_config.detect.width,
event_data["region"][3] / camera_config.detect.height,
],
},
).execute()
elif (
event_type == "update"
and prev_event_data["current_zones"] != event_data["current_zones"]
and len(event_data["current_zones"]) > 0
):
Timeline.insert(
timestamp=event_data["frame_time"],
camera=camera,
source="tracked_object",
source_id=event_data["id"],
class_type="entered_zone",
data={
"box": [
event_data["box"][0] / camera_config.detect.width,
event_data["box"][1] / camera_config.detect.height,
event_data["box"][2] / camera_config.detect.width,
event_data["box"][3] / camera_config.detect.height,
],
"label": event_data["label"],
"region": [
event_data["region"][0] / camera_config.detect.width,
event_data["region"][1] / camera_config.detect.height,
event_data["region"][2] / camera_config.detect.width,
event_data["region"][3] / camera_config.detect.height,
],
"zones": event_data["current_zones"],
},
).execute()
elif event_type == "end":
Timeline.insert(
timestamp=event_data["frame_time"],
camera=camera,
source="tracked_object",
source_id=event_data["id"],
class_type="gone",
data={
"box": [
event_data["box"][0] / camera_config.detect.width,
event_data["box"][1] / camera_config.detect.height,
event_data["box"][2] / camera_config.detect.width,
event_data["box"][3] / camera_config.detect.height,
],
"label": event_data["label"],
"region": [
event_data["region"][0] / camera_config.detect.width,
event_data["region"][1] / camera_config.detect.height,
event_data["region"][2] / camera_config.detect.width,
event_data["region"][3] / camera_config.detect.height,
],
},
).execute()
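Boxes and regions are stored normalized to the detect resolution so the UI can overlay them on footage of any size. A worked example of the arithmetic above (numbers hypothetical):

detect_width, detect_height = 1280, 720
box = (320, 180, 640, 540)  # pixel coords: xmin, ymin, xmax, ymax

relative_box = [
    box[0] / detect_width,   # 0.25
    box[1] / detect_height,  # 0.25
    box[2] / detect_width,   # 0.50
    box[3] / detect_height,  # 0.75
]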

View File

@ -29,3 +29,4 @@ class StatsTrackingTypes(TypedDict):
detectors: dict[str, ObjectDetectProcess]
started: int
latest_frigate_version: str
last_updated: int

View File

@ -14,7 +14,7 @@ from abc import ABC, abstractmethod
from collections import Counter
from collections.abc import Mapping
from multiprocessing import shared_memory
from typing import Any, AnyStr, Tuple
from typing import Any, AnyStr, Optional, Tuple
import cv2
import numpy as np
@ -722,7 +722,7 @@ def load_labels(path, encoding="utf-8"):
def clean_camera_user_pass(line: str) -> str:
"""Removes user and password from line."""
if line.startswith("rtsp://"):
if "rtsp://" in line:
return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", line)
else:
return re.sub(REGEX_HTTP_CAMERA_USER_PASS, "user=*&password=*", line)
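The substring check means credentials are now masked even when the rtsp url is embedded mid-string, as in ffmpeg log output; the new TestUserPassMasking test earlier in this diff exercises exactly this case:

clean_camera_user_pass("Did you mean file:rtsp://user:password@192.168.1.3:554")
# -> "Did you mean file:rtsp://*:*@192.168.1.3:554"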
@ -772,7 +772,6 @@ def get_docker_memlimit_bytes() -> int:
# check running a supported cgroups version
if get_cgroups_version() == "cgroup2":
memlimit_command = ["cat", "/sys/fs/cgroup/memory.max"]
p = sp.run(
@ -817,7 +816,6 @@ def get_cpu_stats() -> dict[str, dict]:
for line in lines:
stats = list(filter(lambda a: a != "", line.strip().split(" ")))
try:
if docker_memlimit > 0:
mem_res = int(stats[5])
mem_pct = str(
@ -926,6 +924,17 @@ def get_nvidia_gpu_stats() -> dict[str, str]:
"--format=csv",
]
if (
"CUDA_VISIBLE_DEVICES" in os.environ
and os.environ["CUDA_VISIBLE_DEVICES"].isdigit()
):
nvidia_smi_command.extend(["--id", os.environ["CUDA_VISIBLE_DEVICES"]])
elif (
"NVIDIA_VISIBLE_DEVICES" in os.environ
and os.environ["NVIDIA_VISIBLE_DEVICES"].isdigit()
):
nvidia_smi_command.extend(["--id", os.environ["NVIDIA_VISIBLE_DEVICES"]])
p = sp.run(
nvidia_smi_command,
encoding="ascii",
@ -965,9 +974,13 @@ def ffprobe_stream(path: str) -> sp.CompletedProcess:
return sp.run(ffprobe_cmd, capture_output=True)
def vainfo_hwaccel() -> sp.CompletedProcess:
def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
"""Run vainfo."""
ffprobe_cmd = ["vainfo"]
ffprobe_cmd = (
["vainfo"]
if not device_name
else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"]
)
return sp.run(ffprobe_cmd, capture_output=True)
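An illustrative call with the new optional parameter (the render node name is hypothetical):

vainfo_hwaccel("renderD129")
# runs: vainfo --display drm --device /dev/dri/renderD129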

View File

@ -104,7 +104,7 @@ def create_tensor_input(frame, model_config, region):
if cropped_frame.shape != (model_config.height, model_config.width, 3):
cropped_frame = cv2.resize(
cropped_frame,
dsize=(model_config.height, model_config.width),
dsize=(model_config.width, model_config.height),
interpolation=cv2.INTER_LINEAR,
)
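This fixes a transposed tuple: cv2.resize takes dsize as (width, height), the opposite of NumPy's (height, width) shape order, so the old code only behaved correctly for square models. A quick self-contained check:

import cv2
import numpy as np

frame = np.zeros((720, 1280, 3), np.uint8)     # shape: (height, width, channels)
resized = cv2.resize(frame, dsize=(320, 240))  # dsize: (width, height)
assert resized.shape == (240, 320, 3)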
@ -160,8 +160,8 @@ def capture_frames(
fps: mp.Value,
skipped_fps: mp.Value,
current_frame: mp.Value,
stop_event: mp.Event,
):
frame_size = frame_shape[0] * frame_shape[1]
frame_rate = EventsPerSecond()
frame_rate.start()
@ -177,6 +177,9 @@ def capture_frames(
try:
frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
except Exception as e:
# shutdown has been initiated
if stop_event.is_set():
break
logger.error(f"{camera_name}: Unable to read frames from ffmpeg process.")
if ffmpeg_process.poll() != None:
@ -269,7 +272,20 @@ class CameraWatchdog(threading.Thread):
self.logger.info("Waiting for ffmpeg to exit gracefully...")
self.ffmpeg_detect_process.communicate(timeout=30)
except sp.TimeoutExpired:
self.logger.info("FFmpeg didnt exit. Force killing...")
self.logger.info("FFmpeg did not exit. Force killing...")
self.ffmpeg_detect_process.kill()
self.ffmpeg_detect_process.communicate()
elif self.camera_fps.value >= (self.config.detect.fps + 10):
self.camera_fps.value = 0
self.logger.info(
f"{self.camera_name} exceeded fps limit. Exiting ffmpeg..."
)
self.ffmpeg_detect_process.terminate()
try:
self.logger.info("Waiting for ffmpeg to exit gracefully...")
self.ffmpeg_detect_process.communicate(timeout=30)
except sp.TimeoutExpired:
self.logger.info("FFmpeg did not exit. Force killing...")
self.ffmpeg_detect_process.kill()
self.ffmpeg_detect_process.communicate()
@ -327,6 +343,7 @@ class CameraWatchdog(threading.Thread):
self.frame_shape,
self.frame_queue,
self.camera_fps,
self.stop_event,
)
self.capture_thread.start()
@ -355,13 +372,16 @@ class CameraWatchdog(threading.Thread):
class CameraCapture(threading.Thread):
def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
def __init__(
self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps, stop_event
):
threading.Thread.__init__(self)
self.name = f"capture:{camera_name}"
self.camera_name = camera_name
self.frame_shape = frame_shape
self.frame_queue = frame_queue
self.fps = fps
self.stop_event = stop_event
self.skipped_fps = EventsPerSecond()
self.frame_manager = SharedMemoryFrameManager()
self.ffmpeg_process = ffmpeg_process
@ -379,6 +399,7 @@ class CameraCapture(threading.Thread):
self.fps,
self.skipped_fps,
self.current_frame,
self.stop_event,
)
@ -391,6 +412,9 @@ def capture_camera(name, config: CameraConfig, process_info):
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
threading.current_thread().name = f"capture:{name}"
setproctitle(f"frigate.capture:{name}")
frame_queue = process_info["frame_queue"]
camera_watchdog = CameraWatchdog(
name,
@ -445,7 +469,7 @@ def track_camera(
motion_contour_area,
)
object_detector = RemoteObjectDetector(
name, labelmap, detection_queue, result_connection, model_config
name, labelmap, detection_queue, result_connection, model_config, stop_event
)
object_tracker = ObjectTracker(config.detect)
@ -569,7 +593,6 @@ def process_frames(
stop_event,
exit_on_empty: bool = False,
):
fps = process_info["process_fps"]
detection_fps = process_info["detection_fps"]
current_frame_time = process_info["detection_frame"]
@ -585,7 +608,7 @@ def process_frames(
break
try:
frame_time = frame_queue.get(True, 10)
frame_time = frame_queue.get(True, 1)
except queue.Empty:
continue
@ -723,7 +746,6 @@ def process_frames(
selected_objects = []
for group in detected_object_groups.values():
# apply non-maxima suppression to suppress weak, overlapping bounding boxes
# o[2] is the box of the object: xmin, ymin, xmax, ymax
# apply max/min to ensure values do not exceed the known frame size
@ -771,6 +793,7 @@ def process_frames(
refining = True
else:
selected_objects.append(obj)
# set the detections list to only include top, complete objects
# and new detections
detections = selected_objects

View File

@ -1,62 +0,0 @@
import logging
import socket
from zeroconf import (
ServiceInfo,
NonUniqueNameException,
InterfaceChoice,
IPVersion,
Zeroconf,
)
logger = logging.getLogger(__name__)
ZEROCONF_TYPE = "_frigate._tcp.local."
# Taken from: http://stackoverflow.com/a/11735897
def get_local_ip() -> bytes:
"""Try to determine the local IP address of the machine."""
host_ip_str = ""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Use Google Public DNS server to determine own IP
sock.connect(("8.8.8.8", 80))
host_ip_str = sock.getsockname()[0]
except OSError:
try:
host_ip_str = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
host_ip_str = "127.0.0.1"
finally:
sock.close()
try:
host_ip_pton = socket.inet_pton(socket.AF_INET, host_ip_str)
except OSError:
host_ip_pton = socket.inet_pton(socket.AF_INET6, host_ip_str)
return host_ip_pton
def broadcast_zeroconf(frigate_id: str) -> Zeroconf:
zeroconf = Zeroconf(interfaces=InterfaceChoice.Default, ip_version=IPVersion.V4Only)
host_ip = get_local_ip()
info = ServiceInfo(
ZEROCONF_TYPE,
name=f"{frigate_id}.{ZEROCONF_TYPE}",
addresses=[host_ip],
port=5000,
)
logger.info("Starting Zeroconf broadcast")
try:
zeroconf.register_service(info)
except NonUniqueNameException:
logger.error(
"Frigate instance with identical name present in the local network"
)
return zeroconf

View File

@ -0,0 +1,48 @@
"""Peewee migrations -- 013_create_timeline_table.py.
Some examples (model - class or model name)::
> Model = migrator.orm['model_name'] # Return model in current state by name
> migrator.sql(sql) # Run custom SQL
> migrator.python(func, *args, **kwargs) # Run python code
> migrator.create_model(Model) # Create a model (could be used as decorator)
> migrator.remove_model(model, cascade=True) # Remove a model
> migrator.add_fields(model, **fields) # Add fields to a model
> migrator.change_fields(model, **fields) # Change fields
> migrator.remove_fields(model, *field_names, cascade=True)
> migrator.rename_field(model, old_field_name, new_field_name)
> migrator.rename_table(model, new_table_name)
> migrator.add_index(model, *col_names, unique=False)
> migrator.drop_index(model, *col_names)
> migrator.add_not_null(model, *field_names)
> migrator.drop_not_null(model, *field_names)
> migrator.add_default(model, field_name, default)
"""
import datetime as dt
import peewee as pw
from playhouse.sqlite_ext import *
from decimal import ROUND_HALF_EVEN
from frigate.models import Recordings
try:
import playhouse.postgres_ext as pw_pext
except ImportError:
pass
SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
migrator.sql(
'CREATE TABLE IF NOT EXISTS "timeline" ("timestamp" DATETIME NOT NULL, "camera" VARCHAR(20) NOT NULL, "source" VARCHAR(20) NOT NULL, "source_id" VARCHAR(30), "class_type" VARCHAR(50) NOT NULL, "data" JSON)'
)
migrator.sql('CREATE INDEX IF NOT EXISTS "timeline_camera" ON "timeline" ("camera")')
migrator.sql('CREATE INDEX IF NOT EXISTS "timeline_source" ON "timeline" ("source")')
migrator.sql('CREATE INDEX IF NOT EXISTS "timeline_source_id" ON "timeline" ("source_id")')
def rollback(migrator, database, fake=False, **kwargs):
pass

View File

@ -1,2 +1,2 @@
pylint == 2.15.*
black == 22.12.*
pylint == 2.17.*
black == 23.3.*

View File

@ -1,3 +1,3 @@
numpy == 1.19.*
numpy == 1.23.*
openvino == 2022.*
openvino-dev[tensorflow2] == 2022.*

View File

@ -3,7 +3,7 @@ Flask == 2.2.*
imutils == 0.5.*
matplotlib == 3.6.*
mypy == 0.942
numpy == 1.22.*
numpy == 1.23.*
opencv-python-headless == 4.5.5.*
paho-mqtt == 1.6.*
peewee == 3.15.*
@ -11,16 +11,14 @@ peewee_migrate == 1.6.*
psutil == 5.9.*
pydantic == 1.10.*
PyYAML == 6.0
pytz == 2022.7
pytz == 2023.3
tzlocal == 4.2
types-PyYAML == 6.0.*
requests == 2.28.*
types-requests == 2.28.*
scipy == 1.8.*
scipy == 1.10.*
setproctitle == 1.3.*
ws4py == 0.5.*
zeroconf == 0.47.*
# Openvino Library - Custom built with MYRIAD support
openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.2.0/openvino-2022.2.0-000-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64'
openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.2.0/openvino-2022.2.0-000-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64'
openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.2.0/openvino-2022.2.0-000-cp39-cp39-linux_armv7l.whl; platform_machine == 'armv7l'

View File

@ -1,2 +1,2 @@
scikit-build == 0.16.4
scikit-build == 0.17.1
nvidia-pyindex

3882
web/package-lock.json generated

File diff suppressed because it is too large

View File

@ -13,47 +13,46 @@
},
"dependencies": {
"@cycjimmy/jsmpeg-player": "^6.0.5",
"axios": "^1.2.2",
"axios": "^1.3.5",
"copy-to-clipboard": "3.3.3",
"date-fns": "^2.29.3",
"idb-keyval": "^6.2.0",
"immer": "^9.0.16",
"preact": "^10.11.3",
"immer": "^9.0.21",
"monaco-yaml": "^4.0.4",
"preact": "^10.13.2",
"preact-async-route": "^2.2.1",
"preact-router": "^4.1.0",
"react": "npm:@preact/compat@^17.1.2",
"react-dom": "npm:@preact/compat@^17.1.2",
"vite-plugin-monaco-editor": "^1.1.0",
"monaco-yaml": "^4.0.2",
"strftime": "^0.10.1",
"swr": "^1.3.0",
"video.js": "^7.20.3",
"videojs-playlist": "^5.0.0",
"videojs-seek-buttons": "^3.0.1"
"video.js": "^8.3.0",
"videojs-playlist": "^5.1.0",
"vite-plugin-monaco-editor": "^1.1.0"
},
"devDependencies": {
"@preact/preset-vite": "^2.5.0",
"@tailwindcss/forms": "^0.5.3",
"@testing-library/jest-dom": "^5.16.5",
"@testing-library/preact": "^3.2.2",
"@testing-library/preact": "^3.2.3",
"@testing-library/user-event": "^14.4.3",
"@types/video.js": "^7.3.50",
"@typescript-eslint/eslint-plugin": "^5.47.1",
"@typescript-eslint/parser": "^5.47.1",
"@vitest/coverage-c8": "^0.26.2",
"@vitest/ui": "^0.26.2",
"autoprefixer": "^10.4.13",
"eslint": "^8.30.0",
"@typescript-eslint/eslint-plugin": "^5.58.0",
"@typescript-eslint/parser": "^5.58.0",
"@vitest/coverage-c8": "^0.30.1",
"@vitest/ui": "^0.30.1",
"autoprefixer": "^10.4.14",
"eslint": "^8.38.0",
"eslint-config-preact": "^1.3.0",
"eslint-config-prettier": "^8.5.0",
"eslint-plugin-vitest-globals": "^1.2.0",
"eslint-config-prettier": "^8.8.0",
"eslint-plugin-vitest-globals": "^1.3.1",
"fake-indexeddb": "^4.0.1",
"jsdom": "^20.0.3",
"msw": "^0.49.2",
"jsdom": "^21.1.1",
"msw": "^1.2.1",
"postcss": "^8.4.19",
"prettier": "^2.8.0",
"tailwindcss": "^3.2.4",
"typescript": "^4.8.4",
"vite": "^4.0.3",
"vitest": "^0.25.3"
"prettier": "^2.8.7",
"tailwindcss": "^3.3.1",
"typescript": "^5.0.4",
"vite": "^4.2.1",
"vitest": "^0.30.1"
}
}

View File

@ -11,7 +11,15 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
const [hasLoaded, setHasLoaded] = useState(false);
const containerRef = useRef(null);
const canvasRef = useRef(null);
const [{ width: availableWidth }] = useResizeObserver(containerRef);
const [{ width: containerWidth }] = useResizeObserver(containerRef);
// Add scrollbar width (when visible) to the available observer width to eliminate screen juddering.
// https://github.com/blakeblackshear/frigate/issues/1657
let scrollBarWidth = 0;
if (window.innerWidth && document.body.offsetWidth) {
scrollBarWidth = window.innerWidth - document.body.offsetWidth;
}
const availableWidth = scrollBarWidth ? containerWidth + scrollBarWidth : containerWidth;
const { name } = config ? config.cameras[camera] : '';
const enabled = config ? config.cameras[camera].enabled : 'True';
@ -22,7 +30,11 @@ export default function CameraImage({ camera, onload, searchParams = '', stretch
const scaledHeight = Math.floor(availableWidth / aspectRatio);
return stretch ? scaledHeight : Math.min(scaledHeight, height);
}, [availableWidth, aspectRatio, height, stretch]);
const scaledWidth = useMemo(() => Math.ceil(scaledHeight * aspectRatio), [scaledHeight, aspectRatio]);
const scaledWidth = useMemo(() => Math.ceil(scaledHeight * aspectRatio - scrollBarWidth), [
scaledHeight,
aspectRatio,
scrollBarWidth,
]);
const img = useMemo(() => new Image(), []);
img.onload = useCallback(

View File

@ -21,7 +21,7 @@ export default function Dialog({ children, portalRootID = 'dialogs' }) {
>
<div
role="modal"
className={`absolute rounded shadow-2xl bg-white dark:bg-gray-700 max-w-sm text-gray-900 dark:text-white transition-transform transition-opacity duration-75 transform scale-90 opacity-0 ${
className={`absolute rounded shadow-2xl bg-white dark:bg-gray-700 sm:max-w-sm md:max-w-md lg:max-w-lg text-gray-900 dark:text-white transition-transform transition-opacity duration-75 transform scale-90 opacity-0 ${
show ? 'scale-100 opacity-100' : ''
}`}
>

View File

@ -3,11 +3,10 @@ import { useCallback, useEffect, useRef, useState } from 'preact/hooks';
import { useApiHost } from '../../api';
import { isNullOrUndefined } from '../../utils/objectUtils';
import 'videojs-seek-buttons';
import 'video.js/dist/video-js.css';
import 'videojs-seek-buttons/dist/videojs-seek-buttons.css';
import videojs, { VideoJsPlayer } from 'video.js';
import videojs from 'video.js';
import type Player from 'video.js/dist/types/player';
interface OnTimeUpdateEvent {
timestamp: number;
@ -34,10 +33,10 @@ export const HistoryVideo = ({
const apiHost = useApiHost();
const videoRef = useRef<HTMLVideoElement>(null);
const [video, setVideo] = useState<VideoJsPlayer>();
const [video, setVideo] = useState<Player>();
useEffect(() => {
let video: VideoJsPlayer
let video: Player
if (videoRef.current) {
video = videojs(videoRef.current, {})
setVideo(video)
@ -88,7 +87,8 @@ export const HistoryVideo = ({
);
useEffect(() => {
if (video && video.readyState() >= 1) {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
if (video && (video as any).readyState() >= 1) {
if (videoIsPlaying) {
video.play()
} else {
@ -98,7 +98,8 @@ export const HistoryVideo = ({
}, [video, videoIsPlaying])
const onLoad = useCallback(() => {
if (video && video.readyState() >= 1 && videoIsPlaying) {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
if (video && (video as any).readyState() >= 1 && videoIsPlaying) {
video.play()
}
}, [video, videoIsPlaying])

View File

@ -13,9 +13,11 @@ export default function MultiSelect({ className, title, options, selection, onTo
const [state, setState] = useState({
showMenu: false,
});
const isOptionSelected = (item) => { return selection == "all" || selection.split(',').indexOf(item) > -1; }
const menuHeight = Math.round(window.innerHeight * 0.55);
return (
<div className={`${className} p-2`} ref={popupRef}>
<div
@ -26,7 +28,7 @@ export default function MultiSelect({ className, title, options, selection, onTo
<ArrowDropdown className="w-6" />
</div>
{state.showMenu ? (
<Menu relativeTo={popupRef} onDismiss={() => setState({ showMenu: false })}>
<Menu className={`max-h-[${menuHeight}px] overflow-scroll`} relativeTo={popupRef} onDismiss={() => setState({ showMenu: false })}>
<div className="flex flex-wrap justify-between items-center">
<Heading className="p-4 justify-center" size="md">{title}</Heading>
<Button tabindex="false" className="mx-4" onClick={() => onShowAll() }>

View File

@ -80,7 +80,9 @@ export default function RelativeModal({
// too close to bottom
if (top + menuHeight > windowHeight - WINDOW_PADDING + window.scrollY) {
newTop = WINDOW_PADDING;
// If the pop-up modal would extend beyond the bottom of the visible window,
// reposition the modal to appear above the clicked icon instead
newTop = top - menuHeight;
}
if (top <= WINDOW_PADDING + window.scrollY) {

View File

@ -1,61 +0,0 @@
import { h } from 'preact';
const timeAgo = ({ time, dense = false }) => {
if (!time) return 'Invalid Time';
try {
const currentTime = new Date();
const pastTime = new Date(time);
const elapsedTime = currentTime - pastTime;
if (elapsedTime < 0) return 'Invalid Time';
const timeUnits = [
{ unit: 'ye', full: 'year', value: 31536000 },
{ unit: 'mo', full: 'month', value: 0 },
{ unit: 'day', full: 'day', value: 86400 },
{ unit: 'h', full: 'hour', value: 3600 },
{ unit: 'm', full: 'minute', value: 60 },
{ unit: 's', full: 'second', value: 1 },
];
let elapsed = elapsedTime / 1000;
if (elapsed < 60) {
return 'just now';
}
for (let i = 0; i < timeUnits.length; i++) {
// if months
if (i === 1) {
// Get the month and year for the time provided
const pastMonth = pastTime.getUTCMonth();
const pastYear = pastTime.getUTCFullYear();
// get current month and year
const currentMonth = currentTime.getUTCMonth();
const currentYear = currentTime.getUTCFullYear();
let monthDiff = (currentYear - pastYear) * 12 + (currentMonth - pastMonth);
// check if the time provided is the previous month but not exceeded 1 month ago.
if (currentTime.getUTCDate() < pastTime.getUTCDate()) {
monthDiff--;
}
if (monthDiff > 0) {
const unitAmount = monthDiff;
return `${unitAmount}${dense ? timeUnits[i].unit[0] : ` ${timeUnits[i].full}`}${dense ? '' : 's'} ago`;
}
} else if (elapsed >= timeUnits[i].value) {
const unitAmount = Math.floor(elapsed / timeUnits[i].value);
return `${unitAmount}${dense ? timeUnits[i].unit[0] : ` ${timeUnits[i].full}`}${dense ? '' : 's'} ago`;
}
}
} catch {
return 'Invalid Time';
}
};
const TimeAgo = (props) => {
return <span>{timeAgo({ ...props })}</span>;
};
export default TimeAgo;

View File

@ -0,0 +1,84 @@
import { h, FunctionComponent } from 'preact';
import { useEffect, useMemo, useState } from 'preact/hooks';
interface IProp {
/** The time to calculate time-ago from */
time: number;
/** OPTIONAL: overwrite current time */
currentTime?: Date;
/** OPTIONAL: boolean that determines whether to show the time-ago text in dense format */
dense?: boolean;
/** OPTIONAL: set custom refresh interval in milliseconds, default 1000 (1 sec) */
refreshInterval?: number;
}
type TimeUnit = {
unit: string;
full: string;
value: number;
};
const timeAgo = ({ time, currentTime = new Date(), dense = false }: IProp): string => {
if (typeof time !== 'number' || time < 0) return 'Invalid Time Provided';
const pastTime: Date = new Date(time);
const elapsedTime: number = currentTime.getTime() - pastTime.getTime();
const timeUnits: TimeUnit[] = [
{ unit: 'ye', full: 'year', value: 31536000 },
{ unit: 'mo', full: 'month', value: 0 },
{ unit: 'day', full: 'day', value: 86400 },
{ unit: 'h', full: 'hour', value: 3600 },
{ unit: 'm', full: 'minute', value: 60 },
{ unit: 's', full: 'second', value: 1 },
];
const elapsed: number = elapsedTime / 1000;
if (elapsed < 10) {
return 'just now';
}
for (let i = 0; i < timeUnits.length; i++) {
// if months
if (i === 1) {
// Get the month and year for the time provided
const pastMonth = pastTime.getUTCMonth();
const pastYear = pastTime.getUTCFullYear();
// get current month and year
const currentMonth = currentTime.getUTCMonth();
const currentYear = currentTime.getUTCFullYear();
let monthDiff = (currentYear - pastYear) * 12 + (currentMonth - pastMonth);
// check if the time provided is in the previous month but has not exceeded 1 month ago.
if (currentTime.getUTCDate() < pastTime.getUTCDate()) {
monthDiff--;
}
if (monthDiff > 0) {
const unitAmount = monthDiff;
return `${unitAmount}${dense ? timeUnits[i].unit[0] : ` ${timeUnits[i].full}`}${dense ? '' : 's'} ago`;
}
} else if (elapsed >= timeUnits[i].value) {
const unitAmount: number = Math.floor(elapsed / timeUnits[i].value);
return `${unitAmount}${dense ? timeUnits[i].unit[0] : ` ${timeUnits[i].full}`}${dense ? '' : 's'} ago`;
}
}
return 'Invalid Time';
};
const TimeAgo: FunctionComponent<IProp> = ({ refreshInterval = 1000, ...rest }): JSX.Element => {
const [currentTime, setCurrentTime] = useState<Date>(new Date());
useEffect(() => {
const intervalId: NodeJS.Timeout = setInterval(() => {
setCurrentTime(new Date());
}, refreshInterval);
return () => clearInterval(intervalId);
}, [refreshInterval]);
const timeAgoValue = useMemo(() => timeAgo({ currentTime, ...rest }), [currentTime, rest]);
return <span>{timeAgoValue}</span>;
};
export default TimeAgo;

View File

@ -0,0 +1,95 @@
import { h } from 'preact';
import useSWR from 'swr';
import ActivityIndicator from './ActivityIndicator';
import { formatUnixTimestampToDateTime } from '../utils/dateUtil';
import PlayIcon from '../icons/Play';
import ExitIcon from '../icons/Exit';
import { Zone } from '../icons/Zone';
import { useState } from 'preact/hooks';
import Button from './Button';
export default function TimelineSummary({ event, onFrameSelected }) {
const { data: eventTimeline } = useSWR([
'timeline',
{
source_id: event.id,
},
]);
const { data: config } = useSWR('config');
const [timeIndex, setTimeIndex] = useState(-1);
const onSelectMoment = async (index) => {
setTimeIndex(index);
onFrameSelected(eventTimeline[index]);
};
if (!eventTimeline || !config) {
return <ActivityIndicator />;
}
return (
<div className="flex flex-col">
<div className="h-14 flex justify-center">
<div className="sm:w-1 md:w-1/4 flex flex-row flex-nowrap justify-between overflow-auto">
{eventTimeline.map((item, index) =>
item.class_type == 'visible' || item.class_type == 'gone' ? (
<Button
key={index}
className="rounded-full"
type="text"
color={index == timeIndex ? 'blue' : 'gray'}
aria-label={window.innerWidth > 640 ? getTimelineItemDescription(config, item, event) : ''}
onClick={() => onSelectMoment(index)}
>
{item.class_type == 'visible' ? <PlayIcon className="w-8" /> : <ExitIcon className="w-8" />}
</Button>
) : (
<Button
key={index}
className="rounded-full"
type="text"
color={index == timeIndex ? 'blue' : 'gray'}
aria-label={window.innerWidth > 640 ? getTimelineItemDescription(config, item, event) : ''}
onClick={() => onSelectMoment(index)}
>
<Zone className="w-8" />
</Button>
)
)}
</div>
</div>
{timeIndex >= 0 ? (
<div className="bg-gray-500 p-4 m-2 max-w-md self-center">
Disclaimer: This data comes from the detect feed but is shown on the recordings; the streams are
unlikely to be perfectly in sync, so the bounding box and the footage will not line up perfectly.
</div>
) : null}
</div>
);
}
function getTimelineItemDescription(config, timelineItem, event) {
if (timelineItem.class_type == 'visible') {
return `${event.label} detected at ${formatUnixTimestampToDateTime(timelineItem.timestamp, {
date_style: 'short',
time_style: 'medium',
time_format: config.ui.time_format,
})}`;
} else if (timelineItem.class_type == 'entered_zone') {
return `${event.label.replaceAll('_', ' ')} entered ${timelineItem.data.zones
.join(' and ')
.replaceAll('_', ' ')} at ${formatUnixTimestampToDateTime(timelineItem.timestamp, {
date_style: 'short',
time_style: 'medium',
time_format: config.ui.time_format,
})}`;
}
return `${event.label} left at ${formatUnixTimestampToDateTime(timelineItem.timestamp, {
date_style: 'short',
time_style: 'medium',
time_format: config.ui.time_format,
})}`;
}

View File

@ -49,7 +49,7 @@ export default function Tooltip({ relativeTo, text }) {
const tooltip = (
<div
role="tooltip"
className={`shadow max-w-lg absolute pointer-events-none bg-gray-900 dark:bg-gray-200 bg-opacity-80 rounded px-2 py-1 transition-transform transition-opacity duration-75 transform scale-90 opacity-0 text-gray-100 dark:text-gray-900 text-sm ${
className={`shadow max-w-lg absolute pointer-events-none bg-gray-900 dark:bg-gray-200 bg-opacity-80 rounded px-2 py-1 transition-transform transition-opacity duration-75 transform scale-90 opacity-0 text-gray-100 dark:text-gray-900 text-sm capitalize ${
position.top >= 0 ? 'opacity-100 scale-100' : ''
}`}
ref={ref}

View File

@ -2,24 +2,21 @@ import { h } from 'preact';
import { useRef, useEffect } from 'preact/hooks';
import videojs from 'video.js';
import 'videojs-playlist';
import 'videojs-seek-buttons';
import 'video.js/dist/video-js.css';
import 'videojs-seek-buttons/dist/videojs-seek-buttons.css';
export default function VideoPlayer({ children, options, seekOptions = {}, onReady = () => {}, onDispose = () => {} }) {
export default function VideoPlayer({ children, options, seekOptions = {forward:30, backward: 10}, onReady = () => {}, onDispose = () => {} }) {
const playerRef = useRef();
useEffect(() => {
const defaultOptions = {
controls: true,
controlBar: {
skipButtons: seekOptions,
},
playbackRates: [0.5, 1, 2, 4, 8],
fluid: true,
};
const defaultSeekOptions = {
forward: 30,
back: 10,
};
if (!videojs.browser.IS_FIREFOX) {
defaultOptions.playbackRates.push(16);
@ -28,10 +25,6 @@ export default function VideoPlayer({ children, options, seekOptions = {}, onRea
const player = videojs(playerRef.current, { ...defaultOptions, ...options }, () => {
onReady(player);
});
player.seekButtons({
...defaultSeekOptions,
...seekOptions,
});
// Allows player to continue on error
player.reloadSourceOnError();

12
web/src/icons/Exit.jsx Normal file
View File

@ -0,0 +1,12 @@
import { h } from 'preact';
import { memo } from 'preact/compat';
function Exit({ className = '' }) {
return (
<svg className={`fill-current ${className}`} viewBox="0 0 24 24">
<path d="M22 12l-4-4v3h-8v2h8v3m2 2a10 10 0 110-12h-2.73a8 8 0 100 12z" />
</svg>
);
}
export default memo(Exit);

View File

@ -1,9 +1,9 @@
import { h } from 'preact';
import { memo } from 'preact/compat';
export function Play() {
export function Play({ className = '' }) {
return (
<svg style="width:24px;height:24px" viewBox="0 0 24 24">
<svg className={`fill-current ${className}`} viewBox="0 0 24 24">
<path fill="currentColor" d="M8,5.14V19.14L19,12.14L8,5.14Z" />
</svg>
);

View File

@ -10,7 +10,10 @@ import useSWR from 'swr';
export default function Birdseye() {
const { data: config } = useSWR('config');
const [viewSource, setViewSource, sourceIsLoaded] = usePersistence('birdseye-source', 'mse');
const [viewSource, setViewSource, sourceIsLoaded] = usePersistence(
'birdseye-source',
getDefaultLiveMode(config)
);
const sourceValues = ['mse', 'webrtc', 'jsmpeg'];
if (!config || !sourceIsLoaded) {
@ -80,3 +83,16 @@ export default function Birdseye() {
</div>
);
}
function getDefaultLiveMode(config) {
if (config) {
if (config.birdseye.restream) {
return config.ui.live_mode;
}
return 'jsmpeg';
}
return undefined;
}

View File

@ -25,13 +25,14 @@ export default function Camera({ camera }) {
const [viewMode, setViewMode] = useState('live');
const cameraConfig = config?.cameras[camera];
const restreamEnabled = cameraConfig && Object.keys(config.go2rtc.streams || {}).includes(cameraConfig.live.stream_name);
const restreamEnabled =
cameraConfig && Object.keys(config.go2rtc.streams || {}).includes(cameraConfig.live.stream_name);
const jsmpegWidth = cameraConfig
? Math.round(cameraConfig.live.height * (cameraConfig.detect.width / cameraConfig.detect.height))
: 0;
const [viewSource, setViewSource, sourceIsLoaded] = usePersistence(
`${camera}-source`,
getDefaultLiveMode(config, cameraConfig)
getDefaultLiveMode(config, cameraConfig, restreamEnabled)
);
const sourceValues = restreamEnabled ? ['mse', 'webrtc', 'jsmpeg'] : ['jsmpeg'];
const [options, setOptions] = usePersistence(`${camera}-feed`, emptyObject);
@ -63,6 +64,10 @@ export default function Camera({ camera }) {
return <ActivityIndicator />;
}
if (!restreamEnabled) {
setViewSource('jsmpeg');
}
const optionContent = showSettings ? (
<div className="grid grid-cols-2 md:grid-cols-3 lg:grid-cols-4 gap-4">
<Switch

View File

@ -26,6 +26,7 @@ import Dialog from '../components/Dialog';
import MultiSelect from '../components/MultiSelect';
import { formatUnixTimestampToDateTime, getDurationFromTimestamps } from '../utils/dateUtil';
import TimeAgo from '../components/TimeAgo';
import TimelineSummary from '../components/TimelineSummary';
const API_LIMIT = 25;
@ -60,12 +61,14 @@ export default function Events({ path, ...props }) {
});
const [uploading, setUploading] = useState([]);
const [viewEvent, setViewEvent] = useState();
const [eventOverlay, setEventOverlay] = useState();
const [eventDetailType, setEventDetailType] = useState('clip');
const [downloadEvent, setDownloadEvent] = useState({
id: null,
has_clip: false,
has_snapshot: false,
plus_id: undefined,
end_time: null,
});
const [deleteFavoriteState, setDeleteFavoriteState] = useState({
deletingFavoriteEventId: null,
@ -179,6 +182,18 @@ export default function Events({ path, ...props }) {
onFilter(name, items);
};
const onEventFrameSelected = (event, frame) => {
const eventDuration = event.end_time - event.start_time;
if (this.player) {
this.player.pause();
const videoOffset = this.player.duration() - eventDuration;
const startTime = videoOffset + (frame.timestamp - event.start_time);
this.player.currentTime(startTime);
setEventOverlay(frame);
}
};
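The seek math in onEventFrameSelected accounts for the clip starting before the event (pre-capture). A worked example in Python for concreteness (all numbers hypothetical):

event_start, event_end = 100.0, 160.0  # a 60 s event
clip_duration = 70.0                   # the clip includes 10 s of pre-capture

video_offset = clip_duration - (event_end - event_start)  # 10.0
frame_timestamp = 115.0                # a timeline moment inside the event
seek_to = video_offset + (frame_timestamp - event_start)  # 25.0 s into the clip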
const datePicker = useRef();
const downloadButton = useRef();
@ -190,6 +205,7 @@ export default function Events({ path, ...props }) {
has_clip: event.has_clip,
has_snapshot: event.has_snapshot,
plus_id: event.plus_id,
end_time: event.end_time,
}));
downloadButton.current = e.target;
setState({ ...state, showDownloadMenu: true });
@ -287,9 +303,6 @@ export default function Events({ path, ...props }) {
return <ActivityIndicator />;
}
const timezone = config.ui?.timezone || Intl.DateTimeFormat().resolvedOptions().timeZone;
const locale = window.navigator?.language || 'en-US';
return (
<div className="space-y-4 p-2 px-4 w-full">
<Heading>Events</Heading>
@ -366,7 +379,7 @@ export default function Events({ path, ...props }) {
download
/>
)}
{downloadEvent.has_snapshot && !downloadEvent.plus_id && (
{(downloadEvent.end_time && downloadEvent.has_snapshot && !downloadEvent.plus_id) && (
<MenuItem
icon={UploadPlus}
label={uploading.includes(downloadEvent.id) ? 'Uploading...' : 'Send to Frigate+'}
@ -508,7 +521,7 @@ export default function Events({ path, ...props }) {
</div>
<div className="text-sm flex">
<Clock className="h-5 w-5 mr-2 inline" />
{formatUnixTimestampToDateTime(event.start_time, locale, timezone)}
{formatUnixTimestampToDateTime(event.start_time, { ...config.ui })}
<div className="hidden md:inline">
<span className="m-1">-</span>
<TimeAgo time={event.start_time * 1000} dense />
@ -527,7 +540,7 @@ export default function Events({ path, ...props }) {
</div>
</div>
<div class="hidden sm:flex flex-col justify-end mr-2">
{event.has_snapshot && (
{event.end_time && event.has_snapshot && (
<Fragment>
{event.plus_id ? (
<div className="uppercase text-xs">Sent to Frigate+</div>
@ -574,20 +587,52 @@ export default function Events({ path, ...props }) {
<div>
{eventDetailType == 'clip' && event.has_clip ? (
<VideoPlayer
options={{
preload: 'auto',
autoplay: true,
sources: [
{
src: `${apiHost}vod/event/${event.id}/master.m3u8`,
type: 'application/vnd.apple.mpegurl',
},
],
}}
seekOptions={{ forward: 10, back: 5 }}
onReady={() => {}}
/>
<div>
<TimelineSummary
event={event}
onFrameSelected={(frame) => onEventFrameSelected(event, frame)}
/>
<div>
<VideoPlayer
options={{
preload: 'auto',
autoplay: true,
sources: [
{
src: `${apiHost}vod/event/${event.id}/master.m3u8`,
type: 'application/vnd.apple.mpegurl',
},
],
}}
seekOptions={{ forward: 10, backward: 5 }}
onReady={(player) => {
this.player = player;
this.player.on('playing', () => {
setEventOverlay(undefined);
});
}}
onDispose={() => {
this.player = null;
}}
>
{eventOverlay ? (
<div
className="absolute border-4 border-red-600"
style={{
left: `${Math.round(eventOverlay.data.box[0] * 100)}%`,
top: `${Math.round(eventOverlay.data.box[1] * 100)}%`,
right: `${Math.round((1 - eventOverlay.data.box[2]) * 100)}%`,
bottom: `${Math.round((1 - eventOverlay.data.box[3]) * 100)}%`,
}}
>
{eventOverlay.class_type == 'entered_zone' ? (
<div className="absolute w-2 h-2 bg-yellow-500 left-[50%] bottom-0" />
) : null}
</div>
) : null}
</VideoPlayer>
</div>
</div>
) : null}
{eventDetailType == 'image' || !event.has_clip ? (

View File

@ -4,6 +4,7 @@ import { useCallback, useEffect, useState } from 'preact/hooks';
import ButtonsTabbed from '../components/ButtonsTabbed';
import useSWR from 'swr';
import Button from '../components/Button';
import copy from 'copy-to-clipboard';
export default function Logs() {
const [logService, setLogService] = useState('frigate');
@ -14,10 +15,7 @@ export default function Logs() {
const { data: nginxLogs } = useSWR('logs/nginx');
const handleCopyLogs = useCallback(() => {
async function copy() {
await window.navigator.clipboard.writeText(logs);
}
copy();
copy(logs);
}, [logs]);
useEffect(() => {

View File

@ -133,7 +133,7 @@ export default function Recording({ camera, date, hour = '00', minute = '00', se
return (
<div className="space-y-4 p-2 px-4">
<Heading>{camera.replaceAll('_', ' ')} Recordings</Heading>
<div className="text-xs">Dates and times are based on the browser's timezone {timezone}</div>
<div className="text-xs">Dates and times are based on the timezone {timezone}</div>
<VideoPlayer
options={{

View File

@ -128,7 +128,7 @@ export default function Storage() {
<Tbody>
<Tr>
<Td>{Math.round(camera['usage_percent'] ?? 0)}%</Td>
<Td>{camera['bandwidth'] ? getUnitSize(camera['bandwidth']) : 'Calculating...'}/hr</Td>
<Td>{camera['bandwidth'] ? `${getUnitSize(camera['bandwidth'])}/hr` : 'Calculating...'}</Td>
</Tr>
</Tbody>
</Table>

Some files were not shown because too many files have changed in this diff