diff --git a/.cspell/frigate-dictionary.txt b/.cspell/frigate-dictionary.txt new file mode 100644 index 000000000..0cbcc4beb --- /dev/null +++ b/.cspell/frigate-dictionary.txt @@ -0,0 +1,304 @@ +aarch +absdiff +airockchip +Alloc +Amcrest +amdgpu +analyzeduration +Annke +apexcharts +arange +argmax +argmin +argpartition +ascontiguousarray +authelia +authentik +autodetected +automations +autotrack +autotracked +autotracker +autotracking +balena +Beelink +BGRA +BHWC +blackshear +blakeblackshear +bottombar +buildx +castable +cdist +Celeron +cgroups +chipset +chromadb +Chromecast +cmdline +codeowner +CODEOWNERS +codeproject +colormap +colorspace +comms +ctypeslib +CUDA +Cuvid +Dahua +datasheet +debconf +deci +deepstack +defragment +devcontainer +DEVICEMAP +discardcorrupt +dpkg +dsize +dtype +ECONNRESET +edgetpu +faststart +fflags +ffprobe +fillna +flac +foscam +fourcc +framebuffer +fregate +frégate +fromarray +frombuffer +frontdoor +fstype +fullchain +fullscreen +genai +generativeai +genpts +getpid +gpuload +HACS +Hailo +hass +hconcat +healthcheck +hideable +Hikvision +homeassistant +homekit +homography +hsize +hstack +httpx +hwaccel +hwdownload +hwmap +hwupload +iloc +imagestream +imdecode +imencode +imread +imutils +imwrite +interp +iostat +iotop +itemsize +Jellyfin +jetson +jetsons +joserfc +jsmpeg +jsonify +Kalman +keepalive +keepdims +labelmap +letsencrypt +levelname +LIBAVFORMAT +libedgetpu +libnvinfer +libva +libwebp +libx +libyolo +linalg +localzone +logpipe +Loryta +lstsq +lsusb +markupsafe +maxsplit +MEMHOSTALLOC +memlimit +meshgrid +metadatas +migraphx +minilm +mjpeg +mkfifo +mobiledet +mobilenet +modelpath +mosquitto +mountpoint +movflags +mpegts +mqtt +mse +msenc +namedtuples +nbytes +nchw +ndarray +ndimage +nethogs +newaxis +nhwc +NOBLOCK +nobuffer +nokey +NONBLOCK +noninteractive +noprint +Norfair +nptype +NTSC +numpy +nvenc +nvhost +nvml +nvmpi +ollama +onnx +onnxruntime +onvif +ONVIF +openai +opencv +openvino +OWASP +paho +passwordless +popleft +posthog 
+postprocess +poweroff +preexec +probesize +protobuf +psutil +pubkey +putenv +pycache +pydantic +pyobj +pysqlite +pytz +pywebpush +qnap +quantisation +Radeon +radeonsi +radeontop +rawvideo +rcond +RDONLY +rebranded +referer +reindex +Reolink +restream +restreamed +restreaming +rkmpp +rknn +rkrga +rockchip +rocm +rocminfo +rootfs +rtmp +RTSP +ruamel +scroller +setproctitle +setpts +shms +SIGUSR +skylake +sleeptime +SNDMORE +socs +sqliteq +ssdlite +statm +stimeout +stylelint +subclassing +substream +superfast +surveillance +svscan +Swipeable +sysconf +tailscale +Tapo +tensorrt +tflite +thresholded +timelapse +tmpfs +tobytes +toggleable +traefik +tzlocal +Ubiquiti +udev +udevadm +ultrafast +unichip +unidecode +Unifi +unixepoch +unraid +unreviewed +userdata +usermod +vaapi +vainfo +variations +vconcat +vitb +vstream +vsync +wallclock +webp +webpush +webrtc +websockets +webui +werkzeug +workdir +WRONLY +wsgirefserver +wsgiutils +wsize +xaddr +xmaxs +xmins +XPUB +XSUB +ymaxs +ymins +yolo +yolonas +yolox +zeep +zerolatency diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 0c460cfad..63adae73d 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -10,10 +10,14 @@ "features": { "ghcr.io/devcontainers/features/common-utils:1": {} }, - "forwardPorts": [5000, 5001, 5173, 1935, 8554, 8555], + "forwardPorts": [8971, 5000, 5001, 5173, 8554, 8555], "portsAttributes": { + "8971": { + "label": "External NGINX", + "onAutoForward": "silent" + }, "5000": { - "label": "NGINX", + "label": "Internal NGINX", "onAutoForward": "silent" }, "5001": { @@ -24,10 +28,6 @@ "label": "Vite Server", "onAutoForward": "silent" }, - "1935": { - "label": "RTMP", - "onAutoForward": "silent" - }, "8554": { "label": "gortc RTSP", "onAutoForward": "silent" @@ -52,7 +52,8 @@ "csstools.postcss", "blanu.vscode-styled-jsx", "bradlc.vscode-tailwindcss", - "charliermarsh.ruff" + "charliermarsh.ruff", + "eamodio.gitlens" ], "settings": { 
"remote.autoForwardPorts": false, diff --git a/.devcontainer/post_create.sh b/.devcontainer/post_create.sh index 1a1832f3b..ee0888016 100755 --- a/.devcontainer/post_create.sh +++ b/.devcontainer/post_create.sh @@ -17,7 +17,7 @@ sudo chown -R "$(id -u):$(id -g)" /media/frigate # When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the # s6 service file. For dev, where frigate is started from an interactive # shell, we define it in .bashrc instead. -echo 'export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc +echo 'export LIBAVFORMAT_VERSION_MAJOR=$(/usr/lib/ffmpeg/7.0/bin/ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc make version diff --git a/.github/DISCUSSION_TEMPLATE/camera-support.yml b/.github/DISCUSSION_TEMPLATE/camera-support.yml new file mode 100644 index 000000000..a76fd5caf --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/camera-support.yml @@ -0,0 +1,138 @@ +title: "[Camera Support]: " +labels: ["support", "triage"] +body: + - type: markdown + attributes: + value: | + Use this form for support or questions for an issue with your cameras. + + Before submitting your support request, please [search the discussions][discussions], read the [official Frigate documentation][docs], and read the [Frigate FAQ][faq] pinned at the Discussion page to see if your question has already been answered by the community. + + [discussions]: https://www.github.com/blakeblackshear/frigate/discussions + [docs]: https://docs.frigate.video + [faq]: https://github.com/blakeblackshear/frigate/discussions/12724 + - type: textarea + id: description + attributes: + label: Describe the problem you are having + validations: + required: true + - type: input + id: version + attributes: + label: Version + description: Visible on the System page in the Web UI. Please include the full version including the build identifier (eg. 
0.14.0-ea36ds1) + validations: + required: true + - type: input + attributes: + label: What browser(s) are you using? + placeholder: Google Chrome 88.0.4324.150 + description: > + Provide the full name and don't forget to add the version! + - type: textarea + id: config + attributes: + label: Frigate config file + description: This will be automatically formatted into code, so no need for backticks. + render: yaml + validations: + required: true + - type: textarea + id: frigatelogs + attributes: + label: Relevant Frigate log output + description: Please copy and paste any relevant Frigate log output. Include logs before and after your exact error when possible. This will be automatically formatted into code, so no need for backticks. + render: shell + validations: + required: true + - type: textarea + id: go2rtclogs + attributes: + label: Relevant go2rtc log output + description: Please copy and paste any relevant go2rtc log output. Include logs before and after your exact error when possible. Logs can be viewed via the Frigate UI, Docker, or the go2rtc dashboard. This will be automatically formatted into code, so no need for backticks. 
+ render: shell + validations: + required: true + - type: textarea + id: ffprobe + attributes: + label: FFprobe output from your camera + description: Run `ffprobe ` from within the Frigate container if possible, and provide output below + render: shell + validations: + required: true + - type: textarea + id: stats + attributes: + label: Frigate stats + description: Output from frigate's /api/stats endpoint + render: json + - type: dropdown + id: os + attributes: + label: Operating system + options: + - HassOS + - Debian + - Other Linux + - Proxmox + - UNRAID + - Windows + - Other + validations: + required: true + - type: dropdown + id: install-method + attributes: + label: Install method + options: + - HassOS Addon + - Docker Compose + - Docker CLI + - Proxmox via Docker + - Proxmox via TTeck Script + - Windows WSL2 + validations: + required: true + - type: dropdown + id: object-detector + attributes: + label: Object Detector + options: + - Coral + - OpenVino + - TensorRT + - RKNN + - Other + - CPU (no coral) + validations: + required: true + - type: dropdown + id: network + attributes: + label: Network connection + options: + - Wired + - Wireless + - Mixed + validations: + required: true + - type: input + id: camera + attributes: + label: Camera make and model + description: Dahua, hikvision, amcrest, reolink, etc and model number + validations: + required: true + - type: textarea + id: screenshots + attributes: + label: Screenshots of the Frigate UI's System metrics pages + description: Drag and drop for images is possible in this field. Please post screenshots of at least General and Cameras tabs. 
+ validations: + required: true + - type: textarea + id: other + attributes: + label: Any other information that may be helpful diff --git a/.github/DISCUSSION_TEMPLATE/config-support.yml b/.github/DISCUSSION_TEMPLATE/config-support.yml new file mode 100644 index 000000000..4934d7936 --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/config-support.yml @@ -0,0 +1,113 @@ +title: "[Config Support]: " +labels: ["support", "triage"] +body: + - type: markdown + attributes: + value: | + Use this form for support or questions related to Frigate's configuration and config file. + + Before submitting your support request, please [search the discussions][discussions], read the [official Frigate documentation][docs], and read the [Frigate FAQ][faq] pinned at the Discussion page to see if your question has already been answered by the community. + + [discussions]: https://www.github.com/blakeblackshear/frigate/discussions + [docs]: https://docs.frigate.video + [faq]: https://github.com/blakeblackshear/frigate/discussions/12724 + - type: textarea + id: description + attributes: + label: Describe the problem you are having + validations: + required: true + - type: input + id: version + attributes: + label: Version + description: Visible on the System page in the Web UI. Please include the full version including the build identifier (eg. 0.14.0-ea36ds1) + validations: + required: true + - type: textarea + id: config + attributes: + label: Frigate config file + description: This will be automatically formatted into code, so no need for backticks. + render: yaml + validations: + required: true + - type: textarea + id: frigatelogs + attributes: + label: Relevant Frigate log output + description: Please copy and paste any relevant Frigate log output. Include logs before and after your exact error when possible. This will be automatically formatted into code, so no need for backticks. 
+ render: shell + validations: + required: true + - type: textarea + id: go2rtclogs + attributes: + label: Relevant go2rtc log output + description: Please copy and paste any relevant go2rtc log output. Include logs before and after your exact error when possible. This will be automatically formatted into code, so no need for backticks. + render: shell + validations: + required: true + - type: textarea + id: stats + attributes: + label: Frigate stats + description: Output from frigate's /api/stats endpoint + render: json + - type: dropdown + id: os + attributes: + label: Operating system + options: + - HassOS + - Debian + - Other Linux + - Proxmox + - UNRAID + - Windows + - Other + validations: + required: true + - type: dropdown + id: install-method + attributes: + label: Install method + options: + - HassOS Addon + - Docker Compose + - Docker CLI + - Proxmox via Docker + - Proxmox via TTeck Script + - Windows WSL2 + validations: + required: true + - type: textarea + id: docker + attributes: + label: docker-compose file or Docker CLI command + description: This will be automatically formatted into code, so no need for backticks. 
+ render: yaml + validations: + required: true + - type: dropdown + id: object-detector + attributes: + label: Object Detector + options: + - Coral + - OpenVino + - TensorRT + - RKNN + - Other + - CPU (no coral) + validations: + required: true + - type: textarea + id: screenshots + attributes: + label: Screenshots of the Frigate UI's System metrics pages + description: Drag and drop or simple cut/paste is possible in this field + - type: textarea + id: other + attributes: + label: Any other information that may be helpful diff --git a/.github/DISCUSSION_TEMPLATE/detector-support.yml b/.github/DISCUSSION_TEMPLATE/detector-support.yml new file mode 100644 index 000000000..442b2527a --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/detector-support.yml @@ -0,0 +1,87 @@ +title: "[Detector Support]: " +labels: ["support", "triage"] +body: + - type: markdown + attributes: + value: | + Use this form for support or questions related to Frigate's object detectors. + + Before submitting your support request, please [search the discussions][discussions], read the [official Frigate documentation][docs], and read the [Frigate FAQ][faq] pinned at the Discussion page to see if your question has already been answered by the community. + + [discussions]: https://www.github.com/blakeblackshear/frigate/discussions + [docs]: https://docs.frigate.video + [faq]: https://github.com/blakeblackshear/frigate/discussions/12724 + - type: textarea + id: description + attributes: + label: Describe the problem you are having + validations: + required: true + - type: input + id: version + attributes: + label: Version + description: Visible on the System page in the Web UI. Please include the full version including the build identifier (eg. 0.14.0-ea36ds1) + validations: + required: true + - type: textarea + id: config + attributes: + label: Frigate config file + description: This will be automatically formatted into code, so no need for backticks. 
+ render: yaml + validations: + required: true + - type: textarea + id: docker + attributes: + label: docker-compose file or Docker CLI command + description: This will be automatically formatted into code, so no need for backticks. + render: yaml + validations: + required: true + - type: textarea + id: frigatelogs + attributes: + label: Relevant Frigate log output + description: Please copy and paste any relevant Frigate log output. Include logs before and after your exact error when possible. This will be automatically formatted into code, so no need for backticks. + render: shell + validations: + required: true + - type: dropdown + id: install-method + attributes: + label: Install method + options: + - HassOS Addon + - Docker Compose + - Docker CLI + - Proxmox via Docker + - Proxmox via TTeck Script + - Windows WSL2 + validations: + required: true + - type: dropdown + id: object-detector + attributes: + label: Object Detector + options: + - Coral + - OpenVino + - TensorRT + - RKNN + - Other + - CPU (no coral) + validations: + required: true + - type: textarea + id: screenshots + attributes: + label: Screenshots of the Frigate UI's System metrics pages + description: Drag and drop for images is possible in this field. Please post screenshots of at least General and Cameras tabs. + validations: + required: true + - type: textarea + id: other + attributes: + label: Any other information that may be helpful diff --git a/.github/DISCUSSION_TEMPLATE/general-support.yml b/.github/DISCUSSION_TEMPLATE/general-support.yml new file mode 100644 index 000000000..7af52bdf5 --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/general-support.yml @@ -0,0 +1,130 @@ +title: "[Support]: " +labels: ["support", "triage"] +body: + - type: markdown + attributes: + value: | + Use this form for support for issues that don't fall into any specific category. 
+ + Before submitting your support request, please [search the discussions][discussions], read the [official Frigate documentation][docs], and read the [Frigate FAQ][faq] pinned at the Discussion page to see if your question has already been answered by the community. + + [discussions]: https://www.github.com/blakeblackshear/frigate/discussions + [docs]: https://docs.frigate.video + [faq]: https://github.com/blakeblackshear/frigate/discussions/12724 + - type: textarea + id: description + attributes: + label: Describe the problem you are having + validations: + required: true + - type: input + id: version + attributes: + label: Version + description: Visible on the System page in the Web UI. Please include the full version including the build identifier (eg. 0.14.0-ea36ds1) + validations: + required: true + - type: input + attributes: + label: What browser(s) are you using? + placeholder: Google Chrome 88.0.4324.150 + description: > + Provide the full name and don't forget to add the version! + - type: textarea + id: config + attributes: + label: Frigate config file + description: This will be automatically formatted into code, so no need for backticks. + render: yaml + validations: + required: true + - type: textarea + id: frigatelogs + attributes: + label: Relevant Frigate log output + description: Please copy and paste any relevant Frigate log output. Include logs before and after your exact error when possible. This will be automatically formatted into code, so no need for backticks. + render: shell + validations: + required: true + - type: textarea + id: go2rtclogs + attributes: + label: Relevant go2rtc log output + description: Please copy and paste any relevant go2rtc log output. Include logs before and after your exact error when possible. Logs can be viewed via the Frigate UI, Docker, or the go2rtc dashboard. This will be automatically formatted into code, so no need for backticks. 
+ render: shell + validations: + required: true + - type: textarea + id: ffprobe + attributes: + label: FFprobe output from your camera + description: Run `ffprobe ` from within the Frigate container if possible, and provide output below + render: shell + validations: + required: true + - type: textarea + id: stats + attributes: + label: Frigate stats + description: Output from frigate's /api/stats endpoint + render: json + - type: dropdown + id: install-method + attributes: + label: Install method + options: + - HassOS Addon + - Docker Compose + - Docker CLI + - Proxmox via Docker + - Proxmox via TTeck Script + - Windows WSL2 + validations: + required: true + - type: textarea + id: docker + attributes: + label: docker-compose file or Docker CLI command + description: This will be automatically formatted into code, so no need for backticks. + render: yaml + validations: + required: true + - type: dropdown + id: object-detector + attributes: + label: Object Detector + options: + - Coral + - OpenVino + - TensorRT + - RKNN + - Other + - CPU (no coral) + validations: + required: true + - type: dropdown + id: network + attributes: + label: Network connection + options: + - Wired + - Wireless + - Mixed + validations: + required: true + - type: input + id: camera + attributes: + label: Camera make and model + description: Dahua, hikvision, amcrest, reolink, etc and model number + validations: + required: true + - type: textarea + id: screenshots + attributes: + label: Screenshots of the Frigate UI's System metrics pages + description: Drag and drop for images is possible in this field + - type: textarea + id: other + attributes: + label: Any other information that may be helpful diff --git a/.github/DISCUSSION_TEMPLATE/hardware-acceleration-support.yml b/.github/DISCUSSION_TEMPLATE/hardware-acceleration-support.yml new file mode 100644 index 000000000..43fb3503b --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/hardware-acceleration-support.yml @@ -0,0 +1,120 @@ +title: 
"[HW Accel Support]: " +labels: ["support", "triage"] +body: + - type: markdown + attributes: + value: | + Use this form to submit a support request for hardware acceleration issues. + + Before submitting your support request, please [search the discussions][discussions], read the [official Frigate documentation][docs], and read the [Frigate FAQ][faq] pinned at the Discussion page to see if your question has already been answered by the community. + + [discussions]: https://www.github.com/blakeblackshear/frigate/discussions + [docs]: https://docs.frigate.video + [faq]: https://github.com/blakeblackshear/frigate/discussions/12724 + - type: textarea + id: description + attributes: + label: Describe the problem you are having + validations: + required: true + - type: input + id: version + attributes: + label: Version + description: Visible on the System page in the Web UI. Please include the full version including the build identifier (eg. 0.14.0-ea36ds1) + validations: + required: true + - type: textarea + id: config + attributes: + label: Frigate config file + description: This will be automatically formatted into code, so no need for backticks. + render: yaml + validations: + required: true + - type: textarea + id: docker + attributes: + label: docker-compose file or Docker CLI command + description: This will be automatically formatted into code, so no need for backticks. + render: yaml + validations: + required: true + - type: textarea + id: frigatelogs + attributes: + label: Relevant Frigate log output + description: Please copy and paste any relevant Frigate log output. Include logs before and after your exact error when possible. This will be automatically formatted into code, so no need for backticks. + render: shell + validations: + required: true + - type: textarea + id: go2rtclogs + attributes: + label: Relevant go2rtc log output + description: Please copy and paste any relevant go2rtc log output. 
Include logs before and after your exact error when possible. Logs can be viewed via the Frigate UI, Docker, or the go2rtc dashboard. This will be automatically formatted into code, so no need for backticks. + render: shell + validations: + required: true + - type: textarea + id: ffprobe + attributes: + label: FFprobe output from your camera + description: Run `ffprobe ` from within the Frigate container if possible, and provide output below + render: shell + validations: + required: true + - type: dropdown + id: install-method + attributes: + label: Install method + options: + - HassOS Addon + - Docker Compose + - Docker CLI + - Proxmox via Docker + - Proxmox via TTeck Script + - Windows WSL2 + validations: + required: true + - type: dropdown + id: object-detector + attributes: + label: Object Detector + options: + - Coral + - OpenVino + - TensorRT + - RKNN + - Other + - CPU (no coral) + validations: + required: true + - type: dropdown + id: network + attributes: + label: Network connection + options: + - Wired + - Wireless + - Mixed + validations: + required: true + - type: input + id: camera + attributes: + label: Camera make and model + description: Dahua, hikvision, amcrest, reolink, etc and model number + validations: + required: true + - type: textarea + id: screenshots + attributes: + label: Screenshots of the Frigate UI's System metrics pages + description: Drag and drop for images is possible in this field. Please post screenshots of at least General and Cameras tabs. 
+ validations: + required: true + - type: textarea + id: other + attributes: + label: Any other information that may be helpful diff --git a/.github/DISCUSSION_TEMPLATE/question.yml b/.github/DISCUSSION_TEMPLATE/question.yml new file mode 100644 index 000000000..6a4789c9c --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/question.yml @@ -0,0 +1,21 @@ +title: "[Question]: " +labels: ["question"] +body: + - type: markdown + attributes: + value: | + Use this form for questions you have about Frigate. + + Before submitting your question, please [search the discussions][discussions], read the [official Frigate documentation][docs], and read the [Frigate FAQ][faq] pinned at the Discussion page to see if your question has already been answered by the community. + + **If you are looking for support, start a new discussion and use a support category.** + + [discussions]: https://www.github.com/blakeblackshear/frigate/discussions + [docs]: https://docs.frigate.video + [faq]: https://github.com/blakeblackshear/frigate/discussions/12724 + - type: textarea + id: description + attributes: + label: "What is your question?" + validations: + required: true diff --git a/.github/DISCUSSION_TEMPLATE/report-a-bug.yml b/.github/DISCUSSION_TEMPLATE/report-a-bug.yml new file mode 100644 index 000000000..dba6d695e --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/report-a-bug.yml @@ -0,0 +1,146 @@ +title: "[Bug]: " +labels: ["bug", "triage"] +body: + - type: markdown + attributes: + value: | + Use this form to submit a reproducible bug in Frigate or Frigate's UI. + + Before submitting your bug report, please [search the discussions][discussions], look at recent open and closed [pull requests][prs], read the [official Frigate documentation][docs], and read the [Frigate FAQ][faq] pinned at the Discussion page to see if your bug has already been fixed by the developers or reported by the community. 
+ + **If you are unsure if your issue is actually a bug or not, please submit a support request first.** + + [discussions]: https://www.github.com/blakeblackshear/frigate/discussions + [prs]: https://www.github.com/blakeblackshear/frigate/pulls + [docs]: https://docs.frigate.video + [faq]: https://github.com/blakeblackshear/frigate/discussions/12724 + - type: checkboxes + attributes: + label: Checklist + description: Please verify that you've followed these steps + options: + - label: I have updated to the latest available Frigate version. + required: true + - label: I have cleared the cache of my browser. + required: true + - label: I have tried a different browser to see if it is related to my browser. + required: true + - label: I have tried reproducing the issue in [incognito mode](https://www.computerworld.com/article/1719851/how-to-go-incognito-in-chrome-firefox-safari-and-edge.html) to rule out problems with any third party extensions or plugins I have installed. + - type: textarea + id: description + attributes: + label: Describe the problem you are having + description: Provide a clear and concise description of what the bug is. + validations: + required: true + - type: textarea + id: steps + attributes: + label: Steps to reproduce + description: | + Please tell us exactly how to reproduce your issue. + Provide clear and concise step by step instructions and add code snippets if needed. + value: | + 1. + 2. + 3. + ... + validations: + required: true + - type: input + id: version + attributes: + label: Version + description: Visible on the System page in the Web UI. Please include the full version including the build identifier (eg. 0.14.0-ea36ds1) + validations: + required: true + - type: input + attributes: + label: In which browser(s) are you experiencing the issue with? + placeholder: Google Chrome 88.0.4324.150 + description: > + Provide the full name and don't forget to add the version! 
+ - type: textarea + id: config + attributes: + label: Frigate config file + description: This will be automatically formatted into code, so no need for backticks. + render: yaml + validations: + required: true + - type: textarea + id: docker + attributes: + label: docker-compose file or Docker CLI command + description: This will be automatically formatted into code, so no need for backticks. + render: yaml + validations: + required: true + - type: textarea + id: frigatelogs + attributes: + label: Relevant Frigate log output + description: Please copy and paste any relevant Frigate log output. Include logs before and after your exact error when possible. This will be automatically formatted into code, so no need for backticks. + render: shell + validations: + required: true + - type: textarea + id: go2rtclogs + attributes: + label: Relevant go2rtc log output + description: Please copy and paste any relevant go2rtc log output. Include logs before and after your exact error when possible. Logs can be viewed via the Frigate UI, Docker, or the go2rtc dashboard. This will be automatically formatted into code, so no need for backticks. 
+ render: shell + validations: + required: true + - type: dropdown + id: os + attributes: + label: Operating system + options: + - HassOS + - Debian + - Other Linux + - Proxmox + - UNRAID + - Windows + - Other + validations: + required: true + - type: dropdown + id: install-method + attributes: + label: Install method + options: + - HassOS Addon + - Docker Compose + - Docker CLI + validations: + required: true + - type: dropdown + id: network + attributes: + label: Network connection + options: + - Wired + - Wireless + - Mixed + validations: + required: true + - type: input + id: camera + attributes: + label: Camera make and model + description: Dahua, hikvision, amcrest, reolink, etc and model number + validations: + required: true + - type: textarea + id: screenshots + attributes: + label: Screenshots of the Frigate UI's System metrics pages + description: Drag and drop for images is possible in this field. Please post screenshots of all tabs. + validations: + required: true + - type: textarea + id: other + attributes: + label: Any other information that may be helpful diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index d212efb04..b8892ce80 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,3 +1,4 @@ github: - blakeblackshear - NickM-27 + - hawkeye217 diff --git a/.github/ISSUE_TEMPLATE/camera_support_request.yml b/.github/ISSUE_TEMPLATE/camera_support_request.yml deleted file mode 100644 index 7640d26ec..000000000 --- a/.github/ISSUE_TEMPLATE/camera_support_request.yml +++ /dev/null @@ -1,107 +0,0 @@ -name: Camera Support Request -description: Support for setting up cameras in Frigate -title: "[Camera Support]: " -labels: ["support", "triage"] -assignees: [] -body: - - type: textarea - id: description - attributes: - label: Describe the problem you are having - validations: - required: true - - type: input - id: version - attributes: - label: Version - description: Visible on the Debug page in the Web UI - validations: - required: true - - 
type: textarea - id: config - attributes: - label: Frigate config file - description: This will be automatically formatted into code, so no need for backticks. - render: yaml - validations: - required: true - - type: textarea - id: logs - attributes: - label: Relevant log output - description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. - render: shell - validations: - required: true - - type: textarea - id: ffprobe - attributes: - label: FFprobe output from your camera - description: Run `ffprobe ` and provide output below - render: shell - validations: - required: true - - type: textarea - id: stats - attributes: - label: Frigate stats - description: Output from frigate's /api/stats endpoint - render: json - - type: dropdown - id: os - attributes: - label: Operating system - options: - - HassOS - - Debian - - Other Linux - - Proxmox - - UNRAID - - Windows - - Other - validations: - required: true - - type: dropdown - id: install-method - attributes: - label: Install method - options: - - HassOS Addon - - Docker Compose - - Docker CLI - validations: - required: true - - type: dropdown - id: coral - attributes: - label: Coral version - options: - - USB - - PCIe - - M.2 - - Dev Board - - Other - - CPU (no coral) - validations: - required: true - - type: dropdown - id: network - attributes: - label: Network connection - options: - - Wired - - Wireless - - Mixed - validations: - required: true - - type: input - id: camera - attributes: - label: Camera make and model - description: Dahua, hikvision, amcrest, reolink, etc and model number - validations: - required: true - - type: textarea - id: other - attributes: - label: Any other information that may be helpful diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 3ba13e0ce..a7474bc64 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1 +1,8 @@ blank_issues_enabled: 
false +contact_links: + - name: Frigate Support + url: https://github.com/blakeblackshear/frigate/discussions/new/choose + about: Get support for setting up or troubleshooting Frigate. + - name: Frigate Bug Report + url: https://github.com/blakeblackshear/frigate/discussions/new/choose + about: Report a specific UI or backend bug. diff --git a/.github/ISSUE_TEMPLATE/config_support_request.yml b/.github/ISSUE_TEMPLATE/config_support_request.yml deleted file mode 100644 index cb95f6fa9..000000000 --- a/.github/ISSUE_TEMPLATE/config_support_request.yml +++ /dev/null @@ -1,82 +0,0 @@ -name: Config Support Request -description: Support for Frigate configuration -title: "[Config Support]: " -labels: ["support", "triage"] -assignees: [] -body: - - type: textarea - id: description - attributes: - label: Describe the problem you are having - validations: - required: true - - type: input - id: version - attributes: - label: Version - description: Visible on the Debug page in the Web UI - validations: - required: true - - type: textarea - id: config - attributes: - label: Frigate config file - description: This will be automatically formatted into code, so no need for backticks. - render: yaml - validations: - required: true - - type: textarea - id: logs - attributes: - label: Relevant log output - description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. 
- render: shell - validations: - required: true - - type: textarea - id: stats - attributes: - label: Frigate stats - description: Output from frigate's /api/stats endpoint - render: json - - type: dropdown - id: os - attributes: - label: Operating system - options: - - HassOS - - Debian - - Other Linux - - Proxmox - - UNRAID - - Windows - - Other - validations: - required: true - - type: dropdown - id: install-method - attributes: - label: Install method - options: - - HassOS Addon - - Docker Compose - - Docker CLI - validations: - required: true - - type: dropdown - id: coral - attributes: - label: Coral version - options: - - USB - - PCIe - - M.2 - - Dev Board - - Other - - CPU (no coral) - validations: - required: true - - type: textarea - id: other - attributes: - label: Any other information that may be helpful diff --git a/.github/ISSUE_TEMPLATE/detector_support_request.yml b/.github/ISSUE_TEMPLATE/detector_support_request.yml deleted file mode 100644 index 3f5b34935..000000000 --- a/.github/ISSUE_TEMPLATE/detector_support_request.yml +++ /dev/null @@ -1,84 +0,0 @@ -name: Detector Support Request -description: Support for setting up object detector in Frigate (Coral, OpenVINO, TensorRT, etc.) -title: "[Detector Support]: " -labels: ["support", "triage"] -assignees: [] -body: - - type: textarea - id: description - attributes: - label: Describe the problem you are having - validations: - required: true - - type: input - id: version - attributes: - label: Version - description: Visible on the Debug page in the Web UI - validations: - required: true - - type: textarea - id: config - attributes: - label: Frigate config file - description: This will be automatically formatted into code, so no need for backticks. - render: yaml - validations: - required: true - - type: textarea - id: docker - attributes: - label: docker-compose file or Docker CLI command - description: This will be automatically formatted into code, so no need for backticks. 
- render: yaml - validations: - required: true - - type: textarea - id: logs - attributes: - label: Relevant log output - description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. - render: shell - validations: - required: true - - type: dropdown - id: os - attributes: - label: Operating system - options: - - HassOS - - Debian - - Other Linux - - Proxmox - - UNRAID - - Windows - - Other - validations: - required: true - - type: dropdown - id: install-method - attributes: - label: Install method - options: - - HassOS Addon - - Docker Compose - - Docker CLI - validations: - required: true - - type: dropdown - id: coral - attributes: - label: Coral version - options: - - USB - - PCIe - - M.2 - - Dev Board - - Other - - CPU (no coral) - validations: - required: true - - type: textarea - id: other - attributes: - label: Any other information that may be helpful diff --git a/.github/ISSUE_TEMPLATE/general_support_request.yml b/.github/ISSUE_TEMPLATE/general_support_request.yml deleted file mode 100644 index 7138cd844..000000000 --- a/.github/ISSUE_TEMPLATE/general_support_request.yml +++ /dev/null @@ -1,107 +0,0 @@ -name: General Support Request -description: General support request for Frigate -title: "[Support]: " -labels: ["support", "triage"] -assignees: [] -body: - - type: textarea - id: description - attributes: - label: Describe the problem you are having - validations: - required: true - - type: input - id: version - attributes: - label: Version - description: Visible on the Debug page in the Web UI - validations: - required: true - - type: textarea - id: config - attributes: - label: Frigate config file - description: This will be automatically formatted into code, so no need for backticks. - render: yaml - validations: - required: true - - type: textarea - id: logs - attributes: - label: Relevant log output - description: Please copy and paste any relevant log output. 
This will be automatically formatted into code, so no need for backticks. - render: shell - validations: - required: true - - type: textarea - id: ffprobe - attributes: - label: FFprobe output from your camera - description: Run `ffprobe ` and provide output below - render: shell - validations: - required: true - - type: textarea - id: stats - attributes: - label: Frigate stats - description: Output from frigate's /api/stats endpoint - render: json - - type: dropdown - id: os - attributes: - label: Operating system - options: - - HassOS - - Debian - - Other Linux - - Proxmox - - UNRAID - - Windows - - Other - validations: - required: true - - type: dropdown - id: install-method - attributes: - label: Install method - options: - - HassOS Addon - - Docker Compose - - Docker CLI - validations: - required: true - - type: dropdown - id: coral - attributes: - label: Coral version - options: - - USB - - PCIe - - M.2 - - Dev Board - - Other - - CPU (no coral) - validations: - required: true - - type: dropdown - id: network - attributes: - label: Network connection - options: - - Wired - - Wireless - - Mixed - validations: - required: true - - type: input - id: camera - attributes: - label: Camera make and model - description: Dahua, hikvision, amcrest, reolink, etc and model number - validations: - required: true - - type: textarea - id: other - attributes: - label: Any other information that may be helpful diff --git a/.github/ISSUE_TEMPLATE/hwaccel_support_request.yml b/.github/ISSUE_TEMPLATE/hwaccel_support_request.yml deleted file mode 100644 index 82c64c4c3..000000000 --- a/.github/ISSUE_TEMPLATE/hwaccel_support_request.yml +++ /dev/null @@ -1,96 +0,0 @@ -name: Hardware Acceleration Support Request -description: Support for setting up GPU hardware acceleration in Frigate -title: "[HW Accel Support]: " -labels: ["support", "triage"] -assignees: [] -body: - - type: textarea - id: description - attributes: - label: Describe the problem you are having - validations: - 
required: true - - type: input - id: version - attributes: - label: Version - description: Visible on the Debug page in the Web UI - validations: - required: true - - type: textarea - id: config - attributes: - label: Frigate config file - description: This will be automatically formatted into code, so no need for backticks. - render: yaml - validations: - required: true - - type: textarea - id: docker - attributes: - label: docker-compose file or Docker CLI command - description: This will be automatically formatted into code, so no need for backticks. - render: yaml - validations: - required: true - - type: textarea - id: logs - attributes: - label: Relevant log output - description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. - render: shell - validations: - required: true - - type: textarea - id: ffprobe - attributes: - label: FFprobe output from your camera - description: Run `ffprobe ` and provide output below - render: shell - validations: - required: true - - type: dropdown - id: os - attributes: - label: Operating system - options: - - HassOS - - Debian - - Other Linux - - Proxmox - - UNRAID - - Windows - - Other - validations: - required: true - - type: dropdown - id: install-method - attributes: - label: Install method - options: - - HassOS Addon - - Docker Compose - - Docker CLI - validations: - required: true - - type: dropdown - id: network - attributes: - label: Network connection - options: - - Wired - - Wireless - - Mixed - validations: - required: true - - type: input - id: camera - attributes: - label: Camera make and model - description: Dahua, hikvision, amcrest, reolink, etc and model number - validations: - required: true - - type: textarea - id: other - attributes: - label: Any other information that may be helpful diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index c96102edb..793ea7d42 100644 --- a/.github/actions/setup/action.yml 
+++ b/.github/actions/setup/action.yml @@ -5,17 +5,28 @@ inputs: required: true outputs: image-name: - value: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ steps.create-short-sha.outputs.SHORT_SHA }} + value: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ steps.create-short-sha.outputs.SHORT_SHA }} cache-name: value: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:cache runs: using: "composite" steps: - - name: Remove unnecessary files - run: | - sudo rm -rf /usr/share/dotnet - sudo rm -rf /usr/local/lib/android - sudo rm -rf /opt/ghc + # Stop docker so we can mount more space at /var/lib/docker + - name: Stop docker + run: sudo systemctl stop docker + shell: bash + # This creates a virtual volume at /var/lib/docker to maximize the size + # As of 2/14/2024, this results in 97G for docker images + - name: Maximize build space + uses: easimon/maximize-build-space@master + with: + remove-dotnet: 'true' + remove-android: 'true' + remove-haskell: 'true' + remove-codeql: 'true' + build-mount-path: '/var/lib/docker' + - name: Start docker + run: sudo systemctl start docker shell: bash - id: lowercaseRepo uses: ASzc/change-string-case-action@v5 diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 79e8b2881..db67aa2de 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -34,5 +34,7 @@ updates: directory: "/docs" schedule: interval: daily + allow: + - dependency-name: "@docusaurus/*" open-pull-requests-limit: 10 target-branch: dev diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..db3e5541e --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,31 @@ +## Proposed change + + + +## Type of change + +- [ ] Dependency upgrade +- [ ] Bugfix (non-breaking change which fixes an issue) +- [ ] New feature +- [ ] Breaking change (fix/feature causing existing functionality to break) +- [ ] Code quality improvements to existing code + 
+## Additional information + +- This PR fixes or closes issue: fixes # +- This PR is related to issue: + +## Checklist + + + +- [ ] The code change is tested and works locally. +- [ ] Local tests pass. **Your PR cannot be merged unless tests pass** +- [ ] There is no commented out code in this PR. +- [ ] The code has been formatted using Ruff (`ruff format frigate`) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c6fad8817..bbf47a57d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -37,16 +37,6 @@ jobs: target: frigate tags: ${{ steps.setup.outputs.image-name }}-amd64 cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64 - - name: Build and push TensorRT (x86 GPU) - uses: docker/bake-action@v4 - with: - push: true - targets: tensorrt - files: docker/tensorrt/trt.hcl - set: | - tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt - *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64 - *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max arm64_build: runs-on: ubuntu-latest name: ARM Build @@ -79,7 +69,7 @@ jobs: rpi.tags=${{ steps.setup.outputs.image-name }}-rpi *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64 *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max - - name: Build and push RockChip build + - name: Build and push Rockchip build uses: docker/bake-action@v3 with: push: true @@ -140,6 +130,67 @@ jobs: tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp5 *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5 *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max + amd64_extra_builds: + runs-on: ubuntu-latest + name: AMD64 Extra Build + needs: + - amd64_build + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up QEMU and Buildx + id: setup + uses: ./.github/actions/setup + with: + GITHUB_TOKEN: ${{ 
secrets.GITHUB_TOKEN }} + - name: Build and push TensorRT (x86 GPU) + env: + COMPUTE_LEVEL: "50 60 70 80 90" + uses: docker/bake-action@v4 + with: + push: true + targets: tensorrt + files: docker/tensorrt/trt.hcl + set: | + tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt + *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64 + *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max + combined_extra_builds: + runs-on: ubuntu-latest + name: Combined Extra Builds + needs: + - amd64_build + - arm64_build + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up QEMU and Buildx + id: setup + uses: ./.github/actions/setup + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push Hailo-8l build + uses: docker/bake-action@v4 + with: + push: true + targets: h8l + files: docker/hailo8l/h8l.hcl + set: | + h8l.tags=${{ steps.setup.outputs.image-name }}-h8l + *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l + *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l,mode=max + - name: AMD/ROCm general build + env: + AMDGPU: gfx + HSA_OVERRIDE: 0 + uses: docker/bake-action@v3 + with: + push: true + targets: rocm + files: docker/rocm/rocm.hcl + set: | + rocm.tags=${{ steps.setup.outputs.image-name }}-rocm + *.cache-from=type=gha # The majority of users running arm64 are rpi users, so the rpi # build should be the primary arm64 image assemble_default_build: @@ -154,16 +205,16 @@ jobs: with: string: ${{ github.repository }} - name: Log in to the Container registry - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Create short sha run: echo "SHORT_SHA=${GITHUB_SHA::7}" >> $GITHUB_ENV - - uses: int128/docker-manifest-create-action@v1 + - uses: 
int128/docker-manifest-create-action@v2 with: - tags: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }} - suffixes: | - -amd64 - -rpi + tags: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }} + sources: | + ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }}-amd64 + ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ env.SHORT_SHA }}-rpi diff --git a/.github/workflows/dependabot-auto-merge.yaml b/.github/workflows/dependabot-auto-merge.yaml index a3eecb1d5..1c047c346 100644 --- a/.github/workflows/dependabot-auto-merge.yaml +++ b/.github/workflows/dependabot-auto-merge.yaml @@ -11,7 +11,7 @@ jobs: steps: - name: Get Dependabot metadata id: metadata - uses: dependabot/fetch-metadata@v1 + uses: dependabot/fetch-metadata@v2 with: github-token: ${{ secrets.GITHUB_TOKEN }} - name: Enable auto-merge for Dependabot PRs diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index b86d9b658..2f81175e3 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -51,12 +51,12 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-node@master with: - node-version: 16.x + node-version: 20.x - run: npm install working-directory: ./web - - name: Test - run: npm run test - working-directory: ./web + # - name: Test + # run: npm run test + # working-directory: ./web python_checks: runs-on: ubuntu-latest @@ -65,7 +65,7 @@ jobs: - name: Check out the repository uses: actions/checkout@v4 - name: Set up Python ${{ env.DEFAULT_PYTHON }} - uses: actions/setup-python@v5.0.0 + uses: actions/setup-python@v5.1.0 with: python-version: ${{ env.DEFAULT_PYTHON }} - name: Install requirements diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3eb9785d9..36ff3326c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,22 +16,32 @@ jobs: with: string: ${{ github.repository }} - 
name: Log in to the Container registry - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Create tag variables run: | - BRANCH=$([[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "master" || echo "dev") + BUILD_TYPE=$([[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "stable" || echo "beta") + echo "BUILD_TYPE=${BUILD_TYPE}" >> $GITHUB_ENV echo "BASE=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}" >> $GITHUB_ENV - echo "BUILD_TAG=${BRANCH}-${GITHUB_SHA::7}" >> $GITHUB_ENV + echo "BUILD_TAG=${GITHUB_SHA::7}" >> $GITHUB_ENV echo "CLEAN_VERSION=$(echo ${GITHUB_REF##*/} | tr '[:upper:]' '[:lower:]' | sed 's/^[v]//')" >> $GITHUB_ENV - name: Tag and push the main image run: | VERSION_TAG=${BASE}:${CLEAN_VERSION} + STABLE_TAG=${BASE}:stable PULL_TAG=${BASE}:${BUILD_TAG} docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG} for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant} done + + # stable tag + if [[ "${BUILD_TYPE}" == "stable" ]]; then + docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${STABLE_TAG} + for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do + docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${STABLE_TAG}-${variant} + done + fi diff --git 
a/.github/workflows/stale.yml b/.github/workflows/stale.yml index a9c902f2e..8e7e3223c 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -24,3 +24,18 @@ jobs: operations-per-run: 120 - name: Print outputs run: echo ${{ join(steps.stale.outputs.*, ',') }} + + # clean_ghcr: + # name: Delete outdated dev container images + # runs-on: ubuntu-latest + # steps: + # - name: Delete old images + # uses: snok/container-retention-policy@v2 + # with: + # image-names: dev-* + # cut-off: 60 days ago UTC + # keep-at-least: 5 + # account-type: personal + # token: ${{ secrets.GITHUB_TOKEN }} + # token-type: github-token + diff --git a/.gitignore b/.gitignore index 33ec9ee24..8456d9be0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ .DS_Store -*.pyc +__pycache__ +.mypy_cache *.swp debug .vscode/* @@ -8,7 +9,6 @@ config/* !config/*.example models *.mp4 -*.ts *.db *.csv frigate/version.py @@ -17,4 +17,5 @@ web/node_modules web/coverage core !/web/**/*.ts -.idea/* \ No newline at end of file +.idea/* +.ipynb_checkpoints \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json index fe709c92d..5c858267d 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -3,10 +3,9 @@ "configurations": [ { "name": "Python: Launch Frigate", - "type": "python", + "type": "debugpy", "request": "launch", - "module": "frigate", - "justMyCode": true + "module": "frigate" } ] } diff --git a/CODEOWNERS b/CODEOWNERS index 48b26a359..c37041c2c 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,5 +2,6 @@ /docker/tensorrt/ @madsciencetist @NateMeyer /docker/tensorrt/*arm64* @madsciencetist /docker/tensorrt/*jetson* @madsciencetist - /docker/rockchip/ @MarcA711 +/docker/rocm/ @harakas +/docker/hailo8l/ @spanner3003 diff --git a/Makefile b/Makefile index 2cd831670..b7c6ab821 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,9 @@ default_target: local COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) -VERSION = 0.13.0 +VERSION = 0.15.0 
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD) -CURRENT_UID := $(shell id -u) -CURRENT_GID := $(shell id -g) BOARDS= #Initialized empty include docker/*/*.mk @@ -18,25 +16,38 @@ version: echo 'VERSION = "$(VERSION)-$(COMMIT_HASH)"' > frigate/version.py local: version - docker buildx build --target=frigate --tag frigate:latest --load --file docker/main/Dockerfile . + docker buildx build --target=frigate --file docker/main/Dockerfile . \ + --tag frigate:latest \ + --load amd64: - docker buildx build --platform linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile . + docker buildx build --target=frigate --file docker/main/Dockerfile . \ + --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \ + --platform linux/amd64 arm64: - docker buildx build --platform linux/arm64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile . + docker buildx build --target=frigate --file docker/main/Dockerfile . \ + --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \ + --platform linux/arm64 build: version amd64 arm64 - docker buildx build --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile . + docker buildx build --target=frigate --file docker/main/Dockerfile . \ + --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \ + --platform linux/arm64/v8,linux/amd64 push: push-boards - docker buildx build --push --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) --file docker/main/Dockerfile . + docker buildx build --target=frigate --file docker/main/Dockerfile . 
\ + --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) \ + --platform linux/arm64/v8,linux/amd64 \ + --push run: local docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest run_tests: local - docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest python3 -u -m unittest - docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest python3 -u -m mypy --config-file frigate/mypy.ini frigate + docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \ + python3 -u -m unittest + docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \ + python3 -u -m mypy --config-file frigate/mypy.ini frigate .PHONY: run_tests diff --git a/README.md b/README.md index 8fb3013b9..5b67c86c3 100644 --- a/README.md +++ b/README.md @@ -29,18 +29,22 @@ If you would like to make a donation to support development, please use [Github ## Screenshots -Integration into Home Assistant - +### Live dashboard
- - +Live dashboard
-Also comes with a builtin UI: - +### Streamlined review workflow
- - +Streamlined review workflow
-![Events](docs/static/img/events-ui.png) +### Multi-camera scrubbing +
+Multi-camera scrubbing +
+ +### Built-in mask and zone editor +
+Multi-camera scrubbing +
diff --git a/benchmark.py b/benchmark.py index 8ba22d093..5c0c68419 100755 --- a/benchmark.py +++ b/benchmark.py @@ -4,6 +4,7 @@ from statistics import mean import numpy as np +import frigate.util as util from frigate.config import DetectorTypeEnum from frigate.object_detection import ( ObjectDetectProcess, @@ -90,7 +91,7 @@ edgetpu_process_2 = ObjectDetectProcess( ) for x in range(0, 10): - camera_process = mp.Process( + camera_process = util.Process( target=start, args=(x, 300, detection_queue, events[str(x)]) ) camera_process.daemon = True diff --git a/cspell.json b/cspell.json new file mode 100644 index 000000000..132e51532 --- /dev/null +++ b/cspell.json @@ -0,0 +1,22 @@ +{ + "version": "0.2", + "ignorePaths": [ + "Dockerfile", + "Dockerfile.*", + "CMakeLists.txt", + "*.db", + "node_modules", + "__pycache__", + "dist", + "/audio-labelmap.txt" + ], + "language": "en", + "dictionaryDefinitions": [ + { + "name": "frigate-dictionary", + "path": "./.cspell/frigate-dictionary.txt", + "addWords": true + } + ], + "dictionaries": ["frigate-dictionary"] +} diff --git a/docker/hailo8l/Dockerfile b/docker/hailo8l/Dockerfile new file mode 100644 index 000000000..479ef9b27 --- /dev/null +++ b/docker/hailo8l/Dockerfile @@ -0,0 +1,104 @@ +# syntax=docker/dockerfile:1.6 + +ARG DEBIAN_FRONTEND=noninteractive + +# Build Python wheels +FROM wheels AS h8l-wheels + +COPY docker/main/requirements-wheels.txt /requirements-wheels.txt +COPY docker/hailo8l/requirements-wheels-h8l.txt /requirements-wheels-h8l.txt + +RUN sed -i "/https:\/\//d" /requirements-wheels.txt + +# Create a directory to store the built wheels +RUN mkdir /h8l-wheels + +# Build the wheels +RUN pip3 wheel --wheel-dir=/h8l-wheels -c /requirements-wheels.txt -r /requirements-wheels-h8l.txt + +# Build HailoRT and create wheel +FROM wheels AS build-hailort +ARG TARGETARCH + +SHELL ["/bin/bash", "-c"] + +# Install necessary APT packages +RUN apt-get -qq update \ + && apt-get -qq install -y \ + apt-transport-https \ + 
gnupg \ + wget \ + # the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html + && wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \ + gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \ + && echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \ + tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \ + && apt-get -qq update \ + && apt-get -qq install -y \ + python3.9 \ + python3.9-dev \ + build-essential cmake git \ + && rm -rf /var/lib/apt/lists/* + +# Extract Python version and set environment variables +RUN PYTHON_VERSION=$(python3 --version 2>&1 | awk '{print $2}' | cut -d. -f1,2) && \ + PYTHON_VERSION_NO_DOT=$(echo $PYTHON_VERSION | sed 's/\.//') && \ + echo "PYTHON_VERSION=$PYTHON_VERSION" > /etc/environment && \ + echo "PYTHON_VERSION_NO_DOT=$PYTHON_VERSION_NO_DOT" >> /etc/environment + +# Clone and build HailoRT +RUN . /etc/environment && \ + git clone https://github.com/hailo-ai/hailort.git /opt/hailort && \ + cd /opt/hailort && \ + git checkout v4.18.0 && \ + cmake -H. 
-Bbuild -DCMAKE_BUILD_TYPE=Release -DHAILO_BUILD_PYBIND=1 -DPYBIND11_PYTHON_VERSION=${PYTHON_VERSION} && \ + cmake --build build --config release --target libhailort && \ + cmake --build build --config release --target _pyhailort && \ + cp build/hailort/libhailort/bindings/python/src/_pyhailort.cpython-${PYTHON_VERSION_NO_DOT}-$(if [ $TARGETARCH == "amd64" ]; then echo 'x86_64'; else echo 'aarch64'; fi )-linux-gnu.so hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/ && \ + cp build/hailort/libhailort/src/libhailort.so hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/ + +RUN ls -ahl /opt/hailort/build/hailort/libhailort/src/ +RUN ls -ahl /opt/hailort/hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/ + +# Remove the existing setup.py if it exists in the target directory +RUN rm -f /opt/hailort/hailort/libhailort/bindings/python/platform/setup.py + +# Copy generate_wheel_conf.py and setup.py +COPY docker/hailo8l/pyhailort_build_scripts/generate_wheel_conf.py /opt/hailort/hailort/libhailort/bindings/python/platform/generate_wheel_conf.py +COPY docker/hailo8l/pyhailort_build_scripts/setup.py /opt/hailort/hailort/libhailort/bindings/python/platform/setup.py + +# Run the generate_wheel_conf.py script +RUN python3 /opt/hailort/hailort/libhailort/bindings/python/platform/generate_wheel_conf.py + +# Create a wheel file using pip3 wheel +RUN cd /opt/hailort/hailort/libhailort/bindings/python/platform && \ + python3 setup.py bdist_wheel --dist-dir /hailo-wheels + +# Use deps as the base image +FROM deps AS h8l-frigate + +# Copy the wheels from the wheels stage +COPY --from=h8l-wheels /h8l-wheels /deps/h8l-wheels +COPY --from=build-hailort /hailo-wheels /deps/hailo-wheels +COPY --from=build-hailort /etc/environment /etc/environment +RUN CC=$(python3 -c "import sysconfig; import shlex; cc = sysconfig.get_config_var('CC'); cc_cmd = shlex.split(cc)[0]; print(cc_cmd[:-4] if cc_cmd.endswith('-gcc') else cc_cmd)") && \ + 
echo "CC=$CC" >> /etc/environment + +# Install the wheels +RUN pip3 install -U /deps/h8l-wheels/*.whl +RUN pip3 install -U /deps/hailo-wheels/*.whl + +RUN . /etc/environment && \ + mv /usr/local/lib/python${PYTHON_VERSION}/dist-packages/hailo_platform/pyhailort/libhailort.so /usr/lib/${CC} && \ + cd /usr/lib/${CC}/ && \ + ln -s libhailort.so libhailort.so.4.18.0 + +# Copy base files from the rootfs stage +COPY --from=rootfs / / + +# Set environment variables for Hailo SDK +ENV PATH="/opt/hailort/bin:${PATH}" +ENV LD_LIBRARY_PATH="/usr/lib/$(if [ $TARGETARCH == "amd64" ]; then echo 'x86_64'; else echo 'aarch64'; fi )-linux-gnu:${LD_LIBRARY_PATH}" + +# Set workdir +WORKDIR /opt/frigate/ diff --git a/docker/hailo8l/h8l.hcl b/docker/hailo8l/h8l.hcl new file mode 100644 index 000000000..a1eb82fb5 --- /dev/null +++ b/docker/hailo8l/h8l.hcl @@ -0,0 +1,27 @@ +target wheels { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64","linux/amd64"] + target = "wheels" +} + +target deps { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64","linux/amd64"] + target = "deps" +} + +target rootfs { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64","linux/amd64"] + target = "rootfs" +} + +target h8l { + dockerfile = "docker/hailo8l/Dockerfile" + contexts = { + wheels = "target:wheels" + deps = "target:deps" + rootfs = "target:rootfs" + } + platforms = ["linux/arm64","linux/amd64"] +} diff --git a/docker/hailo8l/h8l.mk b/docker/hailo8l/h8l.mk new file mode 100644 index 000000000..318771802 --- /dev/null +++ b/docker/hailo8l/h8l.mk @@ -0,0 +1,15 @@ +BOARDS += h8l + +local-h8l: version + docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \ + --set h8l.tags=frigate:latest-h8l \ + --load + +build-h8l: version + docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \ + --set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l + +push-h8l: build-h8l + docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \ + --set 
h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l \ + --push \ No newline at end of file diff --git a/docker/hailo8l/pyhailort_build_scripts/generate_wheel_conf.py b/docker/hailo8l/pyhailort_build_scripts/generate_wheel_conf.py new file mode 100644 index 000000000..a0e4987f1 --- /dev/null +++ b/docker/hailo8l/pyhailort_build_scripts/generate_wheel_conf.py @@ -0,0 +1,67 @@ +import json +import os +import platform +import sys +import sysconfig + + +def extract_toolchain_info(compiler): + # Remove the "-gcc" or "-g++" suffix if present + if compiler.endswith("-gcc") or compiler.endswith("-g++"): + compiler = compiler.rsplit("-", 1)[0] + + # Extract the toolchain and ABI part (e.g., "gnu") + toolchain_parts = compiler.split("-") + abi_conventions = next( + (part for part in toolchain_parts if part in ["gnu", "musl", "eabi", "uclibc"]), + "", + ) + + return abi_conventions + + +def generate_wheel_conf(): + conf_file_path = os.path.join( + os.path.abspath(os.path.dirname(__file__)), "wheel_conf.json" + ) + + # Extract current system and Python version information + py_version = f"cp{sys.version_info.major}{sys.version_info.minor}" + arch = platform.machine() + system = platform.system().lower() + libc_version = platform.libc_ver()[1] + + # Get the compiler information + compiler = sysconfig.get_config_var("CC") + abi_conventions = extract_toolchain_info(compiler) + + # Create the new configuration data + new_conf_data = { + "py_version": py_version, + "arch": arch, + "system": system, + "libc_version": libc_version, + "abi": abi_conventions, + "extension": { + "posix": "so", + "nt": "pyd", # Windows + }[os.name], + } + + # If the file exists, load the existing data + if os.path.isfile(conf_file_path): + with open(conf_file_path, "r") as conf_file: + conf_data = json.load(conf_file) + # Update the existing data with the new data + conf_data.update(new_conf_data) + else: + # If the file does not exist, use the new data + conf_data = new_conf_data + + # Write the 
updated data to the file + with open(conf_file_path, "w") as conf_file: + json.dump(conf_data, conf_file, indent=4) + + +if __name__ == "__main__": + generate_wheel_conf() diff --git a/docker/hailo8l/pyhailort_build_scripts/setup.py b/docker/hailo8l/pyhailort_build_scripts/setup.py new file mode 100644 index 000000000..2abe07ee5 --- /dev/null +++ b/docker/hailo8l/pyhailort_build_scripts/setup.py @@ -0,0 +1,111 @@ +import json +import os + +from setuptools import find_packages, setup +from wheel.bdist_wheel import bdist_wheel as orig_bdist_wheel + + +class NonPurePythonBDistWheel(orig_bdist_wheel): + """Makes the wheel platform-dependent so it can be based on the _pyhailort architecture""" + + def finalize_options(self): + orig_bdist_wheel.finalize_options(self) + self.root_is_pure = False + + +def _get_hailort_lib_path(): + lib_filename = "libhailort.so" + lib_path = os.path.join( + os.path.abspath(os.path.dirname(__file__)), + f"hailo_platform/pyhailort/{lib_filename}", + ) + if os.path.exists(lib_path): + print(f"Found libhailort shared library at: {lib_path}") + else: + print(f"Error: libhailort shared library not found at: {lib_path}") + raise FileNotFoundError(f"libhailort shared library not found at: {lib_path}") + return lib_path + + +def _get_pyhailort_lib_path(): + conf_file_path = os.path.join( + os.path.abspath(os.path.dirname(__file__)), "wheel_conf.json" + ) + if not os.path.isfile(conf_file_path): + raise FileNotFoundError(f"Configuration file not found: {conf_file_path}") + + with open(conf_file_path, "r") as conf_file: + content = json.load(conf_file) + py_version = content["py_version"] + arch = content["arch"] + system = content["system"] + extension = content["extension"] + abi = content["abi"] + + # Construct the filename directly + lib_filename = f"_pyhailort.cpython-{py_version.split('cp')[1]}-{arch}-{system}-{abi}.{extension}" + lib_path = os.path.join( + os.path.abspath(os.path.dirname(__file__)), + 
f"hailo_platform/pyhailort/{lib_filename}", + ) + + if os.path.exists(lib_path): + print(f"Found _pyhailort shared library at: {lib_path}") + else: + print(f"Error: _pyhailort shared library not found at: {lib_path}") + raise FileNotFoundError( + f"_pyhailort shared library not found at: {lib_path}" + ) + + return lib_path + + +def _get_package_paths(): + packages = [] + pyhailort_lib = _get_pyhailort_lib_path() + hailort_lib = _get_hailort_lib_path() + if pyhailort_lib: + packages.append(pyhailort_lib) + if hailort_lib: + packages.append(hailort_lib) + packages.append(os.path.abspath("hailo_tutorials/notebooks/*")) + packages.append(os.path.abspath("hailo_tutorials/hefs/*")) + return packages + + +if __name__ == "__main__": + setup( + author="Hailo team", + author_email="contact@hailo.ai", + cmdclass={ + "bdist_wheel": NonPurePythonBDistWheel, + }, + description="HailoRT", + entry_points={ + "console_scripts": [ + "hailo=hailo_platform.tools.hailocli.main:main", + ] + }, + install_requires=[ + "argcomplete", + "contextlib2", + "future", + "netaddr", + "netifaces", + "verboselogs", + "numpy==1.23.3", + ], + name="hailort", + package_data={ + "hailo_platform": _get_package_paths(), + }, + packages=find_packages(), + platforms=[ + "linux_x86_64", + "linux_aarch64", + "win_amd64", + ], + url="https://hailo.ai/", + version="4.17.0", + zip_safe=False, + ) diff --git a/docker/hailo8l/requirements-wheels-h8l.txt b/docker/hailo8l/requirements-wheels-h8l.txt new file mode 100644 index 000000000..55b67e096 --- /dev/null +++ b/docker/hailo8l/requirements-wheels-h8l.txt @@ -0,0 +1,12 @@ +appdirs==1.4.4 +argcomplete==2.0.0 +contextlib2==0.6.0.post1 +distlib==0.3.6 +filelock==3.8.0 +future==0.18.2 +importlib-metadata==5.1.0 +importlib-resources==5.1.2 +netaddr==0.8.0 +netifaces==0.10.9 +verboselogs==1.7 +virtualenv==20.17.0 \ No newline at end of file diff --git a/docker/hailo8l/user_installation.sh b/docker/hailo8l/user_installation.sh new file mode 100644 index 
000000000..734c640f9 --- /dev/null +++ b/docker/hailo8l/user_installation.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Update package list and install dependencies +sudo apt-get update +sudo apt-get install -y build-essential cmake git wget + +arch=$(uname -m) + +if [[ $arch == "x86_64" ]]; then + sudo apt install -y linux-headers-$(uname -r); +else + sudo apt install -y linux-modules-extra-$(uname -r); +fi + +# Clone the HailoRT driver repository +git clone --depth 1 --branch v4.18.0 https://github.com/hailo-ai/hailort-drivers.git + +# Build and install the HailoRT driver +cd hailort-drivers/linux/pcie +sudo make all +sudo make install + +# Load the Hailo PCI driver +sudo modprobe hailo_pci + +if [ $? -ne 0 ]; then + echo "Unable to load hailo_pci module, common reasons for this are:" + echo "- Key was rejected by service: Secure Boot is enabling disallowing install." + echo "- Permissions are not setup correctly." + exit 1 +fi + +# Download and install the firmware +cd ../../ +./download_firmware.sh + +# verify the firmware folder is present +if [ ! -d /lib/firmware/hailo ]; then + sudo mkdir /lib/firmware/hailo +fi +sudo mv hailo8_fw.4.17.0.bin /lib/firmware/hailo/hailo8_fw.bin + +# Install udev rules +sudo cp ./linux/pcie/51-hailo-udev.rules /etc/udev/rules.d/ +sudo udevadm control --reload-rules && sudo udevadm trigger + +echo "HailoRT driver installation complete." +echo "reboot your system to load the firmware!" 
diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index e35eac191..ac4d277bd 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -30,18 +30,31 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \ --mount=type=cache,target=/root/.ccache \ /deps/build_nginx.sh +FROM wget AS sqlite-vec +ARG DEBIAN_FRONTEND + +# Build sqlite_vec from source +COPY docker/main/build_sqlite_vec.sh /deps/build_sqlite_vec.sh +RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \ + --mount=type=bind,source=docker/main/build_sqlite_vec.sh,target=/deps/build_sqlite_vec.sh \ + --mount=type=cache,target=/root/.ccache \ + /deps/build_sqlite_vec.sh + FROM scratch AS go2rtc ARG TARGETARCH WORKDIR /rootfs/usr/local/go2rtc/bin -ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.8.4/go2rtc_linux_${TARGETARCH}" go2rtc +ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.2/go2rtc_linux_${TARGETARCH}" go2rtc +FROM wget AS tempio +ARG TARGETARCH +RUN --mount=type=bind,source=docker/main/install_tempio.sh,target=/deps/install_tempio.sh \ + /deps/install_tempio.sh #### # # OpenVino Support # # 1. Download and convert a model from Intel's Public Open Model Zoo -# 2. 
Build libUSB without udev to handle NCS2 enumeration # #### # Download and Convert OpenVino model @@ -51,17 +64,24 @@ ARG DEBIAN_FRONTEND # Install OpenVino Runtime and Dev library COPY docker/main/requirements-ov.txt /requirements-ov.txt RUN apt-get -qq update \ - && apt-get -qq install -y wget python3 python3-distutils \ + && apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \ && wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ && python3 get-pip.py "pip" \ && pip install -r /requirements-ov.txt # Get OpenVino Model -RUN mkdir /models \ - && cd /models && omz_downloader --name ssdlite_mobilenet_v2 \ - && cd /models && omz_converter --name ssdlite_mobilenet_v2 --precision FP16 - +RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \ + mkdir /models && cd /models \ + && wget http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \ + && tar -xvf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \ + && python3 /build_ov_model.py +#### +# +# Coral Compatibility +# +# Builds libusb without udev. Needed for synology and other devices with USB coral +#### # libUSB - No Udev FROM wget as libusb-build ARG TARGETARCH @@ -97,11 +117,12 @@ RUN wget -qO edgetpu_model.tflite https://github.com/google-coral/test_data/raw/ RUN wget -qO cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite COPY labelmap.txt . 
# Copy OpenVino model -COPY --from=ov-converter /models/public/ssdlite_mobilenet_v2/FP16 openvino-model +COPY --from=ov-converter /models/ssdlite_mobilenet_v2.xml openvino-model/ +COPY --from=ov-converter /models/ssdlite_mobilenet_v2.bin openvino-model/ RUN wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O openvino-model/coco_91cl_bkgr.txt && \ sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt # Get Audio Model and labels -RUN wget -qO cpu_audio_model.tflite https://tfhub.dev/google/lite-model/yamnet/classification/tflite/1?lite-format=tflite +RUN wget -qO - https://www.kaggle.com/api/v1/models/google/yamnet/tfLite/classification-tflite/1/download | tar xvz && mv 1.tflite cpu_audio_model.tflite COPY audio-labelmap.txt . @@ -137,6 +158,8 @@ RUN apt-get -qq update \ gfortran openexr libatlas-base-dev libssl-dev\ libtbb2 libtbb-dev libdc1394-22-dev libopenexr-dev \ libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \ + # sqlite3 dependencies + tclsh \ # scipy dependencies gcc gfortran libopenblas-dev liblapack-dev && \ rm -rf /var/lib/apt/lists/* @@ -150,6 +173,10 @@ RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ COPY docker/main/requirements.txt /requirements.txt RUN pip3 install -r /requirements.txt +# Build pysqlite3 from source +COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh +RUN /build_pysqlite3.sh + COPY docker/main/requirements-wheels.txt /requirements-wheels.txt RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt @@ -157,8 +184,10 @@ RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt # Collect deps in a single layer FROM scratch AS deps-rootfs COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/ +COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/ COPY --from=go2rtc /rootfs/ / COPY --from=libusb-build /usr/local/lib /usr/local/lib +COPY --from=tempio /rootfs/ / COPY --from=s6-overlay /rootfs/ / COPY --from=models /rootfs/ / COPY 
docker/main/rootfs/ / @@ -176,7 +205,14 @@ ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn ENV NVIDIA_VISIBLE_DEVICES=all ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility" -ENV PATH="/usr/lib/btbn-ffmpeg/bin:/usr/local/go2rtc/bin:/usr/local/nginx/sbin:${PATH}" +# Disable tokenizer parallelism warning +# https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning/72926996#72926996 +ENV TOKENIZERS_PARALLELISM=true +# https://github.com/huggingface/transformers/issues/27214 +ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1 + +ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}" +ENV LIBAVFORMAT_VERSION_MAJOR=60 # Install dependencies RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \ @@ -191,17 +227,18 @@ COPY --from=deps-rootfs / / RUN ldconfig EXPOSE 5000 -EXPOSE 1935 EXPOSE 8554 EXPOSE 8555/tcp 8555/udp # Configure logging to prepend timestamps, log to stdout, keep 0 archives and rotate on 10MB ENV S6_LOGGING_SCRIPT="T 1 n0 s10000000 T" +# Do not fail on long-running download scripts +ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 ENTRYPOINT ["/init"] CMD [] -HEALTHCHECK --start-period=120s --start-interval=5s --interval=15s --timeout=5s --retries=3 \ +HEALTHCHECK --start-period=300s --start-interval=5s --interval=15s --timeout=5s --retries=3 \ CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1 # Frigate deps with Node.js and NPM for devcontainer @@ -232,12 +269,14 @@ RUN apt-get update \ RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \ pip3 install -r requirements-dev.txt +HEALTHCHECK NONE + CMD ["sleep", "infinity"] # Frigate web build # This should be architecture agnostic, so speed up the build on multiarch by not using QEMU. 
-FROM --platform=$BUILDPLATFORM node:16 AS web-build +FROM --platform=$BUILDPLATFORM node:20 AS web-build WORKDIR /work COPY web/package.json web/package-lock.json ./ diff --git a/docker/main/build_nginx.sh b/docker/main/build_nginx.sh index fd604c122..e97f6bbe0 100755 --- a/docker/main/build_nginx.sh +++ b/docker/main/build_nginx.sh @@ -5,7 +5,8 @@ set -euxo pipefail NGINX_VERSION="1.25.3" VOD_MODULE_VERSION="1.31" SECURE_TOKEN_MODULE_VERSION="1.5" -RTMP_MODULE_VERSION="1.2.2" +SET_MISC_MODULE_VERSION="v0.33" +NGX_DEVEL_KIT_VERSION="v0.3.3" cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list @@ -49,10 +50,16 @@ mkdir /tmp/nginx-secure-token-module wget https://github.com/kaltura/nginx-secure-token-module/archive/refs/tags/${SECURE_TOKEN_MODULE_VERSION}.tar.gz tar -zxf ${SECURE_TOKEN_MODULE_VERSION}.tar.gz -C /tmp/nginx-secure-token-module --strip-components=1 rm ${SECURE_TOKEN_MODULE_VERSION}.tar.gz -mkdir /tmp/nginx-rtmp-module -wget -nv https://github.com/arut/nginx-rtmp-module/archive/refs/tags/v${RTMP_MODULE_VERSION}.tar.gz -tar -zxf v${RTMP_MODULE_VERSION}.tar.gz -C /tmp/nginx-rtmp-module --strip-components=1 -rm v${RTMP_MODULE_VERSION}.tar.gz + +mkdir /tmp/ngx_devel_kit +wget https://github.com/vision5/ngx_devel_kit/archive/refs/tags/${NGX_DEVEL_KIT_VERSION}.tar.gz +tar -zxf ${NGX_DEVEL_KIT_VERSION}.tar.gz -C /tmp/ngx_devel_kit --strip-components=1 +rm ${NGX_DEVEL_KIT_VERSION}.tar.gz + +mkdir /tmp/nginx-set-misc-module +wget https://github.com/openresty/set-misc-nginx-module/archive/refs/tags/${SET_MISC_MODULE_VERSION}.tar.gz +tar -zxf ${SET_MISC_MODULE_VERSION}.tar.gz -C /tmp/nginx-set-misc-module --strip-components=1 +rm ${SET_MISC_MODULE_VERSION}.tar.gz cd /tmp/nginx @@ -60,10 +67,13 @@ cd /tmp/nginx --with-file-aio \ --with-http_sub_module \ --with-http_ssl_module \ + --with-http_auth_request_module \ + --with-http_realip_module \ --with-threads \ + 
--add-module=../ngx_devel_kit \ + --add-module=../nginx-set-misc-module \ --add-module=../nginx-vod-module \ --add-module=../nginx-secure-token-module \ - --add-module=../nginx-rtmp-module \ --with-cc-opt="-O3 -Wno-error=implicit-fallthrough" make CC="ccache gcc" -j$(nproc) && make install diff --git a/docker/main/build_ov_model.py b/docker/main/build_ov_model.py new file mode 100644 index 000000000..9e110ad9f --- /dev/null +++ b/docker/main/build_ov_model.py @@ -0,0 +1,11 @@ +import openvino as ov +from openvino.tools import mo + +ov_model = mo.convert_model( + "/models/ssdlite_mobilenet_v2_coco_2018_05_09/frozen_inference_graph.pb", + compress_to_fp16=True, + transformations_config="/usr/local/lib/python3.9/dist-packages/openvino/tools/mo/front/tf/ssd_v2_support.json", + tensorflow_object_detection_api_pipeline_config="/models/ssdlite_mobilenet_v2_coco_2018_05_09/pipeline.config", + reverse_input_channels=True, +) +ov.save_model(ov_model, "/models/ssdlite_mobilenet_v2.xml") diff --git a/docker/main/build_pysqlite3.sh b/docker/main/build_pysqlite3.sh new file mode 100755 index 000000000..6375b33fa --- /dev/null +++ b/docker/main/build_pysqlite3.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -euxo pipefail + +SQLITE3_VERSION="96c92aba00c8375bc32fafcdf12429c58bd8aabfcadab6683e35bbb9cdebf19e" # 3.46.0 +PYSQLITE3_VERSION="0.5.3" + +# Fetch the source code for the latest release of Sqlite. +if [[ ! -d "sqlite" ]]; then + wget https://www.sqlite.org/src/tarball/sqlite.tar.gz?r=${SQLITE3_VERSION} -O sqlite.tar.gz + tar xzf sqlite.tar.gz + cd sqlite/ + LIBS="-lm" ./configure --disable-tcl --enable-tempstore=always + make sqlite3.c + cd ../ + rm sqlite.tar.gz +fi + +# Grab the pysqlite3 source code. +if [[ ! -d "./pysqlite3" ]]; then + git clone https://github.com/coleifer/pysqlite3.git +fi + +cd pysqlite3/ +git checkout ${PYSQLITE3_VERSION} + +# Copy the sqlite3 source amalgamation into the pysqlite3 directory so we can +# create a self-contained extension module. 
+cp "../sqlite/sqlite3.c" ./ +cp "../sqlite/sqlite3.h" ./ + +# Create the wheel and put it in the /wheels dir. +sed -i "s|name='pysqlite3-binary'|name=PACKAGE_NAME|g" setup.py +python3 setup.py build_static +pip3 wheel . -w /wheels diff --git a/docker/main/build_sqlite_vec.sh b/docker/main/build_sqlite_vec.sh new file mode 100755 index 000000000..3dc28bcbf --- /dev/null +++ b/docker/main/build_sqlite_vec.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -euxo pipefail + +SQLITE_VEC_VERSION="0.1.3" + +cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list +sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list +apt-get update +apt-get -yqq build-dep sqlite3 gettext git + +mkdir /tmp/sqlite_vec +# Grab the sqlite_vec source code. +wget -nv https://github.com/asg017/sqlite-vec/archive/refs/tags/v${SQLITE_VEC_VERSION}.tar.gz +tar -zxf v${SQLITE_VEC_VERSION}.tar.gz -C /tmp/sqlite_vec + +cd /tmp/sqlite_vec/sqlite-vec-${SQLITE_VEC_VERSION} + +mkdir -p vendor +wget -O sqlite-amalgamation.zip https://www.sqlite.org/2024/sqlite-amalgamation-3450300.zip +unzip sqlite-amalgamation.zip +mv sqlite-amalgamation-3450300/* vendor/ +rmdir sqlite-amalgamation-3450300 +rm sqlite-amalgamation.zip + +# build loadable module +make loadable + +# install it +cp dist/vec0.* /usr/local/lib + diff --git a/docker/main/install_deps.sh b/docker/main/install_deps.sh index 43fff479b..c63c015d3 100755 --- a/docker/main/install_deps.sh +++ b/docker/main/install_deps.sh @@ -13,6 +13,7 @@ apt-get -qq install --no-install-recommends -y \ python3.9 \ python3-pip \ curl \ + lsof \ jq \ nethogs @@ -39,39 +40,65 @@ apt-get -qq install --no-install-recommends --no-install-suggests -y \ # btbn-ffmpeg -> amd64 if [[ "${TARGETARCH}" == "amd64" ]]; then - mkdir -p /usr/lib/btbn-ffmpeg - wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz" - tar -xf btbn-ffmpeg.tar.xz -C 
/usr/lib/btbn-ffmpeg --strip-components 1 - rm -rf btbn-ffmpeg.tar.xz /usr/lib/btbn-ffmpeg/doc /usr/lib/btbn-ffmpeg/bin/ffplay + mkdir -p /usr/lib/ffmpeg/5.0 + mkdir -p /usr/lib/ffmpeg/7.0 + wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz" + tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 + rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay + wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-30-15-36/ffmpeg-n7.1-linux64-gpl-7.1.tar.xz" + tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 + rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay fi # ffmpeg -> arm64 if [[ "${TARGETARCH}" == "arm64" ]]; then - mkdir -p /usr/lib/btbn-ffmpeg - wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz" - tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/btbn-ffmpeg --strip-components 1 - rm -rf btbn-ffmpeg.tar.xz /usr/lib/btbn-ffmpeg/doc /usr/lib/btbn-ffmpeg/bin/ffplay + mkdir -p /usr/lib/ffmpeg/5.0 + mkdir -p /usr/lib/ffmpeg/7.0 + wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz" + tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 + rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay + wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-30-15-36/ffmpeg-n7.1-linuxarm64-gpl-7.1.tar.xz" + tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 + rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay fi # arch specific packages if [[ "${TARGETARCH}" == "amd64" 
]]; then - # use debian bookworm for hwaccel packages + # use debian bookworm for amd / intel-i965 driver packages echo 'deb https://deb.debian.org/debian bookworm main contrib non-free' >/etc/apt/sources.list.d/debian-bookworm.list apt-get -qq update apt-get -qq install --no-install-recommends --no-install-suggests -y \ - intel-opencl-icd \ - mesa-va-drivers radeontop libva-drm2 intel-media-va-driver-non-free i965-va-driver libmfx1 intel-gpu-tools + i965-va-driver intel-gpu-tools onevpl-tools \ + libva-drm2 \ + mesa-va-drivers radeontop + # something about this dependency requires it to be installed in a separate call rather than in the line above apt-get -qq install --no-install-recommends --no-install-suggests -y \ i965-va-driver-shaders + rm -f /etc/apt/sources.list.d/debian-bookworm.list + + # use intel apt intel packages + wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg + echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list + apt-get -qq update + apt-get -qq install --no-install-recommends --no-install-suggests -y \ + intel-opencl-icd intel-level-zero-gpu intel-media-va-driver-non-free \ + libmfx1 libmfxgen1 libvpl2 + + rm -f /usr/share/keyrings/intel-graphics.gpg + rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list fi if [[ "${TARGETARCH}" == "arm64" ]]; then apt-get -qq install --no-install-recommends --no-install-suggests -y \ - libva-drm2 mesa-va-drivers + libva-drm2 mesa-va-drivers radeontop fi +# install vulkan +apt-get -qq install --no-install-recommends --no-install-suggests -y \ + libvulkan1 mesa-vulkan-drivers + apt-get purge gnupg apt-transport-https xz-utils -y apt-get clean autoclean -y apt-get autoremove --purge -y diff --git a/docker/main/install_tempio.sh b/docker/main/install_tempio.sh new file mode 100755 index 000000000..743a12288 
--- /dev/null +++ b/docker/main/install_tempio.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -euxo pipefail + +tempio_version="2021.09.0" + +if [[ "${TARGETARCH}" == "amd64" ]]; then + arch="amd64" +elif [[ "${TARGETARCH}" == "arm64" ]]; then + arch="aarch64" +fi + +mkdir -p /rootfs/usr/local/tempio/bin + +wget -q -O /rootfs/usr/local/tempio/bin/tempio "https://github.com/home-assistant/tempio/releases/download/${tempio_version}/tempio_${arch}" +chmod 755 /rootfs/usr/local/tempio/bin/tempio diff --git a/docker/main/requirements-ov.txt b/docker/main/requirements-ov.txt index 20e5a29c1..6fd1ca55d 100644 --- a/docker/main/requirements-ov.txt +++ b/docker/main/requirements-ov.txt @@ -1,5 +1,3 @@ numpy -# Openvino Library - Custom built with MYRIAD support -openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64' -openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64' -openvino-dev[tensorflow2] @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino_dev-2022.3.1-1-py3-none-any.whl +tensorflow +openvino-dev>=2024.0.0 \ No newline at end of file diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt index f4167744e..11ad94f3f 100644 --- a/docker/main/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -1,29 +1,45 @@ click == 8.1.* -Flask == 2.3.* +# FastAPI +starlette-context == 0.3.6 +fastapi == 0.115.0 +uvicorn == 0.30.* +slowapi == 0.1.9 imutils == 0.5.* -matplotlib == 3.7.* +joserfc == 1.0.* +pathvalidate == 3.2.* +markupsafe == 2.1.* mypy == 1.6.1 -numpy == 1.23.* +numpy == 1.26.* onvif_zeep == 0.2.12 -opencv-python-headless == 4.7.0.* -paho-mqtt == 1.6.* +opencv-python-headless == 4.9.0.* +paho-mqtt == 2.1.* +pandas == 2.2.* peewee == 3.17.* 
-peewee_migrate == 1.12.* +peewee_migrate == 1.13.* psutil == 5.9.* -pydantic == 1.10.* +pydantic == 2.8.* git+https://github.com/fbcotter/py3nvml#egg=py3nvml -PyYAML == 6.0.* -pytz == 2023.3.post1 +pytz == 2024.1 +pyzmq == 26.2.* ruamel.yaml == 0.18.* tzlocal == 5.2 -types-PyYAML == 6.0.* -requests == 2.31.* -types-requests == 2.31.* -scipy == 1.11.* +requests == 2.32.* +types-requests == 2.32.* +scipy == 1.13.* norfair == 2.2.* setproctitle == 1.3.* ws4py == 0.5.* unidecode == 1.3.* -# Openvino Library - Custom built with MYRIAD support -openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64' -openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64' +# OpenVino & ONNX +openvino == 2024.3.* +onnxruntime-openvino == 1.19.* ; platform_machine == 'x86_64' +onnxruntime == 1.19.* ; platform_machine == 'aarch64' +# Embeddings +transformers == 4.45.* +# Generative AI +google-generativeai == 0.8.* +ollama == 0.3.* +openai == 1.51.* +# push notifications +py-vapid == 1.9.* +pywebpush == 2.0.* diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/consumer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/consumer-for new file mode 100644 index 000000000..09a147a5f --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/consumer-for @@ -0,0 +1 @@ +certsync diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/dependencies.d/log-prepare b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/dependencies.d/log-prepare new file mode 100644 index 000000000..e69de29bb diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/pipeline-name b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/pipeline-name new file mode 100644 index 000000000..204da2771 --- /dev/null +++ 
b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/pipeline-name @@ -0,0 +1 @@ +certsync-pipeline diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/run new file mode 100755 index 000000000..7d66e2c81 --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/run @@ -0,0 +1,4 @@ +#!/command/with-contenv bash +# shellcheck shell=bash + +exec logutil-service /dev/shm/logs/certsync diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/type new file mode 100644 index 000000000..5883cff0c --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync-log/type @@ -0,0 +1 @@ +longrun diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/dependencies.d/nginx b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/dependencies.d/nginx new file mode 100644 index 000000000..e69de29bb diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/finish b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/finish new file mode 100755 index 000000000..3450034b2 --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/finish @@ -0,0 +1,30 @@ +#!/command/with-contenv bash +# shellcheck shell=bash +# Take down the S6 supervision tree when the service fails + +set -o errexit -o nounset -o pipefail + +# Logs should be sent to stdout so that s6 can collect them + +declare exit_code_container +exit_code_container=$(cat /run/s6-linux-init-container-results/exitcode) +readonly exit_code_container +readonly exit_code_service="${1}" +readonly exit_code_signal="${2}" +readonly service="CERTSYNC" + +echo "[INFO] Service ${service} exited with code ${exit_code_service} (by signal ${exit_code_signal})" + +if [[ "${exit_code_service}" -eq 256 ]]; then + if [[ "${exit_code_container}" -eq 0 ]]; then + echo $((128 + exit_code_signal)) >/run/s6-linux-init-container-results/exitcode + fi + if [[ 
"${exit_code_signal}" -eq 15 ]]; then + exec /run/s6/basedir/bin/halt + fi +elif [[ "${exit_code_service}" -ne 0 ]]; then + if [[ "${exit_code_container}" -eq 0 ]]; then + echo "${exit_code_service}" >/run/s6-linux-init-container-results/exitcode + fi + exec /run/s6/basedir/bin/halt +fi diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/producer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/producer-for new file mode 100644 index 000000000..886683fd9 --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/producer-for @@ -0,0 +1 @@ +certsync-log diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run new file mode 100755 index 000000000..af3bc04de --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/run @@ -0,0 +1,58 @@ +#!/command/with-contenv bash +# shellcheck shell=bash +# Start the CERTSYNC service + +set -o errexit -o nounset -o pipefail + +# Logs should be sent to stdout so that s6 can collect them + +echo "[INFO] Starting certsync..." + +lefile="/etc/letsencrypt/live/frigate/fullchain.pem" + +tls_enabled=`python3 /usr/local/nginx/get_tls_settings.py | jq -r .enabled` + +while true +do + if [[ "$tls_enabled" == 'false' ]]; then + sleep 9999 + continue + fi + + if [ ! 
-e $lefile ] + then + echo "[ERROR] TLS certificate does not exist: $lefile" + fi + + leprint=`openssl x509 -in $lefile -fingerprint -noout 2>&1 || echo 'failed'` + + case "$leprint" in + *Fingerprint*) + ;; + *) + echo "[ERROR] Missing fingerprint from $lefile" + ;; + esac + + liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:8971 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'` + + case "$liveprint" in + *Fingerprint*) + ;; + *) + echo "[ERROR] Missing fingerprint from current nginx TLS cert" + ;; + esac + + if [[ "$leprint" != "failed" && "$liveprint" != "failed" && "$leprint" != "$liveprint" ]] + then + echo "[INFO] Reloading nginx to refresh TLS certificate" + echo "$lefile: $leprint" + /usr/local/nginx/sbin/nginx -s reload + fi + + sleep 60 + +done + +exit 0 \ No newline at end of file diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/timeout-kill b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/timeout-kill new file mode 100644 index 000000000..3a05c8b3e --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/timeout-kill @@ -0,0 +1 @@ +30000 diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/type new file mode 100644 index 000000000..5883cff0c --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/certsync/type @@ -0,0 +1 @@ +longrun diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run index f2cc40fcf..eacce294f 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run @@ -16,8 +16,8 @@ function migrate_db_path() { if [[ -f "${config_file_yaml}" ]]; then config_file="${config_file_yaml}" elif [[ ! 
-f "${config_file}" ]]; then - echo "[ERROR] Frigate config file not found" - return 1 + # Frigate will create the config file on startup + return 0 fi unset config_file_yaml @@ -44,8 +44,6 @@ function migrate_db_path() { echo "[INFO] Preparing Frigate..." migrate_db_path -export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+') - echo "[INFO] Starting Frigate..." cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate" diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run index 851d78799..9c4922d81 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run @@ -43,8 +43,6 @@ function get_ip_and_port_from_supervisor() { export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}" } -export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+') - if [[ -f "/dev/shm/go2rtc.yaml" ]]; then echo "[INFO] Removing stale config from last run..." 
rm /dev/shm/go2rtc.yaml diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run index 0d8d73ce2..c493e320e 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run @@ -4,7 +4,7 @@ set -o errexit -o nounset -o pipefail -dirs=(/dev/shm/logs/frigate /dev/shm/logs/go2rtc /dev/shm/logs/nginx) +dirs=(/dev/shm/logs/frigate /dev/shm/logs/go2rtc /dev/shm/logs/nginx /dev/shm/logs/certsync) mkdir -p "${dirs[@]}" chown nobody:nogroup "${dirs[@]}" diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/data/check b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/data/check new file mode 100755 index 000000000..8307a7956 --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/data/check @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +set -e + +# Wait for PID file to exist. +while ! test -f /run/nginx.pid; do sleep 1; done \ No newline at end of file diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/notification-fd b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/notification-fd new file mode 100644 index 000000000..e440e5c84 --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/notification-fd @@ -0,0 +1 @@ +3 \ No newline at end of file diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run index 2754c0d09..677126a6d 100755 --- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run @@ -8,6 +8,84 @@ set -o errexit -o nounset -o pipefail echo "[INFO] Starting NGINX..." 
+# Taken from https://github.com/felipecrs/cgroup-scripts/commits/master/get_cpus.sh +function get_cpus() { + local quota="" + local period="" + + if [ -f /sys/fs/cgroup/cgroup.controllers ]; then + if [ -f /sys/fs/cgroup/cpu.max ]; then + read -r quota period &2 + fi + else + if [ -f /sys/fs/cgroup/cpu/cpu.cfs_quota_us ] && [ -f /sys/fs/cgroup/cpu/cpu.cfs_period_us ]; then + quota=$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us) + period=$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us) + + if [ "$quota" = "-1" ]; then + quota="" + period="" + fi + else + echo "[WARN] /sys/fs/cgroup/cpu/cpu.cfs_quota_us or /sys/fs/cgroup/cpu/cpu.cfs_period_us not found. Falling back to /proc/cpuinfo." >&2 + fi + fi + + local cpus + if [ "${period}" != "0" ] && [ -n "${quota}" ] && [ -n "${period}" ]; then + cpus=$((quota / period)) + if [ "$cpus" -eq 0 ]; then + cpus=1 + fi + else + cpus=$(grep -c ^processor /proc/cpuinfo) + fi + + printf '%s' "$cpus" +} + +function set_worker_processes() { + # Capture number of assigned CPUs to calculate worker processes + local cpus + + cpus=$(get_cpus) + if [[ "${cpus}" -gt 4 ]]; then + cpus=4 + fi + + # we need to catch any errors because sed will fail if user has bind mounted a custom nginx file + sed -i "s/worker_processes auto;/worker_processes ${cpus};/" /usr/local/nginx/conf/nginx.conf || true +} + +set_worker_processes + +# ensure the directory for ACME challenges exists +mkdir -p /etc/letsencrypt/www + +# Create self signed certs if needed +letsencrypt_path=/etc/letsencrypt/live/frigate +mkdir -p $letsencrypt_path + +if [ ! \( -f "$letsencrypt_path/privkey.pem" -a -f "$letsencrypt_path/fullchain.pem" \) ]; then + echo "[INFO] No TLS certificate found. Generating a self signed certificate..." 
+ openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \ + -subj "/O=FRIGATE DEFAULT CERT/CN=*" \ + -keyout "$letsencrypt_path/privkey.pem" -out "$letsencrypt_path/fullchain.pem" 2>/dev/null +fi + +# build templates for optional TLS support +python3 /usr/local/nginx/get_tls_settings.py | \ + tempio -template /usr/local/nginx/templates/listen.gotmpl \ + -out /usr/local/nginx/conf/listen.conf + # Replace the bash process with the NGINX process, redirecting stderr to stdout exec 2>&1 -exec nginx +exec \ + s6-notifyoncheck -t 30000 -n 1 \ + nginx diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/certsync-pipeline b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/certsync-pipeline new file mode 100644 index 000000000..e69de29bb diff --git a/docker/main/rootfs/labelmap/coco-80.txt b/docker/main/rootfs/labelmap/coco-80.txt new file mode 100644 index 000000000..79e017175 --- /dev/null +++ b/docker/main/rootfs/labelmap/coco-80.txt @@ -0,0 +1,80 @@ +0 person +1 bicycle +2 car +3 motorcycle +4 airplane +5 car +6 train +7 car +8 boat +9 traffic light +10 fire hydrant +11 stop sign +12 parking meter +13 bench +14 bird +15 cat +16 dog +17 horse +18 sheep +19 cow +20 elephant +21 bear +22 zebra +23 giraffe +24 backpack +25 umbrella +26 handbag +27 tie +28 suitcase +29 frisbee +30 skis +31 snowboard +32 sports ball +33 kite +34 baseball bat +35 baseball glove +36 skateboard +37 surfboard +38 tennis racket +39 bottle +40 wine glass +41 cup +42 fork +43 knife +44 spoon +45 bowl +46 banana +47 apple +48 sandwich +49 orange +50 broccoli +51 carrot +52 hot dog +53 pizza +54 donut +55 cake +56 chair +57 couch +58 potted plant +59 bed +60 dining table +61 toilet +62 tv +63 laptop +64 mouse +65 remote +66 keyboard +67 cell phone +68 microwave +69 oven +70 toaster +71 sink +72 refrigerator +73 book +74 clock +75 vase +76 scissors +77 teddy bear +78 hair drier +79 toothbrush \ No newline at end of file diff --git a/docker/main/rootfs/labelmap/coco.txt 
b/docker/main/rootfs/labelmap/coco.txt new file mode 100644 index 000000000..79fff1772 --- /dev/null +++ b/docker/main/rootfs/labelmap/coco.txt @@ -0,0 +1,91 @@ +0 person +1 bicycle +2 car +3 motorcycle +4 airplane +5 bus +6 train +7 car +8 boat +9 traffic light +10 fire hydrant +11 street sign +12 stop sign +13 parking meter +14 bench +15 bird +16 cat +17 dog +18 horse +19 sheep +20 cow +21 elephant +22 bear +23 zebra +24 giraffe +25 hat +26 backpack +27 umbrella +28 shoe +29 eye glasses +30 handbag +31 tie +32 suitcase +33 frisbee +34 skis +35 snowboard +36 sports ball +37 kite +38 baseball bat +39 baseball glove +40 skateboard +41 surfboard +42 tennis racket +43 bottle +44 plate +45 wine glass +46 cup +47 fork +48 knife +49 spoon +50 bowl +51 banana +52 apple +53 sandwich +54 orange +55 broccoli +56 carrot +57 hot dog +58 pizza +59 donut +60 cake +61 chair +62 couch +63 potted plant +64 bed +65 mirror +66 dining table +67 window +68 desk +69 toilet +70 door +71 tv +72 laptop +73 mouse +74 remote +75 keyboard +76 cell phone +77 microwave +78 oven +79 toaster +80 sink +81 refrigerator +82 blender +83 book +84 clock +85 vase +86 scissors +87 teddy bear +88 hair drier +89 toothbrush +90 hair brush \ No newline at end of file diff --git a/docker/main/rootfs/usr/local/go2rtc/create_config.py b/docker/main/rootfs/usr/local/go2rtc/create_config.py index 51d75f0e0..ae2c0128e 100644 --- a/docker/main/rootfs/usr/local/go2rtc/create_config.py +++ b/docker/main/rootfs/usr/local/go2rtc/create_config.py @@ -2,28 +2,32 @@ import json import os +import shutil import sys from pathlib import Path -import yaml +from ruamel.yaml import YAML sys.path.insert(0, "/opt/frigate") -from frigate.const import BIRDSEYE_PIPE # noqa: E402 -from frigate.ffmpeg_presets import ( # noqa: E402 - parse_preset_hardware_acceleration_encode, +from frigate.const import ( + BIRDSEYE_PIPE, + DEFAULT_FFMPEG_VERSION, + INCLUDED_FFMPEG_VERSIONS, ) +from frigate.ffmpeg_presets import 
parse_preset_hardware_acceleration_encode sys.path.remove("/opt/frigate") +yaml = YAML() FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")} # read docker secret files as env vars too if os.path.isdir("/run/secrets"): for secret_file in os.listdir("/run/secrets"): if secret_file.startswith("FRIGATE_"): - FRIGATE_ENV_VARS[secret_file] = Path( - os.path.join("/run/secrets", secret_file) - ).read_text() + FRIGATE_ENV_VARS[secret_file] = ( + Path(os.path.join("/run/secrets", secret_file)).read_text().strip() + ) config_file = os.environ.get("CONFIG_FILE", "/config/config.yml") @@ -32,13 +36,16 @@ config_file_yaml = config_file.replace(".yml", ".yaml") if os.path.isfile(config_file_yaml): config_file = config_file_yaml -with open(config_file) as f: - raw_config = f.read() +try: + with open(config_file) as f: + raw_config = f.read() -if config_file.endswith((".yaml", ".yml")): - config: dict[str, any] = yaml.safe_load(raw_config) -elif config_file.endswith(".json"): - config: dict[str, any] = json.loads(raw_config) + if config_file.endswith((".yaml", ".yml")): + config: dict[str, any] = yaml.load(raw_config) + elif config_file.endswith(".json"): + config: dict[str, any] = json.loads(raw_config) +except FileNotFoundError: + config: dict[str, any] = {} go2rtc_config: dict[str, any] = config.get("go2rtc", {}) @@ -102,30 +109,32 @@ else: **FRIGATE_ENV_VARS ) -# need to replace ffmpeg command when using ffmpeg4 -if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59: - if go2rtc_config.get("ffmpeg") is None: - go2rtc_config["ffmpeg"] = { - "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" - } - elif go2rtc_config["ffmpeg"].get("rtsp") is None: - go2rtc_config["ffmpeg"][ - "rtsp" - ] = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" - -# add hardware acceleration presets for rockchip devices -# may be removed if 
frigate uses a go2rtc version that includes these presets -if go2rtc_config.get("ffmpeg") is None: - go2rtc_config["ffmpeg"] = { - "h264/rk": "-c:v h264_rkmpp_encoder -g 50 -bf 0", - "h265/rk": "-c:v hevc_rkmpp_encoder -g 50 -bf 0", - } +# ensure ffmpeg path is set correctly +path = config.get("ffmpeg", {}).get("path", "default") +if path == "default": + if shutil.which("ffmpeg") is None: + ffmpeg_path = f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg" + else: + ffmpeg_path = "ffmpeg" +elif path in INCLUDED_FFMPEG_VERSIONS: + ffmpeg_path = f"/usr/lib/ffmpeg/{path}/bin/ffmpeg" else: - if go2rtc_config["ffmpeg"].get("h264/rk") is None: - go2rtc_config["ffmpeg"]["h264/rk"] = "-c:v h264_rkmpp_encoder -g 50 -bf 0" + ffmpeg_path = f"{path}/bin/ffmpeg" - if go2rtc_config["ffmpeg"].get("h265/rk") is None: - go2rtc_config["ffmpeg"]["h265/rk"] = "-c:v hevc_rkmpp_encoder -g 50 -bf 0" +if go2rtc_config.get("ffmpeg") is None: + go2rtc_config["ffmpeg"] = {"bin": ffmpeg_path} +elif go2rtc_config["ffmpeg"].get("bin") is None: + go2rtc_config["ffmpeg"]["bin"] = ffmpeg_path + +# need to replace ffmpeg command when using ffmpeg4 +if int(os.environ.get("LIBAVFORMAT_VERSION_MAJOR", "59") or "59") < 59: + if go2rtc_config["ffmpeg"].get("rtsp") is None: + go2rtc_config["ffmpeg"]["rtsp"] = ( + "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" + ) +else: + if go2rtc_config.get("ffmpeg") is None: + go2rtc_config["ffmpeg"] = {"path": ""} for name in go2rtc_config.get("streams", {}): stream = go2rtc_config["streams"][name] @@ -156,7 +165,7 @@ if config.get("birdseye", {}).get("restream", False): birdseye: dict[str, any] = config.get("birdseye") input = f"-f rawvideo -pix_fmt yuv420p -video_size {birdseye.get('width', 1280)}x{birdseye.get('height', 720)} -r 10 -i {BIRDSEYE_PIPE}" - ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(config.get('ffmpeg', {}).get('hwaccel_args'), input, '-rtsp_transport tcp -f rtsp 
{output}')}" + ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(ffmpeg_path, config.get('ffmpeg', {}).get('hwaccel_args'), input, '-rtsp_transport tcp -f rtsp {output}')}" if go2rtc_config.get("streams"): go2rtc_config["streams"]["birdseye"] = ffmpeg_cmd diff --git a/docker/main/rootfs/usr/local/nginx/conf/auth_location.conf b/docker/main/rootfs/usr/local/nginx/conf/auth_location.conf new file mode 100644 index 000000000..285a3d81b --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/conf/auth_location.conf @@ -0,0 +1,43 @@ +set $upstream_auth http://127.0.0.1:5001/auth; + +## Virtual endpoint created by nginx to forward auth requests. +location /auth { + ## Essential Proxy Configuration + internal; + proxy_pass $upstream_auth; + + ## Headers + + # First strip out all the request headers + # Note: This is important to ensure that upgrade requests for secure + # websockets dont cause the backend to fail + proxy_pass_request_headers off; + # Pass info about the request + proxy_set_header X-Original-Method $request_method; + proxy_set_header X-Original-URL $scheme://$http_host$request_uri; + proxy_set_header X-Server-Port $server_port; + proxy_set_header Content-Length ""; + # Pass along auth related info + proxy_set_header Authorization $http_authorization; + proxy_set_header Cookie $http_cookie; + proxy_set_header X-CSRF-TOKEN "1"; + + # include headers from common auth proxies + include proxy_trusted_headers.conf; + + ## Basic Proxy Configuration + proxy_pass_request_body off; + proxy_next_upstream error timeout invalid_header http_500 http_502 http_503; # Timeout if the real server is dead + proxy_redirect http:// $scheme://; + proxy_http_version 1.1; + proxy_cache_bypass $cookie_session; + proxy_no_cache $cookie_session; + proxy_buffers 4 32k; + client_body_buffer_size 128k; + + ## Advanced Proxy Configuration + send_timeout 5m; + proxy_read_timeout 240; + proxy_send_timeout 240; + proxy_connect_timeout 240; +} \ No newline at end of file diff 
--git a/docker/main/rootfs/usr/local/nginx/conf/auth_request.conf b/docker/main/rootfs/usr/local/nginx/conf/auth_request.conf new file mode 100644 index 000000000..b054a6b97 --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/conf/auth_request.conf @@ -0,0 +1,22 @@ +## Send a subrequest to verify if the user is authenticated and has permission to access the resource. +auth_request /auth; + +## Save the upstream metadata response headers from Authelia to variables. +auth_request_set $user $upstream_http_remote_user; +auth_request_set $groups $upstream_http_remote_groups; +auth_request_set $name $upstream_http_remote_name; +auth_request_set $email $upstream_http_remote_email; + +## Inject the metadata response headers from the variables into the request made to the backend. +proxy_set_header Remote-User $user; +proxy_set_header Remote-Groups $groups; +proxy_set_header Remote-Email $email; +proxy_set_header Remote-Name $name; + +## Refresh the cookie as needed +auth_request_set $auth_cookie $upstream_http_set_cookie; +add_header Set-Cookie $auth_cookie; + +## Pass the location header back up if it exists +auth_request_set $redirection_url $upstream_http_location; +add_header Location $redirection_url; diff --git a/docker/main/rootfs/usr/local/nginx/conf/go2rtc_upstream.conf b/docker/main/rootfs/usr/local/nginx/conf/go2rtc_upstream.conf new file mode 100644 index 000000000..811bb9483 --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/conf/go2rtc_upstream.conf @@ -0,0 +1,4 @@ +upstream go2rtc { + server 127.0.0.1:1984; + keepalive 1024; +} \ No newline at end of file diff --git a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf index 46706a92f..75527bf53 100644 --- a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf +++ b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf @@ -10,6 +10,8 @@ events { } http { + map_hash_bucket_size 256; + include mime.types; default_type application/octet-stream; @@ -54,13 
+56,10 @@ http { keepalive 1024; } - upstream go2rtc { - server 127.0.0.1:1984; - keepalive 1024; - } + include go2rtc_upstream.conf; server { - listen 5000; + include listen.conf; # vod settings vod_base_url ''; @@ -93,7 +92,10 @@ http { gzip on; gzip_types application/vnd.apple.mpegurl; + include auth_location.conf; + location /vod/ { + include auth_request.conf; aio threads; vod hls; @@ -102,9 +104,12 @@ http { add_header Cache-Control "no-store"; expires off; + + keepalive_disable safari; } location /stream/ { + include auth_request.conf; add_header Cache-Control "no-store"; expires off; @@ -119,12 +124,14 @@ http { } location /clips/ { - + include auth_request.conf; types { video/mp4 mp4; image/jpeg jpg; } + expires 7d; + add_header Cache-Control "public"; autoindex on; root /media/frigate; } @@ -135,6 +142,7 @@ http { } location /recordings/ { + include auth_request.conf; types { video/mp4 mp4; } @@ -145,6 +153,7 @@ http { } location /exports/ { + include auth_request.conf; types { video/mp4 mp4; } @@ -155,17 +164,20 @@ http { } location /ws { + include auth_request.conf; proxy_pass http://mqtt_ws/; include proxy.conf; } location /live/jsmpeg/ { + include auth_request.conf; proxy_pass http://jsmpeg/; include proxy.conf; } # frigate lovelace card uses this path location /live/mse/api/ws { + include auth_request.conf; limit_except GET { deny all; } @@ -174,6 +186,7 @@ http { } location /live/webrtc/api/ws { + include auth_request.conf; limit_except GET { deny all; } @@ -183,6 +196,7 @@ http { # pass through go2rtc player location /live/webrtc/webrtc.html { + include auth_request.conf; limit_except GET { deny all; } @@ -192,6 +206,7 @@ http { # frontend uses this to fetch the version location /api/go2rtc/api { + include auth_request.conf; limit_except GET { deny all; } @@ -201,6 +216,7 @@ http { # integration uses this to add webrtc candidate location /api/go2rtc/webrtc { + include auth_request.conf; limit_except POST { deny all; } @@ -208,13 +224,15 @@ http { 
include proxy.conf; } - location ~* /api/.*\.(jpg|jpeg|png)$ { - rewrite ^/api/(.*)$ $1 break; + location ~* /api/.*\.(jpg|jpeg|png|webp|gif)$ { + include auth_request.conf; + rewrite ^/api/(.*)$ /$1 break; proxy_pass http://frigate_api; include proxy.conf; } location /api/ { + include auth_request.conf; add_header Cache-Control "no-store"; expires off; proxy_pass http://frigate_api/; @@ -229,27 +247,38 @@ http { add_header X-Cache-Status $upstream_cache_status; location /api/vod/ { + include auth_request.conf; proxy_pass http://frigate_api/vod/; include proxy.conf; proxy_cache off; } + location /api/login { + auth_request off; + rewrite ^/api(/.*)$ $1 break; + proxy_pass http://frigate_api; + include proxy.conf; + } + location /api/stats { + include auth_request.conf; access_log off; - rewrite ^/api/(.*)$ $1 break; + rewrite ^/api(/.*)$ $1 break; proxy_pass http://frigate_api; include proxy.conf; } location /api/version { + include auth_request.conf; access_log off; - rewrite ^/api/(.*)$ $1 break; + rewrite ^/api(/.*)$ $1 break; proxy_pass http://frigate_api; include proxy.conf; } } location / { + # do not require auth for static assets add_header Cache-Control "no-store"; expires off; @@ -271,22 +300,7 @@ http { sub_filter_once off; root /opt/frigate/web; - try_files $uri $uri/ /index.html; - } - } -} - -rtmp { - server { - listen 1935; - chunk_size 4096; - allow publish 127.0.0.1; - deny publish all; - allow play all; - application live { - live on; - record off; - meta copy; + try_files $uri $uri.html $uri/ /index.html; } } } diff --git a/docker/main/rootfs/usr/local/nginx/conf/proxy.conf b/docker/main/rootfs/usr/local/nginx/conf/proxy.conf index 442c78718..a3aacc309 100644 --- a/docker/main/rootfs/usr/local/nginx/conf/proxy.conf +++ b/docker/main/rootfs/usr/local/nginx/conf/proxy.conf @@ -1,4 +1,26 @@ -proxy_http_version 1.1; +## Headers +proxy_set_header Host $host; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "Upgrade"; 
-proxy_set_header Host $host; \ No newline at end of file +proxy_set_header X-Original-URL $scheme://$http_host$request_uri; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header X-Forwarded-Host $http_host; +proxy_set_header X-Forwarded-URI $request_uri; +proxy_set_header X-Forwarded-Ssl on; +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Real-IP $remote_addr; + +## Basic Proxy Configuration +client_body_buffer_size 128k; +proxy_next_upstream error timeout invalid_header http_500 http_502 http_503; ## Timeout if the real server is dead. +proxy_redirect http:// $scheme://; +proxy_http_version 1.1; +proxy_cache_bypass $cookie_session; +proxy_no_cache $cookie_session; +proxy_buffers 64 256k; + +## Advanced Proxy Configuration +send_timeout 5m; +proxy_read_timeout 360; +proxy_send_timeout 360; +proxy_connect_timeout 360; \ No newline at end of file diff --git a/docker/main/rootfs/usr/local/nginx/conf/proxy_trusted_headers.conf b/docker/main/rootfs/usr/local/nginx/conf/proxy_trusted_headers.conf new file mode 100644 index 000000000..54c05ab3b --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/conf/proxy_trusted_headers.conf @@ -0,0 +1,25 @@ +# Header used to validate reverse proxy trust +proxy_set_header X-Proxy-Secret $http_x_proxy_secret; + +# these headers will be copied to the /auth request and are available +# to be mapped in the config to Frigate's remote-user header + +# List of headers sent by common authentication proxies: +# - Authelia +# - Traefik forward auth +# - oauth2_proxy +# - Authentik + +proxy_set_header Remote-User $http_remote_user; +proxy_set_header Remote-Groups $http_remote_groups; +proxy_set_header Remote-Email $http_remote_email; +proxy_set_header Remote-Name $http_remote_name; +proxy_set_header X-Forwarded-User $http_x_forwarded_user; +proxy_set_header X-Forwarded-Groups $http_x_forwarded_groups; +proxy_set_header X-Forwarded-Email $http_x_forwarded_email; +proxy_set_header 
X-Forwarded-Preferred-Username $http_x_forwarded_preferred_username; +proxy_set_header X-authentik-username $http_x_authentik_username; +proxy_set_header X-authentik-groups $http_x_authentik_groups; +proxy_set_header X-authentik-email $http_x_authentik_email; +proxy_set_header X-authentik-name $http_x_authentik_name; +proxy_set_header X-authentik-uid $http_x_authentik_uid; diff --git a/docker/main/rootfs/usr/local/nginx/get_tls_settings.py b/docker/main/rootfs/usr/local/nginx/get_tls_settings.py new file mode 100644 index 000000000..f1a4c85de --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/get_tls_settings.py @@ -0,0 +1,30 @@ +"""Prints the tls config as json to stdout.""" + +import json +import os + +from ruamel.yaml import YAML + +yaml = YAML() + +config_file = os.environ.get("CONFIG_FILE", "/config/config.yml") + +# Check if we can use .yaml instead of .yml +config_file_yaml = config_file.replace(".yml", ".yaml") +if os.path.isfile(config_file_yaml): + config_file = config_file_yaml + +try: + with open(config_file) as f: + raw_config = f.read() + + if config_file.endswith((".yaml", ".yml")): + config: dict[str, any] = yaml.load(raw_config) + elif config_file.endswith(".json"): + config: dict[str, any] = json.loads(raw_config) +except FileNotFoundError: + config: dict[str, any] = {} + +tls_config: dict[str, any] = config.get("tls", {"enabled": True}) + +print(json.dumps(tls_config)) diff --git a/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl b/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl new file mode 100644 index 000000000..093d5f68e --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/templates/listen.gotmpl @@ -0,0 +1,33 @@ +# intended for internal traffic, not protected by auth +listen 5000; + +{{ if not .enabled }} +# intended for external traffic, protected by auth +listen 8971; +{{ else }} +# intended for external traffic, protected by auth +listen 8971 ssl; + +ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem; 
+ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem; + +# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP +# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7 +ssl_session_timeout 1d; +ssl_session_cache shared:MozSSL:10m; # about 40000 sessions +ssl_session_tickets off; + +# modern configuration +ssl_protocols TLSv1.3; +ssl_prefer_server_ciphers off; + +# HSTS (ngx_http_headers_module is required) (63072000 seconds) +add_header Strict-Transport-Security "max-age=63072000" always; + +# ACME challenge location +location /.well-known/acme-challenge/ { + default_type "text/plain"; + root /etc/letsencrypt/www; +} +{{ end }} + diff --git a/docker/rockchip/Dockerfile b/docker/rockchip/Dockerfile index b27e4f223..e1b43c255 100644 --- a/docker/rockchip/Dockerfile +++ b/docker/rockchip/Dockerfile @@ -9,24 +9,19 @@ COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt RUN sed -i "/https:\/\//d" /requirements-wheels.txt RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt -FROM deps AS rk-deps +FROM deps AS rk-frigate ARG TARGETARCH -RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \ - pip3 install -U /deps/rk-wheels/*.whl +RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \ + pip3 install -U /deps/rk-wheels/*.whl WORKDIR /opt/frigate/ COPY --from=rootfs / / -ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk356x.so /usr/lib/ -ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk3588.so /usr/lib/ - -ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3562/yolov8n-320x320-rk3562.rknn /models/rknn/ -ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3566/yolov8n-320x320-rk3566.rknn /models/rknn/ -ADD 
https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3568/yolov8n-320x320-rk3568.rknn /models/rknn/ -ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3588/yolov8n-320x320-rk3588.rknn /models/rknn/ +ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/librknnrt.so /usr/lib/ RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe -ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.0-1/ffmpeg /usr/lib/btbn-ffmpeg/bin/ -ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.0-1/ffprobe /usr/lib/btbn-ffmpeg/bin/ +ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/ffmpeg/6.0/bin/ +ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/ffmpeg/6.0/bin/ +ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}" diff --git a/docker/rockchip/requirements-wheels-rk.txt b/docker/rockchip/requirements-wheels-rk.txt index 9a3fe5c77..c56b69b66 100644 --- a/docker/rockchip/requirements-wheels-rk.txt +++ b/docker/rockchip/requirements-wheels-rk.txt @@ -1,2 +1 @@ -hide-warnings == 0.17 -rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v1.5.2/rknn_toolkit_lite2-1.5.2-cp39-cp39-linux_aarch64.whl \ No newline at end of file +rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/rknn_toolkit_lite2-2.0.0b0-cp39-cp39-linux_aarch64.whl \ No newline at end of file diff --git a/docker/rockchip/rk.hcl b/docker/rockchip/rk.hcl index 513fefa25..9424b46e7 100644 --- a/docker/rockchip/rk.hcl +++ b/docker/rockchip/rk.hcl @@ -1,9 +1,3 @@ -target wget { - dockerfile = "docker/main/Dockerfile" - platforms = ["linux/arm64"] - target = "wget" -} - target wheels { dockerfile = "docker/main/Dockerfile" platforms = ["linux/arm64"] @@ -25,7 +19,6 @@ target rootfs { target rk { dockerfile = 
"docker/rockchip/Dockerfile" contexts = { - wget = "target:wget", wheels = "target:wheels", deps = "target:deps", rootfs = "target:rootfs" diff --git a/docker/rockchip/rk.mk b/docker/rockchip/rk.mk index 0d9bde16a..c8278f68b 100644 --- a/docker/rockchip/rk.mk +++ b/docker/rockchip/rk.mk @@ -1,10 +1,15 @@ BOARDS += rk local-rk: version - docker buildx bake --load --file=docker/rockchip/rk.hcl --set rk.tags=frigate:latest-rk rk + docker buildx bake --file=docker/rockchip/rk.hcl rk \ + --set rk.tags=frigate:latest-rk \ + --load build-rk: version - docker buildx bake --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk + docker buildx bake --file=docker/rockchip/rk.hcl rk \ + --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk push-rk: build-rk - docker buildx bake --push --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk \ No newline at end of file + docker buildx bake --file=docker/rockchip/rk.hcl rk \ + --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk \ + --push \ No newline at end of file diff --git a/docker/rocm/Dockerfile b/docker/rocm/Dockerfile new file mode 100644 index 000000000..eebe04878 --- /dev/null +++ b/docker/rocm/Dockerfile @@ -0,0 +1,108 @@ +# syntax=docker/dockerfile:1.4 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive +ARG ROCM=5.7.3 +ARG AMDGPU=gfx900 +ARG HSA_OVERRIDE_GFX_VERSION +ARG HSA_OVERRIDE + +####################################################################### +FROM ubuntu:focal as rocm + +ARG ROCM + +RUN apt-get update && apt-get -y upgrade +RUN apt-get -y install gnupg wget + +RUN mkdir --parents --mode=0755 /etc/apt/keyrings + +RUN wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | tee /etc/apt/keyrings/rocm.gpg > /dev/null +COPY docker/rocm/rocm.list /etc/apt/sources.list.d/ +COPY docker/rocm/rocm-pin-600 
/etc/apt/preferences.d/ + +RUN apt-get update + +RUN apt-get -y install --no-install-recommends migraphx hipfft roctracer +RUN apt-get -y install --no-install-recommends migraphx-dev + +RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib +RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ +RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm + +RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/ +RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf + +####################################################################### +FROM --platform=linux/amd64 debian:11 as debian-base + +RUN apt-get update && apt-get -y upgrade +RUN apt-get -y install --no-install-recommends libelf1 libdrm2 libdrm-amdgpu1 libnuma1 kmod + +RUN apt-get -y install python3 + +####################################################################### +# ROCm does not come with migraphx wrappers for python 3.9, so we build it here +FROM debian-base as debian-build + +ARG ROCM + +COPY --from=rocm /opt/rocm-$ROCM /opt/rocm-$ROCM +RUN ln -s /opt/rocm-$ROCM /opt/rocm + +RUN apt-get -y install g++ cmake +RUN apt-get -y install python3-pybind11 python3.9-distutils python3-dev + +WORKDIR /opt/build + +COPY docker/rocm/migraphx . + +RUN mkdir build && cd build && cmake .. && make install + +####################################################################### +FROM deps AS deps-prelim + +# need this to install libnuma1 +RUN apt-get update +# no ugprade?!?! 
+RUN apt-get -y install libnuma1 + +WORKDIR /opt/frigate/ +COPY --from=rootfs / / + +COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt +RUN python3 -m pip install --upgrade pip \ + && pip3 uninstall -y onnxruntime-openvino \ + && pip3 install -r /requirements.txt + +####################################################################### +FROM scratch AS rocm-dist + +ARG ROCM +ARG AMDGPU + +COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/ +COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share/miopen/db/ +COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/ +COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/ +COPY --from=rocm /opt/rocm-dist/ / +COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-39-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/ + +####################################################################### +FROM deps-prelim AS rocm-prelim-hsa-override0 + +ENV HSA_ENABLE_SDMA=0 + +COPY --from=rocm-dist / / + +RUN ldconfig + +####################################################################### +FROM rocm-prelim-hsa-override0 as rocm-prelim-hsa-override1 + +ARG HSA_OVERRIDE_GFX_VERSION +ENV HSA_OVERRIDE_GFX_VERSION=$HSA_OVERRIDE_GFX_VERSION + +####################################################################### +FROM rocm-prelim-hsa-override$HSA_OVERRIDE as rocm-deps + diff --git a/docker/rocm/migraphx/CMakeLists.txt b/docker/rocm/migraphx/CMakeLists.txt new file mode 100644 index 000000000..271dd094b --- /dev/null +++ b/docker/rocm/migraphx/CMakeLists.txt @@ -0,0 +1,26 @@ + +cmake_minimum_required(VERSION 3.1) + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) + +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Release) +endif() + +SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) + +project(migraphx_py) + 
+include_directories(/opt/rocm/include) + +find_package(pybind11 REQUIRED) +pybind11_add_module(migraphx migraphx_py.cpp) + +target_link_libraries(migraphx PRIVATE /opt/rocm/lib/libmigraphx.so /opt/rocm/lib/libmigraphx_tf.so /opt/rocm/lib/libmigraphx_onnx.so) + +install(TARGETS migraphx + COMPONENT python + LIBRARY DESTINATION /opt/rocm/lib +) diff --git a/docker/rocm/migraphx/migraphx_py.cpp b/docker/rocm/migraphx/migraphx_py.cpp new file mode 100644 index 000000000..894c9d186 --- /dev/null +++ b/docker/rocm/migraphx/migraphx_py.cpp @@ -0,0 +1,582 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_GPU +#include +#endif + +using half = half_float::half; +namespace py = pybind11; + +#ifdef __clang__ +#define MIGRAPHX_PUSH_UNUSED_WARNING \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"") +#define MIGRAPHX_POP_WARNING _Pragma("clang diagnostic pop") +#else +#define MIGRAPHX_PUSH_UNUSED_WARNING +#define MIGRAPHX_POP_WARNING +#endif +#define MIGRAPHX_PYBIND11_MODULE(...) \ + MIGRAPHX_PUSH_UNUSED_WARNING \ + PYBIND11_MODULE(__VA_ARGS__) \ + MIGRAPHX_POP_WARNING + +#define MIGRAPHX_PYTHON_GENERATE_SHAPE_ENUM(x, t) .value(#x, migraphx::shape::type_t::x) +namespace migraphx { + +migraphx::value to_value(py::kwargs kwargs); +migraphx::value to_value(py::list lst); + +template +void visit_py(T x, F f) +{ + if(py::isinstance(x)) + { + f(to_value(x.template cast())); + } + else if(py::isinstance(x)) + { + f(to_value(x.template cast())); + } + else if(py::isinstance(x)) + { + f(x.template cast()); + } + else if(py::isinstance(x) or py::hasattr(x, "__index__")) + { + f(x.template cast()); + } + else if(py::isinstance(x)) + { + f(x.template cast()); + } + else if(py::isinstance(x)) + { + f(x.template cast()); + } + else if(py::isinstance(x)) + { + f(migraphx::to_value(x.template cast())); + } + else + { + MIGRAPHX_THROW("VISIT_PY: Unsupported data type!"); + } +} + +migraphx::value to_value(py::list lst) +{ + migraphx::value v = migraphx::value::array{}; + for(auto val : lst) + { + visit_py(val, [&](auto py_val) { v.push_back(py_val); }); + } + + return v; +} + +migraphx::value to_value(py::kwargs kwargs) +{ + migraphx::value v = migraphx::value::object{}; + + for(auto arg : kwargs) + { + auto&& key = py::str(arg.first); + auto&& val = arg.second; + visit_py(val, [&](auto py_val) { v[key] = py_val; }); + } + return v; +} 
+} // namespace migraphx + +namespace pybind11 { +namespace detail { + +template <> +struct npy_format_descriptor +{ + static std::string format() + { + // following: https://docs.python.org/3/library/struct.html#format-characters + return "e"; + } + static constexpr auto name() { return _("half"); } +}; + +} // namespace detail +} // namespace pybind11 + +template +void visit_type(const migraphx::shape& s, F f) +{ + s.visit_type(f); +} + +template +void visit(const migraphx::raw_data& x, F f) +{ + x.visit(f); +} + +template +void visit_types(F f) +{ + migraphx::shape::visit_types(f); +} + +template +py::buffer_info to_buffer_info(T& x) +{ + migraphx::shape s = x.get_shape(); + assert(s.type() != migraphx::shape::tuple_type); + if(s.dynamic()) + MIGRAPHX_THROW("MIGRAPHX PYTHON: dynamic shape argument passed to to_buffer_info"); + auto strides = s.strides(); + std::transform( + strides.begin(), strides.end(), strides.begin(), [&](auto i) { return i * s.type_size(); }); + py::buffer_info b; + visit_type(s, [&](auto as) { + // migraphx use int8_t data to store bool type, we need to + // explicitly specify the data type as bool for python + if(s.type() == migraphx::shape::bool_type) + { + b = py::buffer_info(x.data(), + as.size(), + py::format_descriptor::format(), + s.ndim(), + s.lens(), + strides); + } + else + { + b = py::buffer_info(x.data(), + as.size(), + py::format_descriptor::format(), + s.ndim(), + s.lens(), + strides); + } + }); + return b; +} + +migraphx::shape to_shape(const py::buffer_info& info) +{ + migraphx::shape::type_t t; + std::size_t n = 0; + visit_types([&](auto as) { + if(info.format == py::format_descriptor::format() or + (info.format == "l" and py::format_descriptor::format() == "q") or + (info.format == "L" and py::format_descriptor::format() == "Q")) + { + t = as.type_enum(); + n = sizeof(as()); + } + else if(info.format == "?" 
and py::format_descriptor::format() == "b") + { + t = migraphx::shape::bool_type; + n = sizeof(bool); + } + }); + + if(n == 0) + { + MIGRAPHX_THROW("MIGRAPHX PYTHON: Unsupported data type " + info.format); + } + + auto strides = info.strides; + std::transform(strides.begin(), strides.end(), strides.begin(), [&](auto i) -> std::size_t { + return n > 0 ? i / n : 0; + }); + + // scalar support + if(info.shape.empty()) + { + return migraphx::shape{t}; + } + else + { + return migraphx::shape{t, info.shape, strides}; + } +} + +MIGRAPHX_PYBIND11_MODULE(migraphx, m) +{ + py::class_ shape_cls(m, "shape"); + shape_cls + .def(py::init([](py::kwargs kwargs) { + auto v = migraphx::to_value(kwargs); + auto t = migraphx::shape::parse_type(v.get("type", "float")); + if(v.contains("dyn_dims")) + { + auto dyn_dims = + migraphx::from_value>( + v.at("dyn_dims")); + return migraphx::shape(t, dyn_dims); + } + auto lens = v.get("lens", {1}); + if(v.contains("strides")) + return migraphx::shape(t, lens, v.at("strides").to_vector()); + else + return migraphx::shape(t, lens); + })) + .def("type", &migraphx::shape::type) + .def("lens", &migraphx::shape::lens) + .def("strides", &migraphx::shape::strides) + .def("ndim", &migraphx::shape::ndim) + .def("elements", &migraphx::shape::elements) + .def("bytes", &migraphx::shape::bytes) + .def("type_string", &migraphx::shape::type_string) + .def("type_size", &migraphx::shape::type_size) + .def("dyn_dims", &migraphx::shape::dyn_dims) + .def("packed", &migraphx::shape::packed) + .def("transposed", &migraphx::shape::transposed) + .def("broadcasted", &migraphx::shape::broadcasted) + .def("standard", &migraphx::shape::standard) + .def("scalar", &migraphx::shape::scalar) + .def("dynamic", &migraphx::shape::dynamic) + .def("__eq__", std::equal_to{}) + .def("__ne__", std::not_equal_to{}) + .def("__repr__", [](const migraphx::shape& s) { return migraphx::to_string(s); }); + + py::enum_(shape_cls, "type_t") + 
MIGRAPHX_SHAPE_VISIT_TYPES(MIGRAPHX_PYTHON_GENERATE_SHAPE_ENUM); + + py::class_(shape_cls, "dynamic_dimension") + .def(py::init<>()) + .def(py::init()) + .def(py::init>()) + .def_readwrite("min", &migraphx::shape::dynamic_dimension::min) + .def_readwrite("max", &migraphx::shape::dynamic_dimension::max) + .def_readwrite("optimals", &migraphx::shape::dynamic_dimension::optimals) + .def("is_fixed", &migraphx::shape::dynamic_dimension::is_fixed); + + py::class_(m, "argument", py::buffer_protocol()) + .def_buffer([](migraphx::argument& x) -> py::buffer_info { return to_buffer_info(x); }) + .def(py::init([](py::buffer b) { + py::buffer_info info = b.request(); + return migraphx::argument(to_shape(info), info.ptr); + })) + .def("get_shape", &migraphx::argument::get_shape) + .def("data_ptr", + [](migraphx::argument& x) { return reinterpret_cast(x.data()); }) + .def("tolist", + [](migraphx::argument& x) { + py::list l{x.get_shape().elements()}; + visit(x, [&](auto data) { l = py::cast(data.to_vector()); }); + return l; + }) + .def("__eq__", std::equal_to{}) + .def("__ne__", std::not_equal_to{}) + .def("__repr__", [](const migraphx::argument& x) { return migraphx::to_string(x); }); + + py::class_(m, "target"); + + py::class_(m, "instruction_ref") + .def("shape", [](migraphx::instruction_ref i) { return i->get_shape(); }) + .def("op", [](migraphx::instruction_ref i) { return i->get_operator(); }); + + py::class_>(m, "module") + .def("print", [](const migraphx::module& mm) { std::cout << mm << std::endl; }) + .def( + "add_instruction", + [](migraphx::module& mm, + const migraphx::operation& op, + std::vector& args, + std::vector& mod_args) { + return mm.add_instruction(op, args, mod_args); + }, + py::arg("op"), + py::arg("args"), + py::arg("mod_args") = std::vector{}) + .def( + "add_literal", + [](migraphx::module& mm, py::buffer data) { + py::buffer_info info = data.request(); + auto literal_shape = to_shape(info); + return mm.add_literal(literal_shape, 
reinterpret_cast(info.ptr)); + }, + py::arg("data")) + .def( + "add_parameter", + [](migraphx::module& mm, const std::string& name, const migraphx::shape shape) { + return mm.add_parameter(name, shape); + }, + py::arg("name"), + py::arg("shape")) + .def( + "add_return", + [](migraphx::module& mm, std::vector& args) { + return mm.add_return(args); + }, + py::arg("args")) + .def("__repr__", [](const migraphx::module& mm) { return migraphx::to_string(mm); }); + + py::class_(m, "program") + .def(py::init([]() { return migraphx::program(); })) + .def("get_parameter_names", &migraphx::program::get_parameter_names) + .def("get_parameter_shapes", &migraphx::program::get_parameter_shapes) + .def("get_output_shapes", &migraphx::program::get_output_shapes) + .def("is_compiled", &migraphx::program::is_compiled) + .def( + "compile", + [](migraphx::program& p, + const migraphx::target& t, + bool offload_copy, + bool fast_math, + bool exhaustive_tune) { + migraphx::compile_options options; + options.offload_copy = offload_copy; + options.fast_math = fast_math; + options.exhaustive_tune = exhaustive_tune; + p.compile(t, options); + }, + py::arg("t"), + py::arg("offload_copy") = true, + py::arg("fast_math") = true, + py::arg("exhaustive_tune") = false) + .def("get_main_module", [](const migraphx::program& p) { return p.get_main_module(); }) + .def( + "create_module", + [](migraphx::program& p, const std::string& name) { return p.create_module(name); }, + py::arg("name")) + .def("run", + [](migraphx::program& p, py::dict params) { + migraphx::parameter_map pm; + for(auto x : params) + { + std::string key = x.first.cast(); + py::buffer b = x.second.cast(); + py::buffer_info info = b.request(); + pm[key] = migraphx::argument(to_shape(info), info.ptr); + } + return p.eval(pm); + }) + .def("run_async", + [](migraphx::program& p, + py::dict params, + std::uintptr_t stream, + std::string stream_name) { + migraphx::parameter_map pm; + for(auto x : params) + { + std::string key = 
x.first.cast(); + py::buffer b = x.second.cast(); + py::buffer_info info = b.request(); + pm[key] = migraphx::argument(to_shape(info), info.ptr); + } + migraphx::execution_environment exec_env{ + migraphx::any_ptr(reinterpret_cast(stream), stream_name), true}; + return p.eval(pm, exec_env); + }) + .def("sort", &migraphx::program::sort) + .def("print", [](const migraphx::program& p) { std::cout << p << std::endl; }) + .def("__eq__", std::equal_to{}) + .def("__ne__", std::not_equal_to{}) + .def("__repr__", [](const migraphx::program& p) { return migraphx::to_string(p); }); + + py::class_ op(m, "op"); + op.def(py::init([](const std::string& name, py::kwargs kwargs) { + migraphx::value v = migraphx::value::object{}; + if(kwargs) + { + v = migraphx::to_value(kwargs); + } + return migraphx::make_op(name, v); + })) + .def("name", &migraphx::operation::name); + + py::enum_(op, "pooling_mode") + .value("average", migraphx::op::pooling_mode::average) + .value("max", migraphx::op::pooling_mode::max) + .value("lpnorm", migraphx::op::pooling_mode::lpnorm); + + py::enum_(op, "rnn_direction") + .value("forward", migraphx::op::rnn_direction::forward) + .value("reverse", migraphx::op::rnn_direction::reverse) + .value("bidirectional", migraphx::op::rnn_direction::bidirectional); + + m.def( + "argument_from_pointer", + [](const migraphx::shape shape, const int64_t address) { + return migraphx::argument(shape, reinterpret_cast(address)); + }, + py::arg("shape"), + py::arg("address")); + + m.def( + "parse_tf", + [](const std::string& filename, + bool is_nhwc, + unsigned int batch_size, + std::unordered_map> map_input_dims, + std::vector output_names) { + return migraphx::parse_tf( + filename, migraphx::tf_options{is_nhwc, batch_size, map_input_dims, output_names}); + }, + "Parse tf protobuf (default format is nhwc)", + py::arg("filename"), + py::arg("is_nhwc") = true, + py::arg("batch_size") = 1, + py::arg("map_input_dims") = std::unordered_map>(), + py::arg("output_names") = 
std::vector()); + + m.def( + "parse_onnx", + [](const std::string& filename, + unsigned int default_dim_value, + migraphx::shape::dynamic_dimension default_dyn_dim_value, + std::unordered_map> map_input_dims, + std::unordered_map> + map_dyn_input_dims, + bool skip_unknown_operators, + bool print_program_on_error, + int64_t max_loop_iterations) { + migraphx::onnx_options options; + options.default_dim_value = default_dim_value; + options.default_dyn_dim_value = default_dyn_dim_value; + options.map_input_dims = map_input_dims; + options.map_dyn_input_dims = map_dyn_input_dims; + options.skip_unknown_operators = skip_unknown_operators; + options.print_program_on_error = print_program_on_error; + options.max_loop_iterations = max_loop_iterations; + return migraphx::parse_onnx(filename, options); + }, + "Parse onnx file", + py::arg("filename"), + py::arg("default_dim_value") = 0, + py::arg("default_dyn_dim_value") = migraphx::shape::dynamic_dimension{1, 1}, + py::arg("map_input_dims") = std::unordered_map>(), + py::arg("map_dyn_input_dims") = + std::unordered_map>(), + py::arg("skip_unknown_operators") = false, + py::arg("print_program_on_error") = false, + py::arg("max_loop_iterations") = 10); + + m.def( + "parse_onnx_buffer", + [](const std::string& onnx_buffer, + unsigned int default_dim_value, + migraphx::shape::dynamic_dimension default_dyn_dim_value, + std::unordered_map> map_input_dims, + std::unordered_map> + map_dyn_input_dims, + bool skip_unknown_operators, + bool print_program_on_error) { + migraphx::onnx_options options; + options.default_dim_value = default_dim_value; + options.default_dyn_dim_value = default_dyn_dim_value; + options.map_input_dims = map_input_dims; + options.map_dyn_input_dims = map_dyn_input_dims; + options.skip_unknown_operators = skip_unknown_operators; + options.print_program_on_error = print_program_on_error; + return migraphx::parse_onnx_buffer(onnx_buffer, options); + }, + "Parse onnx file", + py::arg("filename"), + 
py::arg("default_dim_value") = 0, + py::arg("default_dyn_dim_value") = migraphx::shape::dynamic_dimension{1, 1}, + py::arg("map_input_dims") = std::unordered_map>(), + py::arg("map_dyn_input_dims") = + std::unordered_map>(), + py::arg("skip_unknown_operators") = false, + py::arg("print_program_on_error") = false); + + m.def( + "load", + [](const std::string& name, const std::string& format) { + migraphx::file_options options; + options.format = format; + return migraphx::load(name, options); + }, + "Load MIGraphX program", + py::arg("filename"), + py::arg("format") = "msgpack"); + + m.def( + "save", + [](const migraphx::program& p, const std::string& name, const std::string& format) { + migraphx::file_options options; + options.format = format; + return migraphx::save(p, name, options); + }, + "Save MIGraphX program", + py::arg("p"), + py::arg("filename"), + py::arg("format") = "msgpack"); + + m.def("get_target", &migraphx::make_target); + m.def("create_argument", [](const migraphx::shape& s, const std::vector& values) { + if(values.size() != s.elements()) + MIGRAPHX_THROW("Values and shape elements do not match"); + migraphx::argument a{s}; + a.fill(values.begin(), values.end()); + return a; + }); + m.def("generate_argument", &migraphx::generate_argument, py::arg("s"), py::arg("seed") = 0); + m.def("fill_argument", &migraphx::fill_argument, py::arg("s"), py::arg("value")); + m.def("quantize_fp16", + &migraphx::quantize_fp16, + py::arg("prog"), + py::arg("ins_names") = std::vector{"all"}); + m.def("quantize_int8", + &migraphx::quantize_int8, + py::arg("prog"), + py::arg("t"), + py::arg("calibration") = std::vector{}, + py::arg("ins_names") = std::vector{"dot", "convolution"}); + +#ifdef HAVE_GPU + m.def("allocate_gpu", &migraphx::gpu::allocate_gpu, py::arg("s"), py::arg("host") = false); + m.def("to_gpu", &migraphx::gpu::to_gpu, py::arg("arg"), py::arg("host") = false); + m.def("from_gpu", &migraphx::gpu::from_gpu); + m.def("gpu_sync", [] { 
migraphx::gpu::gpu_sync(); }); +#endif + +#ifdef VERSION_INFO + m.attr("__version__") = VERSION_INFO; +#else + m.attr("__version__") = "dev"; +#endif +} diff --git a/docker/rocm/requirements-wheels-rocm.txt b/docker/rocm/requirements-wheels-rocm.txt new file mode 100644 index 000000000..89d0e6096 --- /dev/null +++ b/docker/rocm/requirements-wheels-rocm.txt @@ -0,0 +1 @@ +onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v1.0.0/onnxruntime_rocm-1.17.3-cp39-cp39-linux_x86_64.whl \ No newline at end of file diff --git a/docker/rocm/rocm-pin-600 b/docker/rocm/rocm-pin-600 new file mode 100644 index 000000000..88348a5c1 --- /dev/null +++ b/docker/rocm/rocm-pin-600 @@ -0,0 +1,3 @@ +Package: * +Pin: release o=repo.radeon.com +Pin-Priority: 600 diff --git a/docker/rocm/rocm.hcl b/docker/rocm/rocm.hcl new file mode 100644 index 000000000..33a2d2323 --- /dev/null +++ b/docker/rocm/rocm.hcl @@ -0,0 +1,38 @@ +variable "AMDGPU" { + default = "gfx900" +} +variable "ROCM" { + default = "5.7.3" +} +variable "HSA_OVERRIDE_GFX_VERSION" { + default = "" +} +variable "HSA_OVERRIDE" { + default = "1" +} +target deps { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/amd64"] + target = "deps" +} + +target rootfs { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/amd64"] + target = "rootfs" +} + +target rocm { + dockerfile = "docker/rocm/Dockerfile" + contexts = { + deps = "target:deps", + rootfs = "target:rootfs" + } + platforms = ["linux/amd64"] + args = { + AMDGPU = AMDGPU, + ROCM = ROCM, + HSA_OVERRIDE_GFX_VERSION = HSA_OVERRIDE_GFX_VERSION, + HSA_OVERRIDE = HSA_OVERRIDE + } +} diff --git a/docker/rocm/rocm.list b/docker/rocm/rocm.list new file mode 100644 index 000000000..0915b4094 --- /dev/null +++ b/docker/rocm/rocm.list @@ -0,0 +1 @@ +deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/5.7.3 focal main diff --git a/docker/rocm/rocm.mk b/docker/rocm/rocm.mk new file mode 100644 
index 000000000..c92a458f5 --- /dev/null +++ b/docker/rocm/rocm.mk @@ -0,0 +1,53 @@ +BOARDS += rocm + +# AMD/ROCm is chunky so we build couple of smaller images for specific chipsets +ROCM_CHIPSETS:=gfx900:9.0.0 gfx1030:10.3.0 gfx1100:11.0.0 + +local-rocm: version + $(foreach chipset,$(ROCM_CHIPSETS), \ + AMDGPU=$(word 1,$(subst :, ,$(chipset))) \ + HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \ + HSA_OVERRIDE=1 \ + docker buildx bake --file=docker/rocm/rocm.hcl rocm \ + --set rocm.tags=frigate:latest-rocm-$(word 1,$(subst :, ,$(chipset))) \ + --load \ + &&) true + + unset HSA_OVERRIDE_GFX_VERSION && \ + HSA_OVERRIDE=0 \ + AMDGPU=gfx \ + docker buildx bake --file=docker/rocm/rocm.hcl rocm \ + --set rocm.tags=frigate:latest-rocm \ + --load + +build-rocm: version + $(foreach chipset,$(ROCM_CHIPSETS), \ + AMDGPU=$(word 1,$(subst :, ,$(chipset))) \ + HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \ + HSA_OVERRIDE=1 \ + docker buildx bake --file=docker/rocm/rocm.hcl rocm \ + --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \ + &&) true + + unset HSA_OVERRIDE_GFX_VERSION && \ + HSA_OVERRIDE=0 \ + AMDGPU=gfx \ + docker buildx bake --file=docker/rocm/rocm.hcl rocm \ + --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm + +push-rocm: build-rocm + $(foreach chipset,$(ROCM_CHIPSETS), \ + AMDGPU=$(word 1,$(subst :, ,$(chipset))) \ + HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \ + HSA_OVERRIDE=1 \ + docker buildx bake --file=docker/rocm/rocm.hcl rocm \ + --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \ + --push \ + &&) true + + unset HSA_OVERRIDE_GFX_VERSION && \ + HSA_OVERRIDE=0 \ + AMDGPU=gfx \ + docker buildx bake --file=docker/rocm/rocm.hcl rocm \ + --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm \ + --push diff --git a/docker/rpi/Dockerfile b/docker/rpi/Dockerfile index 581ca7ff8..9860e65ec 100644 --- a/docker/rpi/Dockerfile +++ 
b/docker/rpi/Dockerfile @@ -12,5 +12,7 @@ RUN rm -rf /usr/lib/btbn-ffmpeg/ RUN --mount=type=bind,source=docker/rpi/install_deps.sh,target=/deps/install_deps.sh \ /deps/install_deps.sh +ENV LIBAVFORMAT_VERSION_MAJOR=58 + WORKDIR /opt/frigate/ COPY --from=rootfs / / diff --git a/docker/rpi/rpi.mk b/docker/rpi/rpi.mk index c1282b011..290b30c31 100644 --- a/docker/rpi/rpi.mk +++ b/docker/rpi/rpi.mk @@ -1,10 +1,15 @@ BOARDS += rpi local-rpi: version - docker buildx bake --load --file=docker/rpi/rpi.hcl --set rpi.tags=frigate:latest-rpi rpi + docker buildx bake --file=docker/rpi/rpi.hcl rpi \ + --set rpi.tags=frigate:latest-rpi \ + --load build-rpi: version - docker buildx bake --file=docker/rpi/rpi.hcl --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi rpi + docker buildx bake --file=docker/rpi/rpi.hcl rpi \ + --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi push-rpi: build-rpi - docker buildx bake --push --file=docker/rpi/rpi.hcl --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi rpi \ No newline at end of file + docker buildx bake --file=docker/rpi/rpi.hcl rpi \ + --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi \ + --push diff --git a/docker/tensorrt/Dockerfile.amd64 b/docker/tensorrt/Dockerfile.amd64 index 075726eda..3dcb42658 100644 --- a/docker/tensorrt/Dockerfile.amd64 +++ b/docker/tensorrt/Dockerfile.amd64 @@ -12,12 +12,28 @@ ARG TARGETARCH COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt +# Build CuDNN +FROM wget AS cudnn-deps + +ARG COMPUTE_LEVEL + +RUN apt-get update \ + && apt-get install -y git build-essential + +RUN wget https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb \ +&& dpkg -i cuda-keyring_1.1-1_all.deb \ +&& apt-get update \ +&& apt-get -y install cuda-toolkit \ +&& rm -rf /var/lib/apt/lists/* + FROM tensorrt-base AS 
frigate-tensorrt ENV TRT_VER=8.5.3 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ pip3 install -U /deps/trt-wheels/*.whl && \ ldconfig +COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda +ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib WORKDIR /opt/frigate/ COPY --from=rootfs / / @@ -26,6 +42,7 @@ FROM devcontainer AS devcontainer-trt COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos +COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda COPY docker/tensorrt/detector/rootfs/ / COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ diff --git a/docker/tensorrt/Dockerfile.base b/docker/tensorrt/Dockerfile.base index b0015016d..59ead46f5 100644 --- a/docker/tensorrt/Dockerfile.base +++ b/docker/tensorrt/Dockerfile.base @@ -8,6 +8,8 @@ ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.03-py3 # Build TensorRT-specific library FROM ${TRT_BASE} AS trt-deps +ARG COMPUTE_LEVEL + RUN apt-get update \ && apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \ && rm -rf /var/lib/apt/lists/* diff --git a/docker/tensorrt/detector/tensorrt_libyolo.sh b/docker/tensorrt/detector/tensorrt_libyolo.sh index 91b9340a9..46e4077fa 100755 --- a/docker/tensorrt/detector/tensorrt_libyolo.sh +++ b/docker/tensorrt/detector/tensorrt_libyolo.sh @@ -11,7 +11,7 @@ git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b condition if [ ! 
-e /usr/local/cuda ]; then ln -s /usr/local/cuda-* /usr/local/cuda fi -cd ./tensorrt_demos/plugins && make all -j$(nproc) +cd ./tensorrt_demos/plugins && make all -j$(nproc) computes="${COMPUTE_LEVEL:-}" cp libyolo_layer.so /usr/local/lib/libyolo_layer.so # Store yolo scripts for later conversion diff --git a/docker/tensorrt/requirements-amd64.txt b/docker/tensorrt/requirements-amd64.txt index 214202e43..b5ad4fcbd 100644 --- a/docker/tensorrt/requirements-amd64.txt +++ b/docker/tensorrt/requirements-amd64.txt @@ -8,5 +8,7 @@ nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64' nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64' nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64' nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64' +nvidia-cufft-cu11==10.*; platform_machine == 'x86_64' onnx==1.14.0; platform_machine == 'x86_64' -protobuf==3.20.3; platform_machine == 'x86_64' \ No newline at end of file +onnxruntime-gpu==1.17.*; platform_machine == 'x86_64' +protobuf==3.20.3; platform_machine == 'x86_64' diff --git a/docker/tensorrt/trt.hcl b/docker/tensorrt/trt.hcl index 56e294100..3195fb5bf 100644 --- a/docker/tensorrt/trt.hcl +++ b/docker/tensorrt/trt.hcl @@ -10,12 +10,16 @@ variable "SLIM_BASE" { variable "TRT_BASE" { default = null } +variable "COMPUTE_LEVEL" { + default = "" +} target "_build_args" { args = { BASE_IMAGE = BASE_IMAGE, SLIM_BASE = SLIM_BASE, - TRT_BASE = TRT_BASE + TRT_BASE = TRT_BASE, + COMPUTE_LEVEL = COMPUTE_LEVEL } platforms = ["linux/${ARCH}"] } diff --git a/docker/tensorrt/trt.mk b/docker/tensorrt/trt.mk index 0e01c1402..455e1ee11 100644 --- a/docker/tensorrt/trt.mk +++ b/docker/tensorrt/trt.mk @@ -2,25 +2,40 @@ BOARDS += trt JETPACK4_BASE ?= timongentzsch/l4t-ubuntu20-opencv:latest # L4T 32.7.1 JetPack 4.6.1 JETPACK5_BASE ?= nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime # L4T 35.3.1 JetPack 5.1.1 -X86_DGPU_ARGS := ARCH=amd64 +X86_DGPU_ARGS := ARCH=amd64 COMPUTE_LEVEL="50 60 70 80 90" JETPACK4_ARGS := 
ARCH=arm64 BASE_IMAGE=$(JETPACK4_BASE) SLIM_BASE=$(JETPACK4_BASE) TRT_BASE=$(JETPACK4_BASE) JETPACK5_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK5_BASE) SLIM_BASE=$(JETPACK5_BASE) TRT_BASE=$(JETPACK5_BASE) local-trt: version - $(X86_DGPU_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt tensorrt + $(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \ + --set tensorrt.tags=frigate:latest-tensorrt \ + --load local-trt-jp4: version - $(JETPACK4_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp4 tensorrt + $(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \ + --set tensorrt.tags=frigate:latest-tensorrt-jp4 \ + --load local-trt-jp5: version - $(JETPACK5_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp5 tensorrt + $(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \ + --set tensorrt.tags=frigate:latest-tensorrt-jp5 \ + --load build-trt: - $(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt - $(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt - $(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt + $(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \ + --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt + $(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \ + --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 + $(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \ + --set 
tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 push-trt: build-trt - $(X86_DGPU_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt - $(JETPACK4_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt - $(JETPACK5_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt + $(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \ + --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt \ + --push + $(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \ + --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 \ + --push + $(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \ + --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 \ + --push diff --git a/docs/README.md b/docs/README.md index bd4aded51..68b27e15a 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,5 +1,10 @@ # Website -This website is built using [Docusaurus 2](https://v2.docusaurus.io/), a modern static website generator. +This website is built using [Docusaurus 3.5](https://docusaurus.io/docs), a modern static website generator. For installation and contributing instructions, please follow the [Contributing Docs](https://docs.frigate.video/development/contributing). + +# Development + +1. Run `npm i` to install dependencies +2. 
Run `npm run start` to start the website diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md index 50cd5ff79..da5383886 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ -41,7 +41,7 @@ environment_vars: ### `database` -Event and recording information is managed in a sqlite database at `/config/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant. +Tracked object and recording information is managed in a sqlite database at `/config/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant. If you are storing your database on a network share (SMB, NFS, etc), you may get a `database is locked` error message on startup. You can customize the location of the database in the config if necessary. @@ -80,6 +80,14 @@ model: input_pixel_format: "bgr" ``` +#### `labelmap` + +:::warning + +If the labelmap is customized then the labels used for alerts will need to be adjusted as well. See [alert labels](../configuration/review.md#restricting-alerts-to-specific-labels) for more info. + +::: + The labelmap can be customized to your needs. A common reason to do this is to combine multiple object types that are easily confused when you don't need to be as granular such as car/truck. By default, truck is renamed to car because they are often confused. You cannot add new object types, but you can change the names of existing objects in the model. ```yaml @@ -96,7 +104,7 @@ model: Note that if you rename objects in the labelmap, you will also need to update your `objects -> track` list as well. -:::caution +:::warning Some labels have special handling and modifications can disable functionality. 
@@ -106,21 +114,67 @@ Some labels have special handling and modifications can disable functionality. ::: -## Custom ffmpeg build +## Network Configuration -Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exists some hardware setups which have incompatibilities with the included build. In this case, a docker volume mapping can be used to overwrite the included ffmpeg build with an ffmpeg build that works for your specific hardware setup. +Changes to Frigate's internal network configuration can be made by bind mounting nginx.conf into the container. For example: + +```yaml +services: + frigate: + container_name: frigate + ... + volumes: + ... + - /path/to/your/nginx.conf:/usr/local/nginx/conf/nginx.conf +``` + +### Enabling IPv6 + +IPv6 is disabled by default, to enable IPv6 listen.gotmpl needs to be bind mounted with IPv6 enabled. For example: + +``` +{{ if not .enabled }} +# intended for external traffic, protected by auth +listen 8971; +{{ else }} +# intended for external traffic, protected by auth +listen 8971 ssl; + +# intended for internal traffic, not protected by auth +listen 5000; +``` + +becomes + +``` +{{ if not .enabled }} +# intended for external traffic, protected by auth +listen [::]:8971 ipv6only=off; +{{ else }} +# intended for external traffic, protected by auth +listen [::]:8971 ipv6only=off ssl; + +# intended for internal traffic, not protected by auth +listen [::]:5000 ipv6only=off; +``` + +## Custom Dependencies + +### Custom ffmpeg build + +Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exists some hardware setups which have incompatibilities with the included build. In this case, statically built ffmpeg binary can be downloaded to /config and used. To do this: -1. Download your ffmpeg build and uncompress to a folder on the host (let's use `/home/appdata/frigate/custom-ffmpeg` for this example). +1. 
Download your ffmpeg build and uncompress to the Frigate config folder. 2. Update your docker-compose or docker CLI to include `'/home/appdata/frigate/custom-ffmpeg':'/usr/lib/btbn-ffmpeg':'ro'` in the volume mappings. 3. Restart Frigate and the custom version will be used if the mapping was done correctly. -NOTE: The folder that is mapped from the host needs to be the folder that contains `/bin`. So if the full structure is `/home/appdata/frigate/custom-ffmpeg/bin/ffmpeg` then `/home/appdata/frigate/custom-ffmpeg` needs to be mapped to `/usr/lib/btbn-ffmpeg`. +NOTE: The folder that is set for the config needs to be the folder that contains `/bin`. So if the full structure is `/home/appdata/frigate/custom-ffmpeg/bin/ffmpeg` then the `ffmpeg -> path` field should be `/config/custom-ffmpeg/bin`. -## Custom go2rtc version +### Custom go2rtc version -Frigate currently includes go2rtc v1.8.4, there may be certain cases where you want to run a different version of go2rtc. +Frigate currently includes go2rtc v1.9.4, there may be certain cases where you want to run a different version of go2rtc. To do this: @@ -129,7 +183,7 @@ To do this: 3. Give `go2rtc` execute permission. 4. Restart Frigate and the custom version will be used, you can verify by checking go2rtc logs. -## Validating your config.yaml file updates +## Validating your config.yml file updates When frigate starts up, it checks whether your config file is valid, and if it is not, the process exits. To minimize interruptions when updating your config, you have three options -- you can edit the config via the WebUI which has built in validation, use the config API, or you can validate on the command line using the frigate docker container. 
@@ -157,5 +211,5 @@ docker run \ --entrypoint python3 \ ghcr.io/blakeblackshear/frigate:stable \ -u -m frigate \ - --validate_config + --validate-config ``` diff --git a/docs/docs/configuration/authentication.md b/docs/docs/configuration/authentication.md new file mode 100644 index 000000000..a48b03b48 --- /dev/null +++ b/docs/docs/configuration/authentication.md @@ -0,0 +1,132 @@ +--- +id: authentication +title: Authentication +--- + +# Authentication + +Frigate stores user information in its database. Password hashes are generated using industry standard PBKDF2-SHA256 with 600,000 iterations. Upon successful login, a JWT token is issued with an expiration date and set as a cookie. The cookie is refreshed as needed automatically. This JWT token can also be passed in the Authorization header as a bearer token. + +Users are managed in the UI under Settings > Users. + +The following ports are available to access the Frigate web UI. + +| Port | Description | +| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `8971` | Authenticated UI and API. Reverse proxies should use this port. | +| `5000` | Internal unauthenticated UI and API access. Access to this port should be limited. Intended to be used within the docker network for services that integrate with Frigate and do not support authentication. | + +## Onboarding + +On startup, an admin user and password are generated and printed in the logs. It is recommended to set a new password for the admin account after logging in for the first time under Settings > Users. + +## Resetting admin password + +In the event that you are locked out of your instance, you can tell Frigate to reset the admin password and print it in the logs on next startup using the `reset_admin_password` setting in your config file. 
+ +## Login failure rate limiting + +In order to limit the risk of brute force attacks, rate limiting is available for login failures. This is implemented with SlowApi, and the string notation for valid values is available in [the documentation](https://limits.readthedocs.io/en/stable/quickstart.html#examples). + +For example, `1/second;5/minute;20/hour` will rate limit the login endpoint when failures occur more than: + +- 1 time per second +- 5 times per minute +- 20 times per hour + +Restarting Frigate will reset the rate limits. + +If you are running Frigate behind a proxy, you will want to set `trusted_proxies` or these rate limits will apply to the upstream proxy IP address. This means that a brute force attack will rate limit login attempts from other devices and could temporarily lock you out of your instance. In order to ensure rate limits only apply to the actual IP address where the requests are coming from, you will need to list the upstream networks that you want to trust. These trusted proxies are checked against the `X-Forwarded-For` header when looking for the IP address where the request originated. + +If you are running a reverse proxy in the same docker compose file as Frigate, here is an example of how your auth config might look: + +```yaml +auth: + failed_login_rate_limit: "1/second;5/minute;20/hour" + trusted_proxies: + - 172.18.0.0/16 # <---- this is the subnet for the internal docker compose network +``` + +## JWT Token Secret + +The JWT token secret needs to be kept secure. Anyone with this secret can generate valid JWT tokens to authenticate with Frigate. This should be a cryptographically random string of at least 64 characters. + +You can generate a token using the Python secret library with the following command: + +```shell +python3 -c 'import secrets; print(secrets.token_hex(64))' +``` + +Frigate looks for a JWT token secret in the following order: + +1. An environment variable named `FRIGATE_JWT_SECRET` +2. 
A docker secret named `FRIGATE_JWT_SECRET` in `/run/secrets/` +3. A `jwt_secret` option from the Home Assistant Addon options +4. A `.jwt_secret` file in the config directory + +If no secret is found on startup, Frigate generates one and stores it in a `.jwt_secret` file in the config directory. + +Changing the secret will invalidate current tokens. + +## Proxy configuration + +Frigate can be configured to leverage features of common upstream authentication proxies such as Authelia, Authentik, oauth2_proxy, or traefik-forward-auth. + +If you are leveraging the authentication of an upstream proxy, you likely want to disable Frigate's authentication. Optionally, if communication between the reverse proxy and Frigate is over an untrusted network, you should set an `auth_secret` in the `proxy` config and configure the proxy to send the secret value as a header named `X-Proxy-Secret`. Assuming this is an untrusted network, you will also want to [configure a real TLS certificate](tls.md) to ensure the traffic can't simply be sniffed to steal the secret. + +Here is an example of how to disable Frigate's authentication and also ensure the requests come only from your known proxy. + +```yaml +auth: + enabled: False + +proxy: + auth_secret: +``` + +You can use the following code to generate a random secret. + +```shell +python3 -c 'import secrets; print(secrets.token_hex(64))' +``` + +### Header mapping + +If you have disabled Frigate's authentication and your proxy supports passing a header with the authenticated username, you can use the `header_map` config to specify the header name so it is passed to Frigate. For example, the following will map the `X-Forwarded-User` value. Header names are not case sensitive. + +```yaml +proxy: + ... 
+ header_map: + user: x-forwarded-user +``` + +Note that only the following list of headers are permitted by default: + +``` +Remote-User +Remote-Groups +Remote-Email +Remote-Name +X-Forwarded-User +X-Forwarded-Groups +X-Forwarded-Email +X-Forwarded-Preferred-Username +X-authentik-username +X-authentik-groups +X-authentik-email +X-authentik-name +X-authentik-uid +``` + +If you would like to add more options, you can overwrite the default file with a docker bind mount at `/usr/local/nginx/conf/proxy_trusted_headers.conf`. Reference the source code for the default file formatting. + +Future versions of Frigate may leverage group and role headers for authorization in Frigate as well. + +### Login page redirection + +Frigate gracefully performs login page redirection that should work with most authentication proxies. If your reverse proxy returns a `Location` header on `401`, `302`, or `307` unauthorized responses, Frigate's frontend will automatically detect it and redirect to that URL. + +### Custom logout url + +If your reverse proxy has a dedicated logout url, you can specify using the `logout_url` config option. This will update the link for the `Logout` link in the UI. diff --git a/docs/docs/configuration/autotracking.md b/docs/docs/configuration/autotracking.md index 31048db2e..a8903441e 100644 --- a/docs/docs/configuration/autotracking.md +++ b/docs/docs/configuration/autotracking.md @@ -159,7 +159,7 @@ This is often caused by the same reason as above - the `MoveStatus` ONVIF parame ### I'm seeing this error in the logs: "Autotracker: motion estimator couldn't get transformations". What does this mean? -To maintain object tracking during PTZ moves, Frigate tracks the motion of your camera based on the details of the frame. 
If you are seeing this message, it could mean that your `zoom_factor` may be set too high, the scene around your detected object does not have enough details (like hard edges or color variatons), or your camera's shutter speed is too slow and motion blur is occurring. Try reducing `zoom_factor`, finding a way to alter the scene around your object, or changing your camera's shutter speed. +To maintain object tracking during PTZ moves, Frigate tracks the motion of your camera based on the details of the frame. If you are seeing this message, it could mean that your `zoom_factor` may be set too high, the scene around your detected object does not have enough details (like hard edges or color variations), or your camera's shutter speed is too slow and motion blur is occurring. Try reducing `zoom_factor`, finding a way to alter the scene around your object, or changing your camera's shutter speed. ### Calibration seems to have completed, but the camera is not actually moving to track my object. Why? diff --git a/docs/docs/configuration/birdseye.md b/docs/docs/configuration/birdseye.md index 6471bf4e3..2c9fbbdf4 100644 --- a/docs/docs/configuration/birdseye.md +++ b/docs/docs/configuration/birdseye.md @@ -1,13 +1,20 @@ # Birdseye -Birdseye allows a heads-up view of your cameras to see what is going on around your property / space without having to watch all cameras that may have nothing happening. Birdseye allows specific modes that intelligently show and disappear based on what you care about. +In addition to Frigate's Live camera dashboard, Birdseye allows a portable heads-up view of your cameras to see what is going on around your property / space without having to watch all cameras that may have nothing happening. Birdseye allows specific modes that intelligently show and disappear based on what you care about. + +Birdseye can be viewed by adding the "Birdseye" camera to a Camera Group in the Web UI. 
Add a Camera Group by pressing the "+" icon on the Live page, and choose "Birdseye" as one of the cameras. + +Birdseye can also be used in HomeAssistant dashboards, cast to media devices, etc. + +## Birdseye Behavior ### Birdseye Modes Birdseye offers different modes to customize which cameras show under which circumstances. - - **continuous:** All cameras are always included - - **motion:** Cameras that have detected motion within the last 30 seconds are included - - **objects:** Cameras that have tracked an active object within the last 30 seconds are included + +- **continuous:** All cameras are always included +- **motion:** Cameras that have detected motion within the last 30 seconds are included +- **objects:** Cameras that have tracked an active object within the last 30 seconds are included ### Custom Birdseye Icon @@ -34,6 +41,29 @@ cameras: enabled: False ``` +### Birdseye Inactivity + +By default birdseye shows all cameras that have had the configured activity in the last 30 seconds, this can be configured: + +```yaml +birdseye: + enabled: True + inactivity_threshold: 15 +``` + +## Birdseye Layout + +### Birdseye Dimensions + +The resolution and aspect ratio of birdseye can be configured. Resolution will increase the quality but does not affect the layout. Changing the aspect ratio of birdseye does affect how cameras are laid out. + +```yaml +birdseye: + enabled: True + width: 1280 + height: 720 +``` + ### Sorting cameras in the Birdseye view It is possible to override the order of cameras that are being shown in the Birdseye view. @@ -54,4 +84,28 @@ cameras: order: 2 ``` -*Note*: Cameras are sorted by default using their name to ensure a constant view inside Birdseye. +_Note_: Cameras are sorted by default using their name to ensure a constant view inside Birdseye. + +### Birdseye Cameras + +It is possible to limit the number of cameras shown on birdseye at one time. When this is enabled, birdseye will show the cameras with most recent activity. 
There is a cooldown to ensure that cameras do not switch too frequently. + +For example, this can be configured to only show the most recently active camera. + +```yaml +birdseye: + enabled: True + layout: + max_cameras: 1 +``` + +### Birdseye Scaling + +By default birdseye tries to fit 2 cameras in each row and then double in size until a suitable layout is found. The scaling can be configured with a value between 1.0 and 5.0 depending on use case. + +```yaml +birdseye: + enabled: True + layout: + scaling_factor: 3.0 +``` diff --git a/docs/docs/configuration/camera_specific.md b/docs/docs/configuration/camera_specific.md index 7c1fe4f84..70638b69e 100644 --- a/docs/docs/configuration/camera_specific.md +++ b/docs/docs/configuration/camera_specific.md @@ -9,6 +9,12 @@ This page makes use of presets of FFmpeg args. For more information on presets, ::: +:::note + +Many cameras support encoding options which greatly affect the live view experience, see the [Live view](/configuration/live) page for more info. + +::: + ## MJPEG Cameras Note that mjpeg cameras require encoding the video into h264 for recording, and restream roles. This will use significantly more CPU than if the cameras supported h264 feeds directly. It is recommended to use the restream role to create an h264 restream and then use that as the source for ffmpeg. 
@@ -69,16 +75,12 @@ cameras: ffmpeg: output_args: record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -tag:v hvc1 -bsf:v hevc_mp4toannexb -c:a aac - rtmp: -c:v copy -c:a aac -f flv inputs: - path: rtsp://user:password@camera-ip:554/H264/ch1/main/av_stream # <----- Update for your camera roles: - detect - record - - rtmp - rtmp: - enabled: False # <-- RTMP should be disabled if your stream is not H264 detect: width: # <- optional, by default Frigate tries to automatically detect resolution height: # <- optional, by default Frigate tries to automatically detect resolution @@ -105,7 +107,10 @@ If available, recommended settings are: According to [this discussion](https://github.com/blakeblackshear/frigate/issues/3235#issuecomment-1135876973), the http video streams seem to be the most reliable for Reolink. -:::caution +Cameras connected via a Reolink NVR can be connected with the http stream, use `channel[0..15]` in the stream url for the additional channels. +The setup of main stream can be also done via RTSP, but isn't always reliable on all hardware versions. The example configuration is working with the oldest HW version RLN16-410 device with multiple types of cameras. + +:::warning The below configuration only works for reolink cameras with stream resolution of 5MP or lower, 8MP+ cameras need to use RTSP as http-flv is not supported in this case. 
@@ -118,6 +123,11 @@ go2rtc: - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus" your_reolink_camera_sub: - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password" + your_reolink_camera_via_nvr: + - "ffmpeg:http://reolink_nvr_ip/flv?port=1935&app=bcs&stream=channel3_main.bcs&user=username&password=password" # channel numbers are 0-15 + - "ffmpeg:your_reolink_camera_via_nvr#audio=aac" + your_reolink_camera_via_nvr_sub: + - "ffmpeg:http://reolink_nvr_ip/flv?port=1935&app=bcs&stream=channel3_ext.bcs&user=username&password=password" cameras: your_reolink_camera: @@ -131,6 +141,17 @@ cameras: input_args: preset-rtsp-restream roles: - detect + reolink_via_nvr: + ffmpeg: + inputs: + - path: rtsp://127.0.0.1:8554/your_reolink_camera_via_nvr?video=copy&audio=aac + input_args: preset-rtsp-restream + roles: + - record + - path: rtsp://127.0.0.1:8554/your_reolink_camera_via_nvr_sub?video=copy + input_args: preset-rtsp-restream + roles: + - detect ``` #### Reolink Doorbell @@ -160,17 +181,16 @@ go2rtc: - rtspx://192.168.1.1:7441/abcdefghijk ``` -[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-rtsp) +[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-rtsp) -In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record and rtmp if used directly with unifi protect. +In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect. 
```yaml ffmpeg: output_args: record: preset-record-ubiquiti - rtmp: preset-rtmp-ubiquiti # recommend using go2rtc instead ``` ### TP-Link VIGI Cameras -TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded events. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`. +TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded footage. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`. diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md index a95ffae86..b7c2798e1 100644 --- a/docs/docs/configuration/cameras.md +++ b/docs/docs/configuration/cameras.md @@ -7,16 +7,15 @@ title: Camera Configuration Several inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create recordings from a higher resolution stream, or vice versa. -A camera is enabled by default but can be temporarily disabled by using `enabled: False`. Existing events and recordings can still be accessed. Live streams, recording and detecting are not working. Camera specific configurations will be used. +A camera is enabled by default but can be temporarily disabled by using `enabled: False`. 
Existing tracked objects and recordings can still be accessed. Live streams, recording and detecting are not working. Camera specific configurations will be used. Each role can only be assigned to one input per camera. The options for roles are as follows: -| Role | Description | -| -------- | ---------------------------------------------------------------------------------------- | -| `detect` | Main feed for object detection. [docs](object_detectors.md) | -| `record` | Saves segments of the video feed based on configuration settings. [docs](record.md) | -| `audio` | Feed for audio based detection. [docs](audio_detectors.md) | -| `rtmp` | Deprecated: Broadcast as an RTMP feed for other services to consume. [docs](restream.md) | +| Role | Description | +| -------- | ----------------------------------------------------------------------------------- | +| `detect` | Main feed for object detection. [docs](object_detectors.md) | +| `record` | Saves segments of the video feed based on configuration settings. [docs](record.md) | +| `audio` | Feed for audio based detection. [docs](audio_detectors.md) | ```yaml mqtt: @@ -29,7 +28,6 @@ cameras: - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 roles: - detect - - rtmp # <- deprecated, recommend using restream instead - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/live roles: - record @@ -48,11 +46,19 @@ cameras: side: ... ``` +:::note + +If you only define one stream in your `inputs` and do not assign a `detect` role to it, Frigate will automatically assign it the `detect` role. Frigate will always decode a stream to support motion detection, Birdseye, the API image endpoints, and other features, even if you have disabled object detection with `enabled: False` in your config's `detect` section. 
+ +If you plan to use Frigate for recording only, it is still recommended to define a `detect` role for a low resolution stream to minimize resource usage from the required stream decoding. + +::: + For camera model specific settings check the [camera specific](camera_specific.md) infos. ## Setting up camera PTZ controls -:::caution +:::warning Not every PTZ supports ONVIF, which is the standard protocol Frigate uses to communicate with your camera. Check the [official list of ONVIF conformant products](https://www.onvif.org/conformant-products/), your camera documentation, or camera manufacturer's website to ensure your PTZ supports ONVIF. Also, ensure your camera is running the latest firmware. @@ -73,26 +79,58 @@ cameras: If the ONVIF connection is successful, PTZ controls will be available in the camera's WebUI. +:::tip + +If your ONVIF camera does not require authentication credentials, you may still need to specify an empty string for `user` and `password`, eg: `user: ""` and `password: ""`. + +::: + An ONVIF-capable camera that supports relative movement within the field of view (FOV) can also be configured to automatically track moving objects and keep them in the center of the frame. For autotracking setup, see the [autotracking](autotracking.md) docs. ## ONVIF PTZ camera recommendations This list of working and non-working PTZ cameras is based on user feedback. 
-| Brand or specific camera | PTZ Controls | Autotracking | Notes | -| ------------------------ | :----------: | :----------: | ----------------------------------------------------------------------------------------------------------------------------------------------- | -| Amcrest | ✅ | ✅ | ⛔️ Generally, Amcrest should work, but some older models (like the common IP2M-841) don't support autotracking | -| Amcrest ASH21 | ❌ | ❌ | No ONVIF support | -| Ctronics PTZ | ✅ | ❌ | | -| Dahua | ✅ | ✅ | | -| Foscam R5 | ✅ | ❌ | | -| Hikvision | ✅ | ❌ | Incomplete ONVIF support (MoveStatus won't update even on latest firmware) - reported with HWP-N4215IH-DE and DS-2DE3304W-DE, but likely others | -| Reolink 511WA | ✅ | ❌ | Zoom only | -| Reolink E1 Pro | ✅ | ❌ | | -| Reolink E1 Zoom | ✅ | ❌ | | -| Reolink RLC-823A 16x | ✅ | ❌ | | -| Sunba 405-D20X | ✅ | ❌ | | -| Tapo C200 | ✅ | ❌ | Incomplete ONVIF support | -| Tapo C210 | ❌ | ❌ | Incomplete ONVIF support | -| Uniview IPC672LR-AX4DUPK | ✅ | ❌ | Firmware says FOV relative movement is supported, but camera doesn't actually move when sending ONVIF commands | -| Vikylin PTZ-2804X-I2 | ❌ | ❌ | Incomplete ONVIF support | +| Brand or specific camera | PTZ Controls | Autotracking | Notes | +| ---------------------------- | :----------: | :----------: | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| Amcrest | ✅ | ✅ | ⛔️ Generally, Amcrest should work, but some older models (like the common IP2M-841) don't support autotracking | +| Amcrest ASH21 | ✅ | ❌ | ONVIF service port: 80 | +| Amcrest IP4M-S2112EW-AI | ✅ | ❌ | FOV relative movement not supported. | +| Amcrest IP5M-1190EW | ✅ | ❌ | ONVIF Port: 80. FOV relative movement not supported. 
| +| Ctronics PTZ | ✅ | ❌ | | +| Dahua | ✅ | ✅ | | +| Dahua DH-SD2A500HB | ✅ | ❌ | | +| Foscam R5 | ✅ | ❌ | | +| Hanwha XNP-6550RH | ✅ | ❌ | | +| Hikvision | ✅ | ❌ | Incomplete ONVIF support (MoveStatus won't update even on latest firmware) - reported with HWP-N4215IH-DE and DS-2DE3304W-DE, but likely others | +| Hikvision DS-2DE3A404IWG-E/W | ✅ | ✅ | | +| Reolink 511WA | ✅ | ❌ | Zoom only | +| Reolink E1 Pro | ✅ | ❌ | | +| Reolink E1 Zoom | ✅ | ❌ | | +| Reolink RLC-823A 16x | ✅ | ❌ | | +| Speco O8P32X | ✅ | ❌ | | +| Sunba 405-D20X | ✅ | ❌ | | +| Tapo | ✅ | ❌ | Many models supported, ONVIF Service Port: 2020 | +| Uniview IPC672LR-AX4DUPK | ✅ | ❌ | Firmware says FOV relative movement is supported, but camera doesn't actually move when sending ONVIF commands | +| Uniview IPC6612SR-X33-VG | ✅ | ✅ | Leave `calibrate_on_startup` as `False`. A user has reported that zooming with `absolute` is working. | +| Vikylin PTZ-2804X-I2 | ❌ | ❌ | Incomplete ONVIF support | + +## Setting up camera groups + +:::tip + +It is recommended to set up camera groups using the UI. + +::: + +Cameras can be grouped together and assigned a name and icon, this allows them to be reviewed and filtered together. There will always be the default group for all cameras. 
+ +```yaml +camera_groups: + front: + cameras: + - driveway_cam + - garage_cam + icon: LuCar + order: 0 +``` diff --git a/docs/docs/configuration/ffmpeg_presets.md b/docs/docs/configuration/ffmpeg_presets.md index 4715fae95..0dc8cdedd 100644 --- a/docs/docs/configuration/ffmpeg_presets.md +++ b/docs/docs/configuration/ffmpeg_presets.md @@ -18,13 +18,11 @@ See [the hwaccel docs](/configuration/hardware_acceleration.md) for more info on | preset-vaapi | Intel & AMD VAAPI | Check hwaccel docs to ensure correct driver is chosen | | preset-intel-qsv-h264 | Intel QSV with h264 stream | If issues occur recommend using vaapi preset instead | | preset-intel-qsv-h265 | Intel QSV with h265 stream | If issues occur recommend using vaapi preset instead | -| preset-nvidia-h264 | Nvidia GPU with h264 stream | | -| preset-nvidia-h265 | Nvidia GPU with h265 stream | | -| preset-nvidia-mjpeg | Nvidia GPU with mjpeg stream | Recommend restreaming mjpeg and using nvidia-h264 | +| preset-nvidia | Nvidia GPU | | | preset-jetson-h264 | Nvidia Jetson with h264 stream | | | preset-jetson-h265 | Nvidia Jetson with h265 stream | | -| preset-rk-h264 | Rockchip MPP with h264 stream | Use image with *-rk suffix and privileged mode | -| preset-rk-h265 | Rockchip MPP with h265 stream | Use image with *-rk suffix and privileged mode | +| preset-rk-h264 | Rockchip MPP with h264 stream | Use image with \*-rk suffix and privileged mode | +| preset-rk-h265 | Rockchip MPP with h265 stream | Use image with \*-rk suffix and privileged mode | ### Input Args Presets @@ -44,7 +42,7 @@ See [the camera specific docs](/configuration/camera_specific.md) for more info | preset-rtsp-udp | RTSP Stream via UDP | Use when camera is UDP only | | preset-rtsp-blue-iris | Blue Iris RTSP Stream | Use when consuming a stream from Blue Iris | -:::caution +:::warning It is important to be mindful of input args when using restream because you can have a mix of protocols. 
`http` and `rtmp` presets cannot be used with `rtsp` streams. For example, when using a reolink cam with the rtsp restream as a source for record the preset-http-reolink will cause a crash. In this case presets will need to be set at the stream level. See the example below. @@ -73,11 +71,11 @@ cameras: Output args presets help make the config more readable and handle use cases for different types of streams to ensure consistent recordings. -| Preset | Usage | Other Notes | -| -------------------------------- | --------------------------------- | --------------------------------------------- | -| preset-record-generic | Record WITHOUT audio | This is the default when nothing is specified | -| preset-record-generic-audio-aac | Record WITH aac audio | Use this to enable audio in recordings | -| preset-record-generic-audio-copy | Record WITH original audio | Use this to enable audio in recordings | -| preset-record-mjpeg | Record an mjpeg stream | Recommend restreaming mjpeg stream instead | -| preset-record-jpeg | Record live jpeg | Recommend restreaming live jpeg instead | -| preset-record-ubiquiti | Record ubiquiti stream with audio | Recordings with ubiquiti non-standard audio | +| Preset | Usage | Other Notes | +| -------------------------------- | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| preset-record-generic | Record WITHOUT audio | This is the default when nothing is specified | +| preset-record-generic-audio-copy | Record WITH original audio | Use this to enable audio in recordings | +| preset-record-generic-audio-aac | Record WITH transcoded aac audio | Use this to transcode to aac audio. 
If your source is already aac, use preset-record-generic-audio-copy instead to avoid re-encoding | +| preset-record-mjpeg | Record an mjpeg stream | Recommend restreaming mjpeg stream instead | +| preset-record-jpeg | Record live jpeg | Recommend restreaming live jpeg instead | +| preset-record-ubiquiti | Record ubiquiti stream with audio | Recordings with ubiquiti non-standard audio | diff --git a/docs/docs/configuration/genai.md b/docs/docs/configuration/genai.md new file mode 100644 index 000000000..e2f6ac318 --- /dev/null +++ b/docs/docs/configuration/genai.md @@ -0,0 +1,179 @@ +--- +id: genai +title: Generative AI +--- + +Generative AI can be used to automatically generate descriptions based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate by providing detailed text descriptions as a basis of the search query. + +Semantic Search must be enabled to use Generative AI. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail. + +## Configuration + +Generative AI can be enabled for all cameras or only for specific cameras. There are currently 3 providers available to integrate with Frigate. + +If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`. + +```yaml +genai: + enabled: True + provider: gemini + api_key: "{FRIGATE_GEMINI_API_KEY}" + model: gemini-1.5-flash + +cameras: + front_camera: ... + indoor_camera: + genai: # <- disable GenAI for your indoor camera + enabled: False +``` + +## Ollama + +[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on a Apple silicon Mac for best performance. 
Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [docker container](https://hub.docker.com/r/ollama/ollama) available. + +### Supported Models + +You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). At the time of writing, this includes `llava`, `llava-llama3`, `llava-phi3`, and `moondream`. + +:::note + +You should have at least 8 GB of RAM available (or VRAM if running on GPU) to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models. + +::: + +### Configuration + +```yaml +genai: + enabled: True + provider: ollama + base_url: http://localhost:11434 + model: llava +``` + +## Google Gemini + +Google Gemini has a free tier allowing [15 queries per minute](https://ai.google.dev/pricing) to the API, which is more than sufficient for standard Frigate usage. + +### Supported Models + +You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini). At the time of writing, this includes `gemini-1.5-pro` and `gemini-1.5-flash`. + +### Get API Key + +To start using Gemini, you must first get an API key from [Google AI Studio](https://aistudio.google.com). + +1. Accept the Terms of Service +2. Click "Get API Key" from the right hand navigation +3. Click "Create API key in new project" +4. Copy the API key for use in your config + +### Configuration + +```yaml +genai: + enabled: True + provider: gemini + api_key: "{FRIGATE_GEMINI_API_KEY}" + model: gemini-1.5-flash +``` + +## OpenAI + +OpenAI does not have a free tier for their API. With the release of gpt-4o, pricing has been reduced and each generation should cost fractions of a cent if you choose to go this route. + +### Supported Models + +You must use a vision capable model with Frigate. 
Current model variants can be found [in their documentation](https://platform.openai.com/docs/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`. + +### Get API Key + +To start using OpenAI, you must first [create an API key](https://platform.openai.com/api-keys) and [configure billing](https://platform.openai.com/settings/organization/billing/overview). + +### Configuration + +```yaml +genai: + enabled: True + provider: openai + api_key: "{FRIGATE_OPENAI_API_KEY}" + model: gpt-4o +``` + +## Azure OpenAI + +Microsoft offers several vision models through Azure OpenAI. A subscription is required. + +### Supported Models + +You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`. + +### Create Resource and Get API Key + +To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key and resource URL, which must include the `api-version` parameter (see the example below). The model field is not required in your configuration as the model is part of the deployment name you chose when deploying the resource. + +### Configuration + +```yaml +genai: + enabled: True + provider: azure_openai + base_url: https://example-endpoint.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview + api_key: "{FRIGATE_OPENAI_API_KEY}" +``` + +## Custom Prompts + +Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows: + +``` +Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background. 
+``` + +:::tip + +Prompts can use variable replacements like `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt. + +::: + +You are also able to define custom prompts in your configuration. + +```yaml +genai: + enabled: True + provider: ollama + base_url: http://localhost:11434 + model: llava + prompt: "Describe the {label} in these images from the {camera} security camera." + object_prompts: + person: "Describe the main person in these images (gender, age, clothing, activity, etc). Do not include where the activity is occurring (sidewalk, concrete, driveway, etc)." + car: "Label the primary vehicle in these images with just the name of the company if it is a delivery vehicle, or the color make and model." +``` + +Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire. By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones. + +Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the thumbnails collected over the object's lifetime to the model. Using a snapshot provides the AI with a higher-resolution image (typically downscaled by the AI itself), but the trade-off is that only a single image is used, which might limit the model's ability to determine object movement or direction. + +```yaml +cameras: + front_door: + genai: + use_snapshot: True + prompt: "Describe the {label} in these images from the {camera} security camera at the front door of a house, aimed outward toward the street." + object_prompts: + person: "Describe the main person in these images (gender, age, clothing, activity, etc). 
Do not include where the activity is occurring (sidewalk, concrete, driveway, etc). If delivering a package, include the company the package is from." + cat: "Describe the cat in these images (color, size, tail). Indicate whether or not the cat is by the flower pots. If the cat is chasing a mouse, make up a name for the mouse." + objects: + - person + - cat + required_zones: + - steps +``` + +### Experiment with prompts + +Many providers also have a public facing chat interface for their models. Download a couple of different thumbnails or snapshots from Frigate and try new things in the playground to get descriptions to your liking before updating the prompt in Frigate. + +- OpenAI - [ChatGPT](https://chatgpt.com) +- Gemini - [Google AI Studio](https://aistudio.google.com) +- Ollama - [Open WebUI](https://docs.openwebui.com/) diff --git a/docs/docs/configuration/hardware_acceleration.md b/docs/docs/configuration/hardware_acceleration.md index ad9d27211..c6acdea14 100644 --- a/docs/docs/configuration/hardware_acceleration.md +++ b/docs/docs/configuration/hardware_acceleration.md @@ -5,14 +5,16 @@ title: Hardware Acceleration # Hardware Acceleration -It is recommended to update your configuration to enable hardware accelerated decoding in ffmpeg. Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro +It is highly recommended to use a GPU for hardware acceleration in Frigate. Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg. + +Depending on your system, these parameters may not be compatible. 
More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro # Officially Supported ## Raspberry Pi 3/4 -Ensure you increase the allocated RAM for your GPU to at least 128 (raspi-config > Performance Options > GPU Memory). -**NOTICE**: If you are using the addon, you may need to turn off `Protection mode` for hardware acceleration. +Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory). +If you are using the HA addon, you may need to use the full access variant and turn off `Protection mode` for hardware acceleration. ```yaml # if you want to decode a h264 stream @@ -26,38 +28,74 @@ ffmpeg: :::note -If running Frigate in docker, you either need to run in priviliged mode or be sure to map the /dev/video1x devices to Frigate +If running Frigate in Docker, you either need to run in privileged mode or +map the `/dev/video*` devices to Frigate. With Docker compose add: ```yaml -docker run -d \ ---name frigate \ -... ---device /dev/video10 \ -ghcr.io/blakeblackshear/frigate:stable +services: + frigate: + ... + devices: + - /dev/video11:/dev/video11 ``` +Or with `docker run`: + +```bash +docker run -d \ + --name frigate \ + ... + --device /dev/video11 \ + ghcr.io/blakeblackshear/frigate:stable +``` + +`/dev/video11` is the correct device (on Raspberry Pi 4B). You can check +by running the following and looking for `H264`: + +```bash +for d in /dev/video*; do + echo -e "---\n$d" + v4l2-ctl --list-formats-ext -d $d +done +``` + +Or map in all the `/dev/video*` devices. 
+ ::: ## Intel-based CPUs +:::info + +**Recommended hwaccel Preset** + +| CPU Generation | Intel Driver | Recommended Preset | Notes | +| -------------- | ------------ | ------------------ | ----------------------------------- | +| gen1 - gen7 | i965 | preset-vaapi | qsv is not supported | +| gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-* can also be used | +| gen13+ | iHD / Xe | preset-intel-qsv-* | | +| Intel Arc GPU | iHD / Xe | preset-intel-qsv-* | | + +::: + +:::note + +The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars). + +See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/000005505/processors.html) to figure out what generation your CPU is. + +::: + ### Via VAAPI -VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. VAAPI is recommended for all generations of Intel-based CPUs if QSV does not work. +VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. ```yaml ffmpeg: hwaccel_args: preset-vaapi ``` -:::note - -With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars). - -::: - -### Via Quicksync (>=10th Generation only) - -QSV must be set specifically based on the video encoding of the stream. 
+### Via Quicksync #### H.264 streams @@ -263,10 +301,10 @@ These instructions were originally based on the [Jellyfin documentation](https:/ ## NVIDIA Jetson (Orin AGX, Orin NX, Orin Nano\*, Xavier AGX, Xavier NX, TX2, TX1, Nano) -A separate set of docker images is available that is based on Jetpack/L4T. They comes with an `ffmpeg` build +A separate set of docker images is available that is based on Jetpack/L4T. They come with an `ffmpeg` build with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 4.6, use the -`frigate-tensorrt-jp4` image, or if your Jetson host is running Jetpack 5.0+, use the `frigate-tensorrt-jp5` -image. Note that the Orin Nano has no video encoder, so frigate will use software encoding on this platform, +`stable-tensorrt-jp4` tagged image, or if your Jetson host is running Jetpack 5.0+, use the `stable-tensorrt-jp5` +tagged image. Note that the Orin Nano has no video encoder, so frigate will use software encoding on this platform, but the image will still allow hardware decoding and tensorrt object detection. You will need to use the image with the nvidia container runtime: @@ -277,7 +315,7 @@ You will need to use the image with the nvidia container runtime: docker run -d \ ... --runtime nvidia - ghcr.io/blakeblackshear/frigate-tensorrt-jp5 + ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp5 ``` ### Docker Compose - Jetson @@ -287,7 +325,7 @@ version: '2.4' services: frigate: ... - image: ghcr.io/blakeblackshear/frigate-tensorrt-jp5 + image: ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp5 runtime: nvidia # Add this ``` @@ -337,15 +375,15 @@ that NVDEC/NVDEC1 are in use. ## Rockchip platform -Hardware accelerated video de-/encoding is supported on all Rockchip SoCs. +Hardware accelerated video de-/encoding is supported on all Rockchip SoCs using [Nyanmisaka's FFmpeg 6.1 Fork](https://github.com/nyanmisaka/ffmpeg-rockchip) based on [Rockchip's mpp library](https://github.com/rockchip-linux/mpp). 
-### Setup +### Prerequisites -Use a frigate docker image with `-rk` suffix and enable privileged mode by adding the `--privileged` flag to your docker run command or `privileged: true` to your `docker-compose.yml` file. +Make sure to follow the [Rockchip specific installation instructions](/frigate/installation#rockchip-platform). ### Configuration -Add one of the following ffmpeg presets to your `config.yaml` to enable hardware acceleration: +Add one of the following FFmpeg presets to your `config.yml` to enable hardware video processing: ```yaml # if you try to decode a h264 encoded stream @@ -362,29 +400,3 @@ ffmpeg: Make sure that your SoC supports hardware acceleration for your input stream. For example, if your camera streams with h265 encoding and a 4k resolution, your SoC must be able to de- and encode h265 with a 4k resolution or higher. If you are unsure whether your SoC meets the requirements, take a look at the datasheet. ::: - -### go2rtc presets for hardware accelerated transcoding - -If your input stream is to be transcoded using hardware acceleration, there are these presets for go2rtc: `h264/rk` and `h265/rk`. You can use them this way: - -``` -go2rtc: - streams: - Cam_h264: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h264/rk - Cam_h265: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h265/rk -``` - -:::warning - -The go2rtc docs may suggest the following configuration: - -``` -go2rtc: - streams: - Cam_h264: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h264#hardware=rk - Cam_h265: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h265#hardware=rk -``` - -However, this does not currently work. 
- -::: diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index 8d7547b72..a0b58558d 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -1,33 +1,35 @@ --- id: index -title: Frigate Configuration Reference +title: Frigate Configuration --- For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`). It can be named `frigate.yaml` or `frigate.yml`, but if both files exist `frigate.yaml` will be preferred and `frigate.yml` will be ignored. For all other installation types, the config file should be mapped to `/config/config.yml` inside the container. -It is recommended to start with a minimal configuration and add to it as described in [this guide](../guides/getting_started.md): +It is recommended to start with a minimal configuration and add to it as described in [this guide](../guides/getting_started.md) and use the built in configuration editor in Frigate's UI which supports validation. ```yaml mqtt: - host: mqtt.server.com + enabled: False + cameras: - back: + dummy_camera: # <--- this will be changed to your actual camera later + enabled: False ffmpeg: inputs: - - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 + - path: rtsp://127.0.0.1:554/rtsp roles: - detect ``` -### VSCode Configuration Schema +## VSCode Configuration Schema -VSCode (and VSCode addon) supports the JSON schemas which will automatically validate the config. This can be added by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the top of the config file. `frigate_host` being the IP address of Frigate or `ccab4aaf-frigate` if running in the addon. +VSCode supports JSON schemas for automatically validating configuration files. 
You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VSCode and Frigate as an add-on, you should use `ccab4aaf-frigate` instead. Make sure to expose the internal unauthenticated port `5000` when accessing the config from VSCode on another machine. -### Environment Variable Substitution +## Environment Variable Substitution -Frigate supports the use of environment variables starting with `FRIGATE_` **only** where specifically indicated in the configuration reference below. For example, the following values can be replaced at runtime by using environment variables: +Frigate supports the use of environment variables starting with `FRIGATE_` **only** where specifically indicated in the [reference config](./reference.md). For example, the following values can be replaced at runtime by using environment variables: ```yaml mqtt: @@ -54,631 +56,192 @@ go2rtc: password: "{FRIGATE_GO2RTC_RTSP_PASSWORD}" ``` -### Full configuration reference: +```yaml +genai: + api_key: "{FRIGATE_GENAI_API_KEY}" +``` -:::caution +## Common configuration examples -It is not recommended to copy this full configuration file. Only specify values that are different from the defaults. Configuration options and default values may change in future versions. +Here are some common starter configuration examples. Refer to the [reference config](./reference.md) for detailed information about all the config values. 
-::: +### Raspberry Pi Home Assistant Addon with USB Coral + +- Single camera with 720p, 5fps stream for detect +- MQTT connected to home assistant mosquitto addon +- Hardware acceleration for decoding video +- USB Coral detector +- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not +- Continue to keep all video if it qualified as an alert or detection for 30 days +- Save snapshots for 30 days +- Motion mask for the camera timestamp ```yaml mqtt: - # Optional: Enable mqtt server (default: shown below) - enabled: True - # Required: host name - host: mqtt.server.com - # Optional: port (default: shown below) - port: 1883 - # Optional: topic prefix (default: shown below) - # NOTE: must be unique if you are running multiple instances - topic_prefix: frigate - # Optional: client id (default: shown below) - # NOTE: must be unique if you are running multiple instances - client_id: frigate - # Optional: user - # NOTE: MQTT user can be specified with an environment variable or docker secrets that must begin with 'FRIGATE_'. - # e.g. user: '{FRIGATE_MQTT_USER}' - user: mqtt_user - # Optional: password - # NOTE: MQTT password can be specified with an environment variable or docker secrets that must begin with 'FRIGATE_'. - # e.g. 
password: '{FRIGATE_MQTT_PASSWORD}' - password: password - # Optional: tls_ca_certs for enabling TLS using self-signed certs (default: None) - tls_ca_certs: /path/to/ca.crt - # Optional: tls_client_cert and tls_client key in order to use self-signed client - # certificates (default: None) - # NOTE: certificate must not be password-protected - # do not set user and password when using a client certificate - tls_client_cert: /path/to/client.crt - tls_client_key: /path/to/client.key - # Optional: tls_insecure (true/false) for enabling TLS verification of - # the server hostname in the server certificate (default: None) - tls_insecure: false - # Optional: interval in seconds for publishing stats (default: shown below) - stats_interval: 60 + host: core-mosquitto + user: mqtt-user + password: xxxxxxxxxx -# Optional: Detectors configuration. Defaults to a single CPU detector -detectors: - # Required: name of the detector - detector_name: - # Required: type of the detector - # Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below) - # Additional detector types can also be plugged in. - # Detectors may require additional configuration. - # Refer to the Detectors configuration page for more information. - type: cpu - -# Optional: Database configuration -database: - # The path to store the SQLite DB (default: shown below) - path: /config/frigate.db - -# Optional: model modifications -model: - # Optional: path to the model (default: automatic based on detector) - path: /edgetpu_model.tflite - # Optional: path to the labelmap (default: shown below) - labelmap_path: /labelmap.txt - # Required: Object detection model input width (default: shown below) - width: 320 - # Required: Object detection model input height (default: shown below) - height: 320 - # Optional: Object detection model input colorspace - # Valid values are rgb, bgr, or yuv. 
(default: shown below) - input_pixel_format: rgb - # Optional: Object detection model input tensor format - # Valid values are nhwc or nchw (default: shown below) - input_tensor: nhwc - # Optional: Object detection model type, currently only used with the OpenVINO detector - # Valid values are ssd, yolox, yolov5, or yolov8 (default: shown below) - model_type: ssd - # Optional: Label name modifications. These are merged into the standard labelmap. - labelmap: - 2: vehicle - -# Optional: Audio Events Configuration -# NOTE: Can be overridden at the camera level -audio: - # Optional: Enable audio events (default: shown below) - enabled: False - # Optional: Configure the amount of seconds without detected audio to end the event (default: shown below) - max_not_heard: 30 - # Optional: Configure the min rms volume required to run audio detection (default: shown below) - # As a rule of thumb: - # - 200 - high sensitivity - # - 500 - medium sensitivity - # - 1000 - low sensitivity - min_volume: 500 - # Optional: Types of audio to listen for (default: shown below) - listen: - - bark - - fire_alarm - - scream - - speech - - yell - # Optional: Filters to configure detection. - filters: - # Label that matches label in listen config. - speech: - # Minimum score that triggers an audio event (default: shown below) - threshold: 0.8 - -# Optional: logger verbosity settings -logger: - # Optional: Default log verbosity (default: shown below) - default: info - # Optional: Component specific logger overrides - logs: - frigate.event: debug - -# Optional: set environment variables -environment_vars: - EXAMPLE_VAR: value - -# Optional: birdseye configuration -# NOTE: Can (enabled, mode) be overridden at the camera level -birdseye: - # Optional: Enable birdseye view (default: shown below) - enabled: True - # Optional: Restream birdseye via RTSP (default: shown below) - # NOTE: Enabling this will set birdseye to run 24/7 which may increase CPU usage somewhat. 
- restream: False - # Optional: Width of the output resolution (default: shown below) - width: 1280 - # Optional: Height of the output resolution (default: shown below) - height: 720 - # Optional: Encoding quality of the mpeg1 feed (default: shown below) - # 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources. - quality: 8 - # Optional: Mode of the view. Available options are: objects, motion, and continuous - # objects - cameras are included if they have had a tracked object within the last 30 seconds - # motion - cameras are included if motion was detected in the last 30 seconds - # continuous - all cameras are included always - mode: objects - -# Optional: ffmpeg configuration -# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets ffmpeg: - # Optional: global ffmpeg args (default: shown below) - global_args: -hide_banner -loglevel warning -threads 2 - # Optional: global hwaccel args (default: shown below) - # NOTE: See hardware acceleration docs for your specific device - hwaccel_args: [] - # Optional: global input args (default: shown below) - input_args: preset-rtsp-generic - # Optional: global output args - output_args: - # Optional: output args for detect streams (default: shown below) - detect: -threads 2 -f rawvideo -pix_fmt yuv420p - # Optional: output args for record streams (default: shown below) - record: preset-record-generic - # Optional: output args for rtmp streams (default: shown below) - rtmp: preset-rtmp-generic - # Optional: Time in seconds to wait before ffmpeg retries connecting to the camera. 
(default: shown below) - # If set too low, frigate will retry a connection to the camera's stream too frequently, using up the limited streams some cameras can allow at once - # If set too high, then if a ffmpeg crash or camera stream timeout occurs, you could potentially lose up to a maximum of retry_interval second(s) of footage - # NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout. - retry_interval: 10 + hwaccel_args: preset-rpi-64-h264 -# Optional: Detect configuration -# NOTE: Can be overridden at the camera level -detect: - # Optional: width of the frame for the input with the detect role (default: use native stream resolution) - width: 1280 - # Optional: height of the frame for the input with the detect role (default: use native stream resolution) - height: 720 - # Optional: desired fps for your camera for the input with the detect role (default: shown below) - # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera. - fps: 5 - # Optional: enables detection for the camera (default: True) - enabled: True - # Optional: Number of consecutive detection hits required for an object to be initialized in the tracker. (default: 1/2 the frame rate) - min_initialized: 2 - # Optional: Number of frames without a detection before Frigate considers an object to be gone. (default: 5x the frame rate) - max_disappeared: 25 - # Optional: Configuration for stationary object tracking - stationary: - # Optional: Frequency for confirming stationary objects (default: same as threshold) - # When set to 1, object detection will run to confirm the object still exists on every frame. - # If set to 10, object detection will run to confirm the object still exists on every 10th frame. 
- interval: 50 - # Optional: Number of frames without a position change for an object to be considered stationary (default: 10x the frame rate or 10s) - threshold: 50 - # Optional: Define a maximum number of frames for tracking a stationary object (default: not set, track forever) - # This can help with false positives for objects that should only be stationary for a limited amount of time. - # It can also be used to disable stationary object tracking. For example, you may want to set a value for person, but leave - # car at the default. - # WARNING: Setting these values overrides default behavior and disables stationary object tracking. - # There are very few situations where you would want it disabled. It is NOT recommended to - # copy these values from the example config into your config unless you know they are needed. - max_frames: - # Optional: Default for all object types (default: not set, track forever) - default: 3000 - # Optional: Object specific values - objects: - person: 1000 - # Optional: Milliseconds to offset detect annotations by (default: shown below). - # There can often be latency between a recording and the detect process, - # especially when using separate streams for detect and record. - # Use this setting to make the timeline bounding boxes more closely align - # with the recording. The value can be positive or negative. - # TIP: Imagine there is an event clip with a person walking from left to right. - # If the event timeline bounding box is consistently to the left of the person - # then the value should be decreased. Similarly, if a person is walking from - # left to right and the bounding box is consistently ahead of the person - # then the value should be increased. - # TIP: This offset is dynamic so you can change the value and it will update existing - # events, this makes it easy to tune. - # WARNING: Fast moving objects will likely not have the bounding box align. 
- annotation_offset: 0 +detectors: + coral: + type: edgetpu + device: usb -# Optional: Object configuration -# NOTE: Can be overridden at the camera level -objects: - # Optional: list of objects to track from labelmap.txt (default: shown below) - track: - - person - # Optional: mask to prevent all object types from being detected in certain areas (default: no mask) - # Checks based on the bottom center of the bounding box of the object. - # NOTE: This mask is COMBINED with the object type specific mask below - mask: 0,0,1000,0,1000,200,0,200 - # Optional: filters to reduce false positives for specific object types - filters: - person: - # Optional: minimum width*height of the bounding box for the detected object (default: 0) - min_area: 5000 - # Optional: maximum width*height of the bounding box for the detected object (default: 24000000) - max_area: 100000 - # Optional: minimum width/height of the bounding box for the detected object (default: 0) - min_ratio: 0.5 - # Optional: maximum width/height of the bounding box for the detected object (default: 24000000) - max_ratio: 2.0 - # Optional: minimum score for the object to initiate tracking (default: shown below) - min_score: 0.5 - # Optional: minimum decimal percentage for tracked object's computed score to be considered a true positive (default: shown below) - threshold: 0.7 - # Optional: mask to prevent this object type from being detected in certain areas (default: no mask) - # Checks based on the bottom center of the bounding box of the object - mask: 0,0,1000,0,1000,200,0,200 - -# Optional: Motion configuration -# NOTE: Can be overridden at the camera level -motion: - # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below) - # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. - # The value should be between 1 and 255. 
- threshold: 30 - # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection - # needs to recalibrate. (default: shown below) - # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion. - # Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching - # a doorbell camera. - lightning_threshold: 0.8 - # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below) - # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will - # make motion detection more sensitive to smaller moving objects. - # As a rule of thumb: - # - 10 - high sensitivity - # - 30 - medium sensitivity - # - 50 - low sensitivity - contour_area: 10 - # Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below) - # Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster. - # Low values will cause things like moving shadows to be detected as motion for longer. - # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/ - frame_alpha: 0.01 - # Optional: Height of the resized motion frame (default: 100) - # Higher values will result in more granular motion detection at the expense of higher CPU usage. - # Lower values result in less CPU, but small changes may not register as motion. - frame_height: 100 - # Optional: motion mask - # NOTE: see docs for more detailed info on creating masks - mask: 0,900,1080,900,1080,1920,0,1920 - # Optional: improve contrast (default: shown below) - # Enables dynamic contrast improvement. This should help improve night detections at the cost of making motion detection more sensitive - # for daytime. 
- improve_contrast: True - # Optional: Delay when updating camera motion through MQTT from ON -> OFF (default: shown below). - mqtt_off_delay: 30 - -# Optional: Record configuration -# NOTE: Can be overridden at the camera level record: - # Optional: Enable recording (default: shown below) - # WARNING: If recording is disabled in the config, turning it on via - # the UI or MQTT later will have no effect. - enabled: False - # Optional: Number of minutes to wait between cleanup runs (default: shown below) - # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o - expire_interval: 60 - # Optional: Sync recordings with disk on startup and once a day (default: shown below). - sync_recordings: False - # Optional: Retention settings for recording + enabled: True retain: - # Optional: Number of days to retain recordings regardless of events (default: shown below) - # NOTE: This should be set to 0 and retention should be defined in events section below - # if you only want to retain recordings of events. - days: 0 - # Optional: Mode for retention. Available options are: all, motion, and active_objects - # all - save all recording segments regardless of activity - # motion - save all recordings segments with any detected motion - # active_objects - save all recording segments with active/moving objects - # NOTE: this mode only applies when the days setting above is greater than 0 - mode: all - # Optional: Recording Export Settings - export: - # Optional: Timelapse Output Args (default: shown below). - # NOTE: The default args are set to fit 24 hours of recording into 1 hour playback. - # See https://stackoverflow.com/a/58268695 for more info on how these args work. - # As an example: if you wanted to go from 24 hours to 30 minutes that would be going - # from 86400 seconds to 1800 seconds which would be 1800 / 86400 = 0.02. - # The -r (framerate) dictates how smooth the output video is. 
- # So the args would be -vf setpts=0.02*PTS -r 30 in that case. - timelapse_args: "-vf setpts=0.04*PTS -r 30" - # Optional: Event recording settings - events: - # Optional: Number of seconds before the event to include (default: shown below) - pre_capture: 5 - # Optional: Number of seconds after the event to include (default: shown below) - post_capture: 5 - # Optional: Objects to save recordings for. (default: all tracked objects) - objects: - - person - # Optional: Restrict recordings to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - # Optional: Retention settings for recordings of events + days: 7 + mode: motion + alerts: retain: - # Required: Default retention days (default: shown below) - default: 10 - # Optional: Mode for retention. (default: shown below) - # all - save all recording segments for events regardless of activity - # motion - save all recordings segments for events with any detected motion - # active_objects - save all recording segments for event with active/moving objects - # - # NOTE: If the retain mode for the camera is more restrictive than the mode configured - # here, the segments will already be gone by the time this mode is applied. - # For example, if the camera retain mode is "motion", the segments without motion are - # never stored, so setting the mode to "all" here won't bring them back. 
- mode: motion - # Optional: Per object retention days - objects: - person: 15 + days: 30 + detections: + retain: + days: 30 -# Optional: Configuration for the jpg snapshots written to the clips directory for each event -# NOTE: Can be overridden at the camera level snapshots: - # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below) - enabled: False - # Optional: save a clean PNG copy of the snapshot image (default: shown below) - clean_copy: True - # Optional: print a timestamp on the snapshots (default: shown below) - timestamp: False - # Optional: draw bounding box on the snapshots (default: shown below) - bounding_box: True - # Optional: crop the snapshot (default: shown below) - crop: False - # Optional: height to resize the snapshot to (default: original size) - height: 175 - # Optional: Restrict snapshots to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - # Optional: Camera override for retention settings (default: global values) + enabled: True retain: - # Required: Default retention days (default: shown below) - default: 10 - # Optional: Per object retention days - objects: - person: 15 - # Optional: quality of the encoded jpeg, 0-100 (default: shown below) - quality: 70 + default: 30 -# Optional: RTMP configuration -# NOTE: RTMP is deprecated in favor of restream -# NOTE: Can be overridden at the camera level -rtmp: - # Optional: Enable the RTMP stream (default: False) +cameras: + name_of_your_camera: + detect: + width: 1280 + height: 720 + fps: 5 + ffmpeg: + inputs: + - path: rtsp://10.0.10.10:554/rtsp + roles: + - detect + motion: + mask: + - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400 +``` + +### Standalone Intel Mini PC with USB Coral + +- Single camera with 720p, 5fps stream for detect +- MQTT disabled (not integrated with home assistant) +- VAAPI hardware acceleration for decoding video +- USB 
Coral detector +- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not +- Continue to keep all video if it qualified as an alert or detection for 30 days +- Save snapshots for 30 days +- Motion mask for the camera timestamp + +```yaml +mqtt: enabled: False -# Optional: Restream configuration -# Uses https://github.com/AlexxIT/go2rtc (v1.8.3) -go2rtc: +ffmpeg: + hwaccel_args: preset-vaapi -# Optional: jsmpeg stream configuration for WebUI -live: - # Optional: Set the name of the stream that should be used for live view - # in frigate WebUI. (default: name of camera) - stream_name: camera_name - # Optional: Set the height of the jsmpeg stream. (default: 720) - # This must be less than or equal to the height of the detect stream. Lower resolutions - # reduce bandwidth required for viewing the jsmpeg stream. Width is computed to match known aspect ratio. - height: 720 - # Optional: Set the encode quality of the jsmpeg stream (default: shown below) - # 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources. 
- quality: 8 +detectors: + coral: + type: edgetpu + device: usb -# Optional: in-feed timestamp style configuration -# NOTE: Can be overridden at the camera level -timestamp_style: - # Optional: Position of the timestamp (default: shown below) - # "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right) - position: "tl" - # Optional: Format specifier conform to the Python package "datetime" (default: shown below) - # Additional Examples: - # german: "%d.%m.%Y %H:%M:%S" - format: "%m/%d/%Y %H:%M:%S" - # Optional: Color of font - color: - # All Required when color is specified (default: shown below) - red: 255 - green: 255 - blue: 255 - # Optional: Line thickness of font (default: shown below) - thickness: 2 - # Optional: Effect of lettering (default: shown below) - # None (No effect), - # "solid" (solid background in inverse color of font) - # "shadow" (shadow for font) - effect: None +record: + enabled: True + retain: + days: 7 + mode: motion + alerts: + retain: + days: 30 + detections: + retain: + days: 30 + +snapshots: + enabled: True + retain: + default: 30 -# Required cameras: - # Required: name of the camera - back: - # Optional: Enable/Disable the camera (default: shown below). - # If disabled: config is used but no live stream and no capture etc. - # Events/Recordings are still viewable. - enabled: True - # Required: ffmpeg settings for the camera + name_of_your_camera: + detect: + width: 1280 + height: 720 + fps: 5 ffmpeg: - # Required: A list of input streams for the camera. See documentation for more information. inputs: - # Required: the path to the stream - # NOTE: path may include environment variables or docker secrets, which must begin with 'FRIGATE_' and be referenced in {} - - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 - # Required: list of roles for this stream. 
valid values are: audio,detect,record,rtmp - # NOTICE: In addition to assigning the audio, record, and rtmp roles, - # they must also be enabled in the camera config. + - path: rtsp://10.0.10.10:554/rtsp roles: - - audio - detect - - record - - rtmp - # Optional: stream specific global args (default: inherit) - # global_args: - # Optional: stream specific hwaccel args (default: inherit) - # hwaccel_args: - # Optional: stream specific input args (default: inherit) - # input_args: - # Optional: camera specific global args (default: inherit) - # global_args: - # Optional: camera specific hwaccel args (default: inherit) - # hwaccel_args: - # Optional: camera specific input args (default: inherit) - # input_args: - # Optional: camera specific output args (default: inherit) - # output_args: - - # Optional: timeout for highest scoring image before allowing it - # to be replaced by a newer image. (default: shown below) - best_image_timeout: 60 - - # Optional: URL to visit the camera web UI directly from the system page. Might not be available on every camera. - webui_url: "" - - # Optional: zones for this camera - zones: - # Required: name of the zone - # NOTE: This must be different than any camera names, but can match with another zone on another - # camera. - front_steps: - # Required: List of x,y coordinates to define the polygon of the zone. - # NOTE: Presence in a zone is evaluated only based on the bottom center of the objects bounding box. - coordinates: 545,1077,747,939,788,805 - # Optional: Number of consecutive frames required for object to be considered present in the zone (default: shown below). - inertia: 3 - # Optional: List of objects that can trigger this zone (default: all tracked objects) - objects: - - person - # Optional: Zone level object filters. - # NOTE: The global and camera filters are applied upstream. 
- filters: - person: - min_area: 5000 - max_area: 100000 - threshold: 0.7 - - # Optional: Configuration for the jpg snapshots published via MQTT - mqtt: - # Optional: Enable publishing snapshot via mqtt for camera (default: shown below) - # NOTE: Only applies to publishing image data to MQTT via 'frigate///snapshot'. - # All other messages will still be published. - enabled: True - # Optional: print a timestamp on the snapshots (default: shown below) - timestamp: True - # Optional: draw bounding box on the snapshots (default: shown below) - bounding_box: True - # Optional: crop the snapshot (default: shown below) - crop: True - # Optional: height to resize the snapshot to (default: shown below) - height: 270 - # Optional: jpeg encode quality (default: shown below) - quality: 70 - # Optional: Restrict mqtt messages to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - - # Optional: Configuration for how camera is handled in the GUI. - ui: - # Optional: Adjust sort order of cameras in the UI. Larger numbers come later (default: shown below) - # By default the cameras are sorted alphabetically. - order: 0 - # Optional: Whether or not to show the camera in the Frigate UI (default: shown below) - dashboard: True - - # Optional: connect to ONVIF camera - # to enable PTZ controls. - onvif: - # Required: host of the camera being connected to. - host: 0.0.0.0 - # Optional: ONVIF port for device (default: shown below). - port: 8000 - # Optional: username for login. - # NOTE: Some devices require admin to access ONVIF. - user: admin - # Optional: password for login. - password: admin - # Optional: PTZ camera object autotracking. Keeps a moving object in - # the center of the frame by automatically moving the PTZ camera. - autotracking: - # Optional: enable/disable object autotracking. 
(default: shown below) - enabled: False - # Optional: calibrate the camera on startup (default: shown below) - # A calibration will move the PTZ in increments and measure the time it takes to move. - # The results are used to help estimate the position of tracked objects after a camera move. - # Frigate will update your config file automatically after a calibration with - # a "movement_weights" entry for the camera. You should then set calibrate_on_startup to False. - calibrate_on_startup: False - # Optional: the mode to use for zooming in/out on objects during autotracking. (default: shown below) - # Available options are: disabled, absolute, and relative - # disabled - don't zoom in/out on autotracked objects, use pan/tilt only - # absolute - use absolute zooming (supported by most PTZ capable cameras) - # relative - use relative zooming (not supported on all PTZs, but makes concurrent pan/tilt/zoom movements) - zooming: disabled - # Optional: A value to change the behavior of zooming on autotracked objects. (default: shown below) - # A lower value will keep more of the scene in view around a tracked object. - # A higher value will zoom in more on a tracked object, but Frigate may lose tracking more quickly. - # The value should be between 0.1 and 0.75 - zoom_factor: 0.3 - # Optional: list of objects to track from labelmap.txt (default: shown below) - track: - - person - # Required: Begin automatically tracking an object when it enters any of the listed zones. - required_zones: - - zone_name - # Required: Name of ONVIF preset in camera's firmware to return to when tracking is over. (default: shown below) - return_preset: home - # Optional: Seconds to delay before returning to preset. (default: shown below) - timeout: 10 - # Optional: Values generated automatically by a camera calibration. Do not modify these manually. (default: shown below) - movement_weights: [] - - # Optional: Configuration for how to sort the cameras in the Birdseye view. 
- birdseye: - # Optional: Adjust sort order of cameras in the Birdseye view. Larger numbers come later (default: shown below) - # By default the cameras are sorted alphabetically. - order: 0 - -# Optional -ui: - # Optional: Set the default live mode for cameras in the UI (default: shown below) - live_mode: mse - # Optional: Set a timezone to use in the UI (default: use browser local time) - # timezone: America/Denver - # Optional: Use an experimental recordings / camera view UI (default: shown below) - use_experimental: False - # Optional: Set the time format used. - # Options are browser, 12hour, or 24hour (default: shown below) - time_format: browser - # Optional: Set the date style for a specified length. - # Options are: full, long, medium, short - # Examples: - # short: 2/11/23 - # medium: Feb 11, 2023 - # full: Saturday, February 11, 2023 - # (default: shown below). - date_style: short - # Optional: Set the time style for a specified length. - # Options are: full, long, medium, short - # Examples: - # short: 8:14 PM - # medium: 8:15:22 PM - # full: 8:15:22 PM Mountain Standard Time - # (default: shown below). - time_style: medium - # Optional: Ability to manually override the date / time styling to use strftime format - # https://www.gnu.org/software/libc/manual/html_node/Formatting-Calendar-Time.html - # possible values are shown above (default: not set) - strftime_fmt: "%Y/%m/%d %H:%M" - -# Optional: Telemetry configuration -telemetry: - # Optional: Enabled network interfaces for bandwidth stats monitoring (default: empty list, let nethogs search all) - network_interfaces: - - eth - - enp - - eno - - ens - - wl - - lo - # Optional: Configure system stats - stats: - # Enable AMD GPU stats (default: shown below) - amd_gpu_stats: True - # Enable Intel GPU stats (default: shown below) - intel_gpu_stats: True - # Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. 
(default: shown below) - # NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled. - network_bandwidth: False - # Optional: Enable the latest version outbound check (default: shown below) - # NOTE: If you use the HomeAssistant integration, disabling this will prevent it from reporting new versions - version_check: True + motion: + mask: + - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400 +``` + +### Home Assistant integrated Intel Mini PC with OpenVino + +- Single camera with 720p, 5fps stream for detect +- MQTT connected to same mqtt server as home assistant +- VAAPI hardware acceleration for decoding video +- OpenVino detector +- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not +- Continue to keep all video if it qualified as an alert or detection for 30 days +- Save snapshots for 30 days +- Motion mask for the camera timestamp + +```yaml +mqtt: + host: 192.168.X.X # <---- same mqtt broker that home assistant uses + user: mqtt-user + password: xxxxxxxxxx + +ffmpeg: + hwaccel_args: preset-vaapi + +detectors: + ov: + type: openvino + device: AUTO + model: + path: /openvino-model/ssdlite_mobilenet_v2.xml + +model: + width: 300 + height: 300 + input_tensor: nhwc + input_pixel_format: bgr + labelmap_path: /openvino-model/coco_91cl_bkgr.txt + +record: + enabled: True + retain: + days: 7 + mode: motion + alerts: + retain: + days: 30 + detections: + retain: + days: 30 + +snapshots: + enabled: True + retain: + default: 30 + +cameras: + name_of_your_camera: + detect: + width: 1280 + height: 720 + fps: 5 + ffmpeg: + inputs: + - path: rtsp://10.0.10.10:554/rtsp + roles: + - detect + motion: + mask: + - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400 ``` diff --git a/docs/docs/configuration/live.md 
b/docs/docs/configuration/live.md index 003e7599c..31e720031 100644 --- a/docs/docs/configuration/live.md +++ b/docs/docs/configuration/live.md @@ -3,17 +3,29 @@ id: live title: Live View --- -Frigate has different live view options, some of which require the bundled `go2rtc` to be configured as shown in the [step by step guide](/guides/configuring_go2rtc). +Frigate intelligently displays your camera streams on the Live view dashboard. Your camera images update once per minute when no detectable activity is occurring to conserve bandwidth and resources. As soon as any motion is detected, cameras seamlessly switch to a live stream. -## Live View Options +## Live View technologies -Live view options can be selected while viewing the live stream. The options are: +Frigate intelligently uses three different streaming technologies to display your camera streams on the dashboard and the single camera view, switching between available modes based on network bandwidth, player errors, or required features like two-way talk. The highest quality and fluency of the Live view requires the bundled `go2rtc` to be configured as shown in the [step by step guide](/guides/configuring_go2rtc). -| Source | Latency | Frame Rate | Resolution | Audio | Requires go2rtc | Other Limitations | -| ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | --------------- | ------------------------------------------------ | -| jsmpeg | low | same as `detect -> fps`, capped at 10 | same as detect | no | no | none | -| mse | low | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only | -| webrtc | lowest | native | native | yes (depends on audio codec) | yes | requires extra config, doesn't support h.265 | +The jsmpeg live view will use more browser and client GPU resources. Using go2rtc is highly recommended and will provide a superior experience. 
+ +| Source | Frame Rate | Resolution | Audio | Requires go2rtc | Notes | +| ------ | ------------------------------------- | ---------- | ---------------------------- | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| jsmpeg | same as `detect -> fps`, capped at 10 | 720p | no | no | Resolution is configurable, but go2rtc is recommended if you want higher resolutions and better frame rates. jsmpeg is Frigate's default without go2rtc configured. | +| mse | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only. This is Frigate's default when go2rtc is configured. | +| webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration, doesn't support h.265. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. | + +### Camera Settings Recommendations + +If you are using go2rtc, you should adjust the following settings in your camera's firmware for the best experience with Live view: + +- Video codec: **H.264** - provides the most compatible video codec with all Live view technologies and browsers. Avoid any kind of "smart codec" or "+" codec like _H.264+_ or _H.265+_, as these non-standard codecs remove keyframes (see below). +- Audio codec: **AAC** - provides the most compatible audio codec with all Live view technologies and browsers that support audio. +- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. 
See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes. + +The default video and audio codec on your camera may not always be compatible with your browser, which is why setting them to H.264 and AAC is recommended. See the [go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#codecs-madness) for codec support information. ### Audio Support @@ -30,6 +42,15 @@ go2rtc: - "ffmpeg:http_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus) ``` +If your camera does not have audio and you are having problems with Live view, you should have go2rtc send video only: + +```yaml +go2rtc: + streams: + no_audio_camera: + - ffmpeg:rtsp://192.168.1.5:554/live0#video=copy +``` + ### Setting Stream For Live UI There may be some cameras that you would prefer to use the sub stream for live view, but the main stream for recording. This can be done via `live -> stream_name`. @@ -79,7 +100,7 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req - stun:8555 ``` -- For access through Tailscale, the Frigate system's Tailscale IP must be added as a WebRTC candidate. Tailscale IPs all start with `100.`, and are reserved within the `100.0.0.0/8` CIDR block. +- For access through Tailscale, the Frigate system's Tailscale IP must be added as a WebRTC candidate. Tailscale IPs all start with `100.`, and are reserved within the `100.64.0.0/10` CIDR block. :::tip diff --git a/docs/docs/configuration/masks.md b/docs/docs/configuration/masks.md index 321b909cb..4b57be964 100644 --- a/docs/docs/configuration/masks.md +++ b/docs/docs/configuration/masks.md @@ -3,41 +3,49 @@ id: masks title: Masks --- -There are two types of masks available: +## Motion masks -**Motion masks**: Motion masks are used to prevent unwanted types of motion from triggering detection. Try watching the debug feed with `Motion Boxes` enabled to see what may be regularly detected as motion. 
For example, you want to mask out your timestamp, the sky, rooftops, etc. Keep in mind that this mask only prevents motion from being detected and does not prevent objects from being detected if object detection was started due to motion in unmasked areas. Motion is also used during object tracking to refine the object detection area in the next frame. Over masking will make it more difficult for objects to be tracked. To see this effect, create a mask, and then watch the video feed with `Motion Boxes` enabled again. +Motion masks are used to prevent unwanted types of motion from triggering detection. Try watching the Debug feed (Settings --> Debug) with `Motion Boxes` enabled to see what may be regularly detected as motion. For example, you want to mask out your timestamp, the sky, rooftops, etc. Keep in mind that this mask only prevents motion from being detected and does not prevent objects from being detected if object detection was started due to motion in unmasked areas. Motion is also used during object tracking to refine the object detection area in the next frame. _Over-masking will make it more difficult for objects to be tracked._ -**Object filter masks**: Object filter masks are used to filter out false positives for a given object type based on location. These should be used to filter any areas where it is not possible for an object of that type to be. The bottom center of the detected object's bounding box is evaluated against the mask. If it is in a masked area, it is assumed to be a false positive. For example, you may want to mask out rooftops, walls, the sky, treetops for people. For cars, masking locations other than the street or your driveway will tell Frigate that anything in your yard is a false positive. +See [further clarification](#further-clarification) below on why you may not want to use a motion mask. + +## Object filter masks + +Object filter masks are used to filter out false positives for a given object type based on location. 
These should be used to filter any areas where it is not possible for an object of that type to be. The bottom center of the detected object's bounding box is evaluated against the mask. If it is in a masked area, it is assumed to be a false positive. For example, you may want to mask out rooftops, walls, the sky, treetops for people. For cars, masking locations other than the street or your driveway will tell Frigate that anything in your yard is a false positive. + +Object filter masks can be used to filter out stubborn false positives in fixed locations. For example, the base of this tree may be frequently detected as a person. The following image shows an example of an object filter mask (shaded red area) over the location where the bottom center is typically located to filter out person detections in a precise location. + +![object mask](/img/bottom-center-mask.jpg) + +## Using the mask creator To create a poly mask: 1. Visit the Web UI -1. Click the camera you wish to create a mask for -1. Select "Debug" at the top -1. Expand the "Options" below the video feed -1. Click "Mask & Zone creator" -1. Click "Add" on the type of mask or zone you would like to create -1. Click on the camera's latest image to create a masked area. The yaml representation will be updated in real-time -1. When you've finished creating your mask, click "Copy" and paste the contents into your config file and restart Frigate +2. Click/tap the gear icon and open "Settings" +3. Select "Mask / zone editor" +4. At the top right, select the camera you wish to create a mask or zone for +5. Click the plus icon under the type of mask or zone you would like to create +6. Click on the camera's latest image to create the points for a masked area. Click the first point again to close the polygon. +7. When you've finished creating your mask, press Save. +8. Restart Frigate to apply your changes. 
-Example of a finished row corresponding to the below example image: +Your config file will be updated with the relative coordinates of the mask/zone: ```yaml motion: - mask: "0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432" + mask: "0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456,0.700,0.424,0.701,0.311,0.507,0.294,0.453,0.347,0.451,0.400" ``` -Multiple masks can be listed. +Multiple masks can be listed in your config. ```yaml motion: mask: - - 458,1346,336,973,317,869,375,866,432 - - 0,461,3,0,1919,0,1919,843,1699,492,1344 + - 0.239,1.246,0.175,0.901,0.165,0.805,0.195,0.802 + - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781,0.885,0.456 ``` -![poly](/img/example-mask-poly-min.png) - ### Further Clarification This is a response to a [question posed on reddit](https://www.reddit.com/r/homeautomation/comments/ppxdve/replacing_my_doorbell_with_a_security_camera_a_6/hd876w4?utm_source=share&utm_medium=web2x&context=3): @@ -70,7 +78,7 @@ It is, but the definition of "unnecessary" varies. I want to ignore areas of mot > For me, giving my masks ANY padding results in a lot of people detection I'm not interested in. I live in the city and catch a lot of the sidewalk on my camera. People walk by my front door all the time and the margin between the sidewalk and actually walking onto my stoop is very thin, so I basically have everything but the exact contours of my stoop masked out. This results in very tidy detections but this info keeps throwing me off. Am I just overthinking it? -This is what `required_zones` are for. You should define a zone (remember this is evaluated based on the bottom center of the bounding box) and make it required to save snapshots and clips (now events in 0.9.0). You can also use this in your conditions for a notification. +This is what `required_zones` are for. 
You should define a zone (remember this is evaluated based on the bottom center of the bounding box) and make it required to save snapshots and clips (previously events in 0.9.0 to 0.13.0 and review items in 0.14.0 and later). You can also use this in your conditions for a notification. > Maybe my specific situation just warrants this. I've just been having a hard time understanding the relevance of this information - it seems to be that it's exactly what would be expected when "masking out" an area of ANY image. diff --git a/docs/docs/configuration/motion_detection.md b/docs/docs/configuration/motion_detection.md index f3d1d7692..0844c04a8 100644 --- a/docs/docs/configuration/motion_detection.md +++ b/docs/docs/configuration/motion_detection.md @@ -13,17 +13,15 @@ Once motion is detected, it tries to group up nearby areas of motion together in The default motion settings should work well for the majority of cameras, however there are cases where tuning motion detection can lead to better and more optimal results. Each camera has its own environment with different variables that affect motion, this means that the same motion settings will not fit all of your cameras. -Before tuning motion it is important to understand the goal. In an optimal configuration, motion from people and cars would be detected, but not grass moving, lighting changes, timestamps, etc. If your motion detection is too sensitive, you will experience higher CPU loads and greater false positives from the increased rate of object detection. If it is not sensitive enough, you will miss events. +Before tuning motion it is important to understand the goal. In an optimal configuration, motion from people and cars would be detected, but not grass moving, lighting changes, timestamps, etc. If your motion detection is too sensitive, you will experience higher CPU loads and greater false positives from the increased rate of object detection. 
If it is not sensitive enough, you will miss objects that you want to track. ## Create Motion Masks -First, mask areas with regular motion not caused by the objects you want to detect. The best way to find candidates for motion masks is by watching the debug stream with motion boxes enabled. Good use cases for motion masks are timestamps or tree limbs and large bushes that regularly move due to wind. When possible, avoid creating motion masks that would block motion detection for objects you want to track **even if they are in locations where you don't want events**. Motion masks should not be used to avoid detecting objects in specific areas. More details can be found [in the masks docs.](/configuration/masks.md). +First, mask areas with regular motion not caused by the objects you want to detect. The best way to find candidates for motion masks is by watching the debug stream with motion boxes enabled. Good use cases for motion masks are timestamps or tree limbs and large bushes that regularly move due to wind. When possible, avoid creating motion masks that would block motion detection for objects you want to track **even if they are in locations where you don't want alerts or detections**. Motion masks should not be used to avoid detecting objects in specific areas. More details can be found [in the masks docs.](/configuration/masks.md). ## Prepare For Testing -The easiest way to tune motion detection is to do it live, have one window / screen open with the frigate debug view and motion boxes enabled with another window / screen open allowing for configuring the motion settings. It is recommended to use Home Assistant or MQTT as they offer live configuration of some motion settings meaning that Frigate does not need to be restarted when values are changed. - -In Home Assistant the `Improve Contrast`, `Contour Area`, and `Threshold` configuration entities are disabled by default but can easily be enabled and used to tune live, otherwise MQTT can be used. 
+The easiest way to tune motion detection is to use the Frigate UI under Settings > Motion Tuner. This screen allows the changing of motion detection values live to easily see the immediate effect on what is detected as motion. ## Tuning Motion Detection During The Day @@ -31,13 +29,13 @@ Now that things are set up, find a time to tune that represents normal circumsta :::note -Remember that motion detection is just used to determine when object detection should be used. You should aim to have motion detection sensitive enough that you won't miss events from objects you want to detect with object detection. The goal is to prevent object detection from running constantly for every small pixel change in the image. Windy days are still going to result in lots of motion being detected. +Remember that motion detection is just used to determine when object detection should be used. You should aim to have motion detection sensitive enough that you won't miss objects you want to detect with object detection. The goal is to prevent object detection from running constantly for every small pixel change in the image. Windy days are still going to result in lots of motion being detected. ::: ### Threshold -The threshold value dictates how much of a change in a pixels luminance is required to be considered motion. +The threshold value dictates how much of a change in a pixels luminance is required to be considered motion. ```yaml # default threshold value @@ -69,7 +67,7 @@ motion: Once the threshold calculation is run, the pixels that have changed are grouped together. The contour area value is used to decide which groups of changed pixels qualify as motion. Smaller values are more sensitive meaning people that are far away, small animals, etc. are more likely to be detected as motion, but it also means that small changes in shadows, leaves, etc. are detected as motion. 
Higher values are less sensitive meaning these things won't be detected as motion but with the risk that desired motion won't be detected until closer to the camera. -Watching the motion boxes in the debug view, adjust the contour area until there are no motion boxes smaller than the smallest you'd expect frigate to detect something moving. +Watching the motion boxes in the debug view, adjust the contour area until there are no motion boxes smaller than the smallest you'd expect frigate to detect something moving. ### Improve Contrast @@ -77,7 +75,7 @@ At this point if motion is working as desired there is no reason to continue wit ## Tuning Motion Detection During The Night -Once daytime motion detection is tuned, there is a chance that the settings will work well for motion detection during the night as well. If this is the case then the preferred settings can be written to the config file and left alone. +Once daytime motion detection is tuned, there is a chance that the settings will work well for motion detection during the night as well. If this is the case then the preferred settings can be written to the config file and left alone. However, if the preferred day settings do not work well at night it is recommended to use HomeAssistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection. @@ -96,7 +94,7 @@ motion: :::tip -Some cameras like doorbell cameras may have missed detections when someone walks directly in front of the camera and the lightning_threshold causes motion detection to be re-calibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these events are not missed. +Some cameras like doorbell cameras may have missed detections when someone walks directly in front of the camera and the lightning_threshold causes motion detection to be re-calibrated. 
In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed. ::: diff --git a/docs/docs/configuration/notifications.md b/docs/docs/configuration/notifications.md new file mode 100644 index 000000000..9225ea6e8 --- /dev/null +++ b/docs/docs/configuration/notifications.md @@ -0,0 +1,42 @@ +--- +id: notifications +title: Notifications +--- + +# Notifications + +Frigate offers native notifications using the [WebPush Protocol](https://web.dev/articles/push-notifications-web-push-protocol) which uses the [VAPID spec](https://tools.ietf.org/html/draft-thomson-webpush-vapid) to deliver notifications to web apps using encryption. + +## Setting up Notifications + +In order to use notifications the following requirements must be met: + +- Frigate must be accessed via a secure https connection +- A supported browser must be used. Currently Chrome, Firefox, and Safari are known to be supported. +- In order for notifications to be usable externally, Frigate must be accessible externally + +### Configuration + +To configure notifications, go to the Frigate WebUI -> Settings -> Notifications and enable, then fill out the fields and save. + +### Registration + +Once notifications are enabled, press the `Register for Notifications` button on all devices that you would like to receive notifications on. This will register the background worker. After this Frigate must be restarted and then notifications will begin to be sent. + +## Supported Notifications + +Currently notifications are only supported for review alerts. More notifications will be supported in the future. + +:::note + +Currently, only Chrome supports images in notifications. Safari and Firefox will only show a title and message in the notification. + +::: + +## Reduce Notification Latency + +Different platforms handle notifications differently, some settings changes may be required to get optimal notification delivery. 
+ +### Android + +Most Android phones have battery optimization settings. To get reliable Notification delivery the browser (Chrome, Firefox) should have battery optimizations disabled. If Frigate is running as a PWA then the Frigate app should have battery optimizations disabled as well. \ No newline at end of file diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index e0aca2b87..d4cee196d 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -3,43 +3,51 @@ id: object_detectors title: Object Detectors --- +# Supported Hardware + +:::info + +Frigate supports multiple different detectors that work on different types of hardware: + +**Most Hardware** +- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices. +- [Hailo](#hailo-8l): The Hailo8 AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices. + +**AMD** +- [ROCm](#amdrocm-gpu-detector): ROCm can run on AMD Discrete GPUs to provide efficient object detection. +- [ONNX](#onnx): ROCm will automatically be detected and used as a detector in the `-rocm` Frigate image when a supported ONNX model is configured. + +**Intel** +- [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection. +- [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured. + +**Nvidia** +- [TensortRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs, using one of many default models. +- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured. 
+ +**Rockchip** +- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs. + +**For Testing** +- [CPU Detector (not recommended for actual use](#cpu-detector-not-recommended): Use a CPU to run tflite model, this is not recommended and in most cases OpenVINO can be used in CPU mode with better results. + +::: + # Officially Supported Detectors -Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvino`, `tensorrt`, and `rknn`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. +Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `onnx`, `openvino`, `rknn`, `rocm`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. -## CPU Detector (not recommended) +## Edge TPU Detector -The CPU detector type runs a TensorFlow Lite model utilizing the CPU without hardware acceleration. It is recommended to use a hardware accelerated detector type instead for better performance. To configure a CPU based detector, set the `"type"` attribute to `"cpu"`. +The Edge TPU detector type runs a TensorFlow Lite model utilizing the Google Coral delegate for hardware acceleration. To configure an Edge TPU detector, set the `"type"` attribute to `"edgetpu"`. -The number of threads used by the interpreter can be specified using the `"num_threads"` attribute, and defaults to `3.` - -A TensorFlow Lite model is provided in the container at `/cpu_model.tflite` and is used by this detector type by default. 
To provide your own model, bind mount the file into the container and provide the path with `model.path`. - -```yaml -detectors: - cpu1: - type: cpu - num_threads: 3 - model: - path: "/custom_model.tflite" - cpu2: - type: cpu - num_threads: 3 -``` - -When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance. - -## Edge-TPU Detector - -The EdgeTPU detector type runs a TensorFlow Lite model utilizing the Google Coral delegate for hardware acceleration. To configure an EdgeTPU detector, set the `"type"` attribute to `"edgetpu"`. - -The EdgeTPU device can be specified using the `"device"` attribute according to the [Documentation for the TensorFlow Lite Python API](https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api). If not set, the delegate will use the first device it finds. +The Edge TPU device can be specified using the `"device"` attribute according to the [Documentation for the TensorFlow Lite Python API](https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api). If not set, the delegate will use the first device it finds. A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. :::tip -See [common Edge-TPU troubleshooting steps](/troubleshooting/edgetpu) if the EdgeTPu is not detected. +See [common Edge TPU troubleshooting steps](/troubleshooting/edgetpu) if the Edge TPU is not detected. ::: @@ -75,6 +83,15 @@ detectors: device: "" ``` +### Single PCIE/M.2 Coral + +```yaml +detectors: + coral: + type: edgetpu + device: pci +``` + ### Multiple PCIE/M.2 Corals ```yaml @@ -101,11 +118,31 @@ detectors: ## OpenVINO Detector -The OpenVINO detector type runs an OpenVINO IR model on Intel CPU, GPU and VPU hardware. 
To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`. +The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`. -The OpenVINO device to be used is specified using the `"device"` attribute according to the naming conventions in the [Device Documentation](https://docs.openvino.ai/latest/openvino_docs_OV_UG_Working_with_devices.html). Other supported devices could be `AUTO`, `CPU`, `GPU`, `MYRIAD`, etc. If not specified, the default OpenVINO device will be selected by the `AUTO` plugin. +The OpenVINO device to be used is specified using the `"device"` attribute according to the naming conventions in the [Device Documentation](https://docs.openvino.ai/2024/openvino-workflow/running-inference/inference-devices-and-modes.html). The most common devices are `CPU` and `GPU`. Currently, there is a known issue with using `AUTO`. For backwards compatibility, Frigate will attempt to use `GPU` if `AUTO` is set in your configuration. -OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. A supported Intel platform is required to use the `GPU` device with OpenVINO. The `MYRIAD` device may be run on any platform, including Arm devices. For detailed system requirements, see [OpenVINO System Requirements](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/system-requirements.html) +OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. It will also run on AMD CPUs despite having no official support for it. A supported Intel platform is required to use the `GPU` device with OpenVINO. For detailed system requirements, see [OpenVINO System Requirements](https://docs.openvino.ai/2024/about-openvino/release-notes-openvino/system-requirements.html) + +:::tip + +When using many cameras one detector may not be enough to keep up. 
Multiple detectors can be defined assuming GPU resources are available. An example configuration would be: + +```yaml +detectors: + ov_0: + type: openvino + device: GPU + ov_1: + type: openvino + device: GPU +``` + +::: + +### Supported Models + +#### SSDLite MobileNet v2 An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model. @@ -113,70 +150,56 @@ An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobil detectors: ov: type: openvino - device: AUTO - model: - path: /openvino-model/ssdlite_mobilenet_v2.xml + device: GPU model: width: 300 height: 300 input_tensor: nhwc input_pixel_format: bgr + path: /openvino-model/ssdlite_mobilenet_v2.xml labelmap_path: /openvino-model/coco_91cl_bkgr.txt ``` -This detector also supports some YOLO variants: YOLOX, YOLOv5, and YOLOv8 specifically. Other YOLO variants are not officially supported/tested. Frigate does not come with any yolo models preloaded, so you will need to supply your own models. This detector has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. You can follow [these instructions](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny#download-a-model-and-convert-it-into-openvino-ir-format) to retrieve the OpenVINO-compatible `yolox_tiny` model. Make sure that the model input dimensions match the `width` and `height` parameters, and `model_type` is set accordingly. 
See [Full Configuration Reference](/configuration/index.md#full-configuration-reference) for a list of possible `model_type` options. Below is an example of how `yolox_tiny` can be used in Frigate: +#### YOLOX + +This detector also supports YOLOX. Frigate does not come with any YOLOX models preloaded, so you will need to supply your own models. + +#### YOLO-NAS + +[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb). + +:::warning + +The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html + +::: + +The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired. 
+ +After placing the downloaded onnx model in your config folder, you can use the following configuration: ```yaml detectors: ov: type: openvino - device: AUTO - model: - path: /path/to/yolox_tiny.xml + device: GPU model: - width: 416 - height: 416 + model_type: yolonas + width: 320 # <--- should match whatever was set in notebook + height: 320 # <--- should match whatever was set in notebook input_tensor: nchw input_pixel_format: bgr - model_type: yolox - labelmap_path: /path/to/coco_80cl.txt + path: /config/yolo_nas_s.onnx + labelmap_path: /labelmap/coco-80.txt ``` -### Intel NCS2 VPU and Myriad X Setup - -Intel produces a neural net inference accelleration chip called Myriad X. This chip was sold in their Neural Compute Stick 2 (NCS2) which has been discontinued. If intending to use the MYRIAD device for accelleration, additional setup is required to pass through the USB device. The host needs a udev rule installed to handle the NCS2 device. - -```bash -sudo usermod -a -G users "$(whoami)" -cat < 97-myriad-usbboot.rules -SUBSYSTEM=="usb", ATTRS{idProduct}=="2485", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" -SUBSYSTEM=="usb", ATTRS{idProduct}=="f63b", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1" -EOF -sudo cp 97-myriad-usbboot.rules /etc/udev/rules.d/ -sudo udevadm control --reload-rules -sudo udevadm trigger -``` - -Additionally, the Frigate docker container needs to run with the following configuration: - -```bash ---device-cgroup-rule='c 189:\* rmw' -v /dev/bus/usb:/dev/bus/usb -``` - -or in your compose file: - -```yml -device_cgroup_rules: - - "c 189:* rmw" -volumes: - - /dev/bus/usb:/dev/bus/usb -``` +Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. ## NVidia TensorRT Detector -NVidia GPUs may be used for object detection using the TensorRT libraries. 
Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix. This detector is designed to work with Yolo models for object detection. +Nvidia GPUs may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt`. This detector is designed to work with Yolo models for object detection. ### Minimum Hardware Support @@ -198,7 +221,7 @@ There are improved capabilities in newer GPU architectures that TensorRT can ben The model used for TensorRT must be preprocessed on the same hardware platform that they will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is included that will build several common models. -The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host. +The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host. By default, the `yolov7-320` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. To select no model generation, set the variable to an empty string, `YOLO_MODELS=""`. 
Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder. @@ -245,7 +268,7 @@ frigate: - USE_FP16=false ``` -If you have multiple GPUs passed through to Frigate, you can specify which one to use for the model conversion. The conversion script will use the first visible GPU, however in systems with mixed GPU models you may not want to use the default index for object detection. Add the `TRT_MODEL_PREP_DEVICE` environment variable to select a specific GPU. +If you have multiple GPUs passed through to Frigate, you can specify which one to use for the model conversion. The conversion script will use the first visible GPU, however in systems with mixed GPU models you may not want to use the default index for object detection. Add the `TRT_MODEL_PREP_DEVICE` environment variable to select a specific GPU. ```yml frigate: @@ -255,7 +278,7 @@ frigate: ### Configuration Parameters -The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration.md#nvidia-gpu) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container. +The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration.md#nvidia-gpus) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. 
The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container. The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. These model path and dimensions used will depend on which model you have generated. @@ -273,6 +296,201 @@ model: height: 320 ``` +## AMD/ROCm GPU detector + +### Setup + +The `rocm` detector supports running YOLO-NAS models on AMD GPUs. Use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`. + +### Docker settings for GPU access + +ROCm needs access to the `/dev/kfd` and `/dev/dri` devices. When docker or frigate is not run under root then also `video` (and possibly `render` and `ssl/_ssl`) groups should be added. + +When running docker directly the following flags should be added for device access: + +```bash +$ docker run --device=/dev/kfd --device=/dev/dri \ + ... +``` + +When using docker compose: + +```yaml +services: + frigate: +--- +devices: + - /dev/dri + - /dev/kfd +``` + +For reference on recommended settings see [running ROCm/pytorch in Docker](https://rocm.docs.amd.com/projects/install-on-linux/en/develop/how-to/3rd-party/pytorch-install.html#using-docker-with-pytorch-pre-installed). + +### Docker settings for overriding the GPU chipset + +Your GPU might work just fine without any special configuration but in many cases they need manual settings. AMD/ROCm software stack comes with a limited set of GPU drivers and for newer or missing models you will have to override the chipset version to an older/generic version to get things working. + +Also AMD/ROCm does not "officially" support integrated GPUs. It still does work with most of them just fine but requires special settings. One has to configure the `HSA_OVERRIDE_GFX_VERSION` environment variable. See the [ROCm bug report](https://github.com/ROCm/ROCm/issues/1743) for context and examples. 
+ +For the rocm frigate build there is some automatic detection: + +- gfx90c -> 9.0.0 +- gfx1031 -> 10.3.0 +- gfx1103 -> 11.0.0 + +If you have something else you might need to override the `HSA_OVERRIDE_GFX_VERSION` at Docker launch. Suppose the version you want is `9.0.0`, then you should configure it from command line as: + +```bash +$ docker run -e HSA_OVERRIDE_GFX_VERSION=9.0.0 \ + ... +``` + +When using docker compose: + +```yaml +services: + frigate: +... +environment: + HSA_OVERRIDE_GFX_VERSION: "9.0.0" +``` + +Figuring out what version you need can be complicated as you can't tell the chipset name and driver from the AMD brand name. + +- first make sure that rocm environment is running properly by running `/opt/rocm/bin/rocminfo` in the frigate container -- it should list both the CPU and the GPU with their properties +- find the chipset version you have (gfxNNN) from the output of the `rocminfo` (see below) +- use a search engine to query what `HSA_OVERRIDE_GFX_VERSION` you need for the given gfx name ("gfxNNN ROCm HSA_OVERRIDE_GFX_VERSION") +- override the `HSA_OVERRIDE_GFX_VERSION` with relevant value +- if things are not working check the frigate docker logs + +#### Figuring out if AMD/ROCm is working and found your GPU + +```bash +$ docker exec -it frigate /opt/rocm/bin/rocminfo +``` + +#### Figuring out your AMD GPU chipset version: + +We unset the `HSA_OVERRIDE_GFX_VERSION` to prevent an existing override from messing up the result: + +```bash +$ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo |grep gfx)' +``` + +### Supported Models + +There is no default model provided, the following formats are supported: + +#### YOLO-NAS + +[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. 
You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb)
An example configuration would be: + +```yaml +detectors: + onnx_0: + type: onnx + onnx_1: + type: onnx +``` + +::: + +### Supported Models + +There is no default model provided, the following formats are supported: + +#### YOLO-NAS + +[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb). + +:::warning + +The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html + +::: + +The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired. + +After placing the downloaded onnx model in your config folder, you can use the following configuration: + +```yaml +detectors: + onnx: + type: onnx + +model: + model_type: yolonas + width: 320 # <--- should match whatever was set in notebook + height: 320 # <--- should match whatever was set in notebook + input_pixel_format: bgr + path: /config/yolo_nas_s.onnx + labelmap_path: /labelmap/coco-80.txt +``` + +Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. + +## CPU Detector (not recommended) + +The CPU detector type runs a TensorFlow Lite model utilizing the CPU without hardware acceleration. 
It is recommended to use a hardware accelerated detector type instead for better performance. To configure a CPU based detector, set the `"type"` attribute to `"cpu"`. + +:::danger + +The CPU detector is not recommended for general use. If you do not have GPU or Edge TPU hardware, using the [OpenVINO Detector](#openvino-detector) in CPU mode is often more efficient than using the CPU detector. + +::: + +The number of threads used by the interpreter can be specified using the `"num_threads"` attribute, and defaults to `3.` + +A TensorFlow Lite model is provided in the container at `/cpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. + +```yaml +detectors: + cpu1: + type: cpu + num_threads: 3 + model: + path: "/custom_model.tflite" + cpu2: + type: cpu + num_threads: 3 +``` + +When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance. + ## Deepstack / CodeProject.AI Server Detector The Deepstack / CodeProject.AI Server detector for Frigate allows you to integrate Deepstack and CodeProject.AI object detection capabilities into Frigate. CodeProject.AI and DeepStack are open-source AI platforms that can be run on various devices such as the Raspberry Pi, Nvidia Jetson, and other compatible hardware. It is important to note that the integration is performed over the network, so the inference times may not be as fast as native Frigate detectors, but it still provides an efficient and reliable solution for object detection and tracking. @@ -295,72 +513,78 @@ Replace `` and `` with the IP address and p To verify that the integration is working correctly, start Frigate and observe the logs for any error messages related to CodeProject.AI. 
+Make sure to follow the [Rockchip specific installation instructions](/frigate/installation#rockchip-platform).
set to 3 on rk3588 + num_cores: 0 -model: # required - # name of yolov8 model or path to your own .rknn model file +model: # required + # name of model (will be automatically downloaded) or path to your own .rknn model file # possible values are: - # - default-yolov8n - # - default-yolov8s - # - default-yolov8m - # - default-yolov8l - # - default-yolov8x - # - /config/model_cache/rknn/your_custom_model.rknn - path: default-yolov8n + # - deci-fp16-yolonas_s + # - deci-fp16-yolonas_m + # - deci-fp16-yolonas_l + # - /config/model_cache/your_custom_model.rknn + path: deci-fp16-yolonas_s # width and height of detection frames width: 320 height: 320 # pixel format of detection frame - # default value is rgb but yolov models usually use bgr format - input_pixel_format: bgr # required + # default value is rgb but yolo models usually use bgr format + input_pixel_format: bgr # required # shape of detection frame input_tensor: nhwc + # needs to be adjusted to model, see below + labelmap_path: /labelmap.txt # required ``` -Explanation for rknn specific options: -- **core mask** controls which cores of your NPU should be used. This option applies only to SoCs with a multicore NPU (at the time of writing this in only the RK3588/S). The easiest way is to pass the value as a binary number. To do so, use the prefix `0b` and write a `0` to disable a core and a `1` to enable a core, whereas the last digit coresponds to core0, the second last to core1, etc. You also have to use the cores in ascending order (so you can't use core0 and core2; but you can use core0 and core1). Enabling more cores can reduce the inference speed, especially when using bigger models (see section below). Examples: - - `core_mask: 0b000` or just `core_mask: 0` let the NPU decide which cores should be used. Default and recommended value. - - `core_mask: 0b001` use only core0. - - `core_mask: 0b011` use core0 and core1. - - `core_mask: 0b110` use core1 and core2. **This does not** work, since core0 is disabled. 
+The correct labelmap must be loaded for each model. If you use a custom model (see notes below), you must make sure to provide the correct labelmap. The table below lists the correct paths for the bundled models: + +| `path` | `labelmap_path` | +| --------------------- | --------------------- | +| deci-fp16-yolonas\_\* | /labelmap/coco-80.txt | ### Choosing a model -There are 5 default yolov8 models that differ in size and therefore load the NPU more or less. In ascending order, with the top one being the smallest and least computationally intensive model: +:::warning -| Model | Size in mb | -| ------- | ---------- | -| yolov8n | 9 | -| yolov8s | 25 | -| yolov8m | 54 | -| yolov8l | 90 | -| yolov8x | 136 | +The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html + +::: + +The inference time was determined on a rk3588 with 3 NPU cores. + +| Model | Size in mb | Inference time in ms | +| ------------------- | ---------- | -------------------- | +| deci-fp16-yolonas_s | 24 | 25 | +| deci-fp16-yolonas_m | 62 | 35 | +| deci-fp16-yolonas_l | 81 | 45 | :::tip @@ -373,25 +597,29 @@ $ cat /sys/kernel/debug/rknpu/load ::: -- By default the rknn detector uses the yolov8n model (`model: path: default-yolov8n`). This model comes with the image, so no further steps than those mentioned above are necessary. -- If you want to use a more precise model, you can pass `default-yolov8s`, `default-yolov8m`, `default-yolov8l` or `default-yolov8x` as `model: path:` option. - - If the model does not exist, it will be automatically downloaded to `/config/model_cache/rknn`. - - If your server has no internet connection, you can download the model from [this Github repository](https://github.com/MarcA711/rknn-models/releases) using another device and place it in the `config/model_cache/rknn` on your system. 
+- You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder, store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format see the `rknn-toolkit2` (requires an x86 machine). Note that there is only post-processing for the supported models.
+ +### Configuration ```yaml detectors: - rknn: - type: rknn - core_mask: 0b111 -``` + hailo8l: + type: hailo8l + device: PCIe + model: + path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef -::: +model: + width: 300 + height: 300 + input_tensor: nhwc + input_pixel_format: bgr + model_type: ssd +``` diff --git a/docs/docs/guides/false_positives.md b/docs/docs/configuration/object_filters.md similarity index 74% rename from docs/docs/guides/false_positives.md rename to docs/docs/configuration/object_filters.md index a77e9e9f3..ca7260094 100644 --- a/docs/docs/guides/false_positives.md +++ b/docs/docs/configuration/object_filters.md @@ -1,8 +1,10 @@ --- -id: false_positives -title: Reducing false positives +id: object_filters +title: Filters --- +There are several types of object filters that can be used to reduce false positive rates. + ## Object Scores For object filters in your configuration, any single detection below `min_score` will be ignored as a false positive. `threshold` is based on the median of the history of scores (padded to 3 values) for a tracked object. Consider the following frames when `min_score` is set to 0.6 and threshold is set to 0.85: @@ -20,11 +22,11 @@ In frame 2, the score is below the `min_score` value, so Frigate ignores it and ### Minimum Score -Any detection below `min_score` will be immediately thrown out and never tracked because it is considered a false positive. If `min_score` is too low then false positives may be detected and tracked which can confuse the object tracker and may lead to wasted resources. If `min_score` is too high then lower scoring true positives like objects that are further away or partially occluded may be thrown out which can also confuse the tracker and cause valid events to be lost or disjointed. +Any detection below `min_score` will be immediately thrown out and never tracked because it is considered a false positive. 
+`threshold` is used to determine that the object is a true positive. Once an object is detected with a score >= `threshold` the object is considered a true positive. If `threshold` is too low then some higher scoring false positives may create a tracked object. If `threshold` is too high then true positive tracked objects may be missed due to the object never scoring high enough.
For example when a leaf is detected as a dog or when a large tree is detected as a person, these can be reduced by adding a `min_area` / `max_area` filter. ### Object Proportions -`min_ratio` and `max_ratio` filter on the ratio of width / height of an objects bounding box and can be used to reduce false positives. For example if a false positive is detected as very tall for a dog which is often wider, a `min_ratio` filter can be used to filter out these false positives. +`min_ratio` and `max_ratio` values are compared against a given detected object's width/height ratio (in pixels). If the ratio is outside this range, the object will be ignored as a false positive. This allows objects that are proportionally too short-and-wide (higher ratio) or too tall-and-narrow (smaller ratio) to be ignored. + +:::info + +Conceptually, a ratio of 1 is a square, 0.5 is a "tall skinny" box, and 2 is a "wide flat" box. If `min_ratio` is 1.0, any object that is taller than it is wide will be ignored. Similarly, if `max_ratio` is 1.0, then any object that is wider than it is tall will be ignored. + +::: ## Other Tools ### Zones -[Required zones](/configuration/zones.md) can be a great tool to reduce false positives that may be detected in the sky or other areas that are not of interest. The required zones will only create events for objects that enter the zone. +[Required zones](/configuration/zones.md) can be a great tool to reduce false positives that may be detected in the sky or other areas that are not of interest. The required zones will only create tracked objects for objects that enter the zone. 
### Object Masks diff --git a/docs/docs/configuration/objects.mdx b/docs/docs/configuration/objects.md similarity index 63% rename from docs/docs/configuration/objects.mdx rename to docs/docs/configuration/objects.md index c15907339..1a93f9704 100644 --- a/docs/docs/configuration/objects.mdx +++ b/docs/docs/configuration/objects.md @@ -1,15 +1,16 @@ --- id: objects -title: Objects +title: Available Objects --- import labels from "../../../labelmap.txt"; Frigate includes the object models listed below from the Google Coral test data. -Please note: - - `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused. - - `person` is the only tracked object by default. See the [full configuration reference](index.md#full-configuration-reference) for an example of expanding the list of tracked objects. +Please note: + +- `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused. +- `person` is the only tracked object by default. See the [full configuration reference](reference.md) for an example of expanding the list of tracked objects.
+- Desktop: Use the install button typically found in the right edge of the address bar
+Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH//MM.SS.mp4` in **UTC time**. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config. Frigate chooses the largest matching retention value between the recording retention and the tracked object retention when determining if a recording should be removed. New recording segments are written from the camera stream to cache, they are only moved to disk if they match the setup recording retention policy. H265 recordings can be viewed in Chrome 108+, Edge and Safari only. All other browsers require recordings to be encoded with H264. +## Common recording configurations + +### Most conservative: Ensure all video is saved + +For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed. + +```yaml +record: + enabled: True + retain: + days: 3 + mode: all + alerts: + retain: + days: 30 + mode: motion + detections: + retain: + days: 30 + mode: motion +``` + +### Reduced storage: Only saving video when motion is detected + +In order to reduce storage requirements, you can adjust your config to only retain video where motion was detected. + +```yaml +record: + enabled: True + retain: + days: 3 + mode: motion + alerts: + retain: + days: 30 + mode: motion + detections: + retain: + days: 30 + mode: motion +``` + +### Minimum: Alerts only + +If you only want to retain video that occurs during a tracked object, this config will discard video unless an alert is ongoing. 
+ +```yaml +record: + enabled: True + retain: + days: 0 + alerts: + retain: + days: 30 + mode: motion +``` + ## Will Frigate delete old recordings if my storage runs out? As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 hours of recordings will be deleted. ## Configuring Recording Retention -Frigate supports both 24/7 and event based recordings with separate retention modes and retention periods. +Frigate supports both continuous and tracked object based recordings with separate retention modes and retention periods. :::tip @@ -23,88 +80,99 @@ Retention configs support decimals meaning they can be configured to retain `0.5 ::: -### 24/7 Recording +### Continuous Recording -The number of days to retain 24/7 recordings can be set via the following config where X is a number, by default 24/7 recording is disabled. +The number of days to retain continuous recordings can be set via the following config where X is a number, by default continuous recording is disabled. ```yaml record: enabled: True retain: - days: 1 # <- number of days to keep 24/7 recordings + days: 1 # <- number of days to keep continuous recordings ``` -24/7 recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean) +Continuous recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean) -### Event Recording +### Object Recording -If you only used clips in previous versions with recordings disabled, you can use the following config to get the same behavior. This is also the default behavior when recordings are enabled. +The number of days to record review items can be specified for review items classified as alerts as well as tracked objects. 
```yaml record: enabled: True - events: + alerts: retain: - default: 10 # <- number of days to keep event recordings + days: 10 # <- number of days to keep alert recordings + detections: + retain: + days: 10 # <- number of days to keep detections recordings ``` -This configuration will retain recording segments that overlap with events and have active tracked objects for 10 days. Because multiple events can reference the same recording segments, this avoids storing duplicate footage for overlapping events and reduces overall storage needs. +This configuration will retain recording segments that overlap with alerts and detections for 10 days. Because multiple tracked objects can reference the same recording segments, this avoids storing duplicate footage for overlapping tracked objects and reduces overall storage needs. **WARNING**: Recordings still must be enabled in the config. If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect. ## What do the different retain modes mean? -Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for 24/7 recording (but can also affect events). +Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for continuous recording (but can also affect tracked objects). + +Let's say you have Frigate configured so that your doorbell camera would retain the last **2** days of continuous recording. -Let's say you have Frigate configured so that your doorbell camera would retain the last **2** days of 24/7 recording. - With the `all` option all 48 hours of those two days would be kept and viewable. - With the `motion` option the only parts of those 48 hours would be segments that Frigate detected motion. 
This is the middle ground option that won't keep all 48 hours, but will likely keep all segments of interest along with the potential for some extra segments. - With the `active_objects` option the only segments that would be kept are those where there was a true positive object that was not considered stationary. -The same options are available with events. Let's consider a scenario where you drive up and park in your driveway, go inside, then come back out 4 hours later. -- With the `all` option all segments for the duration of the event would be saved for the event. This event would have 4 hours of footage. -- With the `motion` option all segments for the duration of the event with motion would be saved. This means any segment where a car drove by in the street, person walked by, lighting changed, etc. would be saved. -- With the `active_objects` it would only keep segments where the object was active. In this case the only segments that would be saved would be the ones where the car was driving up, you going inside, you coming outside, and the car driving away. Essentially reducing the 4 hours to a minute or two of event footage. +The same options are available with alerts and detections, except it will only save the recordings when it overlaps with a review item of that type. A configuration example of the above retain modes where all `motion` segments are stored for 7 days and `active objects` are stored for 14 days would be as follows: + ```yaml record: enabled: True retain: days: 7 mode: motion - events: + alerts: retain: - default: 14 + days: 14 + mode: active_objects + detections: + retain: + days: 14 mode: active_objects ``` + The above configuration example can be added globally or on a per camera basis. -### Object Specific Retention - -You can also set specific retention length for an object type. 
The below configuration example builds on from above but also specifies that recordings of dogs only need to be kept for 2 days and recordings of cars should be kept for 7 days. -```yaml -record: - enabled: True - retain: - days: 7 - mode: motion - events: - retain: - default: 14 - mode: active_objects - objects: - dog: 2 - car: 7 -``` - -## Can I have "24/7" recordings, but only at certain times? +## Can I have "continuous" recordings, but only at certain times? Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only record in certain situations or at certain times. ## How do I export recordings? -The export page in the Frigate WebUI allows for exporting real time clips with a designated start and stop time as well as exporting a time-lapse for a designated start and stop time. These exports can take a while so it is important to leave the file until it is no longer in progress. +Footage can be exported from Frigate by right-clicking (desktop) or long pressing (mobile) on a review item in the Review pane or by clicking the Export button in the History view. Exported footage is then organized and searchable through the Export view, accessible from the main navigation bar. + +### Time-lapse export + +Time lapse exporting is available only via the [HTTP API](../integrations/api/export-recording-export-camera-name-start-start-time-end-end-time-post.api.mdx). + +When exporting a time-lapse the default speed-up is 25x with 30 FPS. This means that every 25 seconds of (real-time) recording is condensed into 1 second of time-lapse video (always without audio) with a smoothness of 30 FPS. + +To configure the speed-up factor, the frame rate and further custom settings, the configuration parameter `timelapse_args` can be used. 
The below configuration example would change the time-lapse speed to 60x (for fitting 1 hour of recording into 1 minute of time-lapse) with 25 FPS: + +```yaml +record: + enabled: True + export: + timelapse_args: "-vf setpts=PTS/60 -r 25" +``` + +:::tip + +When using `hwaccel_args` globally hardware encoding is used for time lapse generation. The encoder determines its own behavior so the resulting file size may be undesirably large. +To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario. + +::: ## Syncing Recordings With Disk diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md new file mode 100644 index 000000000..604791621 --- /dev/null +++ b/docs/docs/configuration/reference.md @@ -0,0 +1,824 @@ +--- +id: reference +title: Full Reference Config +--- + +### Full configuration reference: + +:::warning + +It is not recommended to copy this full configuration file. Only specify values that are different from the defaults. Configuration options and default values may change in future versions. + +::: + +```yaml +mqtt: + # Optional: Enable mqtt server (default: shown below) + enabled: True + # Required: host name + host: mqtt.server.com + # Optional: port (default: shown below) + port: 1883 + # Optional: topic prefix (default: shown below) + # NOTE: must be unique if you are running multiple instances + topic_prefix: frigate + # Optional: client id (default: shown below) + # NOTE: must be unique if you are running multiple instances + client_id: frigate + # Optional: user + # NOTE: MQTT user can be specified with an environment variable or docker secrets that must begin with 'FRIGATE_'. + # e.g. 
user: '{FRIGATE_MQTT_USER}' + user: mqtt_user + # Optional: password + # NOTE: MQTT password can be specified with an environment variable or docker secrets that must begin with 'FRIGATE_'. + # e.g. password: '{FRIGATE_MQTT_PASSWORD}' + password: password + # Optional: tls_ca_certs for enabling TLS using self-signed certs (default: None) + tls_ca_certs: /path/to/ca.crt + # Optional: tls_client_cert and tls_client key in order to use self-signed client + # certificates (default: None) + # NOTE: certificate must not be password-protected + # do not set user and password when using a client certificate + tls_client_cert: /path/to/client.crt + tls_client_key: /path/to/client.key + # Optional: tls_insecure (true/false) for enabling TLS verification of + # the server hostname in the server certificate (default: None) + tls_insecure: false + # Optional: interval in seconds for publishing stats (default: shown below) + stats_interval: 60 + +# Optional: Detectors configuration. Defaults to a single CPU detector +detectors: + # Required: name of the detector + detector_name: + # Required: type of the detector + # Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below) + # Additional detector types can also be plugged in. + # Detectors may require additional configuration. + # Refer to the Detectors configuration page for more information. + type: cpu + +# Optional: Database configuration +database: + # The path to store the SQLite DB (default: shown below) + path: /config/frigate.db + +# Optional: TLS configuration +tls: + # Optional: Enable TLS for port 8971 (default: shown below) + enabled: True + +# Optional: Proxy configuration +proxy: + # Optional: Mapping for headers from upstream proxies. Only used if Frigate's auth + # is disabled. + # NOTE: Many authentication proxies pass a header downstream with the authenticated + # user name. Not all values are supported. It must be a whitelisted header. + # See the docs for more info. 
+ header_map: + user: x-forwarded-user + # Optional: Url for logging out a user. This sets the location of the logout url in + # the UI. + logout_url: /api/logout + # Optional: Auth secret that is checked against the X-Proxy-Secret header sent from + # the proxy. If not set, all requests are trusted regardless of origin. + auth_secret: None + +# Optional: Authentication configuration +auth: + # Optional: Enable authentication + enabled: True + # Optional: Reset the admin user password on startup (default: shown below) + # New password is printed in the logs + reset_admin_password: False + # Optional: Cookie to store the JWT token for native auth (default: shown below) + cookie_name: frigate_token + # Optional: Set secure flag on cookie. (default: shown below) + # NOTE: This should be set to True if you are using TLS + cookie_secure: False + # Optional: Session length in seconds (default: shown below) + session_length: 86400 # 24 hours + # Optional: Refresh time in seconds (default: shown below) + # When the session is going to expire in less time than this setting, + # it will be refreshed back to the session_length. + refresh_time: 43200 # 12 hours + # Optional: Rate limiting for login failures to help prevent brute force + # login attacks (default: shown below) + # See the docs for more information on valid values + failed_login_rate_limit: None + # Optional: Trusted proxies for determining IP address to rate limit + # NOTE: This is only used for rate limiting login attempts and does not bypass + # authentication. See the authentication docs for more details. 
+ trusted_proxies: [] + # Optional: Number of hashing iterations for user passwords + # As of Feb 2023, OWASP recommends 600000 iterations for PBKDF2-SHA256 + # NOTE: changing this value will not automatically update password hashes, you + # will need to change each user password for it to apply + hash_iterations: 600000 + +# Optional: model modifications +model: + # Optional: path to the model (default: automatic based on detector) + path: /edgetpu_model.tflite + # Optional: path to the labelmap (default: shown below) + labelmap_path: /labelmap.txt + # Required: Object detection model input width (default: shown below) + width: 320 + # Required: Object detection model input height (default: shown below) + height: 320 + # Optional: Object detection model input colorspace + # Valid values are rgb, bgr, or yuv. (default: shown below) + input_pixel_format: rgb + # Optional: Object detection model input tensor format + # Valid values are nhwc or nchw (default: shown below) + input_tensor: nhwc + # Optional: Object detection model type, currently only used with the OpenVINO detector + # Valid values are ssd, yolox, yolonas (default: shown below) + model_type: ssd + # Optional: Label name modifications. These are merged into the standard labelmap. 
+ labelmap: + 2: vehicle + # Optional: Map of object labels to their attribute labels (default: depends on model) + attributes_map: + person: + - amazon + - face + car: + - amazon + - fedex + - license_plate + - ups + +# Optional: Audio Events Configuration +# NOTE: Can be overridden at the camera level +audio: + # Optional: Enable audio events (default: shown below) + enabled: False + # Optional: Configure the amount of seconds without detected audio to end the event (default: shown below) + max_not_heard: 30 + # Optional: Configure the min rms volume required to run audio detection (default: shown below) + # As a rule of thumb: + # - 200 - high sensitivity + # - 500 - medium sensitivity + # - 1000 - low sensitivity + min_volume: 500 + # Optional: Types of audio to listen for (default: shown below) + listen: + - bark + - fire_alarm + - scream + - speech + - yell + # Optional: Filters to configure detection. + filters: + # Label that matches label in listen config. + speech: + # Minimum score that triggers an audio event (default: shown below) + threshold: 0.8 + +# Optional: logger verbosity settings +logger: + # Optional: Default log verbosity (default: shown below) + default: info + # Optional: Component specific logger overrides + logs: + frigate.event: debug + +# Optional: set environment variables +environment_vars: + EXAMPLE_VAR: value + +# Optional: birdseye configuration +# NOTE: Can (enabled, mode) be overridden at the camera level +birdseye: + # Optional: Enable birdseye view (default: shown below) + enabled: True + # Optional: Restream birdseye via RTSP (default: shown below) + # NOTE: Enabling this will set birdseye to run 24/7 which may increase CPU usage somewhat. 
+ restream: False + # Optional: Width of the output resolution (default: shown below) + width: 1280 + # Optional: Height of the output resolution (default: shown below) + height: 720 + # Optional: Encoding quality of the mpeg1 feed (default: shown below) + # 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources. + quality: 8 + # Optional: Mode of the view. Available options are: objects, motion, and continuous + # objects - cameras are included if they have had a tracked object within the last 30 seconds + # motion - cameras are included if motion was detected in the last 30 seconds + # continuous - all cameras are included always + mode: objects + # Optional: Threshold for camera activity to stop showing camera (default: shown below) + inactivity_threshold: 30 + # Optional: Configure the birdseye layout + layout: + # Optional: Scaling factor for the layout calculator, range 1.0-5.0 (default: shown below) + scaling_factor: 2.0 + # Optional: Maximum number of cameras to show at one time, showing the most recent (default: show all cameras) + max_cameras: 1 + +# Optional: ffmpeg configuration +# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets +ffmpeg: + # Optional: ffmpeg binary path (default: shown below) + # can also be set to `7.0` or `5.0` to specify one of the included versions + # or can be set to any path that holds `bin/ffmpeg` & `bin/ffprobe` + path: "default" + # Optional: global ffmpeg args (default: shown below) + global_args: -hide_banner -loglevel warning -threads 2 + # Optional: global hwaccel args (default: auto detect) + # NOTE: See hardware acceleration docs for your specific device + hwaccel_args: "auto" + # Optional: global input args (default: shown below) + input_args: preset-rtsp-generic + # Optional: global output args + output_args: + # Optional: output args for detect streams (default: shown below) + detect: -threads 2 -f rawvideo -pix_fmt yuv420p + # Optional: 
output args for record streams (default: shown below) + record: preset-record-generic + # Optional: Time in seconds to wait before ffmpeg retries connecting to the camera. (default: shown below) + # If set too low, frigate will retry a connection to the camera's stream too frequently, using up the limited streams some cameras can allow at once + # If set too high, then if a ffmpeg crash or camera stream timeout occurs, you could potentially lose up to a maximum of retry_interval second(s) of footage + # NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout. + retry_interval: 10 + +# Optional: Detect configuration +# NOTE: Can be overridden at the camera level +detect: + # Optional: width of the frame for the input with the detect role (default: use native stream resolution) + width: 1280 + # Optional: height of the frame for the input with the detect role (default: use native stream resolution) + height: 720 + # Optional: desired fps for your camera for the input with the detect role (default: shown below) + # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera. + fps: 5 + # Optional: enables detection for the camera (default: True) + enabled: True + # Optional: Number of consecutive detection hits required for an object to be initialized in the tracker. (default: 1/2 the frame rate) + min_initialized: 2 + # Optional: Number of frames without a detection before Frigate considers an object to be gone. (default: 5x the frame rate) + max_disappeared: 25 + # Optional: Configuration for stationary object tracking + stationary: + # Optional: Frequency for confirming stationary objects (default: same as threshold) + # When set to 1, object detection will run to confirm the object still exists on every frame. + # If set to 10, object detection will run to confirm the object still exists on every 10th frame. 
+ interval: 50 + # Optional: Number of frames without a position change for an object to be considered stationary (default: 10x the frame rate or 10s) + threshold: 50 + # Optional: Define a maximum number of frames for tracking a stationary object (default: not set, track forever) + # This can help with false positives for objects that should only be stationary for a limited amount of time. + # It can also be used to disable stationary object tracking. For example, you may want to set a value for person, but leave + # car at the default. + # WARNING: Setting these values overrides default behavior and disables stationary object tracking. + # There are very few situations where you would want it disabled. It is NOT recommended to + # copy these values from the example config into your config unless you know they are needed. + max_frames: + # Optional: Default for all object types (default: not set, track forever) + default: 3000 + # Optional: Object specific values + objects: + person: 1000 + # Optional: Milliseconds to offset detect annotations by (default: shown below). + # There can often be latency between a recording and the detect process, + # especially when using separate streams for detect and record. + # Use this setting to make the timeline bounding boxes more closely align + # with the recording. The value can be positive or negative. + # TIP: Imagine there is a tracked object clip with a person walking from left to right. + # If the tracked object lifecycle bounding box is consistently to the left of the person + # then the value should be decreased. Similarly, if a person is walking from + # left to right and the bounding box is consistently ahead of the person + # then the value should be increased. + # TIP: This offset is dynamic so you can change the value and it will update existing + # tracked objects, this makes it easy to tune. + # WARNING: Fast moving objects will likely not have the bounding box align. 
+ annotation_offset: 0 + +# Optional: Object configuration +# NOTE: Can be overridden at the camera level +objects: + # Optional: list of objects to track from labelmap.txt (default: shown below) + track: + - person + # Optional: mask to prevent all object types from being detected in certain areas (default: no mask) + # Checks based on the bottom center of the bounding box of the object. + # NOTE: This mask is COMBINED with the object type specific mask below + mask: 0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278 + # Optional: filters to reduce false positives for specific object types + filters: + person: + # Optional: minimum width*height of the bounding box for the detected object (default: 0) + min_area: 5000 + # Optional: maximum width*height of the bounding box for the detected object (default: 24000000) + max_area: 100000 + # Optional: minimum width/height of the bounding box for the detected object (default: 0) + min_ratio: 0.5 + # Optional: maximum width/height of the bounding box for the detected object (default: 24000000) + max_ratio: 2.0 + # Optional: minimum score for the object to initiate tracking (default: shown below) + min_score: 0.5 + # Optional: minimum decimal percentage for tracked object's computed score to be considered a true positive (default: shown below) + threshold: 0.7 + # Optional: mask to prevent this object type from being detected in certain areas (default: no mask) + # Checks based on the bottom center of the bounding box of the object + mask: 0.000,0.000,0.781,0.000,0.781,0.278,0.000,0.278 + +# Optional: Review configuration +# NOTE: Can be overridden at the camera level +review: + # Optional: alerts configuration + alerts: + # Optional: labels that qualify as an alert (default: shown below) + labels: + - car + - person + # Optional: required zones for an object to be marked as an alert (default: none) + # NOTE: when setting required zones globally, this zone must exist on all cameras + # or the config will be considered 
invalid. In that case the required_zones + # should be configured at the camera level. + required_zones: + - driveway + # Optional: detections configuration + detections: + # Optional: labels that qualify as a detection (default: all labels that are tracked / listened to) + labels: + - car + - person + # Optional: required zones for an object to be marked as a detection (default: none) + # NOTE: when setting required zones globally, this zone must exist on all cameras + # or the config will be considered invalid. In that case the required_zones + # should be configured at the camera level. + required_zones: + - driveway + +# Optional: Motion configuration +# NOTE: Can be overridden at the camera level +motion: + # Optional: enables detection for the camera (default: True) + # NOTE: Motion detection is required for object detection, + # setting this to False and leaving detect enabled + # will result in an error on startup. + enabled: False + # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below) + # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. + # The value should be between 1 and 255. + threshold: 30 + # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection + # needs to recalibrate. (default: shown below) + # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion. + # Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching + # a doorbell camera. + lightning_threshold: 0.8 + # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below) + # Increasing this value will prevent smaller areas of motion from being detected. 
Decreasing will + # make motion detection more sensitive to smaller moving objects. + # As a rule of thumb: + # - 10 - high sensitivity + # - 30 - medium sensitivity + # - 50 - low sensitivity + contour_area: 10 + # Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below) + # Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster. + # Low values will cause things like moving shadows to be detected as motion for longer. + # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/ + frame_alpha: 0.01 + # Optional: Height of the resized motion frame (default: 100) + # Higher values will result in more granular motion detection at the expense of higher CPU usage. + # Lower values result in less CPU, but small changes may not register as motion. + frame_height: 100 + # Optional: motion mask + # NOTE: see docs for more detailed info on creating masks + mask: 0.000,0.469,1.000,0.469,1.000,1.000,0.000,1.000 + # Optional: improve contrast (default: shown below) + # Enables dynamic contrast improvement. This should help improve night detections at the cost of making motion detection more sensitive + # for daytime. + improve_contrast: True + # Optional: Delay when updating camera motion through MQTT from ON -> OFF (default: shown below). + mqtt_off_delay: 30 + +# Optional: Notification Configuration +notifications: + # Optional: Enable notification service (default: shown below) + enabled: False + # Optional: Email for push service to reach out to + # NOTE: This is required to use notifications + email: "admin@example.com" + +# Optional: Record configuration +# NOTE: Can be overridden at the camera level +record: + # Optional: Enable recording (default: shown below) + # WARNING: If recording is disabled in the config, turning it on via + # the UI or MQTT later will have no effect. 
+ enabled: False + # Optional: Number of minutes to wait between cleanup runs (default: shown below) + # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o + expire_interval: 60 + # Optional: Sync recordings with disk on startup and once a day (default: shown below). + sync_recordings: False + # Optional: Retention settings for recording + retain: + # Optional: Number of days to retain recordings regardless of tracked objects (default: shown below) + # NOTE: This should be set to 0 and retention should be defined in alerts and detections section below + # if you only want to retain recordings of alerts and detections. + days: 0 + # Optional: Mode for retention. Available options are: all, motion, and active_objects + # all - save all recording segments regardless of activity + # motion - save all recordings segments with any detected motion + # active_objects - save all recording segments with active/moving objects + # NOTE: this mode only applies when the days setting above is greater than 0 + mode: all + # Optional: Recording Export Settings + export: + # Optional: Timelapse Output Args (default: shown below). + # NOTE: The default args are set to fit 24 hours of recording into 1 hour playback. + # See https://stackoverflow.com/a/58268695 for more info on how these args work. + # As an example: if you wanted to go from 24 hours to 30 minutes that would be going + # from 86400 seconds to 1800 seconds which would be 1800 / 86400 = 0.02. + # The -r (framerate) dictates how smooth the output video is. + # So the args would be -vf setpts=0.02*PTS -r 30 in that case. + timelapse_args: "-vf setpts=0.04*PTS -r 30" + # Optional: Recording Preview Settings + preview: + # Optional: Quality of recording preview (default: shown below). 
+ # Options are: very_low, low, medium, high, very_high + quality: medium + # Optional: alert recording settings + alerts: + # Optional: Number of seconds before the alert to include (default: shown below) + pre_capture: 5 + # Optional: Number of seconds after the alert to include (default: shown below) + post_capture: 5 + # Optional: Retention settings for recordings of alerts + retain: + # Required: Retention days (default: shown below) + days: 14 + # Optional: Mode for retention. (default: shown below) + # all - save all recording segments for alerts regardless of activity + # motion - save all recordings segments for alerts with any detected motion + # active_objects - save all recording segments for alerts with active/moving objects + # + # NOTE: If the retain mode for the camera is more restrictive than the mode configured + # here, the segments will already be gone by the time this mode is applied. + # For example, if the camera retain mode is "motion", the segments without motion are + # never stored, so setting the mode to "all" here won't bring them back. + mode: motion + # Optional: detection recording settings + detections: + # Optional: Number of seconds before the detection to include (default: shown below) + pre_capture: 5 + # Optional: Number of seconds after the detection to include (default: shown below) + post_capture: 5 + # Optional: Retention settings for recordings of detections + retain: + # Required: Retention days (default: shown below) + days: 14 + # Optional: Mode for retention. (default: shown below) + # all - save all recording segments for detections regardless of activity + # motion - save all recordings segments for detections with any detected motion + # active_objects - save all recording segments for detections with active/moving objects + # + # NOTE: If the retain mode for the camera is more restrictive than the mode configured + # here, the segments will already be gone by the time this mode is applied. 
+ # For example, if the camera retain mode is "motion", the segments without motion are + # never stored, so setting the mode to "all" here won't bring them back. + mode: motion + +# Optional: Configuration for the jpg snapshots written to the clips directory for each tracked object +# NOTE: Can be overridden at the camera level +snapshots: + # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below) + enabled: False + # Optional: save a clean PNG copy of the snapshot image (default: shown below) + clean_copy: True + # Optional: print a timestamp on the snapshots (default: shown below) + timestamp: False + # Optional: draw bounding box on the snapshots (default: shown below) + bounding_box: True + # Optional: crop the snapshot (default: shown below) + crop: False + # Optional: height to resize the snapshot to (default: original size) + height: 175 + # Optional: Restrict snapshots to objects that entered any of the listed zones (default: no required zones) + required_zones: [] + # Optional: Camera override for retention settings (default: global values) + retain: + # Required: Default retention days (default: shown below) + default: 10 + # Optional: Per object retention days + objects: + person: 15 + # Optional: quality of the encoded jpeg, 0-100 (default: shown below) + quality: 70 + +# Optional: Configuration for semantic search capability +semantic_search: + # Optional: Enable semantic search (default: shown below) + enabled: False + # Optional: Re-index embeddings database from historical tracked objects (default: shown below) + reindex: False + # Optional: Set the model size used for embeddings. (default: shown below) + # NOTE: small model runs on CPU and large model runs on GPU + model_size: "small" + +# Optional: Configuration for AI generated tracked object descriptions +# NOTE: Semantic Search must be enabled for this to do anything. 
+# WARNING: Depending on the provider, this will send thumbnails over the internet +# to Google or OpenAI's LLMs to generate descriptions. It can be overridden at +# the camera level (enabled: False) to enhance privacy for indoor cameras. +genai: + # Optional: Enable AI description generation (default: shown below) + enabled: False + # Required if enabled: Provider must be one of ollama, gemini, or openai + provider: ollama + # Required if provider is ollama. May also be used for an OpenAI API compatible backend with the openai provider. + base_url: http://localhost:11434 + # Required if gemini or openai + api_key: "{FRIGATE_GENAI_API_KEY}" + # Optional: The default prompt for generating descriptions. Can use replacement + # variables like "label", "sub_label", "camera" to make more dynamic. (default: shown below) + prompt: "Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background." + # Optional: Object specific prompts to customize description results + # Format: {label}: {prompt} + object_prompts: + person: "My special person prompt." + +# Optional: Restream configuration +# Uses https://github.com/AlexxIT/go2rtc (v1.9.2) +go2rtc: + +# Optional: jsmpeg stream configuration for WebUI +live: + # Optional: Set the name of the stream that should be used for live view + # in frigate WebUI. (default: name of camera) + stream_name: camera_name + # Optional: Set the height of the jsmpeg stream. (default: 720) + # This must be less than or equal to the height of the detect stream. Lower resolutions + # reduce bandwidth required for viewing the jsmpeg stream. Width is computed to match known aspect ratio. + height: 720 + # Optional: Set the encode quality of the jsmpeg stream (default: shown below) + # 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources. 
+ quality: 8 + +# Optional: in-feed timestamp style configuration +# NOTE: Can be overridden at the camera level +timestamp_style: + # Optional: Position of the timestamp (default: shown below) + # "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right) + position: "tl" + # Optional: Format specifier conform to the Python package "datetime" (default: shown below) + # Additional Examples: + # german: "%d.%m.%Y %H:%M:%S" + format: "%m/%d/%Y %H:%M:%S" + # Optional: Color of font + color: + # All Required when color is specified (default: shown below) + red: 255 + green: 255 + blue: 255 + # Optional: Line thickness of font (default: shown below) + thickness: 2 + # Optional: Effect of lettering (default: shown below) + # None (No effect), + # "solid" (solid background in inverse color of font) + # "shadow" (shadow for font) + effect: None + +# Required +cameras: + # Required: name of the camera + back: + # Optional: Enable/Disable the camera (default: shown below). + # If disabled: config is used but no live stream and no capture etc. + # Events/Recordings are still viewable. + enabled: True + # Required: ffmpeg settings for the camera + ffmpeg: + # Required: A list of input streams for the camera. See documentation for more information. + inputs: + # Required: the path to the stream + # NOTE: path may include environment variables or docker secrets, which must begin with 'FRIGATE_' and be referenced in {} + - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 + # Required: list of roles for this stream. valid values are: audio,detect,record + # NOTICE: In addition to assigning the audio, detect, and record roles + # they must also be enabled in the camera config. 
+ roles: + - audio + - detect + - record + # Optional: stream specific global args (default: inherit) + # global_args: + # Optional: stream specific hwaccel args (default: inherit) + # hwaccel_args: + # Optional: stream specific input args (default: inherit) + # input_args: + # Optional: camera specific global args (default: inherit) + # global_args: + # Optional: camera specific hwaccel args (default: inherit) + # hwaccel_args: + # Optional: camera specific input args (default: inherit) + # input_args: + # Optional: camera specific output args (default: inherit) + # output_args: + + # Optional: timeout for highest scoring image before allowing it + # to be replaced by a newer image. (default: shown below) + best_image_timeout: 60 + + # Optional: URL to visit the camera web UI directly from the system page. Might not be available on every camera. + webui_url: "" + + # Optional: zones for this camera + zones: + # Required: name of the zone + # NOTE: This must be different than any camera names, but can match with another zone on another + # camera. + front_steps: + # Required: List of x,y coordinates to define the polygon of the zone. + # NOTE: Presence in a zone is evaluated only based on the bottom center of the objects bounding box. + coordinates: 0.284,0.997,0.389,0.869,0.410,0.745 + # Optional: Number of consecutive frames required for object to be considered present in the zone (default: shown below). + inertia: 3 + # Optional: Number of seconds that an object must loiter to be considered in the zone (default: shown below) + loitering_time: 0 + # Optional: List of objects that can trigger this zone (default: all tracked objects) + objects: + - person + # Optional: Zone level object filters. + # NOTE: The global and camera filters are applied upstream. 
+ filters: + person: + min_area: 5000 + max_area: 100000 + threshold: 0.7 + + # Optional: Configuration for the jpg snapshots published via MQTT + mqtt: + # Optional: Enable publishing snapshot via mqtt for camera (default: shown below) + # NOTE: Only applies to publishing image data to MQTT via 'frigate///snapshot'. + # All other messages will still be published. + enabled: True + # Optional: print a timestamp on the snapshots (default: shown below) + timestamp: True + # Optional: draw bounding box on the snapshots (default: shown below) + bounding_box: True + # Optional: crop the snapshot (default: shown below) + crop: True + # Optional: height to resize the snapshot to (default: shown below) + height: 270 + # Optional: jpeg encode quality (default: shown below) + quality: 70 + # Optional: Restrict mqtt messages to objects that entered any of the listed zones (default: no required zones) + required_zones: [] + + # Optional: Configuration for how camera is handled in the GUI. + ui: + # Optional: Adjust sort order of cameras in the UI. Larger numbers come later (default: shown below) + # By default the cameras are sorted alphabetically. + order: 0 + # Optional: Whether or not to show the camera in the Frigate UI (default: shown below) + dashboard: True + + # Optional: connect to ONVIF camera + # to enable PTZ controls. + onvif: + # Required: host of the camera being connected to. + host: 0.0.0.0 + # Optional: ONVIF port for device (default: shown below). + port: 8000 + # Optional: username for login. + # NOTE: Some devices require admin to access ONVIF. + user: admin + # Optional: password for login. + password: admin + # Optional: Ignores time synchronization mismatches between the camera and the server during authentication. + # Using NTP on both ends is recommended and this should only be set to True in a "safe" environment due to the security risk it represents. + ignore_time_mismatch: False + # Optional: PTZ camera object autotracking. 
Keeps a moving object in + # the center of the frame by automatically moving the PTZ camera. + autotracking: + # Optional: enable/disable object autotracking. (default: shown below) + enabled: False + # Optional: calibrate the camera on startup (default: shown below) + # A calibration will move the PTZ in increments and measure the time it takes to move. + # The results are used to help estimate the position of tracked objects after a camera move. + # Frigate will update your config file automatically after a calibration with + # a "movement_weights" entry for the camera. You should then set calibrate_on_startup to False. + calibrate_on_startup: False + # Optional: the mode to use for zooming in/out on objects during autotracking. (default: shown below) + # Available options are: disabled, absolute, and relative + # disabled - don't zoom in/out on autotracked objects, use pan/tilt only + # absolute - use absolute zooming (supported by most PTZ capable cameras) + # relative - use relative zooming (not supported on all PTZs, but makes concurrent pan/tilt/zoom movements) + zooming: disabled + # Optional: A value to change the behavior of zooming on autotracked objects. (default: shown below) + # A lower value will keep more of the scene in view around a tracked object. + # A higher value will zoom in more on a tracked object, but Frigate may lose tracking more quickly. + # The value should be between 0.1 and 0.75 + zoom_factor: 0.3 + # Optional: list of objects to track from labelmap.txt (default: shown below) + track: + - person + # Required: Begin automatically tracking an object when it enters any of the listed zones. + required_zones: + - zone_name + # Required: Name of ONVIF preset in camera's firmware to return to when tracking is over. (default: shown below) + return_preset: home + # Optional: Seconds to delay before returning to preset. (default: shown below) + timeout: 10 + # Optional: Values generated automatically by a camera calibration. 
Do not modify these manually. (default: shown below) + movement_weights: [] + + # Optional: Configuration for how to sort the cameras in the Birdseye view. + birdseye: + # Optional: Adjust sort order of cameras in the Birdseye view. Larger numbers come later (default: shown below) + # By default the cameras are sorted alphabetically. + order: 0 + + # Optional: Configuration for AI generated tracked object descriptions + genai: + # Optional: Enable AI description generation (default: shown below) + enabled: False + # Optional: Use the object snapshot instead of thumbnails for description generation (default: shown below) + use_snapshot: False + # Optional: The default prompt for generating descriptions. Can use replacement + # variables like "label", "sub_label", "camera" to make more dynamic. (default: shown below) + prompt: "Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background." + # Optional: Object specific prompts to customize description results + # Format: {label}: {prompt} + object_prompts: + person: "My special person prompt." + # Optional: objects to generate descriptions for (default: all objects that are tracked) + objects: + - person + - cat + # Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify) + required_zones: [] + +# Optional +ui: + # Optional: Set a timezone to use in the UI (default: use browser local time) + # timezone: America/Denver + # Optional: Set the time format used. + # Options are browser, 12hour, or 24hour (default: shown below) + time_format: browser + # Optional: Set the date style for a specified length. + # Options are: full, long, medium, short + # Examples: + # short: 2/11/23 + # medium: Feb 11, 2023 + # full: Saturday, February 11, 2023 + # (default: shown below). + date_style: short + # Optional: Set the time style for a specified length. 
+ # Options are: full, long, medium, short + # Examples: + # short: 8:14 PM + # medium: 8:15:22 PM + # full: 8:15:22 PM Mountain Standard Time + # (default: shown below). + time_style: medium + # Optional: Ability to manually override the date / time styling to use strftime format + # https://www.gnu.org/software/libc/manual/html_node/Formatting-Calendar-Time.html + # possible values are shown above (default: not set) + strftime_fmt: "%Y/%m/%d %H:%M" + +# Optional: Telemetry configuration +telemetry: + # Optional: Enabled network interfaces for bandwidth stats monitoring (default: empty list, let nethogs search all) + network_interfaces: + - eth + - enp + - eno + - ens + - wl + - lo + # Optional: Configure system stats + stats: + # Enable AMD GPU stats (default: shown below) + amd_gpu_stats: True + # Enable Intel GPU stats (default: shown below) + intel_gpu_stats: True + # Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below) + # NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled. 
+ network_bandwidth: False + # Optional: Enable the latest version outbound check (default: shown below) + # NOTE: If you use the HomeAssistant integration, disabling this will prevent it from reporting new versions + version_check: True + +# Optional: Camera groups (default: no groups are setup) +# NOTE: It is recommended to use the UI to setup camera groups +camera_groups: + # Required: Name of camera group + front: + # Required: list of cameras in the group + cameras: + - front_cam + - side_cam + - front_doorbell_cam + # Required: icon used for group + icon: LuCar + # Required: index of this group + order: 0 +``` diff --git a/docs/docs/configuration/restream.md b/docs/docs/configuration/restream.md index 15fddb45c..211050972 100644 --- a/docs/docs/configuration/restream.md +++ b/docs/docs/configuration/restream.md @@ -7,11 +7,11 @@ title: Restream Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://:8554/`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate. -Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.8.4) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#configuration) for more advanced configurations and features. +Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.4) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#configuration) for more advanced configurations and features. 
:::note -You can access the go2rtc webUI at `http://frigate_ip:5000/live/webrtc` which can be helpful to debug as well as provide useful information about your camera streams. +You can access the go2rtc stream info at `/api/go2rtc/streams` which can be helpful to debug as well as provide useful information about your camera streams. ::: @@ -21,7 +21,7 @@ Birdseye RTSP restream can be accessed at `rtsp://:8554/birdseye`. ```yaml birdseye: - restream: true + restream: True ``` ### Securing Restream With Authentication @@ -38,10 +38,6 @@ go2rtc: **NOTE:** This does not apply to localhost requests, there is no need to provide credentials when using the restream as a source for frigate cameras. -## RTMP (Deprecated) - -In previous Frigate versions RTMP was used for re-streaming. RTMP has disadvantages however including being incompatible with H.265, high bitrates, and certain audio codecs. RTMP is deprecated and it is recommended use the built in go2rtc config for restreaming. - ## Reduce Connections To Camera Some cameras only support one active connection or you may just want to have a single connection open to the camera. The RTSP restream allows this to be possible. @@ -138,7 +134,7 @@ cameras: ## Advanced Restream Configurations -The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: +The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: NOTE: The output will need to be passed with two curly braces `{{output}}` diff --git a/docs/docs/configuration/review.md b/docs/docs/configuration/review.md new file mode 100644 index 000000000..e321fcb8a --- /dev/null +++ b/docs/docs/configuration/review.md @@ -0,0 +1,75 @@ +--- +id: review +title: Review +--- + +The Review page of the Frigate UI is for quickly reviewing historical footage of interest from your cameras. 
_Review items_ are indicated on a vertical timeline and displayed as a grid of previews - bandwidth-optimized, low frame rate, low resolution videos. Hovering over or swiping a preview plays the video and marks it as reviewed. If more in-depth analysis is required, the preview can be clicked/tapped and the full frame rate, full resolution recording is displayed. + +Review items are filterable by date, object type, and camera. + +### Review items vs. tracked objects (formerly "events") + +In Frigate 0.13 and earlier versions, the UI presented "events". An event was synonymous with a tracked or detected object. In Frigate 0.14 and later, a review item is a time period where any number of tracked objects were active. + +For example, consider a situation where two people walked past your house. One was walking a dog. At the same time, a car drove by on the street behind them. + +In this scenario, Frigate 0.13 and earlier would show 4 "events" in the UI - one for each person, another for the dog, and yet another for the car. You would have had 4 separate videos to watch even though they would have all overlapped. + +In 0.14 and later, all of that is bundled into a single review item which starts and ends to capture all of that activity. Reviews for a single camera cannot overlap. Once you have watched that time period on that camera, it is marked as reviewed. + +## Alerts and Detections + +Not every segment of video captured by Frigate may be of the same level of interest to you. Video of people who enter your property may be a different priority than those walking by on the sidewalk. For this reason, Frigate 0.14 categorizes review items as _alerts_ and _detections_. By default, all person and car objects are considered alerts. You can refine categorization of your review items by configuring required zones for them. + +## Restricting alerts to specific labels + +By default a review item will only be marked as an alert if a person or car is detected. 
This can be configured to include any object or audio label using the following config: + +```yaml +# can be overridden at the camera level +review: + alerts: + labels: + - car + - cat + - dog + - person + - speech +``` + +## Restricting detections to specific labels + +By default all detections that do not qualify as an alert qualify as a detection. However, detections can further be filtered to only include certain labels or certain zones. + +```yaml +# can be overridden at the camera level +review: + detections: + labels: + - bark + - dog +``` + +## Excluding a camera from alerts or detections + +To exclude a specific camera from alerts or detections, simply provide an empty list to the alerts or detections field _at the camera level_. + +For example, to exclude objects on the camera _gatecamera_ from any detections, include this in your config: + +```yaml +cameras: + gatecamera: + review: + detections: + labels: [] +``` + +## Restricting review items to specific zones + +By default a review item will be created if any `review -> alerts -> labels` and `review -> detections -> labels` are detected anywhere in the camera frame. You will likely want to configure review items to only be created when the object enters an area of interest, [see the zone docs for more information](./zones.md#restricting-alerts-and-detections-to-specific-zones) + +:::info + +Because zones don't apply to audio, audio labels will always be marked as a detection by default. + +::: diff --git a/docs/docs/configuration/semantic_search.md b/docs/docs/configuration/semantic_search.md new file mode 100644 index 000000000..a569e8f1a --- /dev/null +++ b/docs/docs/configuration/semantic_search.md @@ -0,0 +1,59 @@ +--- +id: semantic_search +title: Using Semantic Search +--- + +Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. 
This feature works by creating _embeddings_ — numerical vector representations — for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results. + +Frigate has support for [Jina AI's CLIP model](https://huggingface.co/jinaai/jina-clip-v1) to create embeddings, which runs locally. Embeddings are then saved to Frigate's database. + +Semantic Search is accessed via the _Explore_ view in the Frigate UI. + +## Configuration + +Semantic search is disabled by default, and must be enabled in your config file before it can be used. Semantic Search is a global configuration setting. + +```yaml +semantic_search: + enabled: True + reindex: False +``` + +:::tip + +The embeddings database can be re-indexed from the existing tracked objects in your database by adding `reindex: True` to your `semantic_search` configuration. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing. Make sure to set the config back to `False` before restarting Frigate again. + +If you are enabling the Search feature for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to enable the `reindex` feature in order to do that. + +::: + +### Jina AI CLIP + +The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails. 
+ +The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Search page when clicking on the gray tracked object chip at the top left of each review item. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions. + +Differently weighted CLIP models are available and can be selected by setting the `model_size` config option: + +:::tip + +The CLIP models are downloaded in ONNX format, which means they will be accelerated using GPU hardware when available. This depends on the Docker build that is used. See [the object detector docs](../configuration/object_detectors.md) for more information. + +::: + +```yaml +semantic_search: + enabled: True + model_size: small +``` + +- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable. +- Configuring the `small` model employs a quantized version of the model that uses much less RAM and runs faster on CPU with a very negligible difference in embedding quality. + +## Usage + +1. Semantic search is used in conjunction with the other filters available on the Search page. Use a combination of traditional filtering and semantic search for the best results. +2. The comparison between text and image embedding distances generally means that results matching `description` will appear first, even if a `thumbnail` embedding may be a better match. Play with the "Search Type" filter to help find what you are looking for. +3. Make your search language and tone closely match your descriptions. If you are using thumbnail search, phrase your query as an image caption. +4. Semantic search on thumbnails tends to return better results when matching large subjects that take up most of the frame. Small things like "cat" tend to not work well. +5. Experiment! 
Find a tracked object you want to test and start typing keywords to see what works for you. diff --git a/docs/docs/configuration/snapshots.md b/docs/docs/configuration/snapshots.md index 6145812db..e6c260913 100644 --- a/docs/docs/configuration/snapshots.md +++ b/docs/docs/configuration/snapshots.md @@ -3,6 +3,10 @@ id: snapshots title: Snapshots --- -Frigate can save a snapshot image to `/media/frigate/clips` for each event named as `-.jpg`. +Frigate can save a snapshot image to `/media/frigate/clips` for each object that is detected named as `-.jpg`. They are also accessible [via the api](../integrations/api/event-snapshot-events-event-id-snapshot-jpg-get.api.mdx) -Snapshots sent via MQTT are configured in the [config file](https://docs.frigate.video/configuration/) under `cameras -> your_camera -> mqtt` +For users with Frigate+ enabled, snapshots are accessible in the UI in the Frigate+ pane to allow for quick submission to the Frigate+ service. + +To only save snapshots for objects that enter a specific zone, [see the zone docs](./zones.md#restricting-snapshots-to-specific-zones) + +Snapshots sent via MQTT are configured in the [config file](https://docs.frigate.video/configuration/) under `cameras -> your_camera -> mqtt` diff --git a/docs/docs/configuration/stationary_objects.md b/docs/docs/configuration/stationary_objects.md index 530c41aa4..341d1ea57 100644 --- a/docs/docs/configuration/stationary_objects.md +++ b/docs/docs/configuration/stationary_objects.md @@ -23,6 +23,30 @@ NOTE: There is no way to disable stationary object tracking with this value. `threshold` is the number of frames an object needs to remain relatively still before it is considered stationary. -## Avoiding stationary objects +## Why does Frigate track stationary objects? -In some cases, like a driveway, you may prefer to only have an event when a car is coming & going vs a constant event of it stationary in the driveway. 
[This docs sections](../guides/stationary_objects.md) explains how to approach that scenario. +Frigate didn't always track stationary objects. In fact, it didn't even track objects at all initially. + +Let's look at an example use case: I want to record any cars that enter my driveway. + +One might simply think "Why not just run object detection any time there is motion around the driveway area and notify if the bounding box is in that zone?" + +With that approach, what video is related to the car that entered the driveway? Did it come from the left or right? Was it parked across the street for an hour before turning into the driveway? One approach is to just record 24/7 or for motion (on any changed changed pixels) and not attempt to do that at all. This is what most other NVRs do. Just don't even try to identify a start and end for that object since it's hard and you will be wrong some portion of the time. + +Couldn't you just look at when motion stopped and started? Motion for a video feed is nothing more than looking for pixels that are different than they were in previous frames. If the car entered the driveway while someone was mowing the grass, how would you know which motion was for the car and which was for the person when they mow along the driveway or street? What if another car was driving the other direction on the street? Or what if its a windy day and the bush by your mailbox is blowing around? + +In order to do it more accurately, you need to identify objects and track them with a unique id. In each subsequent frame, everything has moved a little and you need to determine which bounding boxes go with each object from the previous frame. + +Tracking objects across frames is a challenging problem. Especially if you want to do it in real time. There are entire competitions for research algorithms to see which of them can do it the most accurately. Zero of them are accurate 100% of the time. Even the ones that can't do it in realtime. 
There is always an error rate in the algorithm. + +Now consider that the car is driving down a street that has other cars parked along it. It will drive behind some of these cars and in front of others. There may even be a car driving the opposite direction. + +Let's assume for now that we are NOT already tracking two parked cars on the street or the car parked in the driveway, ie, there is no stationary object tracking. + +As the car you are tracking approaches an area with 2 cars parked, the headlights reflect off the parked cars and the car parked in your driveway. The pixel values are different in that area, so there is motion detected. Object detection runs and identifies the remaining 3 cars. In the previous frame, you had a single bounding box from the car you are tracking. Now you have 4. The original object, the 2 cars on the street and the one in your driveway. + +Now you have to determine which of the bounding boxes in this frame should be matched to the tracking id from the previous frame where you only had one. Remember, you have never seen these additional 3 cars before, so you know nothing about them. On top of that the bounding box for the car you are tracking has now moved to a new location, so which of the 4 belongs to the car you were originally tracking? The algorithms here are fairly good. They use a Kalman filter to predict the next location of an object using the historical bounding boxes and the bounding box closest to the predicted location is linked. It's right sometimes, but the error rate is going to be high when there are 4 possible bounding boxes. + +Now let's assume that those other 3 cars were already being tracked as stationary objects, so the car driving down the street is a new 4th car. The object tracker knows we have had 3 cars and we now have 4. As the new car approaches the parked cars, the bounding boxes for all 4 cars is predicted based on the previous frames. 
The predicted boxes for the parked cars is pretty much a 100% overlap with the bounding boxes in the new frame. The parked cars are slam dunk matches to the tracking ids they had before and the only one left is the remaining bounding box which gets assigned to the new car. This results in a much lower error rate. Not perfect, but better. + +The most difficult scenario that causes IDs to be assigned incorrectly is when an object completely occludes another object. When a car drives in front of another car and its no longer visible, a bounding box disappeared and it's a bit of a toss up when assigning the id since it's difficult to know which one is in front of the other. This happens for cars passing in front of other cars fairly often. It's something that we want to improve in the future. diff --git a/docs/docs/configuration/tls.md b/docs/docs/configuration/tls.md new file mode 100644 index 000000000..7b254c100 --- /dev/null +++ b/docs/docs/configuration/tls.md @@ -0,0 +1,59 @@ +--- +id: tls +title: TLS +--- + +# TLS + +Frigate's integrated NGINX server supports TLS certificates. By default Frigate will generate a self signed certificate that will be used for port 8971. Frigate is designed to make it easy to use whatever tool you prefer to manage certificates. + +Frigate is often running behind a reverse proxy that manages TLS certificates for multiple services. You will likely need to set your reverse proxy to allow self signed certificates or you can disable TLS in Frigate's config. However, if you are running on a dedicated device that's separate from your proxy or if you expose Frigate directly to the internet, you may want to configure TLS with valid certificates. + +In many deployments, TLS will be unnecessary. It can be disabled in the config with the following yaml: + +```yaml +tls: + enabled: False +``` + +## Certificates + +TLS certificates can be mounted at `/etc/letsencrypt/live/frigate` using a bind mount or docker volume. + +```yaml +frigate: + ... 
+ volumes: + - /path/to/your/certificate_folder:/etc/letsencrypt/live/frigate:ro + ... +``` + +Within the folder, the private key is expected to be named `privkey.pem` and the certificate is expected to be named `fullchain.pem`. + +Note that certbot uses symlinks, and those can't be followed by the container unless it has access to the targets as well, so if using certbot you'll also have to mount the `archive` folder for your domain, e.g.: + +```yaml +frigate: + ... + volumes: + - /etc/letsencrypt/live/frigate:/etc/letsencrypt/live/frigate:ro + - /etc/letsencrypt/archive/frigate:/etc/letsencrypt/archive/frigate:ro + ... + +``` + +Frigate automatically compares the fingerprint of the certificate at `/etc/letsencrypt/live/frigate/fullchain.pem` against the fingerprint of the TLS cert in NGINX every minute. If these differ, the NGINX config is reloaded to pick up the updated certificate. + +If you issue Frigate valid certificates you will likely want to configure it to run on port 443 so you can access it without a port number like `https://your-frigate-domain.com` by mapping 8971 to 443. + +```yaml +frigate: + ... + ports: + - "443:8971" + ... +``` + +## ACME Challenge + +Frigate also supports hosting the acme challenge files for the HTTP challenge method if needed. The challenge files should be mounted at `/etc/letsencrypt/www`. diff --git a/docs/docs/configuration/user_interface.md b/docs/docs/configuration/user_interface.md deleted file mode 100644 index 72ce5a5d6..000000000 --- a/docs/docs/configuration/user_interface.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -id: user_interface -title: User Interface Configurations ---- - -### Experimental UI - -While developing and testing new components, users may decide to opt-in to test potential new features on the front-end. - -```yaml -ui: - use_experimental: true -``` - -Note that experimental changes may contain bugs or may be removed at any time in future releases of the software. 
Use of these features are presented as-is and with no functional guarantee. diff --git a/docs/docs/configuration/zones.md b/docs/docs/configuration/zones.md index daca1786a..aef6b0a5b 100644 --- a/docs/docs/configuration/zones.md +++ b/docs/docs/configuration/zones.md @@ -5,78 +5,133 @@ title: Zones Zones allow you to define a specific area of the frame and apply additional filters for object types so you can determine whether or not an object is within a particular area. Presence in a zone is evaluated based on the bottom center of the bounding box for the object. It does not matter how much of the bounding box overlaps with the zone. +For example, the cat in this image is currently in Zone 1, but **not** Zone 2. +![bottom center](/img/bottom-center.jpg) + Zones cannot have the same name as a camera. If desired, a single zone can include multiple cameras if you have multiple cameras covering the same area by configuring zones with the same name for each camera. -During testing, enable the Zones option for the debug feed so you can adjust as needed. The zone line will increase in thickness when any object enters the zone. +During testing, enable the Zones option for the Debug view of your camera (Settings --> Debug) so you can adjust as needed. The zone line will increase in thickness when any object enters the zone. To create a zone, follow [the steps for a "Motion mask"](masks.md), but use the section of the web UI for creating a zone instead. -### Restricting events to specific zones +### Restricting alerts and detections to specific zones -Often you will only want events to be created when an object enters areas of interest. This is done using zones along with setting required_zones. Let's say you only want to be notified when an object enters your entire_yard zone, the config would be: +Often you will only want alerts to be created when an object enters areas of interest. This is done using zones along with setting required_zones. 
Let's say you only want to have an alert created when an object enters your entire_yard zone, the config would be: ```yaml -camera: - record: - events: +cameras: + name_of_your_camera: + review: + alerts: + required_zones: + - entire_yard + zones: + entire_yard: + coordinates: ... +``` + +You may also want to filter detections to only be created when an object enters a secondary area of interest. This is done using zones along with setting required_zones. Let's say you want alerts when an object enters the inner area of the yard but detections when an object enters the edge of the yard, the config would be + +```yaml +cameras: + name_of_your_camera: + review: + alerts: + required_zones: + - inner_yard + detections: + required_zones: + - edge_yard + zones: + edge_yard: + coordinates: ... + inner_yard: + coordinates: ... +``` + +### Restricting snapshots to specific zones + +```yaml +cameras: + name_of_your_camera: + snapshots: required_zones: - entire_yard - snapshots: - required_zones: - - entire_yard - zones: - entire_yard: - coordinates: ... + zones: + entire_yard: + coordinates: ... ``` ### Restricting zones to specific objects -Sometimes you want to limit a zone to specific object types to have more granular control of when events/snapshots are saved. The following example will limit one zone to person objects and the other to cars. +Sometimes you want to limit a zone to specific object types to have more granular control of when alerts, detections, and snapshots are saved. The following example will limit one zone to person objects and the other to cars. ```yaml -camera: - record: - events: - required_zones: - - entire_yard - - front_yard_street - snapshots: - required_zones: - - entire_yard - - front_yard_street - zones: - entire_yard: - coordinates: ... (everywhere you want a person) - objects: - - person - front_yard_street: - coordinates: ... (just the street) - objects: - - car +cameras: + name_of_your_camera: + zones: + entire_yard: + coordinates: ... 
(everywhere you want a person) + objects: + - person + front_yard_street: + coordinates: ... (just the street) + objects: + - car ``` -Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. You will get events for person objects that enter anywhere in the yard, and events for cars only if they enter the street. +Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. Objects will be tracked for any `person` that enter anywhere in the yard, and for cars only if they enter the street. + +### Zone Loitering + +Sometimes objects are expected to be passing through a zone, but an object loitering in an area is unexpected. Zones can be configured to have a minimum loitering time before the object will be considered in the zone. + +```yaml +cameras: + name_of_your_camera: + zones: + sidewalk: + loitering_time: 4 # unit is in seconds + objects: + - person +``` ### Zone Inertia Sometimes an objects bounding box may be slightly incorrect and the bottom center of the bounding box is inside the zone while the object is not actually in the zone. Zone inertia helps guard against this by requiring an object's bounding box to be within the zone for multiple consecutive frames. 
This value can be configured: ```yaml -camera: - zones: - front_yard: - inertia: 3 - objects: - - person +cameras: + name_of_your_camera: + zones: + front_yard: + inertia: 3 + objects: + - person ``` There may also be cases where you expect an object to quickly enter and exit a zone, like when a car is pulling into the driveway, and you may want to have the object be considered present in the zone immediately: ```yaml -camera: - zones: - driveway_entrance: - inertia: 1 - objects: - - car +cameras: + name_of_your_camera: + zones: + driveway_entrance: + inertia: 1 + objects: + - car +``` + +### Loitering Time + +Zones support a `loitering_time` configuration which can be used to only consider an object as part of a zone if they loiter in the zone for the specified number of seconds. This can be used, for example, to create alerts for cars that stop on the street but not cars that just drive past your camera. + +```yaml +cameras: + name_of_your_camera: + zones: + front_yard: + loitering_time: 5 # unit is in seconds + objects: + - person ``` diff --git a/docs/docs/development/contributing.md b/docs/docs/development/contributing.md index bc08afbc9..32fc13e1f 100644 --- a/docs/docs/development/contributing.md +++ b/docs/docs/development/contributing.md @@ -33,7 +33,6 @@ Fork [blakeblackshear/frigate-hass-integration](https://github.com/blakeblackshe ### Prerequisites -- [Frigate source code](#frigate-core-web-and-docs) - GNU make - Docker - An extra detector (Coral, OpenVINO, etc.) is optional but recommended to simulate real world performance. @@ -129,7 +128,6 @@ ffmpeg -c:v h264_qsv -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/ ### Prerequisites -- [Frigate source code](#frigate-core-web-and-docs) - All [core](#core) prerequisites _or_ another running Frigate instance locally available - Node.js 20 @@ -155,6 +153,12 @@ cd web && npm install cd web && npm run dev ``` +##### 3a. 
Run the development server against a non-local instance + +To run the development server against a non-local instance, you will need to +replace the `localhost` values in `vite.config.ts` with the IP address of the +non-local backend server. + #### 4. Making changes The Web UI is built using [Vite](https://vitejs.dev/), [Preact](https://preactjs.com), and [Tailwind CSS](https://tailwindcss.com). @@ -182,7 +186,6 @@ npm run test ### Prerequisites -- [Frigate source code](#frigate-core-web-and-docs) - Node.js 20 ### Making changes @@ -190,7 +193,7 @@ npm run test #### 1. Installation ```console -npm install +cd docs && npm install ``` #### 2. Local Development @@ -222,3 +225,13 @@ docker buildx create --name builder --driver docker-container --driver-opt netwo docker buildx inspect builder --bootstrap make push ``` + +## Other + +### Nginx + +When testing nginx config changes from within the dev container, the following command can be used to copy and reload the config for testing without rebuilding the container: + +```console +sudo cp docker/main/rootfs/usr/local/nginx/conf/* /usr/local/nginx/conf/ && sudo /usr/local/nginx/sbin/nginx -s reload +``` diff --git a/docs/docs/frigate/camera_setup.md b/docs/docs/frigate/camera_setup.md index 0e53b4809..33ae24cab 100644 --- a/docs/docs/frigate/camera_setup.md +++ b/docs/docs/frigate/camera_setup.md @@ -5,9 +5,9 @@ title: Camera setup Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but less compatibility. Chrome 108+, Safari and Edge are the only browsers able to play H.265 and only support a limited number of H.265 profiles. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. 
There are three different goals that you want to tune your stream configurations around. -- **Detection**: This is the only stream that Frigate will decode for processing. Also, this is the stream where snapshots will be generated from. The resolution for detection should be tuned for the size of the objects you want to detect. See [Choosing a detect resolution](#choosing-a-detect-resolution) for more details. The recommended frame rate is 5fps, but may need to be higher for very fast moving objects. Higher resolutions and frame rates will drive higher CPU usage on your server. +- **Detection**: This is the only stream that Frigate will decode for processing. Also, this is the stream where snapshots will be generated from. The resolution for detection should be tuned for the size of the objects you want to detect. See [Choosing a detect resolution](#choosing-a-detect-resolution) for more details. The recommended frame rate is 5fps, but may need to be higher (10fps is the recommended maximum for most users) for very fast moving objects. Higher resolutions and frame rates will drive higher CPU usage on your server. -- **Recording**: This stream should be the resolution you wish to store for reference. Typically, this will be the highest resolution your camera supports. I recommend setting this feed to 15 fps. +- **Recording**: This stream should be the resolution you wish to store for reference. Typically, this will be the highest resolution your camera supports. I recommend setting this feed in your camera's firmware to 15 fps. - **Stream Viewing**: This stream will be rebroadcast as is to Home Assistant for viewing with the stream component. Setting this resolution too high will use significant bandwidth when viewing streams in Home Assistant, and they may not load reliably over slower connections. 
diff --git a/docs/docs/frigate/glossary.md b/docs/docs/frigate/glossary.md new file mode 100644 index 000000000..bd039554c --- /dev/null +++ b/docs/docs/frigate/glossary.md @@ -0,0 +1,69 @@ +--- +id: glossary +title: Glossary +--- + +The glossary explains terms commonly used in Frigate's documentation. + +## Bounding Box + +A box returned from the object detection model that outlines an object in the frame. These have multiple colors depending on object type in the debug live view. + +### Bounding Box Colors + +- At startup different colors will be assigned to each object label +- A dark blue thin line indicates that object is not detected at this current point in time +- A gray thin line indicates that object is detected as being stationary +- A thick line indicates that object is the subject of autotracking (when enabled). + +## False Positive + +An incorrect detection of an object type. For example a dog being detected as a person, a chair being detected as a dog, etc. A person being detected in an area you want to ignore is not a false positive. + +## Mask + +There are two types of masks in Frigate. [See the mask docs for more info](/configuration/masks) + +### Motion Mask + +Motion masks prevent detection of [motion](#motion) in masked areas from triggering Frigate to run object detection, but do not prevent objects from being detected if object detection runs due to motion in nearby areas. For example: camera timestamps, skies, the tops of trees, etc. + +### Object Mask + +Object filter masks drop any bounding boxes where the bottom center (overlap doesn't matter) is in the masked area. It forces them to be considered a [false positive](#false-positive) so that they are ignored. + +## Min Score + +The lowest score that an object can be detected with during tracking, any detection with a lower score will be assumed to be a false positive + +## Motion + +When pixels in the current camera frame are different than previous frames. 
When many nearby pixels are different in the current frame they are grouped together and indicated with a red motion box in the live debug view.
There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website. +My current favorite is the Beelink EQ12 because of the efficient N100 CPU and dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website. -| Name | Coral Inference Speed | Coral Compatibility | Notes | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | -| Odyssey X86 Blue J4125 (Amazon) (SeeedStudio) | 9-10ms | M.2 B+M, USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. | -| Minisforum GK41 (Amazon) | 9-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. | -| Intel NUC (Amazon) | 8-10ms | USB | Overkill for most, but great performance. 
Can handle many cameras at 5fps depending on typical amounts of motion. Requires extra parts. | +| Name | Coral Inference Speed | Coral Compatibility | Notes | +| ------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | +| Beelink EQ12 (Amazon) | 5-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. | +| Intel NUC (Amazon) | 5-10ms | USB | Overkill for most, but great performance. Can handle many cameras at 5fps depending on typical amounts of motion. Requires extra parts. | ## Detectors @@ -41,14 +40,15 @@ The USB version is compatible with the widest variety of hardware and does not r The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai -A single Coral can handle many cameras and will be sufficient for the majority of users. You can calculate the maximum performance of your Coral based on the inference speed reported by Frigate. With an inference speed of 10, your Coral will top out at `1000/10=100`, or 100 frames per second. If your detection fps is regularly getting close to that, you should first consider tuning motion masks. If those are already properly configured, a second Coral may be needed. +A single Coral can handle many cameras using the default model and will be sufficient for the majority of users. You can calculate the maximum performance of your Coral based on the inference speed reported by Frigate. With an inference speed of 10, your Coral will top out at `1000/10=100`, or 100 frames per second. If your detection fps is regularly getting close to that, you should first consider tuning motion masks. If those are already properly configured, a second Coral may be needed. 
-### OpenVino +### OpenVINO The OpenVINO detector type is able to run on: - 6th Gen Intel Platforms and newer that have an iGPU - x86 & Arm64 hosts with VPU Hardware (ex: Intel NCS2) +- Most modern AMD CPUs (though this is officially not supported by Intel) More information is available [in the detector docs](/configuration/object_detectors#openvino-detector) @@ -69,6 +69,7 @@ Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known | Intel i5 7500 | ~ 15 ms | Inference speeds on CPU were ~ 260 ms | | Intel i5 1135G7 | 10 - 15 ms | | | Intel i5 12600K | ~ 15 ms | Inference speeds on CPU were ~ 35 ms | +| Intel Arc A750 | ~ 4 ms | | ### TensorRT - Nvidia GPU @@ -87,6 +88,10 @@ Inference speeds will vary greatly depending on the GPU and the model used. | Quadro P400 2GB | 20 - 25 ms | | Quadro P2000 | ~ 12 ms | +#### AMD GPUs + +With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many AMD GPUs. + ### Community Supported: #### Nvidia Jetson @@ -95,15 +100,23 @@ Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powe Inference speed will vary depending on the YOLO model, jetson platform and jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time. -#### Rockchip SoC +#### Rockchip platform + +Frigate supports hardware video processing on all Rockchip boards. However, hardware object detection is only supported on these boards: -Frigate supports SBCs with the following Rockchip SoCs: -- RK3566/RK3568 -- RK3588/RK3588S -- RV1103/RV1106 - RK3562 +- RK3566 +- RK3568 +- RK3576 +- RK3588 -Using the yolov8n model and an Orange Pi 5 Plus with RK3588 SoC inference speeds vary between 20 - 25 ms. +The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms for yolo-nas s. 
+ +#### Hailo-8L PCIe + +Frigate supports the Hailo-8L M.2 card on any hardware but currently it is only tested on the Raspberry Pi 5 PCIe hat from the AI Kit. + +The inference time for the Hailo-8L chip at time of writing is around 17-21 ms for the SSD MobileNet Version 1 model. ## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version) diff --git a/docs/docs/frigate/index.md b/docs/docs/frigate/index.md index 08d8f1de6..73b3305e7 100644 --- a/docs/docs/frigate/index.md +++ b/docs/docs/frigate/index.md @@ -20,6 +20,10 @@ Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but ## Screenshots +![Live View](/img/live-view.png) + +![Review Items](/img/review-items.png) + ![Media Browser](/img/media_browser-min.png) ![Notification](/img/notification-min.png) diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md index fcdaa68ba..10c83b013 100644 --- a/docs/docs/frigate/installation.md +++ b/docs/docs/frigate/installation.md @@ -5,6 +5,12 @@ title: Installation Frigate is a Docker container that can be run on any Docker host including as a [HassOS Addon](https://www.home-assistant.io/addons/). Note that a Home Assistant Addon is **not** the same thing as the integration. The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant. +:::tip + +If you already have Frigate installed as a Home Assistant addon, check out the [getting started guide](../guides/getting_started#configuring-frigate) to configure Frigate. + +::: + ## Dependencies **MQTT broker (optional)** - An MQTT broker is optional with Frigate, but is required for the Home Assistant integration. If using Home Assistant, Frigate and Home Assistant must be connected to the same MQTT broker. @@ -13,7 +19,7 @@ Frigate is a Docker container that can be run on any Docker host including as a ### Operating System -Frigate runs best with docker installed on bare metal debian-based distributions.
For ideal performance, Frigate needs access to underlying hardware for the Coral and GPU devices. Running Frigate in a VM on top of Proxmox, ESXi, Virtualbox, etc. is not recommended. The virtualization layer often introduces a sizable amount of overhead for communication with Coral devices, but [not in all circumstances](https://github.com/blakeblackshear/frigate/discussions/1837). +Frigate runs best with Docker installed on bare metal Debian-based distributions. For ideal performance, Frigate needs low overhead access to underlying hardware for the Coral and GPU devices. Running Frigate in a VM on top of Proxmox, ESXi, Virtualbox, etc. is not recommended though [some users have had success with Proxmox](#proxmox). Windows is not officially supported, but some users have had success getting it to run under WSL or Virtualbox. Getting the GPU and/or Coral devices properly passed to Frigate may be difficult or impossible. Search previous discussions or issues for help. @@ -28,12 +34,22 @@ Frigate uses the following locations for read/write operations in the container. - `/tmp/cache`: Cache location for recording segments. Initial recordings are written here before being checked and converted to mp4 and moved to the recordings folder. Segments generated via the `clip.mp4` endpoints are also concatenated and processed here. It is recommended to use a [`tmpfs`](https://docs.docker.com/storage/tmpfs/) mount for this. - `/dev/shm`: Internal cache for raw decoded frames in shared memory. It is not recommended to modify this directory or map it with docker. The minimum size is impacted by the `shm-size` calculations below. +### Ports + +The following ports are used by Frigate and can be mapped via docker as required. 
+ +| Port | Description | +| ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `8971` | Authenticated UI and API access without TLS. Reverse proxies should use this port. | +| `5000` | Internal unauthenticated UI and API access. Access to this port should be limited. Intended to be used within the docker network for services that integrate with Frigate. | +| `8554` | RTSP restreaming. By default, these streams are unauthenticated. Authentication can be configured in go2rtc section of config. | +| `8555` | WebRTC connections for low latency live views. | + #### Common docker compose storage configurations Writing to a local disk or external USB drive: ```yaml -version: "3.9" services: frigate: ... @@ -47,9 +63,9 @@ services: ... ``` -:::caution +:::warning -Users of the Snapcraft build of Docker cannot use storage locations outside your $HOME folder. +Users of the Snapcraft build of Docker cannot use storage locations outside your $HOME folder. ::: @@ -57,23 +73,23 @@ Users of the Snapcraft build of Docker cannot use storage locations outside your Frigate utilizes shared memory to store frames during processing. The default `shm-size` provided by Docker is **64MB**. -The default shm size of **64MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in docker-compose). +The default shm size of **128MB** is fine for setups with **2 cameras** detecting at **720p**. 
If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in docker-compose). -The Frigate container also stores logs in shm, which can take up to **30MB**, so make sure to take this into account in your math as well. +The Frigate container also stores logs in shm, which can take up to **40MB**, so make sure to take this into account in your math as well. -You can calculate the necessary shm size for each camera with the following formula using the resolution specified for detect: +You can calculate the **minimum** shm size for each camera with the following formula using the resolution specified for detect: ```console # Replace and -$ python -c 'print("{:.2f}MB".format(( * * 1.5 * 9 + 270480) / 1048576))' +$ python -c 'print("{:.2f}MB".format(( * * 1.5 * 10 + 270480) / 1048576))' # Example for 1280x720 -$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 9 + 270480) / 1048576))' -12.12MB +$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 10 + 270480) / 1048576))' +13.44MB # Example for eight cameras detecting at 1280x720, including logs -$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 9 + 270480) / 1048576) * 8 + 30))' -126.99MB +$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 10 + 270480) / 1048576) * 8 + 40))' +136.99MB ``` The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration. 
@@ -84,6 +100,88 @@ By default, the Raspberry Pi limits the amount of memory available to the GPU. I Additionally, the USB Coral draws a considerable amount of power. If using any other USB devices such as an SSD, you will experience instability due to the Pi not providing enough power to USB devices. You will need to purchase an external USB hub with it's own power supply. Some have reported success with this (affiliate link). +### Hailo-8L + +The Hailo-8L is an M.2 card typically connected to a carrier board for PCIe, which then connects to the Raspberry Pi 5 as part of the AI Kit. However, it can also be used on other boards equipped with an M.2 M key edge connector. + +#### Installation + +For Raspberry Pi 5 users with the AI Kit, installation is straightforward. Simply follow this [guide](https://www.raspberrypi.com/documentation/accessories/ai-kit.html#ai-kit-installation) to install the driver and software. + +For other installations, follow these steps for installation: + +1. Install the driver from the [Hailo GitHub repository](https://github.com/hailo-ai/hailort-drivers). A convenient script for Linux is available to clone the repository, build the driver, and install it. +2. Copy or download [this script](https://github.com/blakeblackshear/frigate/blob/41c9b13d2fffce508b32dfc971fa529b49295fbd/docker/hailo8l/user_installation.sh). +3. Ensure it has execution permissions with `sudo chmod +x user_installation.sh` +4. 
Run the script with `./user_installation.sh` + +#### Setup + +To set up Frigate, follow the default installation instructions, but use a Docker image with the `-h8l` suffix, for example: `ghcr.io/blakeblackshear/frigate:stable-h8l` + +Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file: + +```yaml +devices: + - /dev/hailo0 +``` + +If you are using `docker run`, add this option to your command `--device /dev/hailo0` + +#### Configuration + +Finally, configure [hardware object detection](/configuration/object_detectors#hailo-8l) to complete the setup. + +### Rockchip platform + +Make sure that you use a linux distribution that comes with the rockchip BSP kernel 5.10 or 6.1 and necessary drivers (especially rkvdec2 and rknpu). To check, enter the following commands: + +``` +$ uname -r +5.10.xxx-rockchip # or 6.1.xxx; the -rockchip suffix is important +$ ls /dev/dri +by-path card0 card1 renderD128 renderD129 # should list renderD128 (VPU) and renderD129 (NPU) +$ sudo cat /sys/kernel/debug/rknpu/version +RKNPU driver: v0.9.2 # or later version +``` + +I recommend [Joshua Riek's Ubuntu for Rockchip](https://github.com/Joshua-Riek/ubuntu-rockchip), if your board is supported. + +#### Setup + +Follow Frigate's default installation instructions, but use a docker image with `-rk` suffix for example `ghcr.io/blakeblackshear/frigate:stable-rk`. + +Next, you need to grant docker permissions to access your hardware: + +- During the configuration process, you should run docker in privileged mode to avoid any errors due to insufficient permissions. To do so, add `privileged: true` to your `docker-compose.yml` file or the `--privileged` flag to your docker run command. +- After everything works, you should only grant necessary permissions to increase security. 
Disable the privileged mode and add the lines below to your `docker-compose.yml` file: + +```yaml +security_opt: + - apparmor=unconfined + - systempaths=unconfined +devices: + - /dev/dri + - /dev/dma_heap + - /dev/rga + - /dev/mpp_service +``` + +or add these options to your `docker run` command: + +``` +--security-opt systempaths=unconfined \ +--security-opt apparmor=unconfined \ +--device /dev/dri \ +--device /dev/dma_heap \ +--device /dev/rga \ +--device /dev/mpp_service +``` + +#### Configuration + +Next, you should configure [hardware object detection](/configuration/object_detectors#rockchip-platform) and [hardware video processing](/configuration/hardware_acceleration#rockchip-platform). + ## Docker Running in Docker with compose is the recommended install method. @@ -98,9 +196,10 @@ services: image: ghcr.io/blakeblackshear/frigate:stable shm_size: "64mb" # update for your cameras based on calculation above devices: - - /dev/bus/usb:/dev/bus/usb # passes the USB Coral, needs to be modified for other versions - - /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux - - /dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware + - /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions + - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux + - /dev/video11:/dev/video11 # For Raspberry Pi 4B + - /dev/dri/renderD128:/dev/dri/renderD128 # For intel hwaccel, needs to be updated for your hardware volumes: - /etc/localtime:/etc/localtime:ro - /path/to/your/config:/config @@ -110,7 +209,8 @@ services: tmpfs: size: 1000000000 ports: - - "5000:5000" + - "8971:8971" + # - "5000:5000" # Internal unauthenticated access. Expose carefully. 
- "8554:8554" # RTSP feeds - "8555:8555/tcp" # WebRTC over tcp - "8555:8555/udp" # WebRTC over udp @@ -132,7 +232,7 @@ docker run -d \ -v /path/to/your/config:/config \ -v /etc/localtime:/etc/localtime:ro \ -e FRIGATE_RTSP_PASSWORD='password' \ - -p 5000:5000 \ + -p 8971:8971 \ -p 8554:8554 \ -p 8555:8555/tcp \ -p 8555:8555/udp \ @@ -150,10 +250,12 @@ The community supported docker image tags for the current stable version are: - `stable-tensorrt-jp5` - Frigate build optimized for nvidia Jetson devices running Jetpack 5 - `stable-tensorrt-jp4` - Frigate build optimized for nvidia Jetson devices running Jetpack 4.6 - `stable-rk` - Frigate build for SBCs with Rockchip SoC +- `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector) + - `stable-h8l` - Frigate build for the Hailo-8L M.2 PCIe Raspberry Pi 5 hat ## Home Assistant Addon -:::caution +:::warning As of HomeAssistant OS 10.2 and Core 2023.6 defining separate network storage for media is supported. @@ -201,13 +303,22 @@ To install make sure you have the [community app plugin here](https://forums.unr ## Proxmox -It is recommended to run Frigate in LXC for maximum performance. See [this discussion](https://github.com/blakeblackshear/frigate/discussions/1111) for more information. +It is recommended to run Frigate in LXC, rather than in a VM, for maximum performance. The setup can be complex so be prepared to read the Proxmox and LXC documentation. Suggestions include: + +- For Intel-based hardware acceleration, to allow access to the `/dev/dri/renderD128` device with major number 226 and minor number 128, add the following lines to the `/etc/pve/lxc/.conf` LXC configuration: + - `lxc.cgroup2.devices.allow: c 226:128 rwm` + - `lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file` +- The LXC configuration will likely also need `features: fuse=1,nesting=1`.
This allows running a Docker container in an LXC container (`nesting`) and prevents duplicated files and wasted storage (`fuse`). +- Successfully passing hardware devices through multiple levels of containerization (LXC then Docker) can be difficult. Many people make devices like `/dev/dri/renderD128` world-readable in the host or run Frigate in a privileged LXC container. +- The virtualization layer often introduces a sizable amount of overhead for communication with Coral devices, but [not in all circumstances](https://github.com/blakeblackshear/frigate/discussions/1837). + +See the [Proxmox LXC discussion](https://github.com/blakeblackshear/frigate/discussions/5773) for more general information. ## ESXi For details on running Frigate using ESXi, please see the instructions [here](https://williamlam.com/2023/05/frigate-nvr-with-coral-tpu-igpu-passthrough-using-esxi-on-intel-nuc.html). -If you're running Frigate on a rack mounted server and want to passthough the Google Coral, [read this.](https://github.com/blakeblackshear/frigate/issues/305) +If you're running Frigate on a rack mounted server and want to passthrough the Google Coral, [read this.](https://github.com/blakeblackshear/frigate/issues/305) ## Synology NAS on DSM 7 @@ -239,7 +350,7 @@ There may be other services running on your NAS that are using the same ports th You need to configure 2 paths: -- The location of your config file in yaml format, this needs to be file and you need to go to the location of where your config.yml is located, this will be different depending on your NAS folder structure e.g. `/docker/frigate/config/config.yml` will mount to `/config/config.yml` within the container. +- The location of your config directory which will be different depending on your NAS folder structure e.g. `/docker/frigate/config` will mount to `/config` within the container. - The location on your NAS where the recordings will be saved this needs to be a folder e.g. 
`/docker/volumes/frigate-0-media` ![image](https://user-images.githubusercontent.com/4516296/232585872-44431d15-55e0-4004-b78b-1e512702b911.png) @@ -277,8 +388,8 @@ mkdir -p /share/Container/frigate/config # Copy the config file prepared in step 2 into the newly created config directory. cp path/to/your/config/file /share/Container/frigate/config # Create directory to host Frigate media files on QNAP file system. -# (if you have a surveilliance disk, create media directory on the surveilliance disk. -# Example command assumes share_vol2 is the surveilliance drive +# (if you have a surveillance disk, create media directory on the surveillance disk. +# Example command assumes share_vol2 is the surveillance drive mkdir -p /share/share_vol2/frigate/media # Create Frigate docker container. Replace shm-size value with the value from preparation step 3. # Also replace the time zone value for 'TZ' in the sample command. @@ -295,8 +406,7 @@ docker run \ --network=bridge \ --privileged \ --workdir=/opt/frigate \ - -p 1935:1935 \ - -p 5000:5000 \ + -p 8971:8971 \ -p 8554:8554 \ -p 8555:8555 \ -p 8555:8555/udp \ diff --git a/docs/docs/guides/configuring_go2rtc.md b/docs/docs/guides/configuring_go2rtc.md index 1279f9950..8316376f2 100644 --- a/docs/docs/guides/configuring_go2rtc.md +++ b/docs/docs/guides/configuring_go2rtc.md @@ -9,11 +9,11 @@ Use of the bundled go2rtc is optional. 
You can still configure FFmpeg to connect - WebRTC or MSE for live viewing with higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream - Live stream support for cameras in Home Assistant Integration -- RTSP (instead of RTMP) relay for use with other consumers to reduce the number of connections to your camera streams +- RTSP relay for use with other consumers to reduce the number of connections to your camera streams # Setup a go2rtc stream -First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#module-streams), not just rtsp. +First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. For the best experience, you should set the stream name under go2rtc to match the name of your camera so that Frigate will automatically map it and be able to use better live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#module-streams), not just rtsp. ```yaml go2rtc: @@ -22,65 +22,83 @@ go2rtc: - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 ``` -The easiest live view to get working is MSE. After adding this to the config, restart Frigate and try to watch the live stream by selecting MSE in the dropdown after clicking on the camera. 
+After adding this to the config, restart Frigate and try to watch the live stream for a single camera by clicking on it from the dashboard. It should look much clearer and more fluent than the original jsmpeg stream. + ### What if my video doesn't play? -If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration: +- Check Logs: + - Access the go2rtc logs in the Frigate UI under Logs in the sidebar. + - If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. -```yaml -go2rtc: - streams: - back: - - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 - - "ffmpeg:back#video=h264" -``` +- Check go2rtc Web Interface: if you don't see any errors in the logs, try viewing the camera through go2rtc's web interface. + - Navigate to port 1984 in your browser to access go2rtc's web interface. 
+ - If using Frigate through Home Assistant, enable the web interface at port 1984. + - If using Docker, forward port 1984 before accessing the web interface. + - Click `stream` for the specific camera to see if the camera's stream is being received. -Some camera streams may need to use the ffmpeg module in go2rtc. This has the downside of slower startup times, but has compatibility with more stream types. +- Check Video Codec: + - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported. + - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#codecs-madness) in go2rtc documentation. + - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. + ```yaml + go2rtc: + streams: + back: + - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 + - "ffmpeg:back#video=h264#hardware" + ``` -```yaml -go2rtc: - streams: - back: - - ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 -``` +- Switch to FFmpeg if needed: + - Some camera streams may need to use the ffmpeg module in go2rtc. This has the downside of slower startup times, but has compatibility with more stream types. + ```yaml + go2rtc: + streams: + back: + - ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 + ``` -If you can see the video but do not have audio, this is most likely because your camera's audio stream is not AAC. If possible, update your camera's audio settings to AAC. 
If your cameras do not support AAC audio, you will need to tell go2rtc to re-encode the audio to AAC on demand if you want audio. This will use additional CPU and add some latency. To add AAC audio on demand, you can update your go2rtc config as follows: + - If you can see the video but do not have audio, this is most likely because your camera's audio stream codec is not AAC. + - If possible, update your camera's audio settings to AAC in your camera's firmware. + - If your cameras do not support AAC audio, you will need to tell go2rtc to re-encode the audio to AAC on demand if you want audio. This will use additional CPU and add some latency. To add AAC audio on demand, you can update your go2rtc config as follows: + ```yaml + go2rtc: + streams: + back: + - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 + - "ffmpeg:back#audio=aac" + ``` -```yaml -go2rtc: - streams: - back: - - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 - - "ffmpeg:back#audio=aac" -``` + If you need to convert **both** the audio and video streams, you can use the following: -If you need to convert **both** the audio and video streams, you can use the following: + ```yaml + go2rtc: + streams: + back: + - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 + - "ffmpeg:back#video=h264#audio=aac#hardware" + ``` -```yaml -go2rtc: - streams: - back: - - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 - - "ffmpeg:back#video=h264#audio=aac" -``` + When using the ffmpeg module, you would add AAC audio like this: -When using the ffmpeg module, you would add AAC audio like this: + ```yaml + go2rtc: + streams: + back: + - "ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2#video=copy#audio=copy#audio=aac#hardware" + ``` -```yaml -go2rtc: - streams: - back: - - "ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2#video=copy#audio=copy#audio=aac" -``` 
+:::warning -:::caution - -To access the go2rtc stream externally when utilizing the Frigate Add-On (for instance through VLC), you must first enable the RTSP Restream port. You can do this by visiting the Frigate Add-On configuration page within Home Assistant and revealing the hidden options under the "Show disabled ports" section. +To access the go2rtc stream externally when utilizing the Frigate Add-On (for +instance through VLC), you must first enable the RTSP Restream port. +You can do this by visiting the Frigate Add-On configuration page within Home +Assistant and revealing the hidden options under the "Show disabled ports" +section. ::: ## Next steps 1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera). -1. You may also prefer to [setup WebRTC](/configuration/live#webrtc-extra-configuration) for slightly lower latency than MSE. Note that WebRTC only supports h264 and specific audio formats. +2. You may also prefer to [setup WebRTC](/configuration/live#webrtc-extra-configuration) for slightly lower latency than MSE. Note that WebRTC only supports h264 and specific audio formats and may require opening ports on your router. diff --git a/docs/docs/guides/getting_started.md b/docs/docs/guides/getting_started.md index 26d8eef26..908e0ce1b 100644 --- a/docs/docs/guides/getting_started.md +++ b/docs/docs/guides/getting_started.md @@ -5,9 +5,17 @@ title: Getting started # Getting Started +:::tip + +If you already have an environment with Linux and Docker installed, you can continue to [Installing Frigate](#installing-frigate) below. + +If you already have Frigate installed in Docker or as a Home Assistant addon, you can continue to [Configuring Frigate](#configuring-frigate) below. 
+ +::: + ## Setting up hardware -This section guides you through setting up a server with Debian Bookworm and Docker. If you already have an environment with Linux and Docker installed, you can continue to [Installing Frigate](#installing-frigate) below. +This section guides you through setting up a server with Debian Bookworm and Docker. ### Install Debian 12 (Bookworm) @@ -77,20 +85,19 @@ This section shows how to create a minimal directory structure for a Docker inst ### Setup directories -Frigate requires a valid config file to start. The following directory structure is the bare minimum to get started. Once Frigate is running, you can use the built-in config editor which supports config validation. +Frigate will create a config file if one does not exist on the initial startup. The following directory structure is the bare minimum to get started. Once Frigate is running, you can use the built-in config editor which supports config validation. ``` . ├── docker-compose.yml ├── config/ -│ └── config.yml └── storage/ ``` This will create the above structure: ```bash -mkdir storage config && touch docker-compose.yml config/config.yml +mkdir storage config && touch docker-compose.yml ``` If you are setting up Frigate on a Linux device via SSH, you can use [nano](https://itsfoss.com/nano-editor-guide/) to edit the following files. If you prefer to edit remote files with a full editor instead of a terminal, I recommend using [Visual Studio Code](https://code.visualstudio.com/) with the [Remote SSH extension](https://code.visualstudio.com/docs/remote/ssh-tutorial). 
@@ -117,27 +124,11 @@ services: tmpfs: size: 1000000000 ports: - - "5000:5000" + - "8971:8971" - "8554:8554" # RTSP feeds ``` -`config.yml` - -```yaml -mqtt: - enabled: False - -cameras: - dummy_camera: # <--- this will be changed to your actual camera later - enabled: False - ffmpeg: - inputs: - - path: rtsp://127.0.0.1:554/rtsp - roles: - - detect -``` - -Now you should be able to start Frigate by running `docker compose up -d` from within the folder containing `docker-compose.yml`. Frigate should now be accessible at `server_ip:5000` and you can finish the configuration using the built-in configuration editor. +Now you should be able to start Frigate by running `docker compose up -d` from within the folder containing `docker-compose.yml`. On startup, an admin user and password will be created and outputted in the logs. You can see this by running `docker logs frigate`. Frigate should now be accessible at `https://server_ip:8971` where you can login with the `admin` user and finish the configuration using the built-in configuration editor. ## Configuring Frigate @@ -165,7 +156,7 @@ cameras: ### Step 2: Start Frigate -At this point you should be able to start Frigate and see the the video feed in the UI. +At this point you should be able to start Frigate and see the video feed in the UI. If you get an error image from the camera, this means ffmpeg was not able to get the video feed from your camera. Check the logs for error messages from ffmpeg. The default ffmpeg arguments are designed to work with H264 RTSP cameras that support TCP connections. @@ -237,7 +228,7 @@ cameras: More details on available detectors can be found [here](../configuration/object_detectors.md). -Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they will need to be added according to the [configuration file reference](../configuration/index.md#full-configuration-reference). 
+Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they will need to be added according to the [configuration file reference](../configuration/reference.md). ### Step 5: Setup motion masks @@ -245,9 +236,9 @@ Now that you have optimized your configuration for decoding the video stream, yo Now that you know where you need to mask, use the "Mask & Zone creator" in the options pane to generate the coordinates needed for your config file. More information about masks can be found [here](../configuration/masks.md). -:::caution +:::warning -Note that motion masks should not be used to mark out areas where you do not want objects to be detected or to reduce false positives. They do not alter the image sent to object detection, so you can still get events and detections in areas with motion masks. These only prevent motion in these areas from initiating object detection. +Note that motion masks should not be used to mark out areas where you do not want objects to be detected or to reduce false positives. They do not alter the image sent to object detection, so you can still get tracked objects, alerts, and detections in areas with motion masks. These only prevent motion in these areas from initiating object detection. ::: @@ -274,13 +265,11 @@ cameras: - 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432 ``` -### Step 6: Enable recording and/or snapshots +### Step 6: Enable recordings -In order to see Events in the Frigate UI, either snapshots or record will need to be enabled. +In order to review activity in the Frigate UI, recordings need to be enabled. -#### Record - -To enable recording video, add the `record` role to a stream and enable it in the config. If record is disabled in the config, turning it on via the UI will not have any effect. +To enable recording video, add the `record` role to a stream and enable it in the config. 
If record is disabled in the config, it won't be possible to enable it in the UI. ```yaml mqtt: ... @@ -305,31 +294,26 @@ cameras: If you don't have separate streams for detect and record, you would just add the record role to the list on the first input. -By default, Frigate will retain video of all events for 10 days. The full set of options for recording can be found [here](../configuration/index.md#full-configuration-reference). +:::note -#### Snapshots +If you only define one stream in your `inputs` and do not assign a `detect` role to it, Frigate will automatically assign it the `detect` role. Frigate will always decode a stream to support motion detection, Birdseye, the API image endpoints, and other features, even if you have disabled object detection with `enabled: False` in your config's `detect` section. -To enable snapshots of your events, just enable it in the config. Snapshots are taken from the detect stream because it is the only stream decoded. +If you only plan to use Frigate for recording, it is still recommended to define a `detect` role for a low resolution stream to minimize resource usage from the required stream decoding. -```yaml -mqtt: ... +::: -detectors: ... +By default, Frigate will retain video of all tracked objects for 10 days. The full set of options for recording can be found [here](../configuration/reference.md). -cameras: - name_of_your_camera: ... - detect: ... - record: ... - snapshots: # <----- Enable snapshots - enabled: True - motion: ... -``` +### Step 7: Complete config -By default, Frigate will retain snapshots of all events for 10 days. The full set of options for snapshots can be found [here](../configuration/index.md#full-configuration-reference). +At this point you have a complete config with basic functionality. You can see the [full config reference](../configuration/reference.md) for a complete list of configuration options. 
-### Step 7: Follow up guides +### Follow up -Now that you have a working install, you can use the following guides for additional features: +Now that you have a working install, you can use the following documentation for additional features: -1. [Configuring go2rtc](configuring_go2rtc) - Additional live view options and RTSP relay -2. [Home Assistant Integration](../integrations/home-assistant.md) - Integrate with Home Assistant +1. [Configuring go2rtc](configuring_go2rtc.md) - Additional live view options and RTSP relay +2. [Zones](../configuration/zones.md) +3. [Review](../configuration/review.md) +4. [Masks](../configuration/masks.md) +5. [Home Assistant Integration](../integrations/home-assistant.md) - Integrate with Home Assistant diff --git a/docs/docs/guides/ha_network_storage.md b/docs/docs/guides/ha_network_storage.md index 498dd7d0c..fe00311ab 100644 --- a/docs/docs/guides/ha_network_storage.md +++ b/docs/docs/guides/ha_network_storage.md @@ -1,21 +1,22 @@ --- id: ha_network_storage -title: HA Network Storage +title: Home Assistant network storage --- -As of HomeAsisstant Core 2023.6, Network Mounted Storage is supported for addons. +As of Home Assistant Core 2023.6, Network Mounted Storage is supported for addons. ## Setting Up Remote Storage For Frigate ### Prerequisites - HA Core 2023.6 or newer is installed -- Running HA OS 10.2 or newer OR Running Supervised with latest os-agent installed (this is required for superivsed install) +- Running HA OS 10.2 or newer OR Running Supervised with latest os-agent installed (this is required for supervised install) ### Initial Setup 1. Stop the Frigate addon 2. 
Update your [config](configuration/index.md) so the DB is stored in the /config directory by adding:
+
 ```yaml
 database:
   path: /config/frigate.db
diff --git a/docs/docs/guides/ha_notifications.md b/docs/docs/guides/ha_notifications.md
index cf3e03349..a92dab10f 100644
--- a/docs/docs/guides/ha_notifications.md
+++ b/docs/docs/guides/ha_notifications.md
@@ -5,19 +5,19 @@ title: Home Assistant notifications
 
 The best way to get started with notifications for Frigate is to use the [Blueprint](https://community.home-assistant.io/t/frigate-mobile-app-notifications-2-0/559732). You can use the yaml generated from the Blueprint as a starting point and customize from there.
 
-It is generally recommended to trigger notifications based on the `frigate/events` mqtt topic. This provides the event_id needed to fetch [thumbnails/snapshots/clips](../integrations/home-assistant.md#notification-api) and other useful information to customize when and where you want to receive alerts. The data is published in the form of a change feed, which means you can reference the "previous state" of the object in the `before` section and the "current state" of the object in the `after` section. You can see an example [here](../integrations/mqtt.md#frigateevents).
+It is generally recommended to trigger notifications based on the `frigate/reviews` mqtt topic. This provides the event_id(s) needed to fetch [thumbnails/snapshots/clips](../integrations/home-assistant.md#notification-api) and other useful information to customize when and where you want to receive alerts. The data is published in the form of a change feed, which means you can reference the "previous state" of the object in the `before` section and the "current state" of the object in the `after` section. You can see an example [here](../integrations/mqtt.md#frigatereviews).
 
-Here is a simple example of a notification automation of events which will update the existing notification for each change. 
This means the image you see in the notification will update as Frigate finds a "better" image. +Here is a simple example of a notification automation of tracked objects which will update the existing notification for each change. This means the image you see in the notification will update as Frigate finds a "better" image. ```yaml automation: - - alias: Notify of events + - alias: Notify of tracked object trigger: platform: mqtt topic: frigate/events action: - service: notify.mobile_app_pixel_3 - data_template: + data: message: 'A {{trigger.payload_json["after"]["label"]}} was detected.' data: image: 'https://your.public.hass.address.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}/thumbnail.jpg?format=android' @@ -33,48 +33,18 @@ automation: description: "" trigger: - platform: mqtt - topic: frigate/events - payload: new - value_template: "{{ value_json.type }}" + topic: frigate/reviews + payload: alert + value_template: "{{ value_json['after']['severity'] }}" action: - service: notify.mobile_app_iphone data: - message: 'A {{trigger.payload_json["after"]["label"]}} was detected.' + message: 'A {{trigger.payload_json["after"]["data"]["objects"] | sort | join(", ") | title}} was detected.' data: image: >- - https://your.public.hass.address.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}/thumbnail.jpg + https://your.public.hass.address.com/api/frigate/notifications/{{trigger.payload_json["after"]["data"]["detections"][0]}}/thumbnail.jpg tag: '{{trigger.payload_json["after"]["id"]}}' when: '{{trigger.payload_json["after"]["start_time"]|int}}' entity_id: camera.{{trigger.payload_json["after"]["camera"] | replace("-","_") | lower}} mode: single ``` - -## Conditions - -Conditions with the `before` and `after` values allow a high degree of customization for automations. 
- -When a person enters a zone named yard - -```yaml -condition: - - "{{ trigger.payload_json['after']['label'] == 'person' }}" - - "{{ 'yard' in trigger.payload_json['after']['entered_zones'] }}" -``` - -When a person leaves a zone named yard - -```yaml -condition: - - "{{ trigger.payload_json['after']['label'] == 'person' }}" - - "{{ 'yard' in trigger.payload_json['before']['current_zones'] }}" - - "{{ not 'yard' in trigger.payload_json['after']['current_zones'] }}" -``` - -Notify for dogs in the front with a high top score - -```yaml -condition: - - "{{ trigger.payload_json['after']['label'] == 'dog' }}" - - "{{ trigger.payload_json['after']['camera'] == 'front' }}" - - "{{ trigger.payload_json['after']['top_score'] > 0.98 }}" -``` diff --git a/docs/docs/guides/reverse_proxy.md b/docs/docs/guides/reverse_proxy.md index 479df53e8..d408a1444 100644 --- a/docs/docs/guides/reverse_proxy.md +++ b/docs/docs/guides/reverse_proxy.md @@ -3,25 +3,38 @@ id: reverse_proxy title: Setting up a reverse proxy --- -This guide outlines the basic configuration steps needed to expose your Frigate UI to the internet. -A common way of accomplishing this is to use a reverse proxy webserver between your router and your Frigate instance. -A reverse proxy accepts HTTP requests from the public internet and redirects them transparently to internal webserver(s) on your network. +This guide outlines the basic configuration steps needed to set up a reverse proxy in front of your Frigate instance. -The suggested steps are: +A reverse proxy is typically needed if you want to set up Frigate on a custom URL, on a subdomain, or on a host serving multiple sites. It could also be used to set up your own authentication provider or for more advanced HTTP routing. 
-- **Configure** a 'proxy' HTTP webserver (such as [Apache2](https://httpd.apache.org/docs/current/) or [NPM](https://github.com/NginxProxyManager/nginx-proxy-manager)) and only expose ports 80/443 from this webserver to the internet
-- **Encrypt** content from the proxy webserver by installing SSL (such as with [Let's Encrypt](https://letsencrypt.org/)). Note that SSL is then not required on your Frigate webserver as the proxy encrypts all requests for you
-- **Restrict** access to your Frigate instance at the proxy using, for example, password authentication
+Before setting up a reverse proxy, check if any of the built-in functionality in Frigate suits your needs:
 
+|Topic|Docs|
+|-|-|
+|TLS|Please see the `tls` [configuration option](../configuration/tls.md)|
+|Authentication|Please see the [authentication](../configuration/authentication.md) documentation|
+|IPv6|[Enabling IPv6](../configuration/advanced.md#enabling-ipv6)|
 
-:::caution
-A reverse proxy can be used to secure access to an internal webserver but the user will be entirely reliant
-on the steps they have taken. You must ensure you are following security best practices.
-This page does not attempt to outline the specific steps needed to secure your internal website.
+**Note about TLS**
+When using a reverse proxy, the TLS session is usually terminated at the proxy, sending the internal request over plain HTTP. If this is the desired behavior, TLS must first be disabled in Frigate, or you will encounter an HTTP 400 error: "The plain HTTP request was sent to HTTPS port."
+To disable TLS, set the following in your Frigate configuration:
 
+```yml
+tls:
+  enabled: false
+```
+
+:::warning
+A reverse proxy can be used to secure access to an internal web server, but the user will be entirely reliant on the steps they have taken. You must ensure you are following security best practices.
+This page does not attempt to outline the specific steps needed to secure your internal website. 
Please use your own knowledge to assess and vet the reverse proxy software before you install anything on your system. ::: -There are several technologies available to implement reverse proxies. This document currently suggests one, using Apache2, -and the community is invited to document others through a contribution to this page. +## Proxies + +There are many solutions available to implement reverse proxies and the community is invited to help out documenting others through a contribution to this page. + +* [Apache2](#apache2-reverse-proxy) +* [Nginx](#nginx-reverse-proxy) +* [Traefik](#traefik-reverse-proxy) ## Apache2 Reverse Proxy @@ -38,20 +51,20 @@ Here we access Frigate via https://cctv.mydomain.co.uk ServerName cctv.mydomain.co.uk ProxyPreserveHost On - ProxyPass "/" "http://frigatepi.local:5000/" - ProxyPassReverse "/" "http://frigatepi.local:5000/" + ProxyPass "/" "http://frigatepi.local:8971/" + ProxyPassReverse "/" "http://frigatepi.local:8971/" - ProxyPass /ws ws://frigatepi.local:5000/ws - ProxyPassReverse /ws ws://frigatepi.local:5000/ws + ProxyPass /ws ws://frigatepi.local:8971/ws + ProxyPassReverse /ws ws://frigatepi.local:8971/ws - ProxyPass /live/ ws://frigatepi.local:5000/live/ - ProxyPassReverse /live/ ws://frigatepi.local:5000/live/ + ProxyPass /live/ ws://frigatepi.local:8971/live/ + ProxyPassReverse /live/ ws://frigatepi.local:8971/live/ RewriteEngine on RewriteCond %{HTTP:Upgrade} =websocket [NC] - RewriteRule /(.*) ws://frigatepi.local:5000/$1 [P,L] + RewriteRule /(.*) ws://frigatepi.local:8971/$1 [P,L] RewriteCond %{HTTP:Upgrade} !=websocket [NC] - RewriteRule /(.*) http://frigatepi.local:5000/$1 [P,L] + RewriteRule /(.*) http://frigatepi.local:8971/$1 [P,L] ``` @@ -87,11 +100,11 @@ There are many ways to authenticate a website but a straightforward approach is ## Nginx Reverse Proxy -This method shows a working example for subdomain type reverse proxy with SSL enabled. 
+This method shows a working example for subdomain type reverse proxy with SSL enabled. ### Setup server and port to reverse proxy -This is set in `$server` and `$port` this should match your ports you have exposed to your docker container. Optionally you listen on port `443` and enable `SSL` +This is set in `$server` and `$port` this should match your ports you have exposed to your docker container. Optionally you listen on port `443` and enable `SSL` ``` # ------------------------------------------------------------ @@ -101,7 +114,7 @@ This is set in `$server` and `$port` this should match your ports you have expos server { set $forward_scheme http; set $server "192.168.100.2"; # FRIGATE SERVER LOCATION - set $port 5000; + set $port 8971; listen 80; listen 443 ssl http2; @@ -112,7 +125,7 @@ server { ### Setup SSL (optional) -This section points to your SSL files, the example below shows locations to a default Lets Encrypt SSL certificate. +This section points to your SSL files, the example below shows locations to a default Lets Encrypt SSL certificate. ``` # Let's Encrypt SSL @@ -122,8 +135,7 @@ This section points to your SSL files, the example below shows locations to a de ssl_certificate_key /etc/letsencrypt/live/npm-1/privkey.pem; ``` - -### Setup reverse proxy settings +### Setup reverse proxy settings The settings below enabled connection upgrade, sets up logging (optional) and proxies everything from the `/` context to the docker host and port specified earlier in the configuration @@ -142,3 +154,26 @@ The settings below enabled connection upgrade, sets up logging (optional) and pr } ``` + +## Traefik Reverse Proxy + +This example shows how to add a `label` to the Frigate Docker compose file, enabling Traefik to automatically discover your Frigate instance. 
+Before using the example below, you must first set up Traefik with the [Docker provider](https://doc.traefik.io/traefik/providers/docker/) + +```yml +services: + frigate: + container_name: frigate + image: ghcr.io/blakeblackshear/frigate:stable + ... + ... + labels: + - "traefik.enable=true" + - "traefik.http.services.frigate.loadbalancer.server.port=8971" + - "traefik.http.routers.frigate.rule=Host(`traefik.example.com`)" +``` + +The above configuration will create a "service" in Traefik, automatically adding your container's IP on port 8971 as a backend. +It will also add a router, routing requests to "traefik.example.com" to your local container. + +Note that with this approach, you don't need to expose any ports for the Frigate instance since all traffic will be routed over the internal Docker network. diff --git a/docs/docs/guides/stationary_objects.md b/docs/docs/guides/stationary_objects.md deleted file mode 100644 index 5d45e58c5..000000000 --- a/docs/docs/guides/stationary_objects.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -id: stationary_objects -title: Avoiding stationary objects ---- - -Many people use Frigate to detect cars entering their driveway, and they often run into an issue with repeated notifications or events of a parked car being repeatedly detected over the course of multiple days (for example if the car is lost at night and detected again the following morning). - -You can use zones to restrict events and notifications to objects that have entered specific areas. - -:::caution - -It is not recommended to use masks to try and eliminate parked cars in your driveway. Masks are designed to prevent motion from triggering object detection and/or to indicate areas that are guaranteed false positives. - -Frigate is designed to track objects as they move and over-masking can prevent it from knowing that an object in the current frame is the same as the previous frame. 
You want Frigate to detect objects everywhere and configure your events and alerts to be based on the location of the object with zones. - -::: - -:::info - -Once a vehicle crosses the entrance into the parking area, that event will stay `In Progress` until it is no longer seen in the frame. Frigate is designed to have an event last as long as an object is visible in the frame, an event being `In Progress` does not mean the event is being constantly recorded. You can define the recording behavior by adjusting the [recording retention settings](../configuration/record.md). - -::: - -To only be notified of cars that enter your driveway from the street, you could create multiple zones that cover your driveway. For cars, you would only notify if `entered_zones` from the events MQTT topic has more than 1 zone. - -See [this example](../configuration/zones.md#restricting-zones-to-specific-objects) from the Zones documentation to see how to restrict zones to certain object types. - -![Driveway Zones](/img/driveway_zones-min.png) - -To limit snapshots and events, you can list the zone for the entrance of your driveway under `required_zones` in your configuration file. Example below. - -```yaml -camera: - record: - events: - required_zones: - - zone_2 - zones: - zone_1: - coordinates: ... (parking area) - zone_2: - coordinates: ... (entrance to driveway) -``` diff --git a/docs/docs/integrations/api.md b/docs/docs/integrations/api.md index 20877bb6f..e69de29bb 100644 --- a/docs/docs/integrations/api.md +++ b/docs/docs/integrations/api.md @@ -1,384 +0,0 @@ ---- -id: api -title: HTTP API ---- - -A web server is available on port 5000 with the following endpoints. - -### `GET /api/` - -An mjpeg stream for debugging. Keep in mind the mjpeg endpoint is for debugging only and will put additional load on the system when in use. 
- -Accepts the following query string parameters: - -| param | Type | Description | -| ----------- | ---- | ------------------------------------------------------------------ | -| `fps` | int | Frame rate | -| `h` | int | Height in pixels | -| `bbox` | int | Show bounding boxes for detected objects (0 or 1) | -| `timestamp` | int | Print the timestamp in the upper left (0 or 1) | -| `zones` | int | Draw the zones on the image (0 or 1) | -| `mask` | int | Overlay the mask on the image (0 or 1) | -| `motion` | int | Draw blue boxes for areas with detected motion (0 or 1) | -| `regions` | int | Draw green boxes for areas where object detection was run (0 or 1) | - -You can access a higher resolution mjpeg stream by appending `h=height-in-pixels` to the endpoint. For example `http://localhost:5000/api/back?h=1080`. You can also increase the FPS by appending `fps=frame-rate` to the URL such as `http://localhost:5000/api/back?fps=10` or both with `?fps=10&h=1000`. - -### `GET /api//latest.jpg[?h=300]` - -The most recent frame that Frigate has finished processing. It is a full resolution image by default. - -Accepts the following query string parameters: - -| param | Type | Description | -| ----------- | ---- | ------------------------------------------------------------------ | -| `h` | int | Height in pixels | -| `bbox` | int | Show bounding boxes for detected objects (0 or 1) | -| `timestamp` | int | Print the timestamp in the upper left (0 or 1) | -| `zones` | int | Draw the zones on the image (0 or 1) | -| `mask` | int | Overlay the mask on the image (0 or 1) | -| `motion` | int | Draw blue boxes for areas with detected motion (0 or 1) | -| `regions` | int | Draw green boxes for areas where object detection was run (0 or 1) | -| `quality` | int | Jpeg encoding quality (0-100). Defaults to 70. 
| - -Example parameters: - -- `h=300`: resizes the image to 300 pixes tall - -### `GET /api/stats` - -Contains some granular debug info that can be used for sensors in Home Assistant. - -Sample response: - -```json -{ - /* Per Camera Stats */ - "back": { - /*************** - * Frames per second being consumed from your camera. If this is higher - * than it is supposed to be, you should set -r FPS in your input_args. - * camera_fps = process_fps + skipped_fps - ***************/ - "camera_fps": 5.0, - /*************** - * Number of times detection is run per second. This can be higher than - * your camera FPS because Frigate often looks at the same frame multiple times - * or in multiple locations - ***************/ - "detection_fps": 1.5, - /*************** - * PID for the ffmpeg process that consumes this camera - ***************/ - "capture_pid": 27, - /*************** - * PID for the process that runs detection for this camera - ***************/ - "pid": 34, - /*************** - * Frames per second being processed by Frigate. - ***************/ - "process_fps": 5.1, - /*************** - * Frames per second skip for processing by Frigate. - ***************/ - "skipped_fps": 0.0 - }, - /*************** - * Sum of detection_fps across all cameras and detectors. - * This should be the sum of all detection_fps values from cameras. - ***************/ - "detection_fps": 5.0, - /* Detectors Stats */ - "detectors": { - "coral": { - /*************** - * Timestamp when object detection started. If this value stays non-zero and constant - * for a long time, that means the detection process is stuck. - ***************/ - "detection_start": 0.0, - /*************** - * Time spent running object detection in milliseconds. - ***************/ - "inference_speed": 10.48, - /*************** - * PID for the shared process that runs object detection on the Coral. 
- ***************/ - "pid": 25321 - } - }, - "service": { - /* Uptime in seconds */ - "uptime": 10, - "version": "0.10.1-8883709", - "latest_version": "0.10.1", - /* Storage data in MB for important locations */ - "storage": { - "/media/frigate/clips": { - "total": 1000, - "used": 700, - "free": 300, - "mnt_type": "ext4" - }, - "/media/frigate/recordings": { - "total": 1000, - "used": 700, - "free": 300, - "mnt_type": "ext4" - }, - "/tmp/cache": { - "total": 256, - "used": 100, - "free": 156, - "mnt_type": "tmpfs" - }, - "/dev/shm": { - "total": 256, - "used": 100, - "free": 156, - "mnt_type": "tmpfs" - } - } - } -} -``` - -### `GET /api/config` - -A json representation of your configuration - -### `GET /api/version` - -Version info - -### `GET /api/events` - -Events from the database. Accepts the following query string parameters: - -| param | Type | Description | -| -------------------- | ----- | ----------------------------------------------------- | -| `before` | int | Epoch time | -| `after` | int | Epoch time | -| `cameras` | str | , separated list of cameras | -| `labels` | str | , separated list of labels | -| `zones` | str | , separated list of zones | -| `limit` | int | Limit the number of events returned | -| `has_snapshot` | int | Filter to events that have snapshots (0 or 1) | -| `has_clip` | int | Filter to events that have clips (0 or 1) | -| `include_thumbnails` | int | Include thumbnails in the response (0 or 1) | -| `in_progress` | int | Limit to events in progress (0 or 1) | -| `time_range` | str | Time range in format after,before (00:00,24:00) | -| `timezone` | str | Timezone to use for time range | -| `min_score` | float | Minimum score of the event | -| `max_score` | float | Maximum score of the event | -| `is_submitted` | int | Filter events that are submitted to Frigate+ (0 or 1) | -| `min_length` | float | Minimum length of the event | -| `max_length` | float | Maximum length of the event | - -### `GET /api/timeline` - -Timeline of key 
moments of an event(s) from the database. Accepts the following query string parameters: - -| param | Type | Description | -| ----------- | ---- | ----------------------------------- | -| `camera` | str | Name of camera | -| `source_id` | str | ID of tracked object | -| `limit` | int | Limit the number of events returned | - -### `GET /api/events/summary` - -Returns summary data for events in the database. Used by the Home Assistant integration. - -### `GET /api/events/` - -Returns data for a single event. - -### `DELETE /api/events/` - -Permanently deletes the event along with any clips/snapshots. - -### `POST /api/events//retain` - -Sets retain to true for the event id. - -### `POST /api/events//plus` - -Submits the snapshot of the event to Frigate+ for labeling. - -| param | Type | Description | -| -------------------- | ---- | ---------------------------------- | -| `include_annotation` | int | Submit annotation to Frigate+ too. | - -### `PUT /api/events//false_positive` - -Submits the snapshot of the event to Frigate+ for labeling and adds the detection as a false positive. - -### `DELETE /api/events//retain` - -Sets retain to false for the event id (event may be deleted quickly after removing). - -### `POST /api/events//sub_label` - -Set a sub label for an event. For example to update `person` -> `person's name` if they were recognized with facial recognition. -Sub labels must be 100 characters or shorter. - -```json -{ - "subLabel": "some_string", - "subLabelScore": 0.79, -} -``` - -### `GET /api/events//thumbnail.jpg` - -Returns a thumbnail for the event id optimized for notifications. Works while the event is in progress and after completion. Passing `?format=android` will convert the thumbnail to 2:1 aspect ratio. - -### `GET /api//