From f56668e4676232d9cd97b86770ae378e5e9121e4 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sat, 1 Mar 2025 16:09:41 -0700 Subject: [PATCH 01/12] Update d-fine documentation (#16881) --- docs/docs/configuration/object_detectors.md | 46 ++++++++++++--------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index bc76779cb..37ce86b07 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -562,30 +562,15 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl #### D-FINE -[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. +[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the YOLO-NAS model for use in Frigate. -To export as ONNX: +:::warning -1. Clone: https://github.com/Peterande/D-FINE and install all dependencies. -2. Select and download a checkpoint from the [readme](https://github.com/Peterande/D-FINE). -3. Modify line 58 of `tools/deployment/export_onnx.py` and change batch size to 1: `data = torch.rand(1, 3, 640, 640)` -4. Run the export, making sure you select the right config, for your checkpoint. - -Example: - -``` -python3 tools/deployment/export_onnx.py -c configs/dfine/objects365/dfine_hgnetv2_m_obj2coco.yml -r output/dfine_m_obj2coco.pth -``` - -:::tip - -Model export has only been tested on Linux (or WSL2). Not all dependencies are in `requirements.txt`. Some live in the deployment folder, and some are still missing entirely and must be installed manually. - -Make sure you change the batch size to 1 before exporting. +D-FINE is currently not supported on OpenVINO ::: -After placing the downloaded onnx model in your config folder, you can use the following configuration: +After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration: ```yaml detectors: @@ -784,6 +769,29 @@ Some model types are not included in Frigate by default. Here are some tips for getting different model types +### Downloading D-FINE Model + +To export as ONNX: + +1. Clone: https://github.com/Peterande/D-FINE and install all dependencies. +2. Select and download a checkpoint from the [readme](https://github.com/Peterande/D-FINE). +3. Modify line 58 of `tools/deployment/export_onnx.py` and change batch size to 1: `data = torch.rand(1, 3, 640, 640)` +4. Run the export, making sure you select the right config, for your checkpoint. + +Example: + +``` +python3 tools/deployment/export_onnx.py -c configs/dfine/objects365/dfine_hgnetv2_m_obj2coco.yml -r output/dfine_m_obj2coco.pth +``` + +:::tip + +Model export has only been tested on Linux (or WSL2). Not all dependencies are in `requirements.txt`. Some live in the deployment folder, and some are still missing entirely and must be installed manually. + +Make sure you change the batch size to 1 before exporting. 
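For concreteness, the edit in step 3 is a one-line change to the export script's dummy tracing input (a sketch — the exact line number and variable name may drift between D-FINE releases):

```python
# tools/deployment/export_onnx.py, around line 58 at the time of writing.
# The tensor used to trace the model must have batch size 1 for Frigate:
data = torch.rand(1, 3, 640, 640)  # (batch, channels, height, width)
```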
+ +::: + ### Downloading YOLO-NAS Model You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb). From 4e03efaba99d29c57f4ebb8c4ce0ac8df7a93803 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sun, 2 Mar 2025 08:26:59 -0700 Subject: [PATCH 02/12] Disable hailort log (#16888) --- docker/main/Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index 8dee8e642..674add58e 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -225,6 +225,9 @@ ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1 # Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html ENV OPENCV_FFMPEG_LOGLEVEL=8 +# Set HailoRT to disable logging +ENV HAILORT_LOGGER_PATH=NONE + ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}" # Install dependencies From b8f4cb5435afee3e9cd87202cd798cf9ce964420 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sun, 2 Mar 2025 09:30:18 -0700 Subject: [PATCH 03/12] Fix docs (#16889) --- docs/docs/configuration/object_detectors.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index 37ce86b07..531ef5108 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -562,7 +562,7 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl #### D-FINE -[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the YOLO-NAS model for use in Frigate. +[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate. 
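A minimal configuration pairing the downloaded model with the `onnx` detector might look like this (a sketch; the `dfine` model type key, input size, and filename are assumptions consistent with the export example above):

```yaml
detectors:
  onnx:
    type: onnx

model:
  model_type: dfine
  width: 640   # must match the size used when exporting
  height: 640
  input_tensor: nchw
  input_dtype: float
  path: /config/model_cache/dfine_m_obj2coco.onnx
  labelmap_path: /labelmap/coco-80.txt
```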
:::warning From 0128ec2ba60ac5229b8f88c3c271dc6e078da4c4 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sun, 2 Mar 2025 20:46:46 -0700 Subject: [PATCH 04/12] Upgrade RocM to 6.3.3 (#16900) * Simplify rocm install and update to 6.3.1 * Build out more necessary packages * Update to 6.3.3 * Set bake version * Fix typo * Ensure NHWC is used * Reset dev changes * Write to cache --- .github/workflows/ci.yml | 1 + docker/rocm/Dockerfile | 79 +-- docker/rocm/migraphx/CMakeLists.txt | 26 - docker/rocm/migraphx/migraphx_py.cpp | 582 ----------------------- docker/rocm/requirements-wheels-rocm.txt | 2 +- docker/rocm/rocm-pin-600 | 3 - docker/rocm/rocm.hcl | 10 +- docker/rocm/rocm.list | 1 - frigate/detectors/plugins/onnx.py | 2 +- 9 files changed, 36 insertions(+), 670 deletions(-) delete mode 100644 docker/rocm/migraphx/CMakeLists.txt delete mode 100644 docker/rocm/migraphx/migraphx_py.cpp delete mode 100644 docker/rocm/rocm-pin-600 delete mode 100644 docker/rocm/rocm.list diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9a666b897..5b787b273 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -175,6 +175,7 @@ jobs: files: docker/rocm/rocm.hcl set: | rocm.tags=${{ steps.setup.outputs.image-name }}-rocm + *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-rocm,mode=max *.cache-from=type=gha arm64_extra_builds: runs-on: ubuntu-22.04-arm diff --git a/docker/rocm/Dockerfile b/docker/rocm/Dockerfile index 34c7efffb..78f91b96f 100644 --- a/docker/rocm/Dockerfile +++ b/docker/rocm/Dockerfile @@ -2,79 +2,49 @@ # https://askubuntu.com/questions/972516/debian-frontend-environment-variable ARG DEBIAN_FRONTEND=noninteractive -ARG ROCM=5.7.3 +ARG ROCM=6.3.3 ARG AMDGPU=gfx900 ARG HSA_OVERRIDE_GFX_VERSION ARG HSA_OVERRIDE ####################################################################### -FROM ubuntu:focal as rocm +FROM wget AS rocm ARG ROCM +ARG AMDGPU -RUN apt-get update && apt-get -y upgrade -RUN apt-get -y install gnupg wget - -RUN mkdir --parents --mode=0755 /etc/apt/keyrings - -RUN wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | tee /etc/apt/keyrings/rocm.gpg > /dev/null -COPY docker/rocm/rocm.list /etc/apt/sources.list.d/ -COPY docker/rocm/rocm-pin-600 /etc/apt/preferences.d/ - -RUN apt-get update - -RUN apt-get -y install --no-install-recommends migraphx hipfft roctracer -RUN apt-get -y install --no-install-recommends migraphx-dev +RUN apt update && \ + apt install -y wget gpg && \ + wget -O rocm.deb https://repo.radeon.com/amdgpu-install/$ROCM/ubuntu/jammy/amdgpu-install_6.3.60303-1_all.deb && \ + apt install -y ./rocm.deb && \ + apt update && \ + apt install -y rocm RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib -RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ +RUN cd /opt/rocm-$ROCM/lib && \ + cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \ + mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib && \ + cp -dpr migraphx/lib/* /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/ RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf 
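Because the final image now ships only the subset of ROCm libraries copied above, plus the `ld.so.conf.d` entry, a quick way to confirm the loader and GPU are wired up in a running container is a check along these lines (container name is an assumption):

```sh
# Libraries registered with the dynamic loader
docker exec -it frigate ldconfig -p | grep -i migraphx
# GPU agent visible to ROCm, e.g. gfx900 / gfx1030
docker exec -it frigate /opt/rocm/bin/rocminfo | grep gfx
```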
-####################################################################### -FROM --platform=linux/amd64 debian:12 as debian-base - -RUN apt-get update && apt-get -y upgrade -RUN apt-get -y install --no-install-recommends libelf1 libdrm2 libdrm-amdgpu1 libnuma1 kmod - -RUN apt-get -y install python3 - -####################################################################### -# ROCm does not come with migraphx wrappers for python 3.9, so we build it here -FROM debian-base as debian-build - -ARG ROCM - -COPY --from=rocm /opt/rocm-$ROCM /opt/rocm-$ROCM -RUN ln -s /opt/rocm-$ROCM /opt/rocm - -RUN apt-get -y install g++ cmake -RUN apt-get -y install python3-pybind11 python3-distutils python3-dev - -WORKDIR /opt/build - -COPY docker/rocm/migraphx . - -RUN mkdir build && cd build && cmake .. && make install - ####################################################################### FROM deps AS deps-prelim -# need this to install libnuma1 -RUN apt-get update -# no ugprade?!?! -RUN apt-get -y install libnuma1 +RUN apt-get update && apt-get install -y libnuma1 -WORKDIR /opt/frigate/ +WORKDIR /opt/frigate COPY --from=rootfs / / -# Temporarily disabled to see if a new wheel can be built to support py3.11 -#COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt -#RUN python3 -m pip install --upgrade pip \ -# && pip3 uninstall -y onnxruntime-openvino \ -# && pip3 install -r /requirements.txt +RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ + && python3 get-pip.py "pip" --break-system-packages +RUN python3 -m pip config set global.break-system-packages true + +COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt +RUN pip3 uninstall -y onnxruntime-openvino \ + && pip3 install -r /requirements.txt ####################################################################### FROM scratch AS rocm-dist @@ -87,12 +57,11 @@ COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/ COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/ COPY --from=rocm /opt/rocm-dist/ / -COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-311-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/ ####################################################################### FROM deps-prelim AS rocm-prelim-hsa-override0 -\ - ENV HSA_ENABLE_SDMA=0 +ENV HSA_ENABLE_SDMA=0 +ENV MIGRAPHX_ENABLE_NHWC=1 COPY --from=rocm-dist / / diff --git a/docker/rocm/migraphx/CMakeLists.txt b/docker/rocm/migraphx/CMakeLists.txt deleted file mode 100644 index 271dd094b..000000000 --- a/docker/rocm/migraphx/CMakeLists.txt +++ /dev/null @@ -1,26 +0,0 @@ - -cmake_minimum_required(VERSION 3.1) - -set(CMAKE_CXX_STANDARD 17) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) - -if(NOT CMAKE_BUILD_TYPE) - set(CMAKE_BUILD_TYPE Release) -endif() - -SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) - -project(migraphx_py) - -include_directories(/opt/rocm/include) - -find_package(pybind11 REQUIRED) -pybind11_add_module(migraphx migraphx_py.cpp) - -target_link_libraries(migraphx PRIVATE /opt/rocm/lib/libmigraphx.so /opt/rocm/lib/libmigraphx_tf.so /opt/rocm/lib/libmigraphx_onnx.so) - -install(TARGETS migraphx - COMPONENT python - LIBRARY DESTINATION /opt/rocm/lib -) diff --git a/docker/rocm/migraphx/migraphx_py.cpp b/docker/rocm/migraphx/migraphx_py.cpp deleted file mode 100644 index 894c9d186..000000000 --- a/docker/rocm/migraphx/migraphx_py.cpp +++ /dev/null @@ -1,582 +0,0 @@ -/* - * 
The MIT License (MIT) - * - * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef HAVE_GPU -#include -#endif - -using half = half_float::half; -namespace py = pybind11; - -#ifdef __clang__ -#define MIGRAPHX_PUSH_UNUSED_WARNING \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"") -#define MIGRAPHX_POP_WARNING _Pragma("clang diagnostic pop") -#else -#define MIGRAPHX_PUSH_UNUSED_WARNING -#define MIGRAPHX_POP_WARNING -#endif -#define MIGRAPHX_PYBIND11_MODULE(...) 
\ - MIGRAPHX_PUSH_UNUSED_WARNING \ - PYBIND11_MODULE(__VA_ARGS__) \ - MIGRAPHX_POP_WARNING - -#define MIGRAPHX_PYTHON_GENERATE_SHAPE_ENUM(x, t) .value(#x, migraphx::shape::type_t::x) -namespace migraphx { - -migraphx::value to_value(py::kwargs kwargs); -migraphx::value to_value(py::list lst); - -template -void visit_py(T x, F f) -{ - if(py::isinstance(x)) - { - f(to_value(x.template cast())); - } - else if(py::isinstance(x)) - { - f(to_value(x.template cast())); - } - else if(py::isinstance(x)) - { - f(x.template cast()); - } - else if(py::isinstance(x) or py::hasattr(x, "__index__")) - { - f(x.template cast()); - } - else if(py::isinstance(x)) - { - f(x.template cast()); - } - else if(py::isinstance(x)) - { - f(x.template cast()); - } - else if(py::isinstance(x)) - { - f(migraphx::to_value(x.template cast())); - } - else - { - MIGRAPHX_THROW("VISIT_PY: Unsupported data type!"); - } -} - -migraphx::value to_value(py::list lst) -{ - migraphx::value v = migraphx::value::array{}; - for(auto val : lst) - { - visit_py(val, [&](auto py_val) { v.push_back(py_val); }); - } - - return v; -} - -migraphx::value to_value(py::kwargs kwargs) -{ - migraphx::value v = migraphx::value::object{}; - - for(auto arg : kwargs) - { - auto&& key = py::str(arg.first); - auto&& val = arg.second; - visit_py(val, [&](auto py_val) { v[key] = py_val; }); - } - return v; -} -} // namespace migraphx - -namespace pybind11 { -namespace detail { - -template <> -struct npy_format_descriptor -{ - static std::string format() - { - // following: https://docs.python.org/3/library/struct.html#format-characters - return "e"; - } - static constexpr auto name() { return _("half"); } -}; - -} // namespace detail -} // namespace pybind11 - -template -void visit_type(const migraphx::shape& s, F f) -{ - s.visit_type(f); -} - -template -void visit(const migraphx::raw_data& x, F f) -{ - x.visit(f); -} - -template -void visit_types(F f) -{ - migraphx::shape::visit_types(f); -} - -template -py::buffer_info to_buffer_info(T& x) -{ - migraphx::shape s = x.get_shape(); - assert(s.type() != migraphx::shape::tuple_type); - if(s.dynamic()) - MIGRAPHX_THROW("MIGRAPHX PYTHON: dynamic shape argument passed to to_buffer_info"); - auto strides = s.strides(); - std::transform( - strides.begin(), strides.end(), strides.begin(), [&](auto i) { return i * s.type_size(); }); - py::buffer_info b; - visit_type(s, [&](auto as) { - // migraphx use int8_t data to store bool type, we need to - // explicitly specify the data type as bool for python - if(s.type() == migraphx::shape::bool_type) - { - b = py::buffer_info(x.data(), - as.size(), - py::format_descriptor::format(), - s.ndim(), - s.lens(), - strides); - } - else - { - b = py::buffer_info(x.data(), - as.size(), - py::format_descriptor::format(), - s.ndim(), - s.lens(), - strides); - } - }); - return b; -} - -migraphx::shape to_shape(const py::buffer_info& info) -{ - migraphx::shape::type_t t; - std::size_t n = 0; - visit_types([&](auto as) { - if(info.format == py::format_descriptor::format() or - (info.format == "l" and py::format_descriptor::format() == "q") or - (info.format == "L" and py::format_descriptor::format() == "Q")) - { - t = as.type_enum(); - n = sizeof(as()); - } - else if(info.format == "?" 
and py::format_descriptor::format() == "b") - { - t = migraphx::shape::bool_type; - n = sizeof(bool); - } - }); - - if(n == 0) - { - MIGRAPHX_THROW("MIGRAPHX PYTHON: Unsupported data type " + info.format); - } - - auto strides = info.strides; - std::transform(strides.begin(), strides.end(), strides.begin(), [&](auto i) -> std::size_t { - return n > 0 ? i / n : 0; - }); - - // scalar support - if(info.shape.empty()) - { - return migraphx::shape{t}; - } - else - { - return migraphx::shape{t, info.shape, strides}; - } -} - -MIGRAPHX_PYBIND11_MODULE(migraphx, m) -{ - py::class_ shape_cls(m, "shape"); - shape_cls - .def(py::init([](py::kwargs kwargs) { - auto v = migraphx::to_value(kwargs); - auto t = migraphx::shape::parse_type(v.get("type", "float")); - if(v.contains("dyn_dims")) - { - auto dyn_dims = - migraphx::from_value>( - v.at("dyn_dims")); - return migraphx::shape(t, dyn_dims); - } - auto lens = v.get("lens", {1}); - if(v.contains("strides")) - return migraphx::shape(t, lens, v.at("strides").to_vector()); - else - return migraphx::shape(t, lens); - })) - .def("type", &migraphx::shape::type) - .def("lens", &migraphx::shape::lens) - .def("strides", &migraphx::shape::strides) - .def("ndim", &migraphx::shape::ndim) - .def("elements", &migraphx::shape::elements) - .def("bytes", &migraphx::shape::bytes) - .def("type_string", &migraphx::shape::type_string) - .def("type_size", &migraphx::shape::type_size) - .def("dyn_dims", &migraphx::shape::dyn_dims) - .def("packed", &migraphx::shape::packed) - .def("transposed", &migraphx::shape::transposed) - .def("broadcasted", &migraphx::shape::broadcasted) - .def("standard", &migraphx::shape::standard) - .def("scalar", &migraphx::shape::scalar) - .def("dynamic", &migraphx::shape::dynamic) - .def("__eq__", std::equal_to{}) - .def("__ne__", std::not_equal_to{}) - .def("__repr__", [](const migraphx::shape& s) { return migraphx::to_string(s); }); - - py::enum_(shape_cls, "type_t") - MIGRAPHX_SHAPE_VISIT_TYPES(MIGRAPHX_PYTHON_GENERATE_SHAPE_ENUM); - - py::class_(shape_cls, "dynamic_dimension") - .def(py::init<>()) - .def(py::init()) - .def(py::init>()) - .def_readwrite("min", &migraphx::shape::dynamic_dimension::min) - .def_readwrite("max", &migraphx::shape::dynamic_dimension::max) - .def_readwrite("optimals", &migraphx::shape::dynamic_dimension::optimals) - .def("is_fixed", &migraphx::shape::dynamic_dimension::is_fixed); - - py::class_(m, "argument", py::buffer_protocol()) - .def_buffer([](migraphx::argument& x) -> py::buffer_info { return to_buffer_info(x); }) - .def(py::init([](py::buffer b) { - py::buffer_info info = b.request(); - return migraphx::argument(to_shape(info), info.ptr); - })) - .def("get_shape", &migraphx::argument::get_shape) - .def("data_ptr", - [](migraphx::argument& x) { return reinterpret_cast(x.data()); }) - .def("tolist", - [](migraphx::argument& x) { - py::list l{x.get_shape().elements()}; - visit(x, [&](auto data) { l = py::cast(data.to_vector()); }); - return l; - }) - .def("__eq__", std::equal_to{}) - .def("__ne__", std::not_equal_to{}) - .def("__repr__", [](const migraphx::argument& x) { return migraphx::to_string(x); }); - - py::class_(m, "target"); - - py::class_(m, "instruction_ref") - .def("shape", [](migraphx::instruction_ref i) { return i->get_shape(); }) - .def("op", [](migraphx::instruction_ref i) { return i->get_operator(); }); - - py::class_>(m, "module") - .def("print", [](const migraphx::module& mm) { std::cout << mm << std::endl; }) - .def( - "add_instruction", - [](migraphx::module& mm, - const migraphx::operation& 
op, - std::vector& args, - std::vector& mod_args) { - return mm.add_instruction(op, args, mod_args); - }, - py::arg("op"), - py::arg("args"), - py::arg("mod_args") = std::vector{}) - .def( - "add_literal", - [](migraphx::module& mm, py::buffer data) { - py::buffer_info info = data.request(); - auto literal_shape = to_shape(info); - return mm.add_literal(literal_shape, reinterpret_cast(info.ptr)); - }, - py::arg("data")) - .def( - "add_parameter", - [](migraphx::module& mm, const std::string& name, const migraphx::shape shape) { - return mm.add_parameter(name, shape); - }, - py::arg("name"), - py::arg("shape")) - .def( - "add_return", - [](migraphx::module& mm, std::vector& args) { - return mm.add_return(args); - }, - py::arg("args")) - .def("__repr__", [](const migraphx::module& mm) { return migraphx::to_string(mm); }); - - py::class_(m, "program") - .def(py::init([]() { return migraphx::program(); })) - .def("get_parameter_names", &migraphx::program::get_parameter_names) - .def("get_parameter_shapes", &migraphx::program::get_parameter_shapes) - .def("get_output_shapes", &migraphx::program::get_output_shapes) - .def("is_compiled", &migraphx::program::is_compiled) - .def( - "compile", - [](migraphx::program& p, - const migraphx::target& t, - bool offload_copy, - bool fast_math, - bool exhaustive_tune) { - migraphx::compile_options options; - options.offload_copy = offload_copy; - options.fast_math = fast_math; - options.exhaustive_tune = exhaustive_tune; - p.compile(t, options); - }, - py::arg("t"), - py::arg("offload_copy") = true, - py::arg("fast_math") = true, - py::arg("exhaustive_tune") = false) - .def("get_main_module", [](const migraphx::program& p) { return p.get_main_module(); }) - .def( - "create_module", - [](migraphx::program& p, const std::string& name) { return p.create_module(name); }, - py::arg("name")) - .def("run", - [](migraphx::program& p, py::dict params) { - migraphx::parameter_map pm; - for(auto x : params) - { - std::string key = x.first.cast(); - py::buffer b = x.second.cast(); - py::buffer_info info = b.request(); - pm[key] = migraphx::argument(to_shape(info), info.ptr); - } - return p.eval(pm); - }) - .def("run_async", - [](migraphx::program& p, - py::dict params, - std::uintptr_t stream, - std::string stream_name) { - migraphx::parameter_map pm; - for(auto x : params) - { - std::string key = x.first.cast(); - py::buffer b = x.second.cast(); - py::buffer_info info = b.request(); - pm[key] = migraphx::argument(to_shape(info), info.ptr); - } - migraphx::execution_environment exec_env{ - migraphx::any_ptr(reinterpret_cast(stream), stream_name), true}; - return p.eval(pm, exec_env); - }) - .def("sort", &migraphx::program::sort) - .def("print", [](const migraphx::program& p) { std::cout << p << std::endl; }) - .def("__eq__", std::equal_to{}) - .def("__ne__", std::not_equal_to{}) - .def("__repr__", [](const migraphx::program& p) { return migraphx::to_string(p); }); - - py::class_ op(m, "op"); - op.def(py::init([](const std::string& name, py::kwargs kwargs) { - migraphx::value v = migraphx::value::object{}; - if(kwargs) - { - v = migraphx::to_value(kwargs); - } - return migraphx::make_op(name, v); - })) - .def("name", &migraphx::operation::name); - - py::enum_(op, "pooling_mode") - .value("average", migraphx::op::pooling_mode::average) - .value("max", migraphx::op::pooling_mode::max) - .value("lpnorm", migraphx::op::pooling_mode::lpnorm); - - py::enum_(op, "rnn_direction") - .value("forward", migraphx::op::rnn_direction::forward) - .value("reverse", 
migraphx::op::rnn_direction::reverse) - .value("bidirectional", migraphx::op::rnn_direction::bidirectional); - - m.def( - "argument_from_pointer", - [](const migraphx::shape shape, const int64_t address) { - return migraphx::argument(shape, reinterpret_cast(address)); - }, - py::arg("shape"), - py::arg("address")); - - m.def( - "parse_tf", - [](const std::string& filename, - bool is_nhwc, - unsigned int batch_size, - std::unordered_map> map_input_dims, - std::vector output_names) { - return migraphx::parse_tf( - filename, migraphx::tf_options{is_nhwc, batch_size, map_input_dims, output_names}); - }, - "Parse tf protobuf (default format is nhwc)", - py::arg("filename"), - py::arg("is_nhwc") = true, - py::arg("batch_size") = 1, - py::arg("map_input_dims") = std::unordered_map>(), - py::arg("output_names") = std::vector()); - - m.def( - "parse_onnx", - [](const std::string& filename, - unsigned int default_dim_value, - migraphx::shape::dynamic_dimension default_dyn_dim_value, - std::unordered_map> map_input_dims, - std::unordered_map> - map_dyn_input_dims, - bool skip_unknown_operators, - bool print_program_on_error, - int64_t max_loop_iterations) { - migraphx::onnx_options options; - options.default_dim_value = default_dim_value; - options.default_dyn_dim_value = default_dyn_dim_value; - options.map_input_dims = map_input_dims; - options.map_dyn_input_dims = map_dyn_input_dims; - options.skip_unknown_operators = skip_unknown_operators; - options.print_program_on_error = print_program_on_error; - options.max_loop_iterations = max_loop_iterations; - return migraphx::parse_onnx(filename, options); - }, - "Parse onnx file", - py::arg("filename"), - py::arg("default_dim_value") = 0, - py::arg("default_dyn_dim_value") = migraphx::shape::dynamic_dimension{1, 1}, - py::arg("map_input_dims") = std::unordered_map>(), - py::arg("map_dyn_input_dims") = - std::unordered_map>(), - py::arg("skip_unknown_operators") = false, - py::arg("print_program_on_error") = false, - py::arg("max_loop_iterations") = 10); - - m.def( - "parse_onnx_buffer", - [](const std::string& onnx_buffer, - unsigned int default_dim_value, - migraphx::shape::dynamic_dimension default_dyn_dim_value, - std::unordered_map> map_input_dims, - std::unordered_map> - map_dyn_input_dims, - bool skip_unknown_operators, - bool print_program_on_error) { - migraphx::onnx_options options; - options.default_dim_value = default_dim_value; - options.default_dyn_dim_value = default_dyn_dim_value; - options.map_input_dims = map_input_dims; - options.map_dyn_input_dims = map_dyn_input_dims; - options.skip_unknown_operators = skip_unknown_operators; - options.print_program_on_error = print_program_on_error; - return migraphx::parse_onnx_buffer(onnx_buffer, options); - }, - "Parse onnx file", - py::arg("filename"), - py::arg("default_dim_value") = 0, - py::arg("default_dyn_dim_value") = migraphx::shape::dynamic_dimension{1, 1}, - py::arg("map_input_dims") = std::unordered_map>(), - py::arg("map_dyn_input_dims") = - std::unordered_map>(), - py::arg("skip_unknown_operators") = false, - py::arg("print_program_on_error") = false); - - m.def( - "load", - [](const std::string& name, const std::string& format) { - migraphx::file_options options; - options.format = format; - return migraphx::load(name, options); - }, - "Load MIGraphX program", - py::arg("filename"), - py::arg("format") = "msgpack"); - - m.def( - "save", - [](const migraphx::program& p, const std::string& name, const std::string& format) { - migraphx::file_options options; - options.format = format; 
- return migraphx::save(p, name, options); - }, - "Save MIGraphX program", - py::arg("p"), - py::arg("filename"), - py::arg("format") = "msgpack"); - - m.def("get_target", &migraphx::make_target); - m.def("create_argument", [](const migraphx::shape& s, const std::vector& values) { - if(values.size() != s.elements()) - MIGRAPHX_THROW("Values and shape elements do not match"); - migraphx::argument a{s}; - a.fill(values.begin(), values.end()); - return a; - }); - m.def("generate_argument", &migraphx::generate_argument, py::arg("s"), py::arg("seed") = 0); - m.def("fill_argument", &migraphx::fill_argument, py::arg("s"), py::arg("value")); - m.def("quantize_fp16", - &migraphx::quantize_fp16, - py::arg("prog"), - py::arg("ins_names") = std::vector{"all"}); - m.def("quantize_int8", - &migraphx::quantize_int8, - py::arg("prog"), - py::arg("t"), - py::arg("calibration") = std::vector{}, - py::arg("ins_names") = std::vector{"dot", "convolution"}); - -#ifdef HAVE_GPU - m.def("allocate_gpu", &migraphx::gpu::allocate_gpu, py::arg("s"), py::arg("host") = false); - m.def("to_gpu", &migraphx::gpu::to_gpu, py::arg("arg"), py::arg("host") = false); - m.def("from_gpu", &migraphx::gpu::from_gpu); - m.def("gpu_sync", [] { migraphx::gpu::gpu_sync(); }); -#endif - -#ifdef VERSION_INFO - m.attr("__version__") = VERSION_INFO; -#else - m.attr("__version__") = "dev"; -#endif -} diff --git a/docker/rocm/requirements-wheels-rocm.txt b/docker/rocm/requirements-wheels-rocm.txt index 89d0e6096..85450768e 100644 --- a/docker/rocm/requirements-wheels-rocm.txt +++ b/docker/rocm/requirements-wheels-rocm.txt @@ -1 +1 @@ -onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v1.0.0/onnxruntime_rocm-1.17.3-cp39-cp39-linux_x86_64.whl \ No newline at end of file +onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v6.3.3/onnxruntime_rocm-1.20.1-cp311-cp311-linux_x86_64.whl \ No newline at end of file diff --git a/docker/rocm/rocm-pin-600 b/docker/rocm/rocm-pin-600 deleted file mode 100644 index 88348a5c1..000000000 --- a/docker/rocm/rocm-pin-600 +++ /dev/null @@ -1,3 +0,0 @@ -Package: * -Pin: release o=repo.radeon.com -Pin-Priority: 600 diff --git a/docker/rocm/rocm.hcl b/docker/rocm/rocm.hcl index 33a2d2323..6a84b350d 100644 --- a/docker/rocm/rocm.hcl +++ b/docker/rocm/rocm.hcl @@ -2,7 +2,7 @@ variable "AMDGPU" { default = "gfx900" } variable "ROCM" { - default = "5.7.3" + default = "6.3.3" } variable "HSA_OVERRIDE_GFX_VERSION" { default = "" @@ -10,6 +10,13 @@ variable "HSA_OVERRIDE_GFX_VERSION" { variable "HSA_OVERRIDE" { default = "1" } + +target wget { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/amd64"] + target = "wget" +} + target deps { dockerfile = "docker/main/Dockerfile" platforms = ["linux/amd64"] @@ -26,6 +33,7 @@ target rocm { dockerfile = "docker/rocm/Dockerfile" contexts = { deps = "target:deps", + wget = "target:wget", rootfs = "target:rootfs" } platforms = ["linux/amd64"] diff --git a/docker/rocm/rocm.list b/docker/rocm/rocm.list deleted file mode 100644 index 0915b4094..000000000 --- a/docker/rocm/rocm.list +++ /dev/null @@ -1 +0,0 @@ -deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/5.7.3 focal main diff --git a/frigate/detectors/plugins/onnx.py b/frigate/detectors/plugins/onnx.py index 13a948de9..d94b4660f 100644 --- a/frigate/detectors/plugins/onnx.py +++ b/frigate/detectors/plugins/onnx.py @@ -99,5 +99,5 @@ class ONNXDetector(DetectionApi): return post_process_yolov9(predictions, 
self.w, self.h) else: raise Exception( - f"{self.onnx_model_type} is currently not supported for rocm. See the docs for more info on supported models." + f"{self.onnx_model_type} is currently not supported for onnx. See the docs for more info on supported models." ) From 71e6e04d778538e33424c0dac7e5ca08f120b1f0 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 3 Mar 2025 07:16:14 -0700 Subject: [PATCH 05/12] Remove rocm detector (#16913) * Remove rocm detector plugin * Update docs to recommend using onnx for rocm * Formatting --- docs/docs/configuration/object_detectors.md | 30 +--- docs/docs/plus/index.md | 6 +- frigate/detectors/plugins/rocm.py | 170 -------------------- frigate/object_detection.py | 9 +- 4 files changed, 9 insertions(+), 206 deletions(-) delete mode 100644 frigate/detectors/plugins/rocm.py diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index 531ef5108..6834f8014 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -49,7 +49,7 @@ This does not affect using hardware for accelerating other tasks such as [semant # Officially Supported Detectors -Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `onnx`, `openvino`, `rknn`, `rocm`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. +Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `onnx`, `openvino`, `rknn`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. ## Edge TPU Detector @@ -367,7 +367,7 @@ model: ### Setup -The `rocm` detector supports running YOLO-NAS models on AMD GPUs. Use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`. +Support for AMD GPUs is provided using the [ONNX detector](#ONNX). In order to utilize the AMD GPU for object detection use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`. ### Docker settings for GPU access @@ -446,29 +446,9 @@ $ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/ ### Supported Models -There is no default model provided, the following formats are supported: - -#### YOLO-NAS - -[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate. - -After placing the downloaded onnx model in your config folder, you can use the following configuration: - -```yaml -detectors: - rocm: - type: rocm - -model: - model_type: yolonas - width: 320 # <--- should match whatever was set in notebook - height: 320 # <--- should match whatever was set in notebook - input_pixel_format: bgr - path: /config/yolo_nas_s.onnx - labelmap_path: /labelmap/coco-80.txt -``` - -Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. 
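The removed `rocm` example carries over to the `onnx` detector almost verbatim; a sketch of the equivalent post-patch configuration, reusing the model path and dimensions from the deleted block:

```yaml
detectors:
  onnx:
    type: onnx

model:
  model_type: yolonas
  width: 320 # <--- should match whatever was set in notebook
  height: 320 # <--- should match whatever was set in notebook
  input_pixel_format: bgr
  path: /config/yolo_nas_s.onnx
  labelmap_path: /labelmap/coco-80.txt
```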
+See [ONNX supported models](#supported-models) for supported models, there are some caveats: +- D-FINE models are not supported +- YOLO-NAS models are known to not run well on integrated GPUs ## ONNX diff --git a/docs/docs/plus/index.md b/docs/docs/plus/index.md index 37798badb..589adca72 100644 --- a/docs/docs/plus/index.md +++ b/docs/docs/plus/index.md @@ -28,11 +28,11 @@ Not all model types are supported by all detectors, so it's important to choose ## Supported detector types -Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), and ROCm (`rocm`) detectors. +Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), and ONNX (`onnx`) detectors. :::warning -Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15 and later. +Using Frigate+ models with `onnx` is only available with Frigate 0.15 and later. ::: @@ -42,7 +42,7 @@ Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15 | [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` | | [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` | | [NVidia GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` | -| [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\* | `rocm` | `yolonas` | +| [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\* | `onnx` | `yolonas` | _\* Requires Frigate 0.15_ diff --git a/frigate/detectors/plugins/rocm.py b/frigate/detectors/plugins/rocm.py deleted file mode 100644 index 7c87edb50..000000000 --- a/frigate/detectors/plugins/rocm.py +++ /dev/null @@ -1,170 +0,0 @@ -import ctypes -import logging -import os -import subprocess -import sys - -import cv2 -import numpy as np -from pydantic import Field -from typing_extensions import Literal - -from frigate.const import MODEL_CACHE_DIR -from frigate.detectors.detection_api import DetectionApi -from frigate.detectors.detector_config import ( - BaseDetectorConfig, - ModelTypeEnum, - PixelFormatEnum, -) - -logger = logging.getLogger(__name__) - -DETECTOR_KEY = "rocm" - - -def detect_gfx_version(): - return subprocess.getoutput( - "unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo | grep gfx |head -1|awk '{print $2}'" - ) - - -def auto_override_gfx_version(): - # If environment variable already in place, do not override - gfx_version = detect_gfx_version() - old_override = os.getenv("HSA_OVERRIDE_GFX_VERSION") - if old_override not in (None, ""): - logger.warning( - f"AMD/ROCm: detected {gfx_version} but HSA_OVERRIDE_GFX_VERSION already present ({old_override}), not overriding!" 
- ) - return old_override - mapping = { - "gfx90c": "9.0.0", - "gfx1031": "10.3.0", - "gfx1103": "11.0.0", - } - override = mapping.get(gfx_version) - if override is not None: - logger.warning( - f"AMD/ROCm: detected {gfx_version}, overriding HSA_OVERRIDE_GFX_VERSION={override}" - ) - os.putenv("HSA_OVERRIDE_GFX_VERSION", override) - return override - return "" - - -class ROCmDetectorConfig(BaseDetectorConfig): - type: Literal[DETECTOR_KEY] - conserve_cpu: bool = Field( - default=True, - title="Conserve CPU at the expense of latency (and reduced max throughput)", - ) - auto_override_gfx: bool = Field( - default=True, title="Automatically detect and override gfx version" - ) - - -class ROCmDetector(DetectionApi): - type_key = DETECTOR_KEY - - def __init__(self, detector_config: ROCmDetectorConfig): - if detector_config.auto_override_gfx: - auto_override_gfx_version() - - try: - sys.path.append("/opt/rocm/lib") - import migraphx - - logger.info("AMD/ROCm: loaded migraphx module") - except ModuleNotFoundError: - logger.error("AMD/ROCm: module loading failed, missing ROCm environment?") - raise - - if detector_config.conserve_cpu: - logger.info("AMD/ROCm: switching HIP to blocking mode to conserve CPU") - ctypes.CDLL("/opt/rocm/lib/libamdhip64.so").hipSetDeviceFlags(4) - - self.h = detector_config.model.height - self.w = detector_config.model.width - self.rocm_model_type = detector_config.model.model_type - self.rocm_model_px = detector_config.model.input_pixel_format - path = detector_config.model.path - - mxr_path = os.path.splitext(path)[0] + ".mxr" - if path.endswith(".mxr"): - logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}") - self.model = migraphx.load(mxr_path) - elif os.path.exists(mxr_path): - logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}") - self.model = migraphx.load(mxr_path) - else: - logger.info(f"AMD/ROCm: loading model from {path}") - - if ( - path.endswith(".tf") - or path.endswith(".tf2") - or path.endswith(".tflite") - ): - # untested - self.model = migraphx.parse_tf(path) - else: - self.model = migraphx.parse_onnx(path) - - logger.info("AMD/ROCm: compiling the model") - - self.model.compile( - migraphx.get_target("gpu"), offload_copy=True, fast_math=True - ) - - logger.info(f"AMD/ROCm: saving parsed model into {mxr_path}") - - os.makedirs(os.path.join(MODEL_CACHE_DIR, "rocm"), exist_ok=True) - migraphx.save(self.model, mxr_path) - - logger.info("AMD/ROCm: model loaded") - - def detect_raw(self, tensor_input): - model_input_name = self.model.get_parameter_names()[0] - model_input_shape = tuple( - self.model.get_parameter_shapes()[model_input_name].lens() - ) - - tensor_input = cv2.dnn.blobFromImage( - tensor_input[0], - 1.0, - (model_input_shape[3], model_input_shape[2]), - None, - swapRB=self.rocm_model_px == PixelFormatEnum.bgr, - ).astype(np.uint8) - - detector_result = self.model.run({model_input_name: tensor_input})[0] - addr = ctypes.cast(detector_result.data_ptr(), ctypes.POINTER(ctypes.c_float)) - - tensor_output = np.ctypeslib.as_array( - addr, shape=detector_result.get_shape().lens() - ) - - if self.rocm_model_type == ModelTypeEnum.yolonas: - predictions = tensor_output - - detections = np.zeros((20, 6), np.float32) - - for i, prediction in enumerate(predictions): - if i == 20: - break - (_, x_min, y_min, x_max, y_max, confidence, class_id) = prediction - # when running in GPU mode, empty predictions in the output have class_id of -1 - if class_id < 0: - break - detections[i] = [ - class_id, - confidence, - y_min / self.h, - x_min / 
self.w,
-                    y_max / self.h,
-                    x_max / self.w,
-                ]
-            return detections
-        else:
-            raise Exception(
-                f"{self.rocm_model_type} is currently not supported for rocm. See the docs for more info on supported models."
-            )
diff --git a/frigate/object_detection.py b/frigate/object_detection.py
index 022e565f0..8e88ae578 100644
--- a/frigate/object_detection.py
+++ b/frigate/object_detection.py
@@ -17,7 +17,6 @@ from frigate.detectors.detector_config import (
     InputDTypeEnum,
     InputTensorEnum,
 )
-from frigate.detectors.plugins.rocm import DETECTOR_KEY as ROCM_DETECTOR_KEY
 from frigate.util.builtin import EventsPerSecond, load_labels
 from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
 from frigate.util.services import listen
@@ -52,13 +51,7 @@ class LocalObjectDetector(ObjectDetector):
             self.labels = load_labels(labels)
 
         if detector_config:
-            if detector_config.type == ROCM_DETECTOR_KEY:
-                # ROCm requires NHWC as input
-                self.input_transform = None
-            else:
-                self.input_transform = tensor_transform(
-                    detector_config.model.input_tensor
-                )
+            self.input_transform = tensor_transform(detector_config.model.input_tensor)
 
             self.dtype = detector_config.model.input_dtype
         else:

From 531042467ab85180bf20b3f265ae9256f157f5bf Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Mon, 3 Mar 2025 09:30:52 -0600
Subject: [PATCH 06/12] Dynamically enable/disable cameras (#16894)

* config options
* metrics
* stop and restart ffmpeg processes
* dispatcher
* frontend websocket
* buttons for testing
* don't recreate log pipe
* add/remove cam from birdseye when enabling/disabling
* end all objects and send empty camera activity
* enable/disable switch in ui
* disable buttons when camera is disabled
* use enabled_in_config for some frontend checks
* tweaks
* handle settings pane with disabled cameras
* frontend tweaks
* change to debug log
* mqtt docs
* tweak
* ensure all ffmpeg processes are initially started
* clean up
* use zmq
* remove camera metrics
* remove camera metrics
* tweaks
* frontend tweaks
---
 docs/docs/integrations/mqtt.md                |   8 +
 frigate/camera/activity_manager.py            |   2 +-
 frigate/comms/dispatcher.py                   |  23 ++
 frigate/config/camera/camera.py               |   3 +
 frigate/config/config.py                      |   1 +
 frigate/object_processing.py                  |  62 +++++
 frigate/output/birdseye.py                    | 227 ++++++++++--------
 frigate/output/output.py                      |  20 ++
 frigate/video.py                              | 142 +++++++++--
 web/src/api/ws.tsx                            |  13 +
 web/src/components/camera/CameraImage.tsx     |   8 +-
 .../components/camera/ResizingCameraImage.tsx |   4 +-
 .../dynamic/CameraFeatureToggle.tsx           |  23 +-
 web/src/components/menu/LiveContextMenu.tsx   | 114 +++++++--
 web/src/components/player/LivePlayer.tsx      |  90 +++++--
 web/src/components/settings/ZoneEditPane.tsx  |   2 +-
 web/src/hooks/use-camera-activity.ts          |  18 +-
 web/src/pages/Live.tsx                        |   6 +-
 web/src/pages/Settings.tsx                    |  84 +++++--
 web/src/types/frigateConfig.ts                |   1 +
 web/src/types/ws.ts                           |   1 +
 web/src/views/live/LiveCameraView.tsx         |  32 ++-
 web/src/views/settings/CameraSettingsView.tsx |  29 ++-
 .../settings/NotificationsSettingsView.tsx    |   2 +-
 24 files changed, 713 insertions(+), 202 deletions(-)

diff --git a/docs/docs/integrations/mqtt.md b/docs/docs/integrations/mqtt.md
index 4eaf61919..fc8888e40 100644
--- a/docs/docs/integrations/mqtt.md
+++ b/docs/docs/integrations/mqtt.md
@@ -222,6 +222,14 @@ Publishes the rms value for audio detected on this camera.
 
 **NOTE:** Requires audio detection to be enabled
 
+### `frigate/<camera_name>/enabled/set`
+
+Topic to turn Frigate's processing of a camera on and off. Expected values are `ON` and `OFF`.
+
+### `frigate/<camera_name>/enabled/state`
+
+Topic with current state of processing for a camera. Published values are `ON` and `OFF`.
+
 ### `frigate/<camera_name>/detect/set`
 
 Topic to turn object detection for a camera on and off. Expected values are `ON` and `OFF`.
diff --git a/frigate/camera/activity_manager.py b/frigate/camera/activity_manager.py
index a6e40f4ca..7f6354641 100644
--- a/frigate/camera/activity_manager.py
+++ b/frigate/camera/activity_manager.py
@@ -20,7 +20,7 @@ class CameraActivityManager:
         self.all_zone_labels: dict[str, set[str]] = {}
 
         for camera_config in config.cameras.values():
-            if not camera_config.enabled:
+            if not camera_config.enabled_in_config:
                 continue
 
             self.last_camera_activity[camera_config.name] = {}
diff --git a/frigate/comms/dispatcher.py b/frigate/comms/dispatcher.py
index 61530d086..586b70cbb 100644
--- a/frigate/comms/dispatcher.py
+++ b/frigate/comms/dispatcher.py
@@ -55,6 +55,7 @@ class Dispatcher:
         self._camera_settings_handlers: dict[str, Callable] = {
             "audio": self._on_audio_command,
             "detect": self._on_detect_command,
+            "enabled": self._on_enabled_command,
             "improve_contrast": self._on_motion_improve_contrast_command,
             "ptz_autotracker": self._on_ptz_autotracker_command,
             "motion": self._on_motion_command,
@@ -167,6 +168,7 @@ class Dispatcher:
         for camera in camera_status.keys():
             camera_status[camera]["config"] = {
                 "detect": self.config.cameras[camera].detect.enabled,
+                "enabled": self.config.cameras[camera].enabled,
                 "snapshots": self.config.cameras[camera].snapshots.enabled,
                 "record": self.config.cameras[camera].record.enabled,
                 "audio": self.config.cameras[camera].audio.enabled,
@@ -278,6 +280,27 @@ class Dispatcher:
         self.config_updater.publish(f"config/detect/{camera_name}", detect_settings)
         self.publish(f"{camera_name}/detect/state", payload, retain=True)
 
+    def _on_enabled_command(self, camera_name: str, payload: str) -> None:
+        """Callback for camera topic."""
+        camera_settings = self.config.cameras[camera_name]
+
+        if payload == "ON":
+            if not self.config.cameras[camera_name].enabled_in_config:
+                logger.error(
+                    "Camera must be enabled in the config to be turned on via MQTT."
+                )
+                return
+            if not camera_settings.enabled:
+                logger.info(f"Turning on camera {camera_name}")
+                camera_settings.enabled = True
+        elif payload == "OFF":
+            if camera_settings.enabled:
+                logger.info(f"Turning off camera {camera_name}")
+                camera_settings.enabled = False
+
+        self.config_updater.publish(f"config/enabled/{camera_name}", camera_settings)
+        self.publish(f"{camera_name}/enabled/state", payload, retain=True)
+
     def _on_motion_command(self, camera_name: str, payload: str) -> None:
         """Callback for motion topic."""
         detect_settings = self.config.cameras[camera_name].detect
diff --git a/frigate/config/camera/camera.py b/frigate/config/camera/camera.py
index 50f61f33c..2d928661e 100644
--- a/frigate/config/camera/camera.py
+++ b/frigate/config/camera/camera.py
@@ -102,6 +102,9 @@ class CameraConfig(FrigateBaseModel):
     zones: dict[str, ZoneConfig] = Field(
         default_factory=dict, title="Zone configuration."
     )
+    enabled_in_config: Optional[bool] = Field(
+        default=None, title="Keep track of original state of camera."
+ ) _ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr() diff --git a/frigate/config/config.py b/frigate/config/config.py index d2ca9a6f5..633aef803 100644 --- a/frigate/config/config.py +++ b/frigate/config/config.py @@ -516,6 +516,7 @@ class FrigateConfig(FrigateBaseModel): camera_config.detect.stationary.interval = stationary_threshold # set config pre-value + camera_config.enabled_in_config = camera_config.enabled camera_config.audio.enabled_in_config = camera_config.audio.enabled camera_config.record.enabled_in_config = camera_config.record.enabled camera_config.notifications.enabled_in_config = ( diff --git a/frigate/object_processing.py b/frigate/object_processing.py index 137883b2b..783c2b2d0 100644 --- a/frigate/object_processing.py +++ b/frigate/object_processing.py @@ -10,6 +10,7 @@ from typing import Callable, Optional import cv2 import numpy as np +from frigate.comms.config_updater import ConfigSubscriber from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum from frigate.comms.dispatcher import Dispatcher from frigate.comms.events_updater import EventEndSubscriber, EventUpdatePublisher @@ -61,6 +62,7 @@ class CameraState: self.previous_frame_id = None self.callbacks = defaultdict(list) self.ptz_autotracker_thread = ptz_autotracker_thread + self.prev_enabled = self.camera_config.enabled def get_current_frame(self, draw_options={}): with self.current_frame_lock: @@ -310,6 +312,7 @@ class CameraState: # TODO: can i switch to looking this up and only changing when an event ends? # maintain best objects camera_activity: dict[str, list[any]] = { + "enabled": True, "motion": len(motion_boxes) > 0, "objects": [], } @@ -437,6 +440,11 @@ class TrackedObjectProcessor(threading.Thread): self.last_motion_detected: dict[str, float] = {} self.ptz_autotracker_thread = ptz_autotracker_thread + self.enabled_subscribers = { + camera: ConfigSubscriber(f"config/enabled/{camera}", True) + for camera in config.cameras.keys() + } + self.requestor = InterProcessRequestor() self.detection_publisher = DetectionPublisher(DetectionTypeEnum.video) self.event_sender = EventUpdatePublisher() @@ -679,8 +687,55 @@ class TrackedObjectProcessor(threading.Thread): """Returns the latest frame time for a given camera.""" return self.camera_states[camera].current_frame_time + def force_end_all_events(self, camera: str, camera_state: CameraState): + """Ends all active events on camera when disabling.""" + last_frame_name = camera_state.previous_frame_id + for obj_id, obj in list(camera_state.tracked_objects.items()): + if "end_time" not in obj.obj_data: + logger.debug(f"Camera {camera} disabled, ending active event {obj_id}") + obj.obj_data["end_time"] = datetime.datetime.now().timestamp() + # end callbacks + for callback in camera_state.callbacks["end"]: + callback(camera, obj, last_frame_name) + + # camera activity callbacks + for callback in camera_state.callbacks["camera_activity"]: + callback( + camera, + {"enabled": False, "motion": 0, "objects": []}, + ) + + def _get_enabled_state(self, camera: str) -> bool: + _, config_data = self.enabled_subscribers[camera].check_for_update() + if config_data: + enabled = config_data.enabled + if self.camera_states[camera].prev_enabled is None: + self.camera_states[camera].prev_enabled = enabled + return enabled + return ( + self.camera_states[camera].prev_enabled + if self.camera_states[camera].prev_enabled is not None + else self.config.cameras[camera].enabled + ) + def run(self): while not self.stop_event.is_set(): + for camera, config in 
self.config.cameras.items(): + if not config.enabled_in_config: + continue + + current_enabled = self._get_enabled_state(camera) + camera_state = self.camera_states[camera] + + if camera_state.prev_enabled and not current_enabled: + logger.debug(f"Not processing objects for disabled camera {camera}") + self.force_end_all_events(camera, camera_state) + + camera_state.prev_enabled = current_enabled + + if not current_enabled: + continue + try: ( camera, @@ -693,6 +748,10 @@ class TrackedObjectProcessor(threading.Thread): except queue.Empty: continue + if not self._get_enabled_state(camera): + logger.debug(f"Camera {camera} disabled, skipping update") + continue + camera_state = self.camera_states[camera] camera_state.update( @@ -735,4 +794,7 @@ class TrackedObjectProcessor(threading.Thread): self.detection_publisher.stop() self.event_sender.stop() self.event_end_subscriber.stop() + for subscriber in self.enabled_subscribers.values(): + subscriber.stop() + logger.info("Exiting object processor...") diff --git a/frigate/output/birdseye.py b/frigate/output/birdseye.py index 8331eb64a..3d036e9d5 100644 --- a/frigate/output/birdseye.py +++ b/frigate/output/birdseye.py @@ -10,6 +10,7 @@ import queue import subprocess as sp import threading import traceback +from typing import Optional import cv2 import numpy as np @@ -280,6 +281,12 @@ class BirdsEyeFrameManager: self.stop_event = stop_event self.inactivity_threshold = config.birdseye.inactivity_threshold + self.enabled_subscribers = { + cam: ConfigSubscriber(f"config/enabled/{cam}", True) + for cam in config.cameras.keys() + if config.cameras[cam].enabled_in_config + } + if config.birdseye.layout.max_cameras: self.last_refresh_time = 0 @@ -380,8 +387,18 @@ class BirdsEyeFrameManager: if mode == BirdseyeModeEnum.objects and object_box_count > 0: return True - def update_frame(self, frame: np.ndarray): - """Update to a new frame for birdseye.""" + def _get_enabled_state(self, camera: str) -> bool: + """Fetch the latest enabled state for a camera from ZMQ.""" + _, config_data = self.enabled_subscribers[camera].check_for_update() + if config_data: + return config_data.enabled + return self.config.cameras[camera].enabled + + def update_frame(self, frame: Optional[np.ndarray] = None) -> bool: + """ + Update birdseye, optionally with a new frame. + When no frame is passed, check the layout and update for any disabled cameras. 
+ """ # determine how many cameras are tracking objects within the last inactivity_threshold seconds active_cameras: set[str] = set( @@ -389,11 +406,14 @@ class BirdsEyeFrameManager: cam for cam, cam_data in self.cameras.items() if self.config.cameras[cam].birdseye.enabled + and self.config.cameras[cam].enabled_in_config + and self._get_enabled_state(cam) and cam_data["last_active_frame"] > 0 and cam_data["current_frame_time"] - cam_data["last_active_frame"] < self.inactivity_threshold ] ) + logger.debug(f"Active cameras: {active_cameras}") max_cameras = self.config.birdseye.layout.max_cameras max_camera_refresh = False @@ -411,118 +431,125 @@ class BirdsEyeFrameManager: - self.cameras[active_camera]["last_active_frame"] ), ) - active_cameras = limited_active_cameras[ - : self.config.birdseye.layout.max_cameras - ] + active_cameras = limited_active_cameras[:max_cameras] max_camera_refresh = True self.last_refresh_time = now - # if there are no active cameras + # Track if the frame changes + frame_changed = False + + # If no active cameras and layout is already empty, no update needed if len(active_cameras) == 0: # if the layout is already cleared if len(self.camera_layout) == 0: return False # if the layout needs to be cleared - else: - self.camera_layout = [] - self.active_cameras = set() - self.clear_frame() - return True - - # check if we need to reset the layout because there is a different number of cameras - if len(self.active_cameras) - len(active_cameras) == 0: - if len(self.active_cameras) == 1 and self.active_cameras != active_cameras: - reset_layout = True - elif max_camera_refresh: - reset_layout = True - else: - reset_layout = False - else: - reset_layout = True - - # reset the layout if it needs to be different - if reset_layout: - logger.debug("Added new cameras, resetting layout...") + self.camera_layout = [] + self.active_cameras = set() self.clear_frame() - self.active_cameras = active_cameras - - # this also converts added_cameras from a set to a list since we need - # to pop elements in order - active_cameras_to_add = sorted( - active_cameras, - # sort cameras by order and by name if the order is the same - key=lambda active_camera: ( - self.config.cameras[active_camera].birdseye.order, - active_camera, - ), - ) - - if len(active_cameras) == 1: - # show single camera as fullscreen - camera = active_cameras_to_add[0] - camera_dims = self.cameras[camera]["dimensions"].copy() - scaled_width = int(self.canvas.height * camera_dims[0] / camera_dims[1]) - - # center camera view in canvas and ensure that it fits - if scaled_width < self.canvas.width: - coefficient = 1 - x_offset = int((self.canvas.width - scaled_width) / 2) + frame_changed = True + else: + # Determine if layout needs resetting + if len(self.active_cameras) - len(active_cameras) == 0: + if ( + len(self.active_cameras) == 1 + and self.active_cameras != active_cameras + ): + reset_layout = True + elif max_camera_refresh: + reset_layout = True else: - coefficient = self.canvas.width / scaled_width - x_offset = int( - (self.canvas.width - (scaled_width * coefficient)) / 2 - ) - - self.camera_layout = [ - [ - ( - camera, - ( - x_offset, - 0, - int(scaled_width * coefficient), - int(self.canvas.height * coefficient), - ), - ) - ] - ] + reset_layout = False else: - # calculate optimal layout - coefficient = self.canvas.get_coefficient(len(active_cameras)) - calculating = True + reset_layout = True - # decrease scaling coefficient until height of all cameras can fit into the birdseye canvas - while calculating: - if 
self.stop_event.is_set(): - return + if reset_layout: + logger.debug("Resetting Birdseye layout...") + self.clear_frame() + self.active_cameras = active_cameras - layout_candidate = self.calculate_layout( - active_cameras_to_add, - coefficient, + # this also converts added_cameras from a set to a list since we need + # to pop elements in order + active_cameras_to_add = sorted( + active_cameras, + # sort cameras by order and by name if the order is the same + key=lambda active_camera: ( + self.config.cameras[active_camera].birdseye.order, + active_camera, + ), + ) + if len(active_cameras) == 1: + # show single camera as fullscreen + camera = active_cameras_to_add[0] + camera_dims = self.cameras[camera]["dimensions"].copy() + scaled_width = int( + self.canvas.height * camera_dims[0] / camera_dims[1] ) - if not layout_candidate: - if coefficient < 10: - coefficient += 1 - continue - else: - logger.error("Error finding appropriate birdseye layout") + # center camera view in canvas and ensure that it fits + if scaled_width < self.canvas.width: + coefficient = 1 + x_offset = int((self.canvas.width - scaled_width) / 2) + else: + coefficient = self.canvas.width / scaled_width + x_offset = int( + (self.canvas.width - (scaled_width * coefficient)) / 2 + ) + + self.camera_layout = [ + [ + ( + camera, + ( + x_offset, + 0, + int(scaled_width * coefficient), + int(self.canvas.height * coefficient), + ), + ) + ] + ] + else: + # calculate optimal layout + coefficient = self.canvas.get_coefficient(len(active_cameras)) + calculating = True + + # decrease scaling coefficient until height of all cameras can fit into the birdseye canvas + while calculating: + if self.stop_event.is_set(): return - calculating = False - self.canvas.set_coefficient(len(active_cameras), coefficient) + layout_candidate = self.calculate_layout( + active_cameras_to_add, coefficient + ) - self.camera_layout = layout_candidate + if not layout_candidate: + if coefficient < 10: + coefficient += 1 + continue + else: + logger.error( + "Error finding appropriate birdseye layout" + ) + return + calculating = False + self.canvas.set_coefficient(len(active_cameras), coefficient) - for row in self.camera_layout: - for position in row: - self.copy_to_position( - position[1], - position[0], - self.cameras[position[0]]["current_frame"], - ) + self.camera_layout = layout_candidate + frame_changed = True - return True + # Draw the layout + for row in self.camera_layout: + for position in row: + src_frame = self.cameras[position[0]]["current_frame"] + if src_frame is None or src_frame.size == 0: + logger.debug(f"Skipping invalid frame for {position[0]}") + continue + self.copy_to_position(position[1], position[0], src_frame) + if frame is not None: # Frame presence indicates a potential change + frame_changed = True + + return frame_changed def calculate_layout( self, @@ -678,11 +705,8 @@ class BirdsEyeFrameManager: # don't process if birdseye is disabled for this camera camera_config = self.config.cameras[camera].birdseye - if not camera_config.enabled: - return False - # disabling birdseye is a little tricky - if not camera_config.enabled: + if not camera_config.enabled or not self._get_enabled_state(camera): # if we've rendered a frame (we have a value for last_active_frame) # then we need to set it to zero if self.cameras[camera]["last_active_frame"] > 0: @@ -716,6 +740,11 @@ class BirdsEyeFrameManager: return True return False + def stop(self): + """Clean up subscribers when stopping.""" + for subscriber in self.enabled_subscribers.values(): + 
subscriber.stop() + class Birdseye: def __init__( @@ -743,6 +772,7 @@ class Birdseye: self.birdseye_manager = BirdsEyeFrameManager(config, stop_event) self.config_subscriber = ConfigSubscriber("config/birdseye/") self.frame_manager = SharedMemoryFrameManager() + self.stop_event = stop_event if config.birdseye.restream: self.birdseye_buffer = self.frame_manager.create( @@ -794,5 +824,6 @@ class Birdseye: def stop(self) -> None: self.config_subscriber.stop() + self.birdseye_manager.stop() self.converter.join() self.broadcaster.join() diff --git a/frigate/output/output.py b/frigate/output/output.py index bb2d73511..9beb87250 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -17,6 +17,7 @@ from ws4py.server.wsgirefserver import ( ) from ws4py.server.wsgiutils import WebSocketWSGIApplication +from frigate.comms.config_updater import ConfigSubscriber from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.ws import WebSocket from frigate.config import FrigateConfig @@ -59,6 +60,12 @@ def output_frames( detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video) + enabled_subscribers = { + camera: ConfigSubscriber(f"config/enabled/{camera}", True) + for camera in config.cameras.keys() + if config.cameras[camera].enabled_in_config + } + jsmpeg_cameras: dict[str, JsmpegCamera] = {} birdseye: Optional[Birdseye] = None preview_recorders: dict[str, PreviewRecorder] = {} @@ -80,6 +87,13 @@ def output_frames( websocket_thread.start() + def get_enabled_state(camera: str) -> bool: + _, config_data = enabled_subscribers[camera].check_for_update() + if config_data: + return config_data.enabled + # default + return config.cameras[camera].enabled + while not stop_event.is_set(): (topic, data) = detection_subscriber.check_for_update(timeout=1) @@ -95,6 +109,9 @@ def output_frames( _, ) = data + if not get_enabled_state(camera): + continue + frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv) if frame is None: @@ -184,6 +201,9 @@ def output_frames( if birdseye is not None: birdseye.stop() + for subscriber in enabled_subscribers.values(): + subscriber.stop() + websocket_server.manager.close_all() websocket_server.manager.stop() websocket_server.manager.join() diff --git a/frigate/video.py b/frigate/video.py index 233cebb9e..69f6c1bfa 100755 --- a/frigate/video.py +++ b/frigate/video.py @@ -108,8 +108,20 @@ def capture_frames( frame_rate.start() skipped_eps = EventsPerSecond() skipped_eps.start() + config_subscriber = ConfigSubscriber(f"config/enabled/{config.name}", True) + + def get_enabled_state(): + """Fetch the latest enabled state from ZMQ.""" + _, config_data = config_subscriber.check_for_update() + if config_data: + return config_data.enabled + return config.enabled + + while not stop_event.is_set(): + if not get_enabled_state(): + logger.debug(f"Stopping capture thread for disabled {config.name}") + break - while True: fps.value = frame_rate.eps() skipped_fps.value = skipped_eps.eps() current_frame.value = datetime.datetime.now().timestamp() @@ -178,26 +190,37 @@ class CameraWatchdog(threading.Thread): self.stop_event = stop_event self.sleeptime = self.config.ffmpeg.retry_interval - def run(self): - self.start_ffmpeg_detect() + self.config_subscriber = ConfigSubscriber(f"config/enabled/{camera_name}", True) + self.was_enabled = self.config.enabled - for c in self.config.ffmpeg_cmds: - if "detect" in c["roles"]: - continue - logpipe = LogPipe( - f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}" 
- ) - self.ffmpeg_other_processes.append( - { - "cmd": c["cmd"], - "roles": c["roles"], - "logpipe": logpipe, - "process": start_or_restart_ffmpeg(c["cmd"], self.logger, logpipe), - } - ) + def _update_enabled_state(self) -> bool: + """Fetch the latest config and update enabled state.""" + _, config_data = self.config_subscriber.check_for_update() + if config_data: + enabled = config_data.enabled + return enabled + return self.was_enabled if self.was_enabled is not None else self.config.enabled + + def run(self): + if self._update_enabled_state(): + self.start_all_ffmpeg() time.sleep(self.sleeptime) while not self.stop_event.wait(self.sleeptime): + enabled = self._update_enabled_state() + if enabled != self.was_enabled: + if enabled: + self.logger.debug(f"Enabling camera {self.camera_name}") + self.start_all_ffmpeg() + else: + self.logger.debug(f"Disabling camera {self.camera_name}") + self.stop_all_ffmpeg() + self.was_enabled = enabled + continue + + if not enabled: + continue + now = datetime.datetime.now().timestamp() if not self.capture_thread.is_alive(): @@ -279,11 +302,9 @@ class CameraWatchdog(threading.Thread): p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"] ) - stop_ffmpeg(self.ffmpeg_detect_process, self.logger) - for p in self.ffmpeg_other_processes: - stop_ffmpeg(p["process"], self.logger) - p["logpipe"].close() + self.stop_all_ffmpeg() self.logpipe.close() + self.config_subscriber.stop() def start_ffmpeg_detect(self): ffmpeg_cmd = [ @@ -306,6 +327,43 @@ class CameraWatchdog(threading.Thread): ) self.capture_thread.start() + def start_all_ffmpeg(self): + """Start all ffmpeg processes (detection and others).""" + logger.debug(f"Starting all ffmpeg processes for {self.camera_name}") + self.start_ffmpeg_detect() + for c in self.config.ffmpeg_cmds: + if "detect" in c["roles"]: + continue + logpipe = LogPipe( + f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}" + ) + self.ffmpeg_other_processes.append( + { + "cmd": c["cmd"], + "roles": c["roles"], + "logpipe": logpipe, + "process": start_or_restart_ffmpeg(c["cmd"], self.logger, logpipe), + } + ) + + def stop_all_ffmpeg(self): + """Stop all ffmpeg processes (detection and others).""" + logger.debug(f"Stopping all ffmpeg processes for {self.camera_name}") + if self.capture_thread is not None and self.capture_thread.is_alive(): + self.capture_thread.join(timeout=5) + if self.capture_thread.is_alive(): + self.logger.warning( + f"Capture thread for {self.camera_name} did not stop gracefully." 
+ ) + if self.ffmpeg_detect_process is not None: + stop_ffmpeg(self.ffmpeg_detect_process, self.logger) + self.ffmpeg_detect_process = None + for p in self.ffmpeg_other_processes[:]: + if p["process"] is not None: + stop_ffmpeg(p["process"], self.logger) + p["logpipe"].close() + self.ffmpeg_other_processes.clear() + def get_latest_segment_datetime(self, latest_segment: datetime.datetime) -> int: """Checks if ffmpeg is still writing recording segments to cache.""" cache_files = sorted( @@ -539,7 +597,8 @@ def process_frames( exit_on_empty: bool = False, ): next_region_update = get_tomorrow_at_time(2) - config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}", True) + detect_config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}", True) + enabled_config_subscriber = ConfigSubscriber(f"config/enabled/{camera_name}", True) fps_tracker = EventsPerSecond() fps_tracker.start() @@ -549,9 +608,43 @@ def process_frames( region_min_size = get_min_region_size(model_config) + prev_enabled = None + while not stop_event.is_set(): + _, enabled_config = enabled_config_subscriber.check_for_update() + current_enabled = ( + enabled_config.enabled + if enabled_config + else (prev_enabled if prev_enabled is not None else True) + ) + if prev_enabled is None: + prev_enabled = current_enabled + + if prev_enabled and not current_enabled and camera_metrics.frame_queue.empty(): + logger.debug(f"Camera {camera_name} disabled, clearing tracked objects") + + # Clear norfair's dictionaries + object_tracker.tracked_objects.clear() + object_tracker.disappeared.clear() + object_tracker.stationary_box_history.clear() + object_tracker.positions.clear() + object_tracker.track_id_map.clear() + + # Clear internal norfair states + for trackers_by_type in object_tracker.trackers.values(): + for tracker in trackers_by_type.values(): + tracker.tracked_objects = [] + for tracker in object_tracker.default_tracker.values(): + tracker.tracked_objects = [] + + prev_enabled = current_enabled + + if not current_enabled: + time.sleep(0.1) + continue + # check for updated detect config - _, updated_detect_config = config_subscriber.check_for_update() + _, updated_detect_config = detect_config_subscriber.check_for_update() if updated_detect_config: detect_config = updated_detect_config @@ -845,4 +938,5 @@ def process_frames( motion_detector.stop() requestor.stop() - config_subscriber.stop() + detect_config_subscriber.stop() + enabled_config_subscriber.stop() diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx index 7ca9ae69d..27600993a 100644 --- a/web/src/api/ws.tsx +++ b/web/src/api/ws.tsx @@ -56,6 +56,7 @@ function useValue(): useValueReturn { const { record, detect, + enabled, snapshots, audio, notifications, @@ -67,6 +68,7 @@ function useValue(): useValueReturn { // @ts-expect-error we know this is correct state["config"]; cameraStates[`${name}/recordings/state`] = record ? "ON" : "OFF"; + cameraStates[`${name}/enabled/state`] = enabled ? "ON" : "OFF"; cameraStates[`${name}/detect/state`] = detect ? "ON" : "OFF"; cameraStates[`${name}/snapshots/state`] = snapshots ? "ON" : "OFF"; cameraStates[`${name}/audio/state`] = audio ? 
"ON" : "OFF"; @@ -164,6 +166,17 @@ export function useWs(watchTopic: string, publishTopic: string) { return { value, send }; } +export function useEnabledState(camera: string): { + payload: ToggleableSetting; + send: (payload: ToggleableSetting, retain?: boolean) => void; +} { + const { + value: { payload }, + send, + } = useWs(`${camera}/enabled/state`, `${camera}/enabled/set`); + return { payload: payload as ToggleableSetting, send }; +} + export function useDetectState(camera: string): { payload: ToggleableSetting; send: (payload: ToggleableSetting, retain?: boolean) => void; diff --git a/web/src/components/camera/CameraImage.tsx b/web/src/components/camera/CameraImage.tsx index ba35d643e..fe6586fcc 100644 --- a/web/src/components/camera/CameraImage.tsx +++ b/web/src/components/camera/CameraImage.tsx @@ -5,6 +5,7 @@ import ActivityIndicator from "../indicators/activity-indicator"; import { useResizeObserver } from "@/hooks/resize-observer"; import { isDesktop } from "react-device-detect"; import { cn } from "@/lib/utils"; +import { useEnabledState } from "@/api/ws"; type CameraImageProps = { className?: string; @@ -26,7 +27,8 @@ export default function CameraImage({ const imgRef = useRef(null); const { name } = config ? config.cameras[camera] : ""; - const enabled = config ? config.cameras[camera].enabled : "True"; + const { payload: enabledState } = useEnabledState(camera); + const enabled = enabledState === "ON" || enabledState === undefined; const [{ width: containerWidth, height: containerHeight }] = useResizeObserver(containerRef); @@ -96,9 +98,7 @@ export default function CameraImage({ loading="lazy" /> ) : ( -
- Camera is disabled in config, no stream or snapshot available! -
+
)} {!imageLoaded && enabled ? (
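The `CameraImage` hunk above is the first consumer of the new `useEnabledState` hook added in `ws.tsx`, and it treats a missing payload as enabled so dashboards do not blank out before the first websocket update arrives. A minimal sketch of the consumption pattern; `CameraEnabledBadge` is a hypothetical component, not part of this series:

```tsx
import { useEnabledState } from "@/api/ws";

// Hypothetical consumer: reads the retained {camera}/enabled/state payload
// and publishes to {camera}/enabled/set to flip it.
export function CameraEnabledBadge({ camera }: { camera: string }) {
  const { payload, send } = useEnabledState(camera);
  // Mirror CameraImage above: an undefined payload (no update yet) counts as enabled.
  const enabled = payload === "ON" || payload === undefined;

  return (
    <button onClick={() => send(enabled ? "OFF" : "ON")}>
      {enabled ? "Disable" : "Enable"} camera
    </button>
  );
}
```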
diff --git a/web/src/components/camera/ResizingCameraImage.tsx b/web/src/components/camera/ResizingCameraImage.tsx index 81545c625..fbb57677b 100644 --- a/web/src/components/camera/ResizingCameraImage.tsx +++ b/web/src/components/camera/ResizingCameraImage.tsx @@ -108,9 +108,7 @@ export default function CameraImage({ width={scaledWidth} /> ) : ( -
- Camera is disabled in config, no stream or snapshot available! -
+
Camera is disabled.
)} {!hasLoaded && enabled ? (
void; + disabled?: boolean; // New prop for disabling }; export default function CameraFeatureToggle({ @@ -35,18 +40,28 @@ export default function CameraFeatureToggle({ Icon, title, onClick, + disabled = false, // Default to false }: CameraFeatureToggleProps) { const content = (
); @@ -54,7 +69,7 @@ export default function CameraFeatureToggle({ if (isDesktop) { return ( - {content} + {content}

{title}

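The rendered markup of `CameraFeatureToggle` is elided above, but the intent of the new `disabled` prop is visible from its call sites later in the series: drop the click handler and dim the control. A standalone sketch of that gating, with class names and structure as assumptions rather than the actual markup:

```tsx
import { IconType } from "react-icons";
import { cn } from "@/lib/utils";

// Illustrative stand-in: when disabled, onClick is never attached,
// so the toggle cannot fire while its camera is turned off.
function ToggleButton(props: {
  Icon: IconType;
  title: string;
  isActive: boolean;
  disabled?: boolean;
  onClick?: () => void;
}) {
  const { Icon, title, isActive, disabled = false, onClick } = props;
  return (
    <div
      aria-label={title}
      aria-disabled={disabled}
      onClick={disabled ? undefined : onClick}
      className={cn(
        "flex flex-col items-center justify-center rounded-lg",
        disabled ? "cursor-not-allowed opacity-50" : "cursor-pointer",
        isActive ? "text-selected" : "text-secondary-foreground",
      )}
    >
      <Icon className="size-5" />
    </div>
  );
}
```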
diff --git a/web/src/components/menu/LiveContextMenu.tsx b/web/src/components/menu/LiveContextMenu.tsx index 969e647a0..9c775e0ac 100644 --- a/web/src/components/menu/LiveContextMenu.tsx +++ b/web/src/components/menu/LiveContextMenu.tsx @@ -39,7 +39,11 @@ import { import { cn } from "@/lib/utils"; import { useNavigate } from "react-router-dom"; import { formatUnixTimestampToDateTime } from "@/utils/dateUtil"; -import { useNotifications, useNotificationSuspend } from "@/api/ws"; +import { + useEnabledState, + useNotifications, + useNotificationSuspend, +} from "@/api/ws"; type LiveContextMenuProps = { className?: string; @@ -83,6 +87,11 @@ export default function LiveContextMenu({ }: LiveContextMenuProps) { const [showSettings, setShowSettings] = useState(false); + // camera enabled + + const { payload: enabledState, send: sendEnabled } = useEnabledState(camera); + const isEnabled = enabledState === "ON"; + // streaming settings const { allGroupsStreamingSettings, setAllGroupsStreamingSettings } = @@ -263,7 +272,7 @@ export default function LiveContextMenu({ onClick={handleVolumeIconClick} />
sendEnabled(isEnabled ? "OFF" : "ON")} + > +
+ {isEnabled ? "Disable" : "Enable"} Camera +
+
+ + + +
Mute All Cameras
- +
Unmute All Cameras
- +
{statsState ? "Hide" : "Show"} Stream Stats
- +
navigate(`/settings?page=debug&camera=${camera}`)} + onClick={ + isEnabled + ? () => navigate(`/settings?page=debug&camera=${camera}`) + : undefined + } >
Debug View
@@ -315,10 +339,10 @@ export default function LiveContextMenu({ {cameraGroup && cameraGroup !== "default" && ( <> - +
setShowSettings(true)} + onClick={isEnabled ? () => setShowSettings(true) : undefined} >
Streaming Settings
@@ -328,10 +352,10 @@ export default function LiveContextMenu({ {preferredLiveMode == "jsmpeg" && isRestreamed && ( <> - +
Reset
@@ -342,7 +366,7 @@ export default function LiveContextMenu({ <> - +
Notifications
@@ -382,10 +406,15 @@ export default function LiveContextMenu({ <> { - sendNotification("ON"); - sendNotificationSuspend(0); - }} + disabled={!isEnabled} + onClick={ + isEnabled + ? () => { + sendNotification("ON"); + sendNotificationSuspend(0); + } + : undefined + } >
{notificationState === "ON" ? ( @@ -405,36 +434,71 @@ export default function LiveContextMenu({ Suspend for:

- handleSuspend("5")}> + handleSuspend("5") : undefined + } + > 5 minutes handleSuspend("10")} + disabled={!isEnabled} + onClick={ + isEnabled + ? () => handleSuspend("10") + : undefined + } > 10 minutes handleSuspend("30")} + disabled={!isEnabled} + onClick={ + isEnabled + ? () => handleSuspend("30") + : undefined + } > 30 minutes handleSuspend("60")} + disabled={!isEnabled} + onClick={ + isEnabled + ? () => handleSuspend("60") + : undefined + } > 1 hour handleSuspend("840")} + disabled={!isEnabled} + onClick={ + isEnabled + ? () => handleSuspend("840") + : undefined + } > 12 hours handleSuspend("1440")} + disabled={!isEnabled} + onClick={ + isEnabled + ? () => handleSuspend("1440") + : undefined + } > 24 hours handleSuspend("off")} + disabled={!isEnabled} + onClick={ + isEnabled + ? () => handleSuspend("off") + : undefined + } > Until restart diff --git a/web/src/components/player/LivePlayer.tsx b/web/src/components/player/LivePlayer.tsx index 4bd751469..f2b0639a4 100644 --- a/web/src/components/player/LivePlayer.tsx +++ b/web/src/components/player/LivePlayer.tsx @@ -22,6 +22,7 @@ import { TbExclamationCircle } from "react-icons/tb"; import { TooltipPortal } from "@radix-ui/react-tooltip"; import { baseUrl } from "@/api/baseUrl"; import { PlayerStats } from "./PlayerStats"; +import { LuVideoOff } from "react-icons/lu"; type LivePlayerProps = { cameraRef?: (ref: HTMLDivElement | null) => void; @@ -86,8 +87,13 @@ export default function LivePlayer({ // camera activity - const { activeMotion, activeTracking, objects, offline } = - useCameraActivity(cameraConfig); + const { + enabled: cameraEnabled, + activeMotion, + activeTracking, + objects, + offline, + } = useCameraActivity(cameraConfig); const cameraActive = useMemo( () => @@ -191,12 +197,37 @@ export default function LivePlayer({ setLiveReady(true); }, []); + // enabled states + + const [isReEnabling, setIsReEnabling] = useState(false); + const prevCameraEnabledRef = useRef(cameraEnabled); + + useEffect(() => { + if (!prevCameraEnabledRef.current && cameraEnabled) { + // Camera enabled + setLiveReady(false); + setIsReEnabling(true); + setKey((prevKey) => prevKey + 1); + } else if (prevCameraEnabledRef.current && !cameraEnabled) { + // Camera disabled + setLiveReady(false); + setKey((prevKey) => prevKey + 1); + } + prevCameraEnabledRef.current = cameraEnabled; + }, [cameraEnabled]); + + useEffect(() => { + if (liveReady && isReEnabling) { + setIsReEnabling(false); + } + }, [liveReady, isReEnabling]); + if (!cameraConfig) { return ; } let player; - if (!autoLive || !streamName) { + if (!autoLive || !streamName || !cameraEnabled) { player = null; } else if (preferredLiveMode == "webrtc") { player = ( @@ -267,6 +298,22 @@ export default function LivePlayer({ player = ; } + // if (cameraConfig.name == "lpr") + // console.log( + // cameraConfig.name, + // "enabled", + // cameraEnabled, + // "prev enabled", + // prevCameraEnabledRef.current, + // "offline", + // offline, + // "show still", + // showStillWithoutActivity, + // "live ready", + // liveReady, + // player, + // ); + return (
- {((showStillWithoutActivity && !liveReady) || liveReady) && ( - <> -
-
- - )} + {cameraEnabled && + ((showStillWithoutActivity && !liveReady) || liveReady) && ( + <> +
+
+ + )} {player} - {!offline && !showStillWithoutActivity && !liveReady && ( - - )} + {cameraEnabled && + !offline && + (!showStillWithoutActivity || isReEnabling) && + !liveReady && } {((showStillWithoutActivity && !liveReady) || liveReady) && objects.length > 0 && ( @@ -344,7 +393,9 @@ export default function LivePlayer({
)} + {!cameraEnabled && ( +
+
+ +

+ Camera is disabled +

+
+
+ )} +
{autoLive && !offline && @@ -378,7 +440,7 @@ export default function LivePlayer({ ((showStillWithoutActivity && !liveReady) || liveReady) && ( )} - {offline && showStillWithoutActivity && ( + {((offline && showStillWithoutActivity) || !cameraEnabled) && ( diff --git a/web/src/components/settings/ZoneEditPane.tsx b/web/src/components/settings/ZoneEditPane.tsx index 247ae8991..c6c5ee474 100644 --- a/web/src/components/settings/ZoneEditPane.tsx +++ b/web/src/components/settings/ZoneEditPane.tsx @@ -68,7 +68,7 @@ export default function ZoneEditPane({ } return Object.values(config.cameras) - .filter((conf) => conf.ui.dashboard && conf.enabled) + .filter((conf) => conf.ui.dashboard && conf.enabled_in_config) .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order); }, [config]); diff --git a/web/src/hooks/use-camera-activity.ts b/web/src/hooks/use-camera-activity.ts index bbf70ba32..14a575224 100644 --- a/web/src/hooks/use-camera-activity.ts +++ b/web/src/hooks/use-camera-activity.ts @@ -1,4 +1,5 @@ import { + useEnabledState, useFrigateEvents, useInitialCameraState, useMotionActivity, @@ -15,6 +16,7 @@ import useSWR from "swr"; import { getAttributeLabels } from "@/utils/iconUtil"; type useCameraActivityReturn = { + enabled: boolean; activeTracking: boolean; activeMotion: boolean; objects: ObjectType[]; @@ -56,6 +58,7 @@ export function useCameraActivity( [objects], ); + const { payload: cameraEnabled } = useEnabledState(camera.name); const { payload: detectingMotion } = useMotionActivity(camera.name); const { payload: event } = useFrigateEvents(); const updatedEvent = useDeepMemo(event); @@ -145,12 +148,17 @@ export function useCameraActivity( return cameras[camera.name].camera_fps == 0 && stats["service"].uptime > 60; }, [camera, stats]); + const isCameraEnabled = cameraEnabled === "ON"; + return { - activeTracking: hasActiveObjects, - activeMotion: detectingMotion - ? detectingMotion === "ON" - : updatedCameraState?.motion === true, - objects, + enabled: isCameraEnabled, + activeTracking: isCameraEnabled ? hasActiveObjects : false, + activeMotion: isCameraEnabled + ? detectingMotion + ? detectingMotion === "ON" + : updatedCameraState?.motion === true + : false, + objects: isCameraEnabled ? 
objects : [], offline, }; } diff --git a/web/src/pages/Live.tsx b/web/src/pages/Live.tsx index 97e565ef1..016f3cba1 100644 --- a/web/src/pages/Live.tsx +++ b/web/src/pages/Live.tsx @@ -101,12 +101,14 @@ function Live() { ) { const group = config.camera_groups[cameraGroup]; return Object.values(config.cameras) - .filter((conf) => conf.enabled && group.cameras.includes(conf.name)) + .filter( + (conf) => conf.enabled_in_config && group.cameras.includes(conf.name), + ) .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order); } return Object.values(config.cameras) - .filter((conf) => conf.ui.dashboard && conf.enabled) + .filter((conf) => conf.ui.dashboard && conf.enabled_in_config) .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order); }, [config, cameraGroup]); diff --git a/web/src/pages/Settings.tsx b/web/src/pages/Settings.tsx index 6eeb5bcc3..33f854ba3 100644 --- a/web/src/pages/Settings.tsx +++ b/web/src/pages/Settings.tsx @@ -39,6 +39,7 @@ import SearchSettingsView from "@/views/settings/SearchSettingsView"; import UiSettingsView from "@/views/settings/UiSettingsView"; import { useSearchEffect } from "@/hooks/use-overlay-state"; import { useSearchParams } from "react-router-dom"; +import { useInitialCameraState } from "@/api/ws"; const allSettingsViews = [ "UI settings", @@ -71,12 +72,33 @@ export default function Settings() { } return Object.values(config.cameras) - .filter((conf) => conf.ui.dashboard && conf.enabled) + .filter((conf) => conf.ui.dashboard && conf.enabled_in_config) .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order); }, [config]); const [selectedCamera, setSelectedCamera] = useState(""); + const { payload: allCameraStates } = useInitialCameraState( + cameras.length > 0 ? cameras[0].name : "", + true, + ); + + const cameraEnabledStates = useMemo(() => { + const states: Record = {}; + if (allCameraStates) { + Object.entries(allCameraStates).forEach(([camName, state]) => { + states[camName] = state.config?.enabled ?? false; + }); + } + // fallback to config if ws data isn’t available yet + cameras.forEach((cam) => { + if (!(cam.name in states)) { + states[cam.name] = cam.enabled; + } + }); + return states; + }, [allCameraStates, cameras]); + const [filterZoneMask, setFilterZoneMask] = useState(); const handleDialog = useCallback( @@ -91,10 +113,25 @@ export default function Settings() { ); useEffect(() => { - if (cameras.length > 0 && selectedCamera === "") { - setSelectedCamera(cameras[0].name); + if (cameras.length > 0) { + if (!selectedCamera) { + // Set to first enabled camera initially if no selection + const firstEnabledCamera = + cameras.find((cam) => cameraEnabledStates[cam.name]) || cameras[0]; + setSelectedCamera(firstEnabledCamera.name); + } else if ( + !cameraEnabledStates[selectedCamera] && + page !== "camera settings" + ) { + // Switch to first enabled camera if current one is disabled, unless on "camera settings" page + const firstEnabledCamera = + cameras.find((cam) => cameraEnabledStates[cam.name]) || cameras[0]; + if (firstEnabledCamera.name !== selectedCamera) { + setSelectedCamera(firstEnabledCamera.name); + } + } } - }, [cameras, selectedCamera]); + }, [cameras, selectedCamera, cameraEnabledStates, page]); useEffect(() => { if (tabsRef.current) { @@ -177,6 +214,8 @@ export default function Settings() { allCameras={cameras} selectedCamera={selectedCamera} setSelectedCamera={setSelectedCamera} + cameraEnabledStates={cameraEnabledStates} + currentPage={page} />
)} @@ -244,17 +283,21 @@ type CameraSelectButtonProps = { allCameras: CameraConfig[]; selectedCamera: string; setSelectedCamera: React.Dispatch>; + cameraEnabledStates: Record; + currentPage: SettingsType; }; function CameraSelectButton({ allCameras, selectedCamera, setSelectedCamera, + cameraEnabledStates, + currentPage, }: CameraSelectButtonProps) { const [open, setOpen] = useState(false); if (!allCameras.length) { - return; + return null; } const trigger = ( @@ -283,19 +326,24 @@ function CameraSelectButton({ )}
- {allCameras.map((item) => ( - { - if (isChecked) { - setSelectedCamera(item.name); - setOpen(false); - } - }} - /> - ))} + {allCameras.map((item) => { + const isEnabled = cameraEnabledStates[item.name]; + const isCameraSettingsPage = currentPage === "camera settings"; + return ( + { + if (isChecked && (isEnabled || isCameraSettingsPage)) { + setSelectedCamera(item.name); + setOpen(false); + } + }} + disabled={!isEnabled && !isCameraSettingsPage} + /> + ); + })}
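The `Settings` page above leans on a distinction used throughout this series: `enabled_in_config` is the static flag parsed from the config file, while the runtime enabled state arrives over the websocket and can flip at any time. A condensed sketch of the two checks, with illustrative helper names:

```ts
import { CameraConfig, FrigateConfig } from "@/types/frigateConfig";

// Static: the camera is defined and enabled in the config file, so it
// should stay listed in menus even while it is toggled off at runtime.
export function listDashboardCameras(config: FrigateConfig): CameraConfig[] {
  return Object.values(config.cameras)
    .filter((conf) => conf.ui.dashboard && conf.enabled_in_config)
    .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order);
}

// Dynamic: prefer the live websocket value, falling back to the config
// flag until the first update arrives, as cameraEnabledStates does above.
export function isCameraEnabled(
  config: FrigateConfig,
  states: Record<string, boolean>,
  name: string,
): boolean {
  return states[name] ?? config.cameras[name].enabled;
}
```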
diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 4ec4de853..e468c534f 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -57,6 +57,7 @@ export interface CameraConfig { width: number; }; enabled: boolean; + enabled_in_config: boolean; ffmpeg: { global_args: string[]; hwaccel_args: string; diff --git a/web/src/types/ws.ts b/web/src/types/ws.ts index 397b213f6..2590d45a7 100644 --- a/web/src/types/ws.ts +++ b/web/src/types/ws.ts @@ -52,6 +52,7 @@ export type ObjectType = { }; export interface FrigateCameraState { + enabled: boolean; motion: boolean; objects: ObjectType[]; } diff --git a/web/src/views/live/LiveCameraView.tsx b/web/src/views/live/LiveCameraView.tsx index ccf06de7b..9b45c5a60 100644 --- a/web/src/views/live/LiveCameraView.tsx +++ b/web/src/views/live/LiveCameraView.tsx @@ -2,6 +2,7 @@ import { useAudioState, useAutotrackingState, useDetectState, + useEnabledState, usePtzCommand, useRecordingsState, useSnapshotsState, @@ -82,6 +83,8 @@ import { LuHistory, LuInfo, LuPictureInPicture, + LuPower, + LuPowerOff, LuVideo, LuVideoOff, LuX, @@ -185,6 +188,10 @@ export default function LiveCameraView({ ); }, [cameraMetadata]); + // camera enabled state + const { payload: enabledState } = useEnabledState(camera.name); + const cameraEnabled = enabledState === "ON"; + // click overlay for ptzs const [clickOverlay, setClickOverlay] = useState(false); @@ -470,6 +477,7 @@ export default function LiveCameraView({ setPip(false); } }} + disabled={!cameraEnabled} /> )} {supports2WayTalk && ( @@ -481,11 +489,11 @@ export default function LiveCameraView({ title={`${mic ? "Disable" : "Enable"} Two Way Talk`} onClick={() => { setMic(!mic); - // Turn on audio when enabling the mic if audio is currently off if (!mic && !audio) { setAudio(true); } }} + disabled={!cameraEnabled} /> )} {supportsAudioOutput && preferredLiveMode != "jsmpeg" && ( @@ -496,6 +504,7 @@ export default function LiveCameraView({ isActive={audio ?? false} title={`${audio ? "Disable" : "Enable"} Camera Audio`} onClick={() => setAudio(!audio)} + disabled={!cameraEnabled} /> )}
@@ -913,6 +923,7 @@ type FrigateCameraFeaturesProps = { setLowBandwidth: React.Dispatch>; supportsAudioOutput: boolean; supports2WayTalk: boolean; + cameraEnabled: boolean; }; function FrigateCameraFeatures({ camera, @@ -931,10 +942,14 @@ function FrigateCameraFeatures({ setLowBandwidth, supportsAudioOutput, supports2WayTalk, + cameraEnabled, }: FrigateCameraFeaturesProps) { const { payload: detectState, send: sendDetect } = useDetectState( camera.name, ); + const { payload: enabledState, send: sendEnabled } = useEnabledState( + camera.name, + ); const { payload: recordState, send: sendRecord } = useRecordingsState( camera.name, ); @@ -1043,6 +1058,15 @@ function FrigateCameraFeatures({ if (isDesktop || isTablet) { return ( <> + sendEnabled(enabledState == "ON" ? "OFF" : "ON")} + disabled={false} + /> sendDetect(detectState == "ON" ? "OFF" : "ON")} + disabled={!cameraEnabled} /> sendRecord(recordState == "ON" ? "OFF" : "ON")} + disabled={!cameraEnabled} /> sendSnapshot(snapshotState == "ON" ? "OFF" : "ON")} + disabled={!cameraEnabled} /> {audioDetectEnabled && ( sendAudio(audioState == "ON" ? "OFF" : "ON")} + disabled={!cameraEnabled} /> )} {autotrackingEnabled && ( @@ -1087,6 +1115,7 @@ function FrigateCameraFeatures({ onClick={() => sendAutotracking(autotrackingState == "ON" ? "OFF" : "ON") } + disabled={!cameraEnabled} /> )} diff --git a/web/src/views/settings/CameraSettingsView.tsx b/web/src/views/settings/CameraSettingsView.tsx index fa9d0ba58..e2c1ca563 100644 --- a/web/src/views/settings/CameraSettingsView.tsx +++ b/web/src/views/settings/CameraSettingsView.tsx @@ -29,7 +29,7 @@ import { MdCircle } from "react-icons/md"; import { cn } from "@/lib/utils"; import { Switch } from "@/components/ui/switch"; import { Label } from "@/components/ui/label"; -import { useAlertsState, useDetectionsState } from "@/api/ws"; +import { useAlertsState, useDetectionsState, useEnabledState } from "@/api/ws"; type CameraSettingsViewProps = { selectedCamera: string; @@ -108,6 +108,8 @@ export default function CameraSettingsView({ const watchedAlertsZones = form.watch("alerts_zones"); const watchedDetectionsZones = form.watch("detections_zones"); + const { payload: enabledState, send: sendEnabled } = + useEnabledState(selectedCamera); const { payload: alertsState, send: sendAlerts } = useAlertsState(selectedCamera); const { payload: detectionsState, send: sendDetections } = @@ -252,6 +254,31 @@ export default function CameraSettingsView({ + + Streams + + +
+ { + sendEnabled(isChecked ? "ON" : "OFF"); + }} + /> +
+ +
+
+
+ Disabling a camera completely stops Frigate's processing of this + camera's streams. Detection, recording, and debugging will be + unavailable. +
Note: This does not disable go2rtc restreams. +
+ + Review diff --git a/web/src/views/settings/NotificationsSettingsView.tsx b/web/src/views/settings/NotificationsSettingsView.tsx index edae6ba28..fcda4adb1 100644 --- a/web/src/views/settings/NotificationsSettingsView.tsx +++ b/web/src/views/settings/NotificationsSettingsView.tsx @@ -80,7 +80,7 @@ export default function NotificationView({ return Object.values(config.cameras) .filter( (conf) => - conf.enabled && + conf.enabled_in_config && conf.notifications && conf.notifications.enabled_in_config, ) From f3765bc391eb629b049fc494544a9a1dc8ce4120 Mon Sep 17 00:00:00 2001 From: leccelecce <24962424+leccelecce@users.noreply.github.com> Date: Mon, 3 Mar 2025 17:01:02 +0000 Subject: [PATCH 07/12] GenAI minor refactor (#16916) --- frigate/embeddings/maintainer.py | 200 +++++++++++++++---------------- 1 file changed, 96 insertions(+), 104 deletions(-) diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index c9b6062c9..dfaed532e 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -293,6 +293,7 @@ class EmbeddingMaintainer(threading.Thread): # Embed the thumbnail self._embed_thumbnail(event_id, thumbnail) + # Run GenAI if ( camera_config.genai.enabled and self.genai_client is not None @@ -306,82 +307,7 @@ class EmbeddingMaintainer(threading.Thread): or set(event.zones) & set(camera_config.genai.required_zones) ) ): - if event.has_snapshot and camera_config.genai.use_snapshot: - with open( - os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"), - "rb", - ) as image_file: - snapshot_image = image_file.read() - - img = cv2.imdecode( - np.frombuffer(snapshot_image, dtype=np.int8), - cv2.IMREAD_COLOR, - ) - - # crop snapshot based on region before sending off to genai - height, width = img.shape[:2] - x1_rel, y1_rel, width_rel, height_rel = event.data["region"] - - x1, y1 = int(x1_rel * width), int(y1_rel * height) - cropped_image = img[ - y1 : y1 + int(height_rel * height), - x1 : x1 + int(width_rel * width), - ] - - _, buffer = cv2.imencode(".jpg", cropped_image) - snapshot_image = buffer.tobytes() - - num_thumbnails = len(self.tracked_events.get(event_id, [])) - - embed_image = ( - [snapshot_image] - if event.has_snapshot and camera_config.genai.use_snapshot - else ( - [ - data["thumbnail"] - for data in self.tracked_events[event_id] - ] - if num_thumbnails > 0 - else [thumbnail] - ) - ) - - if camera_config.genai.debug_save_thumbnails and num_thumbnails > 0: - logger.debug( - f"Saving {num_thumbnails} thumbnails for event {event.id}" - ) - - Path( - os.path.join(CLIPS_DIR, f"genai-requests/{event.id}") - ).mkdir(parents=True, exist_ok=True) - - for idx, data in enumerate(self.tracked_events[event_id], 1): - jpg_bytes: bytes = data["thumbnail"] - - if jpg_bytes is None: - logger.warning( - f"Unable to save thumbnail {idx} for {event.id}." - ) - else: - with open( - os.path.join( - CLIPS_DIR, - f"genai-requests/{event.id}/{idx}.jpg", - ), - "wb", - ) as j: - j.write(jpg_bytes) - - # Generate the description. Call happens in a thread since it is network bound. 
- threading.Thread( - target=self._embed_description, - name=f"_embed_description_{event.id}", - daemon=True, - args=( - event, - embed_image, - ), - ).start() + self._process_genai_description(event, camera_config, thumbnail) # Delete tracked events based on the event_id if event_id in self.tracked_events: @@ -440,7 +366,58 @@ class EmbeddingMaintainer(threading.Thread): self.embeddings.embed_thumbnail(event_id, thumbnail) - def _embed_description(self, event: Event, thumbnails: list[bytes]) -> None: + def _process_genai_description(self, event, camera_config, thumbnail) -> None: + if event.has_snapshot and camera_config.genai.use_snapshot: + snapshot_image = self._read_and_crop_snapshot(event, camera_config) + if not snapshot_image: + return + + num_thumbnails = len(self.tracked_events.get(event.id, [])) + + embed_image = ( + [snapshot_image] + if event.has_snapshot and camera_config.genai.use_snapshot + else ( + [data["thumbnail"] for data in self.tracked_events[event.id]] + if num_thumbnails > 0 + else [thumbnail] + ) + ) + + if camera_config.genai.debug_save_thumbnails and num_thumbnails > 0: + logger.debug(f"Saving {num_thumbnails} thumbnails for event {event.id}") + + Path(os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")).mkdir( + parents=True, exist_ok=True + ) + + for idx, data in enumerate(self.tracked_events[event.id], 1): + jpg_bytes: bytes = data["thumbnail"] + + if jpg_bytes is None: + logger.warning(f"Unable to save thumbnail {idx} for {event.id}.") + else: + with open( + os.path.join( + CLIPS_DIR, + f"genai-requests/{event.id}/{idx}.jpg", + ), + "wb", + ) as j: + j.write(jpg_bytes) + + # Generate the description. Call happens in a thread since it is network bound. + threading.Thread( + target=self._genai_embed_description, + name=f"_genai_embed_description_{event.id}", + daemon=True, + args=( + event, + embed_image, + ), + ).start() + + def _genai_embed_description(self, event: Event, thumbnails: list[bytes]) -> None: """Embed the description for an event.""" camera_config = self.config.cameras[event.camera] @@ -473,6 +450,45 @@ class EmbeddingMaintainer(threading.Thread): description, ) + def _read_and_crop_snapshot(self, event: Event, camera_config) -> bytes | None: + """Read, decode, and crop the snapshot image.""" + + snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg") + + if not os.path.isfile(snapshot_file): + logger.error( + f"Cannot load snapshot for {event.id}, file not found: {snapshot_file}" + ) + return None + + try: + with open(snapshot_file, "rb") as image_file: + snapshot_image = image_file.read() + + img = cv2.imdecode( + np.frombuffer(snapshot_image, dtype=np.int8), + cv2.IMREAD_COLOR, + ) + + # Crop snapshot based on region + # provide full image if region doesn't exist (manual events) + height, width = img.shape[:2] + x1_rel, y1_rel, width_rel, height_rel = event.data.get( + "region", [0, 0, 1, 1] + ) + x1, y1 = int(x1_rel * width), int(y1_rel * height) + + cropped_image = img[ + y1 : y1 + int(height_rel * height), + x1 : x1 + int(width_rel * width), + ] + + _, buffer = cv2.imencode(".jpg", cropped_image) + + return buffer.tobytes() + except Exception: + return None + def handle_regenerate_description(self, event_id: str, source: str) -> None: try: event: Event = Event.get(Event.id == event_id) @@ -492,34 +508,10 @@ class EmbeddingMaintainer(threading.Thread): ) if event.has_snapshot and source == "snapshot": - snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg") - - if not os.path.isfile(snapshot_file): - 
logger.error( - f"Cannot regenerate description for {event.id}, snapshot file not found: {snapshot_file}" - ) + snapshot_image = self._read_and_crop_snapshot(event, camera_config) + if not snapshot_image: return - with open(snapshot_file, "rb") as image_file: - snapshot_image = image_file.read() - img = cv2.imdecode( - np.frombuffer(snapshot_image, dtype=np.int8), cv2.IMREAD_COLOR - ) - - # crop snapshot based on region before sending off to genai - # provide full image if region doesn't exist (manual events) - region = event.data.get("region", [0, 0, 1, 1]) - height, width = img.shape[:2] - x1_rel, y1_rel, width_rel, height_rel = region - - x1, y1 = int(x1_rel * width), int(y1_rel * height) - cropped_image = img[ - y1 : y1 + int(height_rel * height), x1 : x1 + int(width_rel * width) - ] - - _, buffer = cv2.imencode(".jpg", cropped_image) - snapshot_image = buffer.tobytes() - embed_image = ( [snapshot_image] if event.has_snapshot and source == "snapshot" @@ -530,4 +522,4 @@ class EmbeddingMaintainer(threading.Thread): ) ) - self._embed_description(event, embed_image) + self._genai_embed_description(event, embed_image) From 180b0af3c9c28172e8401123961258079ef96393 Mon Sep 17 00:00:00 2001 From: D34DC3N73R Date: Mon, 3 Mar 2025 11:53:24 -0800 Subject: [PATCH 08/12] Adapt openai.py to work with xAI (#16903) * Adapt openai.py to work with xAI It appears xAI is a bit more strict in regards to how the prompt is sent. This changes the prompt to be a dictionary with `"type": "text"` which works with OpenAI and xAI. * Adapt openai.py to work with xAI add "detail": "low" * Adapt openai.py to work with xAI Apply Ruff formatting and linting fixes --- frigate/genai/openai.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/frigate/genai/openai.py b/frigate/genai/openai.py index 4568905a3..4b1926099 100644 --- a/frigate/genai/openai.py +++ b/frigate/genai/openai.py @@ -26,23 +26,30 @@ class OpenAIClient(GenAIClient): def _send(self, prompt: str, images: list[bytes]) -> Optional[str]: """Submit a request to OpenAI.""" encoded_images = [base64.b64encode(image).decode("utf-8") for image in images] + messages_content = [] + for image in encoded_images: + messages_content.append( + { + "type": "image_url", + "image_url": { + "url": f"data:image/jpeg;base64,{image}", + "detail": "low", + }, + } + ) + messages_content.append( + { + "type": "text", + "text": prompt, + } + ) try: result = self.provider.chat.completions.create( model=self.genai_config.model, messages=[ { "role": "user", - "content": [ - { - "type": "image_url", - "image_url": { - "url": f"data:image/jpeg;base64,{image}", - "detail": "low", - }, - } - for image in encoded_images - ] - + [prompt], + "content": messages_content, }, ], timeout=self.timeout, From 2946c935eea3d492493278f621000d0806594920 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 3 Mar 2025 14:05:49 -0700 Subject: [PATCH 09/12] Disabled camera output (#16920) * Fix live cameras not showing on refresh * Fix live dashboard when birdseye is added * Handle cameras that are offline / disabled * Use black instead of green frame * Fix missing mqtt topics --- frigate/comms/mqtt.py | 6 ++ frigate/output/birdseye.py | 42 +++++++++----- frigate/output/output.py | 66 ++++++++++++++++------ frigate/util/image.py | 16 ++++++ web/src/components/player/LivePlayer.tsx | 2 +- web/src/views/live/DraggableGridLayout.tsx | 10 ++-- web/src/views/live/LiveDashboardView.tsx | 10 ++-- 7 files changed, 110 insertions(+), 42 deletions(-) diff --git 
a/frigate/comms/mqtt.py b/frigate/comms/mqtt.py index 9e11a0af1..316813518 100644 --- a/frigate/comms/mqtt.py +++ b/frigate/comms/mqtt.py @@ -43,6 +43,11 @@ class MqttClient(Communicator): # type: ignore[misc] def _set_initial_topics(self) -> None: """Set initial state topics.""" for camera_name, camera in self.config.cameras.items(): + self.publish( + f"{camera_name}/enabled/state", + "ON" if camera.enabled_in_config else "OFF", + retain=True, + ) self.publish( f"{camera_name}/recordings/state", "ON" if camera.record.enabled_in_config else "OFF", @@ -196,6 +201,7 @@ class MqttClient(Communicator): # type: ignore[misc] # register callbacks callback_types = [ + "enabled", "recordings", "snapshots", "detect", diff --git a/frigate/output/birdseye.py b/frigate/output/birdseye.py index 3d036e9d5..cd4aa26ec 100644 --- a/frigate/output/birdseye.py +++ b/frigate/output/birdseye.py @@ -390,8 +390,11 @@ class BirdsEyeFrameManager: def _get_enabled_state(self, camera: str) -> bool: """Fetch the latest enabled state for a camera from ZMQ.""" _, config_data = self.enabled_subscribers[camera].check_for_update() + if config_data: + self.config.cameras[camera].enabled = config_data.enabled return config_data.enabled + return self.config.cameras[camera].enabled def update_frame(self, frame: Optional[np.ndarray] = None) -> bool: @@ -704,15 +707,17 @@ class BirdsEyeFrameManager: ) -> bool: # don't process if birdseye is disabled for this camera camera_config = self.config.cameras[camera].birdseye + force_update = False # disabling birdseye is a little tricky - if not camera_config.enabled or not self._get_enabled_state(camera): + if not self._get_enabled_state(camera): # if we've rendered a frame (we have a value for last_active_frame) # then we need to set it to zero if self.cameras[camera]["last_active_frame"] > 0: self.cameras[camera]["last_active_frame"] = 0 - - return False + force_update = True + else: + return False # update the last active frame for the camera self.cameras[camera]["current_frame"] = frame.copy() @@ -723,7 +728,7 @@ class BirdsEyeFrameManager: now = datetime.datetime.now().timestamp() # limit output to 10 fps - if (now - self.last_output_time) < 1 / 10: + if not force_update and (now - self.last_output_time) < 1 / 10: return False try: @@ -735,7 +740,7 @@ class BirdsEyeFrameManager: print(traceback.format_exc()) # if the frame was updated or the fps is too low, send frame - if updated_frame or (now - self.last_output_time) > 1: + if force_update or updated_frame or (now - self.last_output_time) > 1: self.last_output_time = now return True return False @@ -783,6 +788,22 @@ class Birdseye: self.converter.start() self.broadcaster.start() + def __send_new_frame(self) -> None: + frame_bytes = self.birdseye_manager.frame.tobytes() + + if self.config.birdseye.restream: + self.birdseye_buffer[:] = frame_bytes + + try: + self.input.put_nowait(frame_bytes) + except queue.Full: + # drop frames if queue is full + pass + + def all_cameras_disabled(self) -> None: + self.birdseye_manager.clear_frame() + self.__send_new_frame() + def write_data( self, camera: str, @@ -811,16 +832,7 @@ class Birdseye: frame_time, frame, ): - frame_bytes = self.birdseye_manager.frame.tobytes() - - if self.config.birdseye.restream: - self.birdseye_buffer[:] = frame_bytes - - try: - self.input.put_nowait(frame_bytes) - except queue.Full: - # drop frames if queue is full - pass + self.__send_new_frame() def stop(self) -> None: self.config_subscriber.stop() diff --git a/frigate/output/output.py b/frigate/output/output.py 
index 9beb87250..e0e64e298 100644 --- a/frigate/output/output.py +++ b/frigate/output/output.py @@ -1,12 +1,12 @@ """Handle outputting raw frigate frames""" +import datetime import logging import multiprocessing as mp import os import shutil import signal import threading -from typing import Optional from wsgiref.simple_server import make_server from setproctitle import setproctitle @@ -25,11 +25,43 @@ from frigate.const import CACHE_DIR, CLIPS_DIR from frigate.output.birdseye import Birdseye from frigate.output.camera import JsmpegCamera from frigate.output.preview import PreviewRecorder -from frigate.util.image import SharedMemoryFrameManager +from frigate.util.image import SharedMemoryFrameManager, get_blank_yuv_frame logger = logging.getLogger(__name__) +def check_disabled_camera_update( + config: FrigateConfig, + birdseye: Birdseye | None, + previews: dict[str, PreviewRecorder], + write_times: dict[str, float], +) -> None: + """Check if camera is disabled / offline and needs an update.""" + now = datetime.datetime.now().timestamp() + has_enabled_camera = False + + for camera, last_update in write_times.items(): + if config.cameras[camera].enabled: + has_enabled_camera = True + + if now - last_update > 1: + # last camera update was more than one second ago + # need to send empty data to updaters because current + # frame is now out of date + frame = get_blank_yuv_frame( + config.cameras[camera].detect.width, + config.cameras[camera].detect.height, + ) + + if birdseye: + birdseye.write_data(camera, [], [], now, frame) + + previews[camera].write_data([], [], now, frame) + + if not has_enabled_camera and birdseye: + birdseye.all_cameras_disabled() + + def output_frames( config: FrigateConfig, ): @@ -67,10 +99,11 @@ def output_frames( } jsmpeg_cameras: dict[str, JsmpegCamera] = {} - birdseye: Optional[Birdseye] = None + birdseye: Birdseye | None = None preview_recorders: dict[str, PreviewRecorder] = {} preview_write_times: dict[str, float] = {} failed_frame_requests: dict[str, int] = {} + last_disabled_cam_check = datetime.datetime.now().timestamp() move_preview_frames("cache") @@ -89,13 +122,23 @@ def output_frames( def get_enabled_state(camera: str) -> bool: _, config_data = enabled_subscribers[camera].check_for_update() + if config_data: + config.cameras[camera].enabled = config_data.enabled return config_data.enabled - # default + return config.cameras[camera].enabled while not stop_event.is_set(): (topic, data) = detection_subscriber.check_for_update(timeout=1) + now = datetime.datetime.now().timestamp() + + if now - last_disabled_cam_check > 5: + # check disabled cameras every 5 seconds + last_disabled_cam_check = now + check_disabled_camera_update( + config, birdseye, preview_recorders, preview_write_times + ) if not topic: continue @@ -151,23 +194,10 @@ def output_frames( ) # send frames for low fps recording - generated_preview = preview_recorders[camera].write_data( + preview_recorders[camera].write_data( current_tracked_objects, motion_boxes, frame_time, frame ) preview_write_times[camera] = frame_time - - # if another camera generated a preview, - # check for any cameras that are currently offline - # and need to generate a preview - if generated_preview: - logger.debug( - "Checking for offline cameras because another camera generated a preview." 
- ) - for camera, time in preview_write_times.copy().items(): - if time != 0 and frame_time - time > 10: - preview_recorders[camera].flag_offline(frame_time) - preview_write_times[camera] = frame_time - frame_manager.close(frame_name) move_preview_frames("clips") diff --git a/frigate/util/image.py b/frigate/util/image.py index 7e4915821..20806372c 100644 --- a/frigate/util/image.py +++ b/frigate/util/image.py @@ -632,6 +632,22 @@ def copy_yuv_to_position( ) +def get_blank_yuv_frame(width: int, height: int) -> np.ndarray: + """Creates a black YUV 4:2:0 frame.""" + yuv_height = height * 3 // 2 + yuv_frame = np.zeros((yuv_height, width), dtype=np.uint8) + + uv_height = height // 2 + + # The U and V planes are stored after the Y plane. + u_start = height # U plane starts right after Y plane + v_start = u_start + uv_height // 2 # V plane starts after U plane + yuv_frame[u_start : u_start + uv_height, :width] = 128 + yuv_frame[v_start : v_start + uv_height, :width] = 128 + + return yuv_frame + + def yuv_region_2_yuv(frame, region): try: # TODO: does this copy the numpy array? diff --git a/web/src/components/player/LivePlayer.tsx b/web/src/components/player/LivePlayer.tsx index f2b0639a4..913373774 100644 --- a/web/src/components/player/LivePlayer.tsx +++ b/web/src/components/player/LivePlayer.tsx @@ -200,7 +200,7 @@ export default function LivePlayer({ // enabled states const [isReEnabling, setIsReEnabling] = useState(false); - const prevCameraEnabledRef = useRef(cameraEnabled); + const prevCameraEnabledRef = useRef(cameraEnabled ?? true); useEffect(() => { if (!prevCameraEnabledRef.current && cameraEnabled) { diff --git a/web/src/views/live/DraggableGridLayout.tsx b/web/src/views/live/DraggableGridLayout.tsx index 3b85de4b3..d0da3e5ac 100644 --- a/web/src/views/live/DraggableGridLayout.tsx +++ b/web/src/views/live/DraggableGridLayout.tsx @@ -396,10 +396,12 @@ export default function DraggableGridLayout({ const initialVolumeStates: VolumeState = {}; Object.entries(allGroupsStreamingSettings).forEach(([_, groupSettings]) => { - Object.entries(groupSettings).forEach(([camera, cameraSettings]) => { - initialAudioStates[camera] = cameraSettings.playAudio ?? false; - initialVolumeStates[camera] = cameraSettings.volume ?? 1; - }); + if (groupSettings) { + Object.entries(groupSettings).forEach(([camera, cameraSettings]) => { + initialAudioStates[camera] = cameraSettings.playAudio ?? false; + initialVolumeStates[camera] = cameraSettings.volume ?? 1; + }); + } }); setAudioStates(initialAudioStates); diff --git a/web/src/views/live/LiveDashboardView.tsx b/web/src/views/live/LiveDashboardView.tsx index 45d0d5302..e59fd96ca 100644 --- a/web/src/views/live/LiveDashboardView.tsx +++ b/web/src/views/live/LiveDashboardView.tsx @@ -268,10 +268,12 @@ export default function LiveDashboardView({ const initialVolumeStates: VolumeState = {}; Object.entries(allGroupsStreamingSettings).forEach(([_, groupSettings]) => { - Object.entries(groupSettings).forEach(([camera, cameraSettings]) => { - initialAudioStates[camera] = cameraSettings.playAudio ?? false; - initialVolumeStates[camera] = cameraSettings.volume ?? 1; - }); + if (groupSettings) { + Object.entries(groupSettings).forEach(([camera, cameraSettings]) => { + initialAudioStates[camera] = cameraSettings.playAudio ?? false; + initialVolumeStates[camera] = cameraSettings.volume ?? 
1; + }); + } }); setAudioStates(initialAudioStates); From 56079d080dbd9d414adb5b4be893d576ecfaca86 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 3 Mar 2025 16:28:34 -0700 Subject: [PATCH 10/12] Quick fix (#16926) * fix * Fix * Fix incorrect default websocket value * Cleanup value setting --- frigate/object_processing.py | 14 ++++++-------- web/src/api/ws.tsx | 2 +- web/src/components/player/LivePlayer.tsx | 3 +++ web/src/hooks/use-camera-activity.ts | 4 ++-- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/frigate/object_processing.py b/frigate/object_processing.py index 783c2b2d0..a7a2fb066 100644 --- a/frigate/object_processing.py +++ b/frigate/object_processing.py @@ -707,16 +707,14 @@ class TrackedObjectProcessor(threading.Thread): def _get_enabled_state(self, camera: str) -> bool: _, config_data = self.enabled_subscribers[camera].check_for_update() + if config_data: - enabled = config_data.enabled + self.config.cameras[camera].enabled = config_data.enabled + if self.camera_states[camera].prev_enabled is None: - self.camera_states[camera].prev_enabled = enabled - return enabled - return ( - self.camera_states[camera].prev_enabled - if self.camera_states[camera].prev_enabled is not None - else self.config.cameras[camera].enabled - ) + self.camera_states[camera].prev_enabled = config_data.enabled + + return self.config.cameras[camera].enabled def run(self): while not self.stop_event.is_set(): diff --git a/web/src/api/ws.tsx b/web/src/api/ws.tsx index 27600993a..5eedcdbcd 100644 --- a/web/src/api/ws.tsx +++ b/web/src/api/ws.tsx @@ -174,7 +174,7 @@ export function useEnabledState(camera: string): { value: { payload }, send, } = useWs(`${camera}/enabled/state`, `${camera}/enabled/set`); - return { payload: payload as ToggleableSetting, send }; + return { payload: (payload ?? "ON") as ToggleableSetting, send }; } export function useDetectState(camera: string): { diff --git a/web/src/components/player/LivePlayer.tsx b/web/src/components/player/LivePlayer.tsx index 913373774..ae9fd6197 100644 --- a/web/src/components/player/LivePlayer.tsx +++ b/web/src/components/player/LivePlayer.tsx @@ -203,6 +203,9 @@ export default function LivePlayer({ const prevCameraEnabledRef = useRef(cameraEnabled ?? true); useEffect(() => { + if (cameraEnabled == undefined) { + return; + } if (!prevCameraEnabledRef.current && cameraEnabled) { // Camera enabled setLiveReady(false); diff --git a/web/src/hooks/use-camera-activity.ts b/web/src/hooks/use-camera-activity.ts index 14a575224..28eb8c67d 100644 --- a/web/src/hooks/use-camera-activity.ts +++ b/web/src/hooks/use-camera-activity.ts @@ -16,7 +16,7 @@ import useSWR from "swr"; import { getAttributeLabels } from "@/utils/iconUtil"; type useCameraActivityReturn = { - enabled: boolean; + enabled?: boolean; activeTracking: boolean; activeMotion: boolean; objects: ObjectType[]; @@ -148,7 +148,7 @@ export function useCameraActivity( return cameras[camera.name].camera_fps == 0 && stats["service"].uptime > 60; }, [camera, stats]); - const isCameraEnabled = cameraEnabled === "ON"; + const isCameraEnabled = cameraEnabled ? 
cameraEnabled === "ON" : undefined; return { enabled: isCameraEnabled, From 5210d8c0a296c269b828233bf2361bc485baf65c Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 3 Mar 2025 18:41:28 -0600 Subject: [PATCH 11/12] Add camera enable switch to mobile drawer (#16929) --- web/src/views/live/LiveCameraView.tsx | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/web/src/views/live/LiveCameraView.tsx b/web/src/views/live/LiveCameraView.tsx index 9b45c5a60..96a0ed2bd 100644 --- a/web/src/views/live/LiveCameraView.tsx +++ b/web/src/views/live/LiveCameraView.tsx @@ -1399,6 +1399,13 @@ function FrigateCameraFeatures({
+ + sendEnabled(enabledState == "ON" ? "OFF" : "ON") + } + /> Date: Tue, 4 Mar 2025 22:19:40 +0900 Subject: [PATCH 12/12] Fixed the issue where internal context copy occurs frequently. (#16931) remove cache mount in nginx build Co-authored-by: Ludis Hur --- docker/main/Dockerfile | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index 674add58e..7a0351240 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -39,10 +39,7 @@ ARG DEBIAN_FRONTEND ENV CCACHE_DIR /root/.ccache ENV CCACHE_MAXSIZE 2G -# bind /var/cache/apt to tmpfs to speed up nginx build -RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \ - --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \ - --mount=type=cache,target=/root/.ccache \ +RUN --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \ /deps/build_nginx.sh FROM wget AS sqlite-vec
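Across the series, the same enabled flag travels over three transports: an internal ZMQ topic for backend processes, plus retained MQTT and websocket topics for clients. A sketch of the naming convention the hunks rely on; the shapes are inferred from the diffs, not an authoritative schema:

```ts
// Backend (ZMQ):     config/enabled/{camera}  -> updated camera config;
//                    subscribers read config_data.enabled.
// Clients (MQTT/ws): {camera}/enabled/state   -> retained "ON"/"OFF";
//                    {camera}/enabled/set     <- requests a change.
export type ToggleableSetting = "ON" | "OFF";

export const enabledTopics = (camera: string) => ({
  zmq: `config/enabled/${camera}`,
  state: `${camera}/enabled/state`,
  set: `${camera}/enabled/set`,
});
```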