Merge branch 'blakeblackshear:dev' into dev

This commit is contained in:
OmriAx 2025-03-11 15:46:16 +02:00 committed by GitHub
commit b769bbd6f1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
125 changed files with 4812 additions and 3637 deletions

View File

@ -175,6 +175,7 @@ jobs:
files: docker/rocm/rocm.hcl files: docker/rocm/rocm.hcl
set: | set: |
rocm.tags=${{ steps.setup.outputs.image-name }}-rocm rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-rocm,mode=max
*.cache-from=type=gha *.cache-from=type=gha
arm64_extra_builds: arm64_extra_builds:
runs-on: ubuntu-22.04-arm runs-on: ubuntu-22.04-arm

View File

@ -24,7 +24,7 @@ jobs:
persist-credentials: false persist-credentials: false
- uses: actions/setup-node@master - uses: actions/setup-node@master
with: with:
node-version: 16.x node-version: 20.x
- name: Install devcontainer cli - name: Install devcontainer cli
run: npm install --global @devcontainers/cli run: npm install --global @devcontainers/cli
- name: Build devcontainer - name: Build devcontainer
@ -64,6 +64,9 @@ jobs:
node-version: 20.x node-version: 20.x
- run: npm install - run: npm install
working-directory: ./web working-directory: ./web
- name: Build web
run: npm run build
working-directory: ./web
# - name: Test # - name: Test
# run: npm run test # run: npm run test
# working-directory: ./web # working-directory: ./web
@ -77,7 +80,7 @@ jobs:
with: with:
persist-credentials: false persist-credentials: false
- name: Set up Python ${{ env.DEFAULT_PYTHON }} - name: Set up Python ${{ env.DEFAULT_PYTHON }}
uses: actions/setup-python@v5.3.0 uses: actions/setup-python@v5.4.0
with: with:
python-version: ${{ env.DEFAULT_PYTHON }} python-version: ${{ env.DEFAULT_PYTHON }}
- name: Install requirements - name: Install requirements
@ -99,14 +102,6 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
with: with:
persist-credentials: false persist-credentials: false
- uses: actions/setup-node@master
with:
node-version: 16.x
- run: npm install
working-directory: ./web
- name: Build web
run: npm run build
working-directory: ./web
- name: Set up QEMU - name: Set up QEMU
uses: docker/setup-qemu-action@v3 uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx - name: Set up Docker Buildx

View File

@ -39,10 +39,7 @@ ARG DEBIAN_FRONTEND
ENV CCACHE_DIR /root/.ccache ENV CCACHE_DIR /root/.ccache
ENV CCACHE_MAXSIZE 2G ENV CCACHE_MAXSIZE 2G
# bind /var/cache/apt to tmpfs to speed up nginx build RUN --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \
RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
--mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \
--mount=type=cache,target=/root/.ccache \
/deps/build_nginx.sh /deps/build_nginx.sh
FROM wget AS sqlite-vec FROM wget AS sqlite-vec
@ -225,6 +222,9 @@ ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html # Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
ENV OPENCV_FFMPEG_LOGLEVEL=8 ENV OPENCV_FFMPEG_LOGLEVEL=8
# Set HailoRT to disable logging
ENV HAILORT_LOGGER_PATH=NONE
ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}" ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
# Install dependencies # Install dependencies

View File

@ -1,7 +1,7 @@
aiofiles == 24.1.* aiofiles == 24.1.*
click == 8.1.* click == 8.1.*
# FastAPI # FastAPI
aiohttp == 3.11.2 aiohttp == 3.11.3
starlette == 0.41.2 starlette == 0.41.2
starlette-context == 0.3.6 starlette-context == 0.3.6
fastapi == 0.115.* fastapi == 0.115.*
@ -20,9 +20,9 @@ pandas == 2.2.*
peewee == 3.17.* peewee == 3.17.*
peewee_migrate == 1.13.* peewee_migrate == 1.13.*
psutil == 6.1.* psutil == 6.1.*
pydantic == 2.8.* pydantic == 2.10.*
git+https://github.com/fbcotter/py3nvml#egg=py3nvml git+https://github.com/fbcotter/py3nvml#egg=py3nvml
pytz == 2024.* pytz == 2025.*
pyzmq == 26.2.* pyzmq == 26.2.*
ruamel.yaml == 0.18.* ruamel.yaml == 0.18.*
tzlocal == 5.2 tzlocal == 5.2
@ -34,8 +34,8 @@ ws4py == 0.5.*
unidecode == 1.3.* unidecode == 1.3.*
# Image Manipulation # Image Manipulation
numpy == 1.26.* numpy == 1.26.*
opencv-python-headless == 4.10.0.* opencv-python-headless == 4.11.0.*
opencv-contrib-python == 4.9.0.* opencv-contrib-python == 4.11.0.*
scipy == 1.14.* scipy == 1.14.*
# OpenVino & ONNX # OpenVino & ONNX
openvino == 2024.4.* openvino == 2024.4.*
@ -46,7 +46,7 @@ transformers == 4.45.*
# Generative AI # Generative AI
google-generativeai == 0.8.* google-generativeai == 0.8.*
ollama == 0.3.* ollama == 0.3.*
openai == 1.51.* openai == 1.65.*
# push notifications # push notifications
py-vapid == 1.9.* py-vapid == 1.9.*
pywebpush == 2.0.* pywebpush == 2.0.*

View File

@ -1,14 +1,16 @@
## Send a subrequest to verify if the user is authenticated and has permission to access the resource. ## Send a subrequest to verify if the user is authenticated and has permission to access the resource.
auth_request /auth; auth_request /auth;
## Save the upstream metadata response headers from Authelia to variables. ## Save the upstream metadata response headers from the auth request to variables
auth_request_set $user $upstream_http_remote_user; auth_request_set $user $upstream_http_remote_user;
auth_request_set $role $upstream_http_remote_role;
auth_request_set $groups $upstream_http_remote_groups; auth_request_set $groups $upstream_http_remote_groups;
auth_request_set $name $upstream_http_remote_name; auth_request_set $name $upstream_http_remote_name;
auth_request_set $email $upstream_http_remote_email; auth_request_set $email $upstream_http_remote_email;
## Inject the metadata response headers from the variables into the request made to the backend. ## Inject the metadata response headers from the variables into the request made to the backend.
proxy_set_header Remote-User $user; proxy_set_header Remote-User $user;
proxy_set_header Remote-Role $role;
proxy_set_header Remote-Groups $groups; proxy_set_header Remote-Groups $groups;
proxy_set_header Remote-Email $email; proxy_set_header Remote-Email $email;
proxy_set_header Remote-Name $name; proxy_set_header Remote-Name $name;

View File

@ -2,79 +2,49 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable # https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive ARG DEBIAN_FRONTEND=noninteractive
ARG ROCM=5.7.3 ARG ROCM=6.3.3
ARG AMDGPU=gfx900 ARG AMDGPU=gfx900
ARG HSA_OVERRIDE_GFX_VERSION ARG HSA_OVERRIDE_GFX_VERSION
ARG HSA_OVERRIDE ARG HSA_OVERRIDE
####################################################################### #######################################################################
FROM ubuntu:focal as rocm FROM wget AS rocm
ARG ROCM ARG ROCM
ARG AMDGPU
RUN apt-get update && apt-get -y upgrade RUN apt update && \
RUN apt-get -y install gnupg wget apt install -y wget gpg && \
wget -O rocm.deb https://repo.radeon.com/amdgpu-install/$ROCM/ubuntu/jammy/amdgpu-install_6.3.60303-1_all.deb && \
RUN mkdir --parents --mode=0755 /etc/apt/keyrings apt install -y ./rocm.deb && \
apt update && \
RUN wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | tee /etc/apt/keyrings/rocm.gpg > /dev/null apt install -y rocm
COPY docker/rocm/rocm.list /etc/apt/sources.list.d/
COPY docker/rocm/rocm-pin-600 /etc/apt/preferences.d/
RUN apt-get update
RUN apt-get -y install --no-install-recommends migraphx hipfft roctracer
RUN apt-get -y install --no-install-recommends migraphx-dev
RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib
RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ RUN cd /opt/rocm-$ROCM/lib && \
cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \
mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib && \
cp -dpr migraphx/lib/* /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib
RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm
RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/ RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/
RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf
#######################################################################
FROM --platform=linux/amd64 debian:12 as debian-base
RUN apt-get update && apt-get -y upgrade
RUN apt-get -y install --no-install-recommends libelf1 libdrm2 libdrm-amdgpu1 libnuma1 kmod
RUN apt-get -y install python3
#######################################################################
# ROCm does not come with migraphx wrappers for python 3.9, so we build it here
FROM debian-base as debian-build
ARG ROCM
COPY --from=rocm /opt/rocm-$ROCM /opt/rocm-$ROCM
RUN ln -s /opt/rocm-$ROCM /opt/rocm
RUN apt-get -y install g++ cmake
RUN apt-get -y install python3-pybind11 python3-distutils python3-dev
WORKDIR /opt/build
COPY docker/rocm/migraphx .
RUN mkdir build && cd build && cmake .. && make install
####################################################################### #######################################################################
FROM deps AS deps-prelim FROM deps AS deps-prelim
# need this to install libnuma1 RUN apt-get update && apt-get install -y libnuma1
RUN apt-get update
# no ugprade?!?!
RUN apt-get -y install libnuma1
WORKDIR /opt/frigate/ WORKDIR /opt/frigate
COPY --from=rootfs / / COPY --from=rootfs / /
# Temporarily disabled to see if a new wheel can be built to support py3.11 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
#COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt && python3 get-pip.py "pip" --break-system-packages
#RUN python3 -m pip install --upgrade pip \ RUN python3 -m pip config set global.break-system-packages true
# && pip3 uninstall -y onnxruntime-openvino \
# && pip3 install -r /requirements.txt COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
RUN pip3 uninstall -y onnxruntime-openvino \
&& pip3 install -r /requirements.txt
####################################################################### #######################################################################
FROM scratch AS rocm-dist FROM scratch AS rocm-dist
@ -87,12 +57,11 @@ COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/ COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/ COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-dist/ / COPY --from=rocm /opt/rocm-dist/ /
COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-311-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/
####################################################################### #######################################################################
FROM deps-prelim AS rocm-prelim-hsa-override0 FROM deps-prelim AS rocm-prelim-hsa-override0
\ ENV HSA_ENABLE_SDMA=0
ENV HSA_ENABLE_SDMA=0 ENV MIGRAPHX_ENABLE_NHWC=1
COPY --from=rocm-dist / / COPY --from=rocm-dist / /

View File

@ -1,26 +0,0 @@
cmake_minimum_required(VERSION 3.1)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif()
SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
project(migraphx_py)
include_directories(/opt/rocm/include)
find_package(pybind11 REQUIRED)
pybind11_add_module(migraphx migraphx_py.cpp)
target_link_libraries(migraphx PRIVATE /opt/rocm/lib/libmigraphx.so /opt/rocm/lib/libmigraphx_tf.so /opt/rocm/lib/libmigraphx_onnx.so)
install(TARGETS migraphx
COMPONENT python
LIBRARY DESTINATION /opt/rocm/lib
)

View File

@ -1,582 +0,0 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <migraphx/program.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/tf.hpp>
#include <migraphx/onnx.hpp>
#include <migraphx/load_save.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/json.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
#ifdef HAVE_GPU
#include <migraphx/gpu/hip.hpp>
#endif
using half = half_float::half;
namespace py = pybind11;
#ifdef __clang__
#define MIGRAPHX_PUSH_UNUSED_WARNING \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"")
#define MIGRAPHX_POP_WARNING _Pragma("clang diagnostic pop")
#else
#define MIGRAPHX_PUSH_UNUSED_WARNING
#define MIGRAPHX_POP_WARNING
#endif
#define MIGRAPHX_PYBIND11_MODULE(...) \
MIGRAPHX_PUSH_UNUSED_WARNING \
PYBIND11_MODULE(__VA_ARGS__) \
MIGRAPHX_POP_WARNING
#define MIGRAPHX_PYTHON_GENERATE_SHAPE_ENUM(x, t) .value(#x, migraphx::shape::type_t::x)
namespace migraphx {
migraphx::value to_value(py::kwargs kwargs);
migraphx::value to_value(py::list lst);
template <class T, class F>
void visit_py(T x, F f)
{
if(py::isinstance<py::kwargs>(x))
{
f(to_value(x.template cast<py::kwargs>()));
}
else if(py::isinstance<py::list>(x))
{
f(to_value(x.template cast<py::list>()));
}
else if(py::isinstance<py::bool_>(x))
{
f(x.template cast<bool>());
}
else if(py::isinstance<py::int_>(x) or py::hasattr(x, "__index__"))
{
f(x.template cast<int>());
}
else if(py::isinstance<py::float_>(x))
{
f(x.template cast<float>());
}
else if(py::isinstance<py::str>(x))
{
f(x.template cast<std::string>());
}
else if(py::isinstance<migraphx::shape::dynamic_dimension>(x))
{
f(migraphx::to_value(x.template cast<migraphx::shape::dynamic_dimension>()));
}
else
{
MIGRAPHX_THROW("VISIT_PY: Unsupported data type!");
}
}
migraphx::value to_value(py::list lst)
{
migraphx::value v = migraphx::value::array{};
for(auto val : lst)
{
visit_py(val, [&](auto py_val) { v.push_back(py_val); });
}
return v;
}
migraphx::value to_value(py::kwargs kwargs)
{
migraphx::value v = migraphx::value::object{};
for(auto arg : kwargs)
{
auto&& key = py::str(arg.first);
auto&& val = arg.second;
visit_py(val, [&](auto py_val) { v[key] = py_val; });
}
return v;
}
} // namespace migraphx
namespace pybind11 {
namespace detail {
template <>
struct npy_format_descriptor<half>
{
static std::string format()
{
// following: https://docs.python.org/3/library/struct.html#format-characters
return "e";
}
static constexpr auto name() { return _("half"); }
};
} // namespace detail
} // namespace pybind11
template <class F>
void visit_type(const migraphx::shape& s, F f)
{
s.visit_type(f);
}
template <class T, class F>
void visit(const migraphx::raw_data<T>& x, F f)
{
x.visit(f);
}
template <class F>
void visit_types(F f)
{
migraphx::shape::visit_types(f);
}
template <class T>
py::buffer_info to_buffer_info(T& x)
{
migraphx::shape s = x.get_shape();
assert(s.type() != migraphx::shape::tuple_type);
if(s.dynamic())
MIGRAPHX_THROW("MIGRAPHX PYTHON: dynamic shape argument passed to to_buffer_info");
auto strides = s.strides();
std::transform(
strides.begin(), strides.end(), strides.begin(), [&](auto i) { return i * s.type_size(); });
py::buffer_info b;
visit_type(s, [&](auto as) {
// migraphx use int8_t data to store bool type, we need to
// explicitly specify the data type as bool for python
if(s.type() == migraphx::shape::bool_type)
{
b = py::buffer_info(x.data(),
as.size(),
py::format_descriptor<bool>::format(),
s.ndim(),
s.lens(),
strides);
}
else
{
b = py::buffer_info(x.data(),
as.size(),
py::format_descriptor<decltype(as())>::format(),
s.ndim(),
s.lens(),
strides);
}
});
return b;
}
migraphx::shape to_shape(const py::buffer_info& info)
{
migraphx::shape::type_t t;
std::size_t n = 0;
visit_types([&](auto as) {
if(info.format == py::format_descriptor<decltype(as())>::format() or
(info.format == "l" and py::format_descriptor<decltype(as())>::format() == "q") or
(info.format == "L" and py::format_descriptor<decltype(as())>::format() == "Q"))
{
t = as.type_enum();
n = sizeof(as());
}
else if(info.format == "?" and py::format_descriptor<decltype(as())>::format() == "b")
{
t = migraphx::shape::bool_type;
n = sizeof(bool);
}
});
if(n == 0)
{
MIGRAPHX_THROW("MIGRAPHX PYTHON: Unsupported data type " + info.format);
}
auto strides = info.strides;
std::transform(strides.begin(), strides.end(), strides.begin(), [&](auto i) -> std::size_t {
return n > 0 ? i / n : 0;
});
// scalar support
if(info.shape.empty())
{
return migraphx::shape{t};
}
else
{
return migraphx::shape{t, info.shape, strides};
}
}
MIGRAPHX_PYBIND11_MODULE(migraphx, m)
{
py::class_<migraphx::shape> shape_cls(m, "shape");
shape_cls
.def(py::init([](py::kwargs kwargs) {
auto v = migraphx::to_value(kwargs);
auto t = migraphx::shape::parse_type(v.get("type", "float"));
if(v.contains("dyn_dims"))
{
auto dyn_dims =
migraphx::from_value<std::vector<migraphx::shape::dynamic_dimension>>(
v.at("dyn_dims"));
return migraphx::shape(t, dyn_dims);
}
auto lens = v.get<std::size_t>("lens", {1});
if(v.contains("strides"))
return migraphx::shape(t, lens, v.at("strides").to_vector<std::size_t>());
else
return migraphx::shape(t, lens);
}))
.def("type", &migraphx::shape::type)
.def("lens", &migraphx::shape::lens)
.def("strides", &migraphx::shape::strides)
.def("ndim", &migraphx::shape::ndim)
.def("elements", &migraphx::shape::elements)
.def("bytes", &migraphx::shape::bytes)
.def("type_string", &migraphx::shape::type_string)
.def("type_size", &migraphx::shape::type_size)
.def("dyn_dims", &migraphx::shape::dyn_dims)
.def("packed", &migraphx::shape::packed)
.def("transposed", &migraphx::shape::transposed)
.def("broadcasted", &migraphx::shape::broadcasted)
.def("standard", &migraphx::shape::standard)
.def("scalar", &migraphx::shape::scalar)
.def("dynamic", &migraphx::shape::dynamic)
.def("__eq__", std::equal_to<migraphx::shape>{})
.def("__ne__", std::not_equal_to<migraphx::shape>{})
.def("__repr__", [](const migraphx::shape& s) { return migraphx::to_string(s); });
py::enum_<migraphx::shape::type_t>(shape_cls, "type_t")
MIGRAPHX_SHAPE_VISIT_TYPES(MIGRAPHX_PYTHON_GENERATE_SHAPE_ENUM);
py::class_<migraphx::shape::dynamic_dimension>(shape_cls, "dynamic_dimension")
.def(py::init<>())
.def(py::init<std::size_t, std::size_t>())
.def(py::init<std::size_t, std::size_t, std::set<std::size_t>>())
.def_readwrite("min", &migraphx::shape::dynamic_dimension::min)
.def_readwrite("max", &migraphx::shape::dynamic_dimension::max)
.def_readwrite("optimals", &migraphx::shape::dynamic_dimension::optimals)
.def("is_fixed", &migraphx::shape::dynamic_dimension::is_fixed);
py::class_<migraphx::argument>(m, "argument", py::buffer_protocol())
.def_buffer([](migraphx::argument& x) -> py::buffer_info { return to_buffer_info(x); })
.def(py::init([](py::buffer b) {
py::buffer_info info = b.request();
return migraphx::argument(to_shape(info), info.ptr);
}))
.def("get_shape", &migraphx::argument::get_shape)
.def("data_ptr",
[](migraphx::argument& x) { return reinterpret_cast<std::uintptr_t>(x.data()); })
.def("tolist",
[](migraphx::argument& x) {
py::list l{x.get_shape().elements()};
visit(x, [&](auto data) { l = py::cast(data.to_vector()); });
return l;
})
.def("__eq__", std::equal_to<migraphx::argument>{})
.def("__ne__", std::not_equal_to<migraphx::argument>{})
.def("__repr__", [](const migraphx::argument& x) { return migraphx::to_string(x); });
py::class_<migraphx::target>(m, "target");
py::class_<migraphx::instruction_ref>(m, "instruction_ref")
.def("shape", [](migraphx::instruction_ref i) { return i->get_shape(); })
.def("op", [](migraphx::instruction_ref i) { return i->get_operator(); });
py::class_<migraphx::module, std::unique_ptr<migraphx::module, py::nodelete>>(m, "module")
.def("print", [](const migraphx::module& mm) { std::cout << mm << std::endl; })
.def(
"add_instruction",
[](migraphx::module& mm,
const migraphx::operation& op,
std::vector<migraphx::instruction_ref>& args,
std::vector<migraphx::module*>& mod_args) {
return mm.add_instruction(op, args, mod_args);
},
py::arg("op"),
py::arg("args"),
py::arg("mod_args") = std::vector<migraphx::module*>{})
.def(
"add_literal",
[](migraphx::module& mm, py::buffer data) {
py::buffer_info info = data.request();
auto literal_shape = to_shape(info);
return mm.add_literal(literal_shape, reinterpret_cast<char*>(info.ptr));
},
py::arg("data"))
.def(
"add_parameter",
[](migraphx::module& mm, const std::string& name, const migraphx::shape shape) {
return mm.add_parameter(name, shape);
},
py::arg("name"),
py::arg("shape"))
.def(
"add_return",
[](migraphx::module& mm, std::vector<migraphx::instruction_ref>& args) {
return mm.add_return(args);
},
py::arg("args"))
.def("__repr__", [](const migraphx::module& mm) { return migraphx::to_string(mm); });
py::class_<migraphx::program>(m, "program")
.def(py::init([]() { return migraphx::program(); }))
.def("get_parameter_names", &migraphx::program::get_parameter_names)
.def("get_parameter_shapes", &migraphx::program::get_parameter_shapes)
.def("get_output_shapes", &migraphx::program::get_output_shapes)
.def("is_compiled", &migraphx::program::is_compiled)
.def(
"compile",
[](migraphx::program& p,
const migraphx::target& t,
bool offload_copy,
bool fast_math,
bool exhaustive_tune) {
migraphx::compile_options options;
options.offload_copy = offload_copy;
options.fast_math = fast_math;
options.exhaustive_tune = exhaustive_tune;
p.compile(t, options);
},
py::arg("t"),
py::arg("offload_copy") = true,
py::arg("fast_math") = true,
py::arg("exhaustive_tune") = false)
.def("get_main_module", [](const migraphx::program& p) { return p.get_main_module(); })
.def(
"create_module",
[](migraphx::program& p, const std::string& name) { return p.create_module(name); },
py::arg("name"))
.def("run",
[](migraphx::program& p, py::dict params) {
migraphx::parameter_map pm;
for(auto x : params)
{
std::string key = x.first.cast<std::string>();
py::buffer b = x.second.cast<py::buffer>();
py::buffer_info info = b.request();
pm[key] = migraphx::argument(to_shape(info), info.ptr);
}
return p.eval(pm);
})
.def("run_async",
[](migraphx::program& p,
py::dict params,
std::uintptr_t stream,
std::string stream_name) {
migraphx::parameter_map pm;
for(auto x : params)
{
std::string key = x.first.cast<std::string>();
py::buffer b = x.second.cast<py::buffer>();
py::buffer_info info = b.request();
pm[key] = migraphx::argument(to_shape(info), info.ptr);
}
migraphx::execution_environment exec_env{
migraphx::any_ptr(reinterpret_cast<void*>(stream), stream_name), true};
return p.eval(pm, exec_env);
})
.def("sort", &migraphx::program::sort)
.def("print", [](const migraphx::program& p) { std::cout << p << std::endl; })
.def("__eq__", std::equal_to<migraphx::program>{})
.def("__ne__", std::not_equal_to<migraphx::program>{})
.def("__repr__", [](const migraphx::program& p) { return migraphx::to_string(p); });
py::class_<migraphx::operation> op(m, "op");
op.def(py::init([](const std::string& name, py::kwargs kwargs) {
migraphx::value v = migraphx::value::object{};
if(kwargs)
{
v = migraphx::to_value(kwargs);
}
return migraphx::make_op(name, v);
}))
.def("name", &migraphx::operation::name);
py::enum_<migraphx::op::pooling_mode>(op, "pooling_mode")
.value("average", migraphx::op::pooling_mode::average)
.value("max", migraphx::op::pooling_mode::max)
.value("lpnorm", migraphx::op::pooling_mode::lpnorm);
py::enum_<migraphx::op::rnn_direction>(op, "rnn_direction")
.value("forward", migraphx::op::rnn_direction::forward)
.value("reverse", migraphx::op::rnn_direction::reverse)
.value("bidirectional", migraphx::op::rnn_direction::bidirectional);
m.def(
"argument_from_pointer",
[](const migraphx::shape shape, const int64_t address) {
return migraphx::argument(shape, reinterpret_cast<void*>(address));
},
py::arg("shape"),
py::arg("address"));
m.def(
"parse_tf",
[](const std::string& filename,
bool is_nhwc,
unsigned int batch_size,
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
std::vector<std::string> output_names) {
return migraphx::parse_tf(
filename, migraphx::tf_options{is_nhwc, batch_size, map_input_dims, output_names});
},
"Parse tf protobuf (default format is nhwc)",
py::arg("filename"),
py::arg("is_nhwc") = true,
py::arg("batch_size") = 1,
py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
py::arg("output_names") = std::vector<std::string>());
m.def(
"parse_onnx",
[](const std::string& filename,
unsigned int default_dim_value,
migraphx::shape::dynamic_dimension default_dyn_dim_value,
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>
map_dyn_input_dims,
bool skip_unknown_operators,
bool print_program_on_error,
int64_t max_loop_iterations) {
migraphx::onnx_options options;
options.default_dim_value = default_dim_value;
options.default_dyn_dim_value = default_dyn_dim_value;
options.map_input_dims = map_input_dims;
options.map_dyn_input_dims = map_dyn_input_dims;
options.skip_unknown_operators = skip_unknown_operators;
options.print_program_on_error = print_program_on_error;
options.max_loop_iterations = max_loop_iterations;
return migraphx::parse_onnx(filename, options);
},
"Parse onnx file",
py::arg("filename"),
py::arg("default_dim_value") = 0,
py::arg("default_dyn_dim_value") = migraphx::shape::dynamic_dimension{1, 1},
py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
py::arg("map_dyn_input_dims") =
std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>(),
py::arg("skip_unknown_operators") = false,
py::arg("print_program_on_error") = false,
py::arg("max_loop_iterations") = 10);
m.def(
"parse_onnx_buffer",
[](const std::string& onnx_buffer,
unsigned int default_dim_value,
migraphx::shape::dynamic_dimension default_dyn_dim_value,
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>
map_dyn_input_dims,
bool skip_unknown_operators,
bool print_program_on_error) {
migraphx::onnx_options options;
options.default_dim_value = default_dim_value;
options.default_dyn_dim_value = default_dyn_dim_value;
options.map_input_dims = map_input_dims;
options.map_dyn_input_dims = map_dyn_input_dims;
options.skip_unknown_operators = skip_unknown_operators;
options.print_program_on_error = print_program_on_error;
return migraphx::parse_onnx_buffer(onnx_buffer, options);
},
"Parse onnx file",
py::arg("filename"),
py::arg("default_dim_value") = 0,
py::arg("default_dyn_dim_value") = migraphx::shape::dynamic_dimension{1, 1},
py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
py::arg("map_dyn_input_dims") =
std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>(),
py::arg("skip_unknown_operators") = false,
py::arg("print_program_on_error") = false);
m.def(
"load",
[](const std::string& name, const std::string& format) {
migraphx::file_options options;
options.format = format;
return migraphx::load(name, options);
},
"Load MIGraphX program",
py::arg("filename"),
py::arg("format") = "msgpack");
m.def(
"save",
[](const migraphx::program& p, const std::string& name, const std::string& format) {
migraphx::file_options options;
options.format = format;
return migraphx::save(p, name, options);
},
"Save MIGraphX program",
py::arg("p"),
py::arg("filename"),
py::arg("format") = "msgpack");
m.def("get_target", &migraphx::make_target);
m.def("create_argument", [](const migraphx::shape& s, const std::vector<double>& values) {
if(values.size() != s.elements())
MIGRAPHX_THROW("Values and shape elements do not match");
migraphx::argument a{s};
a.fill(values.begin(), values.end());
return a;
});
m.def("generate_argument", &migraphx::generate_argument, py::arg("s"), py::arg("seed") = 0);
m.def("fill_argument", &migraphx::fill_argument, py::arg("s"), py::arg("value"));
m.def("quantize_fp16",
&migraphx::quantize_fp16,
py::arg("prog"),
py::arg("ins_names") = std::vector<std::string>{"all"});
m.def("quantize_int8",
&migraphx::quantize_int8,
py::arg("prog"),
py::arg("t"),
py::arg("calibration") = std::vector<migraphx::parameter_map>{},
py::arg("ins_names") = std::vector<std::string>{"dot", "convolution"});
#ifdef HAVE_GPU
m.def("allocate_gpu", &migraphx::gpu::allocate_gpu, py::arg("s"), py::arg("host") = false);
m.def("to_gpu", &migraphx::gpu::to_gpu, py::arg("arg"), py::arg("host") = false);
m.def("from_gpu", &migraphx::gpu::from_gpu);
m.def("gpu_sync", [] { migraphx::gpu::gpu_sync(); });
#endif
#ifdef VERSION_INFO
m.attr("__version__") = VERSION_INFO;
#else
m.attr("__version__") = "dev";
#endif
}

View File

@ -1 +1 @@
onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v1.0.0/onnxruntime_rocm-1.17.3-cp39-cp39-linux_x86_64.whl onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v6.3.3/onnxruntime_rocm-1.20.1-cp311-cp311-linux_x86_64.whl

View File

@ -1,3 +0,0 @@
Package: *
Pin: release o=repo.radeon.com
Pin-Priority: 600

View File

@ -2,7 +2,7 @@ variable "AMDGPU" {
default = "gfx900" default = "gfx900"
} }
variable "ROCM" { variable "ROCM" {
default = "5.7.3" default = "6.3.3"
} }
variable "HSA_OVERRIDE_GFX_VERSION" { variable "HSA_OVERRIDE_GFX_VERSION" {
default = "" default = ""
@ -10,6 +10,13 @@ variable "HSA_OVERRIDE_GFX_VERSION" {
variable "HSA_OVERRIDE" { variable "HSA_OVERRIDE" {
default = "1" default = "1"
} }
target wget {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/amd64"]
target = "wget"
}
target deps { target deps {
dockerfile = "docker/main/Dockerfile" dockerfile = "docker/main/Dockerfile"
platforms = ["linux/amd64"] platforms = ["linux/amd64"]
@ -26,6 +33,7 @@ target rocm {
dockerfile = "docker/rocm/Dockerfile" dockerfile = "docker/rocm/Dockerfile"
contexts = { contexts = {
deps = "target:deps", deps = "target:deps",
wget = "target:wget",
rootfs = "target:rootfs" rootfs = "target:rootfs"
} }
platforms = ["linux/amd64"] platforms = ["linux/amd64"]

View File

@ -1 +0,0 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/5.7.3 focal main

View File

@ -86,6 +86,9 @@ RUN apt-get -qq update \
libx264-163 libx265-199 libegl1 \ libx264-163 libx265-199 libegl1 \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
# Fixes "Error loading shared libs"
RUN mkdir -p /etc/ld.so.conf.d && echo /usr/lib/ffmpeg/jetson/lib/ > /etc/ld.so.conf.d/ffmpeg.conf
COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
--mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \ --mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \

View File

@ -7,7 +7,7 @@ title: Camera Configuration
Several inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create recordings from a higher resolution stream, or vice versa. Several inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create recordings from a higher resolution stream, or vice versa.
A camera is enabled by default but can be temporarily disabled by using `enabled: False`. Existing tracked objects and recordings can still be accessed. Live streams, recording and detecting are not working. Camera specific configurations will be used. A camera is enabled by default but can be disabled by using `enabled: False`. Cameras that are disabled through the configuration file will not appear in the Frigate UI and will not consume system resources.
Each role can only be assigned to one input per camera. The options for roles are as follows: Each role can only be assigned to one input per camera. The options for roles are as follows:

View File

@ -5,7 +5,7 @@ title: Generative AI
Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail. Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle. Descriptions can also be regenerated manually via the Frigate UI. Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle, or can optionally be sent earlier after a number of significantly changed frames, for example in use in more real-time notifications. Descriptions can also be regenerated manually via the Frigate UI. Note that if you are manually entering a description for tracked objects prior to its end, this will be overwritten by the generated response.
## Configuration ## Configuration
@ -148,6 +148,15 @@ While generating simple descriptions of detected objects is useful, understandin
Frigate provides an [MQTT topic](/integrations/mqtt), `frigate/tracked_object_update`, that is updated with a JSON payload containing `event_id` and `description` when your AI provider returns a description for a tracked object. This description could be used directly in notifications, such as sending alerts to your phone or making audio announcements. If additional details from the tracked object are needed, you can query the [HTTP API](/integrations/api/event-events-event-id-get) using the `event_id`, eg: `http://frigate_ip:5000/api/events/<event_id>`. Frigate provides an [MQTT topic](/integrations/mqtt), `frigate/tracked_object_update`, that is updated with a JSON payload containing `event_id` and `description` when your AI provider returns a description for a tracked object. This description could be used directly in notifications, such as sending alerts to your phone or making audio announcements. If additional details from the tracked object are needed, you can query the [HTTP API](/integrations/api/event-events-event-id-get) using the `event_id`, eg: `http://frigate_ip:5000/api/events/<event_id>`.
If you want to receive notifications earlier than when an object ceases to be tracked, an additional `after_significant_updates` send trigger can be configured.
```yaml
genai:
send_triggers:
tracked_object_end: true # default
after_significant_updates: 3 # how many updates to a tracked object before we should send an image
```
## Custom Prompts ## Custom Prompts
Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows: Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows:

View File

@ -115,7 +115,7 @@ lpr:
Ensure that: Ensure that:
- Your camera has a clear, well-lit view of the plate. - Your camera has a clear, human-readable, well-lit view of the plate. If you can't read the plate, Frigate certainly won't be able to. This may require changing video size, quality, or frame rate settings on your camera, depending on your scene and how fast the vehicles are traveling.
- The plate is large enough in the image (try adjusting `min_area`) or increasing the resolution of your camera's stream. - The plate is large enough in the image (try adjusting `min_area`) or increasing the resolution of your camera's stream.
- A `car` is detected first, as LPR only runs on recognized vehicles. - A `car` is detected first, as LPR only runs on recognized vehicles.

View File

@ -183,32 +183,46 @@ The default dashboard ("All Cameras") will always use Smart Streaming and the fi
::: :::
### Disabling cameras
Cameras can be temporarily disabled through the Frigate UI and through [MQTT](/integrations/mqtt#frigatecamera_nameenabledset) to conserve system resources. When disabled, Frigate's ffmpeg processes are terminated — recording stops, object detection is paused, and the Live dashboard displays a blank image with a disabled message. Review items, tracked objects, and historical footage for disabled cameras can still be accessed via the UI.
For restreamed cameras, go2rtc remains active but does not use system resources for decoding or processing unless there are active external consumers (such as the Advanced Camera Card in Home Assistant using a go2rtc source).
Note that disabling a camera through the config file (`enabled: False`) removes all related UI elements, including historical footage access. To retain access while disabling the camera, keep it enabled in the config and use the UI or MQTT to disable it temporarily.
## Live view FAQ ## Live view FAQ
1. Why don't I have audio in my Live view? 1. **Why don't I have audio in my Live view?**
You must use go2rtc to hear audio in your live streams. If you have go2rtc already configured, you need to ensure your camera is sending PCMA/PCMU or AAC audio. If you can't change your camera's audio codec, you need to [transcode the audio](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#source-ffmpeg) using go2rtc. You must use go2rtc to hear audio in your live streams. If you have go2rtc already configured, you need to ensure your camera is sending PCMA/PCMU or AAC audio. If you can't change your camera's audio codec, you need to [transcode the audio](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#source-ffmpeg) using go2rtc.
Note that the low bandwidth mode player is a video-only stream. You should not expect to hear audio when in low bandwidth mode, even if you've set up go2rtc. Note that the low bandwidth mode player is a video-only stream. You should not expect to hear audio when in low bandwidth mode, even if you've set up go2rtc.
2. Frigate shows that my live stream is in "low bandwidth mode". What does this mean? 2. **Frigate shows that my live stream is in "low bandwidth mode". What does this mean?**
Frigate intelligently selects the live streaming technology based on a number of factors (user-selected modes like two-way talk, camera settings, browser capabilities, available bandwidth) and prioritizes showing an actual up-to-date live view of your camera's stream as quickly as possible. Frigate intelligently selects the live streaming technology based on a number of factors (user-selected modes like two-way talk, camera settings, browser capabilities, available bandwidth) and prioritizes showing an actual up-to-date live view of your camera's stream as quickly as possible.
When you have go2rtc configured, Live view initially attempts to load and play back your stream with a clearer, fluent stream technology (MSE). An initial timeout, a low bandwidth condition that would cause buffering of the stream, or decoding errors in the stream will cause Frigate to switch to the stream defined by the `detect` role, using the jsmpeg format. This is what the UI labels as "low bandwidth mode". On Live dashboards, the mode will automatically reset when smart streaming is configured and activity stops. You can also try using the _Reset_ button to force a reload of your stream. When you have go2rtc configured, Live view initially attempts to load and play back your stream with a clearer, fluent stream technology (MSE). An initial timeout, a low bandwidth condition that would cause buffering of the stream, or decoding errors in the stream will cause Frigate to switch to the stream defined by the `detect` role, using the jsmpeg format. This is what the UI labels as "low bandwidth mode". On Live dashboards, the mode will automatically reset when smart streaming is configured and activity stops. You can also try using the _Reset_ button to force a reload of your stream.
If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the recommendations above or ensure you have enough bandwidth available. If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the recommendations above or ensure you have enough bandwidth available.
3. It doesn't seem like my cameras are streaming on the Live dashboard. Why? 3. **It doesn't seem like my cameras are streaming on the Live dashboard. Why?**
On the default Live dashboard ("All Cameras"), your camera images will update once per minute when no detectable activity is occurring to conserve bandwidth and resources. As soon as any activity is detected, cameras seamlessly switch to a full-resolution live stream. If you want to customize this behavior, use a camera group. On the default Live dashboard ("All Cameras"), your camera images will update once per minute when no detectable activity is occurring to conserve bandwidth and resources. As soon as any activity is detected, cameras seamlessly switch to a full-resolution live stream. If you want to customize this behavior, use a camera group.
4. I see a strange diagonal line on my live view, but my recordings look fine. How can I fix it? 4. **I see a strange diagonal line on my live view, but my recordings look fine. How can I fix it?**
This is caused by incorrect dimensions set in your detect width or height (or incorrectly auto-detected), causing the jsmpeg player's rendering engine to display a slightly distorted image. You should enlarge the width and height of your `detect` resolution up to a standard aspect ratio (example: 640x352 becomes 640x360, and 800x443 becomes 800x450, 2688x1520 becomes 2688x1512, etc). If changing the resolution to match a standard (4:3, 16:9, or 32:9, etc) aspect ratio does not solve the issue, you can enable "compatibility mode" in your camera group dashboard's stream settings. Depending on your browser and device, more than a few cameras in compatibility mode may not be supported, so only use this option if changing your `detect` width and height fails to resolve the color artifacts and diagonal line. This is caused by incorrect dimensions set in your detect width or height (or incorrectly auto-detected), causing the jsmpeg player's rendering engine to display a slightly distorted image. You should enlarge the width and height of your `detect` resolution up to a standard aspect ratio (example: 640x352 becomes 640x360, and 800x443 becomes 800x450, 2688x1520 becomes 2688x1512, etc). If changing the resolution to match a standard (4:3, 16:9, or 32:9, etc) aspect ratio does not solve the issue, you can enable "compatibility mode" in your camera group dashboard's stream settings. Depending on your browser and device, more than a few cameras in compatibility mode may not be supported, so only use this option if changing your `detect` width and height fails to resolve the color artifacts and diagonal line.
5. How does "smart streaming" work? 5. **How does "smart streaming" work?**
Because a static image of a scene looks exactly the same as a live stream with no motion or activity, smart streaming updates your camera images once per minute when no detectable activity is occurring to conserve bandwidth and resources. As soon as any activity (motion or object/audio detection) occurs, cameras seamlessly switch to a live stream. Because a static image of a scene looks exactly the same as a live stream with no motion or activity, smart streaming updates your camera images once per minute when no detectable activity is occurring to conserve bandwidth and resources. As soon as any activity (motion or object/audio detection) occurs, cameras seamlessly switch to a live stream.
This static image is pulled from the stream defined in your config with the `detect` role. When activity is detected, images from the `detect` stream immediately begin updating at ~5 frames per second so you can see the activity until the live player is loaded and begins playing. This usually only takes a second or two. If the live player times out, buffers, or has streaming errors, the jsmpeg player is loaded and plays a video-only stream from the `detect` role. When activity ends, the players are destroyed and a static image is displayed until activity is detected again, and the process repeats. This static image is pulled from the stream defined in your config with the `detect` role. When activity is detected, images from the `detect` stream immediately begin updating at ~5 frames per second so you can see the activity until the live player is loaded and begins playing. This usually only takes a second or two. If the live player times out, buffers, or has streaming errors, the jsmpeg player is loaded and plays a video-only stream from the `detect` role. When activity ends, the players are destroyed and a static image is displayed until activity is detected again, and the process repeats.
This is Frigate's default and recommended setting because it results in a significant bandwidth savings, especially for high resolution cameras. This is Frigate's default and recommended setting because it results in a significant bandwidth savings, especially for high resolution cameras.
6. I have unmuted some cameras on my dashboard, but I do not hear sound. Why? 6. **I have unmuted some cameras on my dashboard, but I do not hear sound. Why?**
If your camera is streaming (as indicated by a red dot in the upper right, or if it has been set to continuous streaming mode), your browser may be blocking audio until you interact with the page. This is an intentional browser limitation. See [this article](https://developer.mozilla.org/en-US/docs/Web/Media/Autoplay_guide#autoplay_availability). Many browsers have a whitelist feature to change this behavior. If your camera is streaming (as indicated by a red dot in the upper right, or if it has been set to continuous streaming mode), your browser may be blocking audio until you interact with the page. This is an intentional browser limitation. See [this article](https://developer.mozilla.org/en-US/docs/Web/Media/Autoplay_guide#autoplay_availability). Many browsers have a whitelist feature to change this behavior.

View File

@ -49,7 +49,7 @@ This does not affect using hardware for accelerating other tasks such as [semant
# Officially Supported Detectors # Officially Supported Detectors
Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `onnx`, `openvino`, `rknn`, `rocm`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `onnx`, `openvino`, `rknn`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
## Edge TPU Detector ## Edge TPU Detector
@ -312,7 +312,7 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl
#### YOLOv9 #### YOLOv9
[YOLOv9](https://github.com/MultimediaTechLab/YOLO) models are supported, but not included by default. [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default.
:::tip :::tip
@ -449,7 +449,7 @@ model:
### Setup ### Setup
The `rocm` detector supports running YOLO-NAS models on AMD GPUs. Use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`. Support for AMD GPUs is provided using the [ONNX detector](#ONNX). In order to utilize the AMD GPU for object detection use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`.
### Docker settings for GPU access ### Docker settings for GPU access
@ -528,29 +528,9 @@ $ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/
### Supported Models ### Supported Models
There is no default model provided, the following formats are supported: See [ONNX supported models](#supported-models) for supported models, there are some caveats:
- D-FINE models are not supported
#### YOLO-NAS - YOLO-NAS models are known to not run well on integrated GPUs
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate.
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
detectors:
rocm:
type: rocm
model:
model_type: yolonas
width: 320 # <--- should match whatever was set in notebook
height: 320 # <--- should match whatever was set in notebook
input_pixel_format: bgr
path: /config/yolo_nas_s.onnx
labelmap_path: /labelmap/coco-80.txt
```
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## ONNX ## ONNX
@ -615,7 +595,7 @@ model:
#### YOLOv9 #### YOLOv9
[YOLOv9](https://github.com/MultimediaTechLab/YOLO) models are supported, but not included by default. [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default.
:::tip :::tip
@ -644,7 +624,7 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl
#### D-FINE #### D-FINE
[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the YOLO-NAS model for use in Frigate. [D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
:::warning :::warning

View File

@ -183,6 +183,8 @@ record:
sync_recordings: True sync_recordings: True
``` ```
This feature is meant to fix variations in files, not completely delete entries in the database. If you delete all of your media, don't use `sync_recordings`, just stop Frigate, delete the `frigate.db` database, and restart.
:::warning :::warning
The sync operation uses considerable CPU resources and in most cases is not needed, only enable when necessary. The sync operation uses considerable CPU resources and in most cases is not needed, only enable when necessary.

View File

@ -255,6 +255,8 @@ ffmpeg:
# Optional: Detect configuration # Optional: Detect configuration
# NOTE: Can be overridden at the camera level # NOTE: Can be overridden at the camera level
detect: detect:
# Optional: enables detection for the camera (default: shown below)
enabled: False
# Optional: width of the frame for the input with the detect role (default: use native stream resolution) # Optional: width of the frame for the input with the detect role (default: use native stream resolution)
width: 1280 width: 1280
# Optional: height of the frame for the input with the detect role (default: use native stream resolution) # Optional: height of the frame for the input with the detect role (default: use native stream resolution)
@ -262,8 +264,6 @@ detect:
# Optional: desired fps for your camera for the input with the detect role (default: shown below) # Optional: desired fps for your camera for the input with the detect role (default: shown below)
# NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera. # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera.
fps: 5 fps: 5
# Optional: enables detection for the camera (default: True)
enabled: True
# Optional: Number of consecutive detection hits required for an object to be initialized in the tracker. (default: 1/2 the frame rate) # Optional: Number of consecutive detection hits required for an object to be initialized in the tracker. (default: 1/2 the frame rate)
min_initialized: 2 min_initialized: 2
# Optional: Number of frames without a detection before Frigate considers an object to be gone. (default: 5x the frame rate) # Optional: Number of frames without a detection before Frigate considers an object to be gone. (default: 5x the frame rate)
@ -813,6 +813,12 @@ cameras:
- cat - cat
# Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify) # Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify)
required_zones: [] required_zones: []
# Optional: What triggers to use to send frames for a tracked object to generative AI (default: shown below)
send_triggers:
# Once the object is no longer tracked
tracked_object_end: True
# Optional: After X many significant updates are received (default: shown below)
after_significant_updates: None
# Optional: Save thumbnails sent to generative AI for review/debugging purposes (default: shown below) # Optional: Save thumbnails sent to generative AI for review/debugging purposes (default: shown below)
debug_save_thumbnails: False debug_save_thumbnails: False

View File

@ -151,8 +151,6 @@ cameras:
- path: rtsp://10.0.10.10:554/rtsp # <----- The stream you want to use for detection - path: rtsp://10.0.10.10:554/rtsp # <----- The stream you want to use for detection
roles: roles:
- detect - detect
detect:
enabled: False # <---- disable detection until you have a working camera feed
``` ```
### Step 2: Start Frigate ### Step 2: Start Frigate
@ -307,7 +305,7 @@ By default, Frigate will retain video of all tracked objects for 10 days. The fu
### Step 7: Complete config ### Step 7: Complete config
At this point you have a complete config with basic functionality. At this point you have a complete config with basic functionality.
- View [common configuration examples](../configuration/index.md#common-configuration-examples) for a list of common configuration examples. - View [common configuration examples](../configuration/index.md#common-configuration-examples) for a list of common configuration examples.
- View [full config reference](../configuration/reference.md) for a complete list of configuration options. - View [full config reference](../configuration/reference.md) for a complete list of configuration options.

View File

@ -222,6 +222,14 @@ Publishes the rms value for audio detected on this camera.
**NOTE:** Requires audio detection to be enabled **NOTE:** Requires audio detection to be enabled
### `frigate/<camera_name>/enabled/set`
Topic to turn Frigate's processing of a camera on and off. Expected values are `ON` and `OFF`.
### `frigate/<camera_name>/enabled/state`
Topic with current state of processing for a camera. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/detect/set` ### `frigate/<camera_name>/detect/set`
Topic to turn object detection for a camera on and off. Expected values are `ON` and `OFF`. Topic to turn object detection for a camera on and off. Expected values are `ON` and `OFF`.

View File

@ -28,11 +28,11 @@ Not all model types are supported by all detectors, so it's important to choose
## Supported detector types ## Supported detector types
Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), and ROCm (`rocm`) detectors. Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), and ONNX (`onnx`) detectors.
:::warning :::warning
Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15 and later. Using Frigate+ models with `onnx` is only available with Frigate 0.15 and later.
::: :::
@ -42,7 +42,7 @@ Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` | | [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` |
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` | | [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` |
| [NVidia GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` | | [NVidia GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` |
| [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\* | `rocm` | `yolonas` | | [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\* | `onnx` | `yolonas` |
_\* Requires Frigate 0.15_ _\* Requires Frigate 0.15_

View File

@ -22,6 +22,7 @@ from markupsafe import escape
from peewee import operator from peewee import operator
from pydantic import ValidationError from pydantic import ValidationError
from frigate.api.auth import require_role
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.request.app_body import AppConfigSetBody from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
@ -201,7 +202,7 @@ def config_raw():
) )
@router.post("/config/save") @router.post("/config/save", dependencies=[Depends(require_role(["admin"]))])
def config_save(save_option: str, body: Any = Body(media_type="text/plain")): def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
new_config = body.decode() new_config = body.decode()
if not new_config: if not new_config:
@ -326,7 +327,7 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
) )
@router.put("/config/set") @router.put("/config/set", dependencies=[Depends(require_role(["admin"]))])
def config_set(request: Request, body: AppConfigSetBody): def config_set(request: Request, body: AppConfigSetBody):
config_file = find_config_file() config_file = find_config_file()
@ -542,7 +543,7 @@ async def logs(
) )
@router.post("/restart") @router.post("/restart", dependencies=[Depends(require_role(["admin"]))])
def restart(): def restart():
try: try:
restart_frigate() restart_frigate()

View File

@ -11,8 +11,9 @@ import secrets
import time import time
from datetime import datetime from datetime import datetime
from pathlib import Path from pathlib import Path
from typing import List
from fastapi import APIRouter, Request, Response from fastapi import APIRouter, Depends, HTTPException, Request, Response
from fastapi.responses import JSONResponse, RedirectResponse from fastapi.responses import JSONResponse, RedirectResponse
from joserfc import jwt from joserfc import jwt
from peewee import DoesNotExist from peewee import DoesNotExist
@ -22,6 +23,7 @@ from frigate.api.defs.request.app_body import (
AppPostLoginBody, AppPostLoginBody,
AppPostUsersBody, AppPostUsersBody,
AppPutPasswordBody, AppPutPasswordBody,
AppPutRoleBody,
) )
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.config import AuthConfig, ProxyConfig from frigate.config import AuthConfig, ProxyConfig
@ -134,7 +136,7 @@ def get_jwt_secret() -> str:
logger.debug("Using jwt secret from .jwt_secret file in config directory.") logger.debug("Using jwt secret from .jwt_secret file in config directory.")
with open(jwt_secret_file) as f: with open(jwt_secret_file) as f:
try: try:
jwt_secret = f.readline() jwt_secret = f.readline().strip()
except Exception: except Exception:
logger.warning( logger.warning(
"Unable to read jwt token from .jwt_secret file in config directory. A new jwt token will be created at each startup." "Unable to read jwt token from .jwt_secret file in config directory. A new jwt token will be created at each startup."
@ -169,8 +171,10 @@ def verify_password(password, password_hash):
return secrets.compare_digest(password_hash, compare_hash) return secrets.compare_digest(password_hash, compare_hash)
def create_encoded_jwt(user, expiration, secret): def create_encoded_jwt(user, role, expiration, secret):
return jwt.encode({"alg": "HS256"}, {"sub": user, "exp": expiration}, secret) return jwt.encode(
{"alg": "HS256"}, {"sub": user, "role": role, "exp": expiration}, secret
)
def set_jwt_cookie(response: Response, cookie_name, encoded_jwt, expiration, secure): def set_jwt_cookie(response: Response, cookie_name, encoded_jwt, expiration, secure):
@ -184,7 +188,48 @@ def set_jwt_cookie(response: Response, cookie_name, encoded_jwt, expiration, sec
) )
# Endpoint for use with nginx auth_request async def get_current_user(request: Request):
JWT_COOKIE_NAME = request.app.frigate_config.auth.cookie_name
encoded_token = request.cookies.get(JWT_COOKIE_NAME)
if not encoded_token:
return JSONResponse(content={"message": "No JWT token found"}, status_code=401)
try:
token = jwt.decode(encoded_token, request.app.jwt_token)
if "sub" not in token.claims or "role" not in token.claims:
return JSONResponse(
content={"message": "Invalid JWT token"}, status_code=401
)
return {"username": token.claims["sub"], "role": token.claims["role"]}
except Exception as e:
logger.error(f"Error parsing JWT: {e}")
return JSONResponse(content={"message": "Invalid JWT token"}, status_code=401)
def require_role(required_roles: List[str]):
async def role_checker(request: Request):
# Get role from header (could be comma-separated)
role_header = request.headers.get("remote-role")
roles = [r.strip() for r in role_header.split(",")] if role_header else []
# Check if we have any roles
if not roles:
raise HTTPException(status_code=403, detail="Role not provided")
# Check if any role matches required_roles
if not any(role in required_roles for role in roles):
raise HTTPException(
status_code=403,
detail=f"Role {', '.join(roles)} not authorized. Required: {', '.join(required_roles)}",
)
# Return the first matching role
return next((role for role in roles if role in required_roles), roles[0])
return role_checker
# Endpoints
@router.get("/auth") @router.get("/auth")
def auth(request: Request): def auth(request: Request):
auth_config: AuthConfig = request.app.frigate_config.auth auth_config: AuthConfig = request.app.frigate_config.auth
@ -195,6 +240,8 @@ def auth(request: Request):
# dont require auth if the request is on the internal port # dont require auth if the request is on the internal port
# this header is set by Frigate's nginx proxy, so it cant be spoofed # this header is set by Frigate's nginx proxy, so it cant be spoofed
if int(request.headers.get("x-server-port", default=0)) == 5000: if int(request.headers.get("x-server-port", default=0)) == 5000:
success_response.headers["remote-user"] = "anonymous"
success_response.headers["remote-role"] = "admin"
return success_response return success_response
fail_response = Response("", status_code=401) fail_response = Response("", status_code=401)
@ -211,14 +258,25 @@ def auth(request: Request):
if not auth_config.enabled: if not auth_config.enabled:
# pass the user header value from the upstream proxy if a mapping is specified # pass the user header value from the upstream proxy if a mapping is specified
# or use anonymous if none are specified # or use anonymous if none are specified
if proxy_config.header_map.user is not None: user_header = proxy_config.header_map.user
upstream_user_header_value = request.headers.get( role_header = proxy_config.header_map.role
proxy_config.header_map.user, success_response.headers["remote-user"] = (
default="anonymous", request.headers.get(user_header, default="anonymous")
) if user_header
success_response.headers["remote-user"] = upstream_user_header_value else "anonymous"
else: )
success_response.headers["remote-user"] = "anonymous" role_header = proxy_config.header_map.role
role = (
request.headers.get(role_header, default="viewer")
if role_header
else "viewer"
)
# if comma-separated with "admin", use "admin", else "viewer"
success_response.headers["remote-role"] = (
"admin" if role and "admin" in role else "viewer"
)
return success_response return success_response
# now apply authentication # now apply authentication
@ -251,11 +309,15 @@ def auth(request: Request):
if "sub" not in token.claims: if "sub" not in token.claims:
logger.debug("user not set in jwt token") logger.debug("user not set in jwt token")
return fail_response return fail_response
if "role" not in token.claims:
logger.debug("role not set in jwt token")
return fail_response
if "exp" not in token.claims: if "exp" not in token.claims:
logger.debug("exp not set in jwt token") logger.debug("exp not set in jwt token")
return fail_response return fail_response
user = token.claims.get("sub") user = token.claims.get("sub")
role = token.claims.get("role")
current_time = int(time.time()) current_time = int(time.time())
# if the jwt is expired # if the jwt is expired
@ -283,7 +345,7 @@ def auth(request: Request):
return fail_response return fail_response
new_expiration = current_time + JWT_SESSION_LENGTH new_expiration = current_time + JWT_SESSION_LENGTH
new_encoded_jwt = create_encoded_jwt( new_encoded_jwt = create_encoded_jwt(
user, new_expiration, request.app.jwt_token user, role, new_expiration, request.app.jwt_token
) )
set_jwt_cookie( set_jwt_cookie(
success_response, success_response,
@ -294,6 +356,7 @@ def auth(request: Request):
) )
success_response.headers["remote-user"] = user success_response.headers["remote-user"] = user
success_response.headers["remote-role"] = role
return success_response return success_response
except Exception as e: except Exception as e:
logger.error(f"Error parsing jwt: {e}") logger.error(f"Error parsing jwt: {e}")
@ -302,8 +365,10 @@ def auth(request: Request):
@router.get("/profile") @router.get("/profile")
def profile(request: Request): def profile(request: Request):
username = request.headers.get("remote-user") username = request.headers.get("remote-user", "anonymous")
return JSONResponse(content={"username": username}) role = request.headers.get("remote-role", "viewer")
return JSONResponse(content={"username": username, "role": role})
@router.get("/logout") @router.get("/logout")
@ -333,8 +398,11 @@ def login(request: Request, body: AppPostLoginBody):
password_hash = db_user.password_hash password_hash = db_user.password_hash
if verify_password(password, password_hash): if verify_password(password, password_hash):
role = getattr(db_user, "role", "viewer")
if role not in ["admin", "viewer"]:
role = "viewer" # Enforce valid roles
expiration = int(time.time()) + JWT_SESSION_LENGTH expiration = int(time.time()) + JWT_SESSION_LENGTH
encoded_jwt = create_encoded_jwt(user, expiration, request.app.jwt_token) encoded_jwt = create_encoded_jwt(user, role, expiration, request.app.jwt_token)
response = Response("", 200) response = Response("", 200)
set_jwt_cookie( set_jwt_cookie(
response, JWT_COOKIE_NAME, encoded_jwt, expiration, JWT_COOKIE_SECURE response, JWT_COOKIE_NAME, encoded_jwt, expiration, JWT_COOKIE_SECURE
@ -343,25 +411,31 @@ def login(request: Request, body: AppPostLoginBody):
return JSONResponse(content={"message": "Login failed"}, status_code=401) return JSONResponse(content={"message": "Login failed"}, status_code=401)
@router.get("/users") @router.get("/users", dependencies=[Depends(require_role(["admin"]))])
def get_users(): def get_users():
exports = User.select(User.username).order_by(User.username).dicts().iterator() exports = (
User.select(User.username, User.role).order_by(User.username).dicts().iterator()
)
return JSONResponse([e for e in exports]) return JSONResponse([e for e in exports])
@router.post("/users") @router.post("/users", dependencies=[Depends(require_role(["admin"]))])
def create_user(request: Request, body: AppPostUsersBody): def create_user(
request: Request,
body: AppPostUsersBody,
):
HASH_ITERATIONS = request.app.frigate_config.auth.hash_iterations HASH_ITERATIONS = request.app.frigate_config.auth.hash_iterations
if not re.match("^[A-Za-z0-9._]+$", body.username): if not re.match("^[A-Za-z0-9._]+$", body.username):
JSONResponse(content={"message": "Invalid username"}, status_code=400) return JSONResponse(content={"message": "Invalid username"}, status_code=400)
role = body.role if body.role in ["admin", "viewer"] else "viewer"
password_hash = hash_password(body.password, iterations=HASH_ITERATIONS) password_hash = hash_password(body.password, iterations=HASH_ITERATIONS)
User.insert( User.insert(
{ {
User.username: body.username, User.username: body.username,
User.password_hash: password_hash, User.password_hash: password_hash,
User.role: role,
User.notification_tokens: [], User.notification_tokens: [],
} }
).execute() ).execute()
@ -375,15 +449,61 @@ def delete_user(username: str):
@router.put("/users/{username}/password") @router.put("/users/{username}/password")
def update_password(request: Request, username: str, body: AppPutPasswordBody): async def update_password(
request: Request,
username: str,
body: AppPutPasswordBody,
):
current_user = await get_current_user(request)
if isinstance(current_user, JSONResponse):
# auth failed
return current_user
current_username = current_user.get("username")
current_role = current_user.get("role")
# viewers can only change their own password
if current_role == "viewer" and current_username != username:
raise HTTPException(
status_code=403, detail="Viewers can only update their own password"
)
HASH_ITERATIONS = request.app.frigate_config.auth.hash_iterations HASH_ITERATIONS = request.app.frigate_config.auth.hash_iterations
password_hash = hash_password(body.password, iterations=HASH_ITERATIONS) password_hash = hash_password(body.password, iterations=HASH_ITERATIONS)
User.set_by_id(username, {User.password_hash: password_hash})
User.set_by_id( return JSONResponse(content={"success": True})
username,
{
User.password_hash: password_hash, @router.put(
}, "/users/{username}/role",
) dependencies=[Depends(require_role(["admin"]))],
)
async def update_role(
request: Request,
username: str,
body: AppPutRoleBody,
):
current_user = await get_current_user(request)
if isinstance(current_user, JSONResponse):
# auth failed
return current_user
current_role = current_user.get("role")
# viewers can't change anyone's role
if current_role == "viewer":
raise HTTPException(
status_code=403, detail="Admin role is required to change user roles"
)
if username == "admin":
return JSONResponse(
content={"message": "Cannot modify admin user's role"}, status_code=403
)
if body.role not in ["admin", "viewer"]:
return JSONResponse(
content={"message": "Role must be 'admin' or 'viewer'"}, status_code=400
)
User.set_by_id(username, {User.role: body.role})
return JSONResponse(content={"success": True}) return JSONResponse(content={"success": True})

View File

@ -6,12 +6,13 @@ import random
import shutil import shutil
import string import string
from fastapi import APIRouter, Request, UploadFile from fastapi import APIRouter, Depends, Request, UploadFile
from fastapi.responses import JSONResponse from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filename from pathvalidate import sanitize_filename
from peewee import DoesNotExist from peewee import DoesNotExist
from playhouse.shortcuts import model_to_dict from playhouse.shortcuts import model_to_dict
from frigate.api.auth import require_role
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.const import FACE_DIR from frigate.const import FACE_DIR
from frigate.embeddings import EmbeddingsContext from frigate.embeddings import EmbeddingsContext
@ -44,7 +45,7 @@ def get_faces():
return JSONResponse(status_code=200, content=face_dict) return JSONResponse(status_code=200, content=face_dict)
@router.post("/faces/reprocess") @router.post("/faces/reprocess", dependencies=[Depends(require_role(["admin"]))])
def reclassify_face(request: Request, body: dict = None): def reclassify_face(request: Request, body: dict = None):
if not request.app.frigate_config.face_recognition.enabled: if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse( return JSONResponse(
@ -121,7 +122,7 @@ def train_face(request: Request, name: str, body: dict = None):
) )
@router.post("/faces/{name}/create") @router.post("/faces/{name}/create", dependencies=[Depends(require_role(["admin"]))])
async def create_face(request: Request, name: str): async def create_face(request: Request, name: str):
if not request.app.frigate_config.face_recognition.enabled: if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse( return JSONResponse(
@ -138,7 +139,7 @@ async def create_face(request: Request, name: str):
) )
@router.post("/faces/{name}/register") @router.post("/faces/{name}/register", dependencies=[Depends(require_role(["admin"]))])
async def register_face(request: Request, name: str, file: UploadFile): async def register_face(request: Request, name: str, file: UploadFile):
if not request.app.frigate_config.face_recognition.enabled: if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse( return JSONResponse(
@ -154,7 +155,7 @@ async def register_face(request: Request, name: str, file: UploadFile):
) )
@router.post("/faces/{name}/delete") @router.post("/faces/{name}/delete", dependencies=[Depends(require_role(["admin"]))])
def deregister_faces(request: Request, name: str, body: dict = None): def deregister_faces(request: Request, name: str, body: dict = None):
if not request.app.frigate_config.face_recognition.enabled: if not request.app.frigate_config.face_recognition.enabled:
return JSONResponse( return JSONResponse(

View File

@ -1,3 +1,5 @@
from typing import Optional
from pydantic import BaseModel from pydantic import BaseModel
@ -12,8 +14,13 @@ class AppPutPasswordBody(BaseModel):
class AppPostUsersBody(BaseModel): class AppPostUsersBody(BaseModel):
username: str username: str
password: str password: str
role: Optional[str] = "viewer"
class AppPostLoginBody(BaseModel): class AppPostLoginBody(BaseModel):
user: str user: str
password: str password: str
class AppPutRoleBody(BaseModel):
role: str

View File

@ -14,6 +14,7 @@ from fastapi.responses import JSONResponse
from peewee import JOIN, DoesNotExist, fn, operator from peewee import JOIN, DoesNotExist, fn, operator
from playhouse.shortcuts import model_to_dict from playhouse.shortcuts import model_to_dict
from frigate.api.auth import require_role
from frigate.api.defs.query.events_query_parameters import ( from frigate.api.defs.query.events_query_parameters import (
DEFAULT_TIME_RANGE, DEFAULT_TIME_RANGE,
EventsQueryParams, EventsQueryParams,
@ -39,6 +40,7 @@ from frigate.api.defs.response.event_response import (
) )
from frigate.api.defs.response.generic_response import GenericResponse from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.comms.event_metadata_updater import EventMetadataTypeEnum
from frigate.const import CLIPS_DIR from frigate.const import CLIPS_DIR
from frigate.embeddings import EmbeddingsContext from frigate.embeddings import EmbeddingsContext
from frigate.events.external import ExternalEventProcessor from frigate.events.external import ExternalEventProcessor
@ -708,7 +710,11 @@ def event(event_id: str):
return JSONResponse(content="Event not found", status_code=404) return JSONResponse(content="Event not found", status_code=404)
@router.post("/events/{event_id}/retain", response_model=GenericResponse) @router.post(
"/events/{event_id}/retain",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
)
def set_retain(event_id: str): def set_retain(event_id: str):
try: try:
event = Event.get(Event.id == event_id) event = Event.get(Event.id == event_id)
@ -928,7 +934,11 @@ def false_positive(request: Request, event_id: str):
) )
@router.delete("/events/{event_id}/retain", response_model=GenericResponse) @router.delete(
"/events/{event_id}/retain",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
)
def delete_retain(event_id: str): def delete_retain(event_id: str):
try: try:
event = Event.get(Event.id == event_id) event = Event.get(Event.id == event_id)
@ -947,7 +957,11 @@ def delete_retain(event_id: str):
) )
@router.post("/events/{event_id}/sub_label", response_model=GenericResponse) @router.post(
"/events/{event_id}/sub_label",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
)
def set_sub_label( def set_sub_label(
request: Request, request: Request,
event_id: str, event_id: str,
@ -956,27 +970,16 @@ def set_sub_label(
try: try:
event: Event = Event.get(Event.id == event_id) event: Event = Event.get(Event.id == event_id)
except DoesNotExist: except DoesNotExist:
if not body.camera:
return JSONResponse(
content=(
{
"success": False,
"message": "Event "
+ event_id
+ " not found and camera is not provided.",
}
),
status_code=404,
)
event = None event = None
if request.app.detected_frames_processor: if request.app.detected_frames_processor:
tracked_obj: TrackedObject = ( tracked_obj: TrackedObject = None
request.app.detected_frames_processor.camera_states[
event.camera if event else body.camera for state in request.app.detected_frames_processor.camera_states.values():
].tracked_objects.get(event_id) tracked_obj = state.tracked_objects.get(event_id)
)
if tracked_obj is not None:
break
else: else:
tracked_obj = None tracked_obj = None
@ -995,23 +998,9 @@ def set_sub_label(
new_sub_label = None new_sub_label = None
new_score = None new_score = None
if tracked_obj: request.app.event_metadata_updater.publish(
tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score) EventMetadataTypeEnum.sub_label, (event_id, new_sub_label, new_score)
)
# update timeline items
Timeline.update(
data=Timeline.data.update({"sub_label": (new_sub_label, new_score)})
).where(Timeline.source_id == event_id).execute()
if event:
event.sub_label = new_sub_label
data = event.data
if new_sub_label is None:
data["sub_label_score"] = None
elif new_score is not None:
data["sub_label_score"] = new_score
event.data = data
event.save()
return JSONResponse( return JSONResponse(
content={ content={
@ -1022,7 +1011,11 @@ def set_sub_label(
) )
@router.post("/events/{event_id}/description", response_model=GenericResponse) @router.post(
"/events/{event_id}/description",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
)
def set_description( def set_description(
request: Request, request: Request,
event_id: str, event_id: str,
@ -1069,7 +1062,11 @@ def set_description(
) )
@router.put("/events/{event_id}/description/regenerate", response_model=GenericResponse) @router.put(
"/events/{event_id}/description/regenerate",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
)
def regenerate_description( def regenerate_description(
request: Request, event_id: str, params: RegenerateQueryParameters = Depends() request: Request, event_id: str, params: RegenerateQueryParameters = Depends()
): ):
@ -1084,7 +1081,9 @@ def regenerate_description(
camera_config = request.app.frigate_config.cameras[event.camera] camera_config = request.app.frigate_config.cameras[event.camera]
if camera_config.genai.enabled: if camera_config.genai.enabled:
request.app.event_metadata_updater.publish((event.id, params.source)) request.app.event_metadata_updater.publish(
EventMetadataTypeEnum.regenerate_description, (event.id, params.source)
)
return JSONResponse( return JSONResponse(
content=( content=(
@ -1137,14 +1136,22 @@ def delete_single_event(event_id: str, request: Request) -> dict:
return {"success": True, "message": f"Event {event_id} deleted"} return {"success": True, "message": f"Event {event_id} deleted"}
@router.delete("/events/{event_id}", response_model=GenericResponse) @router.delete(
"/events/{event_id}",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
)
def delete_event(request: Request, event_id: str): def delete_event(request: Request, event_id: str):
result = delete_single_event(event_id, request) result = delete_single_event(event_id, request)
status_code = 200 if result["success"] else 404 status_code = 200 if result["success"] else 404
return JSONResponse(content=result, status_code=status_code) return JSONResponse(content=result, status_code=status_code)
@router.delete("/events/", response_model=EventMultiDeleteResponse) @router.delete(
"/events/",
response_model=EventMultiDeleteResponse,
dependencies=[Depends(require_role(["admin"]))],
)
def delete_events(request: Request, body: EventsDeleteBody): def delete_events(request: Request, body: EventsDeleteBody):
if not body.event_ids: if not body.event_ids:
return JSONResponse( return JSONResponse(
@ -1170,7 +1177,11 @@ def delete_events(request: Request, body: EventsDeleteBody):
return JSONResponse(content=response, status_code=200) return JSONResponse(content=response, status_code=200)
@router.post("/events/{camera_name}/{label}/create", response_model=EventCreateResponse) @router.post(
"/events/{camera_name}/{label}/create",
response_model=EventCreateResponse,
dependencies=[Depends(require_role(["admin"]))],
)
def create_event( def create_event(
request: Request, request: Request,
camera_name: str, camera_name: str,
@ -1226,7 +1237,11 @@ def create_event(
) )
@router.put("/events/{event_id}/end", response_model=GenericResponse) @router.put(
"/events/{event_id}/end",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
)
def end_event(request: Request, event_id: str, body: EventsEndBody): def end_event(request: Request, event_id: str, body: EventsEndBody):
try: try:
end_time = body.end_time or datetime.datetime.now().timestamp() end_time = body.end_time or datetime.datetime.now().timestamp()

View File

@ -6,11 +6,12 @@ import string
from pathlib import Path from pathlib import Path
import psutil import psutil
from fastapi import APIRouter, Request from fastapi import APIRouter, Depends, Request
from fastapi.responses import JSONResponse from fastapi.responses import JSONResponse
from peewee import DoesNotExist from peewee import DoesNotExist
from playhouse.shortcuts import model_to_dict from playhouse.shortcuts import model_to_dict
from frigate.api.auth import require_role
from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
from frigate.api.defs.request.export_rename_body import ExportRenameBody from frigate.api.defs.request.export_rename_body import ExportRenameBody
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
@ -130,7 +131,9 @@ def export_recording(
) )
@router.patch("/export/{event_id}/rename") @router.patch(
"/export/{event_id}/rename", dependencies=[Depends(require_role(["admin"]))]
)
def export_rename(event_id: str, body: ExportRenameBody): def export_rename(event_id: str, body: ExportRenameBody):
try: try:
export: Export = Export.get(Export.id == event_id) export: Export = Export.get(Export.id == event_id)
@ -158,7 +161,7 @@ def export_rename(event_id: str, body: ExportRenameBody):
) )
@router.delete("/export/{event_id}") @router.delete("/export/{event_id}", dependencies=[Depends(require_role(["admin"]))])
def export_delete(event_id: str): def export_delete(event_id: str):
try: try:
export: Export = Export.get(Export.id == event_id) export: Export = Export.get(Export.id == event_id)

View File

@ -12,6 +12,7 @@ from fastapi.responses import JSONResponse
from peewee import Case, DoesNotExist, fn, operator from peewee import Case, DoesNotExist, fn, operator
from playhouse.shortcuts import model_to_dict from playhouse.shortcuts import model_to_dict
from frigate.api.auth import require_role
from frigate.api.defs.query.review_query_parameters import ( from frigate.api.defs.query.review_query_parameters import (
ReviewActivityMotionQueryParams, ReviewActivityMotionQueryParams,
ReviewQueryParams, ReviewQueryParams,
@ -343,7 +344,11 @@ def set_multiple_reviewed(body: ReviewModifyMultipleBody):
) )
@router.post("/reviews/delete", response_model=GenericResponse) @router.post(
"/reviews/delete",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
)
def delete_reviews(body: ReviewModifyMultipleBody): def delete_reviews(body: ReviewModifyMultipleBody):
list_of_ids = body.ids list_of_ids = body.ids
reviews = ( reviews = (

View File

@ -20,10 +20,7 @@ from frigate.camera import CameraMetrics, PTZMetrics
from frigate.comms.base_communicator import Communicator from frigate.comms.base_communicator import Communicator
from frigate.comms.config_updater import ConfigPublisher from frigate.comms.config_updater import ConfigPublisher
from frigate.comms.dispatcher import Dispatcher from frigate.comms.dispatcher import Dispatcher
from frigate.comms.event_metadata_updater import ( from frigate.comms.event_metadata_updater import EventMetadataPublisher
EventMetadataPublisher,
EventMetadataTypeEnum,
)
from frigate.comms.inter_process import InterProcessCommunicator from frigate.comms.inter_process import InterProcessCommunicator
from frigate.comms.mqtt import MqttClient from frigate.comms.mqtt import MqttClient
from frigate.comms.webpush import WebPushClient from frigate.comms.webpush import WebPushClient
@ -327,9 +324,7 @@ class FrigateApp:
def init_inter_process_communicator(self) -> None: def init_inter_process_communicator(self) -> None:
self.inter_process_communicator = InterProcessCommunicator() self.inter_process_communicator = InterProcessCommunicator()
self.inter_config_updater = ConfigPublisher() self.inter_config_updater = ConfigPublisher()
self.event_metadata_updater = EventMetadataPublisher( self.event_metadata_updater = EventMetadataPublisher()
EventMetadataTypeEnum.regenerate_description
)
self.inter_zmq_proxy = ZmqProxy() self.inter_zmq_proxy = ZmqProxy()
def init_onvif(self) -> None: def init_onvif(self) -> None:
@ -600,6 +595,7 @@ class FrigateApp:
User.insert( User.insert(
{ {
User.username: "admin", User.username: "admin",
User.role: "admin",
User.password_hash: password_hash, User.password_hash: password_hash,
User.notification_tokens: [], User.notification_tokens: [],
} }
@ -620,6 +616,7 @@ class FrigateApp:
) )
User.replace( User.replace(
username="admin", username="admin",
role="admin",
password_hash=password_hash, password_hash=password_hash,
notification_tokens=[], notification_tokens=[],
).execute() ).execute()

View File

@ -20,7 +20,7 @@ class CameraActivityManager:
self.all_zone_labels: dict[str, set[str]] = {} self.all_zone_labels: dict[str, set[str]] = {}
for camera_config in config.cameras.values(): for camera_config in config.cameras.values():
if not camera_config.enabled: if not camera_config.enabled_in_config:
continue continue
self.last_camera_activity[camera_config.name] = {} self.last_camera_activity[camera_config.name] = {}

View File

@ -55,6 +55,7 @@ class Dispatcher:
self._camera_settings_handlers: dict[str, Callable] = { self._camera_settings_handlers: dict[str, Callable] = {
"audio": self._on_audio_command, "audio": self._on_audio_command,
"detect": self._on_detect_command, "detect": self._on_detect_command,
"enabled": self._on_enabled_command,
"improve_contrast": self._on_motion_improve_contrast_command, "improve_contrast": self._on_motion_improve_contrast_command,
"ptz_autotracker": self._on_ptz_autotracker_command, "ptz_autotracker": self._on_ptz_autotracker_command,
"motion": self._on_motion_command, "motion": self._on_motion_command,
@ -167,6 +168,7 @@ class Dispatcher:
for camera in camera_status.keys(): for camera in camera_status.keys():
camera_status[camera]["config"] = { camera_status[camera]["config"] = {
"detect": self.config.cameras[camera].detect.enabled, "detect": self.config.cameras[camera].detect.enabled,
"enabled": self.config.cameras[camera].enabled,
"snapshots": self.config.cameras[camera].snapshots.enabled, "snapshots": self.config.cameras[camera].snapshots.enabled,
"record": self.config.cameras[camera].record.enabled, "record": self.config.cameras[camera].record.enabled,
"audio": self.config.cameras[camera].audio.enabled, "audio": self.config.cameras[camera].audio.enabled,
@ -278,6 +280,27 @@ class Dispatcher:
self.config_updater.publish(f"config/detect/{camera_name}", detect_settings) self.config_updater.publish(f"config/detect/{camera_name}", detect_settings)
self.publish(f"{camera_name}/detect/state", payload, retain=True) self.publish(f"{camera_name}/detect/state", payload, retain=True)
def _on_enabled_command(self, camera_name: str, payload: str) -> None:
"""Callback for camera topic."""
camera_settings = self.config.cameras[camera_name]
if payload == "ON":
if not self.config.cameras[camera_name].enabled_in_config:
logger.error(
"Camera must be enabled in the config to be turned on via MQTT."
)
return
if not camera_settings.enabled:
logger.info(f"Turning on camera {camera_name}")
camera_settings.enabled = True
elif payload == "OFF":
if camera_settings.enabled:
logger.info(f"Turning off camera {camera_name}")
camera_settings.enabled = False
self.config_updater.publish(f"config/enabled/{camera_name}", camera_settings)
self.publish(f"{camera_name}/enabled/state", payload, retain=True)
def _on_motion_command(self, camera_name: str, payload: str) -> None: def _on_motion_command(self, camera_name: str, payload: str) -> None:
"""Callback for motion topic.""" """Callback for motion topic."""
detect_settings = self.config.cameras[camera_name].detect detect_settings = self.config.cameras[camera_name].detect

View File

@ -2,9 +2,6 @@
import logging import logging
from enum import Enum from enum import Enum
from typing import Optional
from frigate.events.types import RegenerateDescriptionEnum
from .zmq_proxy import Publisher, Subscriber from .zmq_proxy import Publisher, Subscriber
@ -14,6 +11,7 @@ logger = logging.getLogger(__name__)
class EventMetadataTypeEnum(str, Enum): class EventMetadataTypeEnum(str, Enum):
all = "" all = ""
regenerate_description = "regenerate_description" regenerate_description = "regenerate_description"
sub_label = "sub_label"
class EventMetadataPublisher(Publisher): class EventMetadataPublisher(Publisher):
@ -21,12 +19,11 @@ class EventMetadataPublisher(Publisher):
topic_base = "event_metadata/" topic_base = "event_metadata/"
def __init__(self, topic: EventMetadataTypeEnum) -> None: def __init__(self) -> None:
topic = topic.value super().__init__()
super().__init__(topic)
def publish(self, payload: tuple[str, RegenerateDescriptionEnum]) -> None: def publish(self, topic: EventMetadataTypeEnum, payload: any) -> None:
super().publish(payload) super().publish(payload, topic.value)
class EventMetadataSubscriber(Subscriber): class EventMetadataSubscriber(Subscriber):
@ -35,17 +32,14 @@ class EventMetadataSubscriber(Subscriber):
topic_base = "event_metadata/" topic_base = "event_metadata/"
def __init__(self, topic: EventMetadataTypeEnum) -> None: def __init__(self, topic: EventMetadataTypeEnum) -> None:
topic = topic.value super().__init__(topic.value)
super().__init__(topic)
def check_for_update( def check_for_update(self, timeout: float = 1) -> tuple | None:
self, timeout: float = 1
) -> Optional[tuple[EventMetadataTypeEnum, str, RegenerateDescriptionEnum]]:
return super().check_for_update(timeout) return super().check_for_update(timeout)
def _return_object(self, topic: str, payload: any) -> any: def _return_object(self, topic: str, payload: tuple) -> tuple:
if payload is None: if payload is None:
return (None, None, None) return (None, None)
topic = EventMetadataTypeEnum[topic[len(self.topic_base) :]] topic = EventMetadataTypeEnum[topic[len(self.topic_base) :]]
event_id, source = payload return (topic, payload)
return (topic, event_id, RegenerateDescriptionEnum(source))

View File

@ -43,6 +43,11 @@ class MqttClient(Communicator): # type: ignore[misc]
def _set_initial_topics(self) -> None: def _set_initial_topics(self) -> None:
"""Set initial state topics.""" """Set initial state topics."""
for camera_name, camera in self.config.cameras.items(): for camera_name, camera in self.config.cameras.items():
self.publish(
f"{camera_name}/enabled/state",
"ON" if camera.enabled_in_config else "OFF",
retain=True,
)
self.publish( self.publish(
f"{camera_name}/recordings/state", f"{camera_name}/recordings/state",
"ON" if camera.record.enabled_in_config else "OFF", "ON" if camera.record.enabled_in_config else "OFF",
@ -196,6 +201,7 @@ class MqttClient(Communicator): # type: ignore[misc]
# register callbacks # register callbacks
callback_types = [ callback_types = [
"enabled",
"recordings", "recordings",
"snapshots", "snapshots",
"detect", "detect",

View File

@ -102,6 +102,9 @@ class CameraConfig(FrigateBaseModel):
zones: dict[str, ZoneConfig] = Field( zones: dict[str, ZoneConfig] = Field(
default_factory=dict, title="Zone configuration." default_factory=dict, title="Zone configuration."
) )
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of camera."
)
_ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr() _ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr()

View File

@ -32,6 +32,7 @@ class StationaryConfig(FrigateBaseModel):
class DetectConfig(FrigateBaseModel): class DetectConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Detection Enabled.")
height: Optional[int] = Field( height: Optional[int] = Field(
default=None, title="Height of the stream for the detect role." default=None, title="Height of the stream for the detect role."
) )
@ -41,7 +42,6 @@ class DetectConfig(FrigateBaseModel):
fps: int = Field( fps: int = Field(
default=5, title="Number of frames per second to process through detection." default=5, title="Number of frames per second to process through detection."
) )
enabled: bool = Field(default=True, title="Detection Enabled.")
min_initialized: Optional[int] = Field( min_initialized: Optional[int] = Field(
default=None, default=None,
title="Minimum number of consecutive hits for an object to be initialized by the tracker.", title="Minimum number of consecutive hits for an object to be initialized by the tracker.",

View File

@ -16,6 +16,17 @@ class GenAIProviderEnum(str, Enum):
ollama = "ollama" ollama = "ollama"
class GenAISendTriggersConfig(BaseModel):
tracked_object_end: bool = Field(
default=True, title="Send once the object is no longer tracked."
)
after_significant_updates: Optional[int] = Field(
default=None,
title="Send an early request to generative AI when X frames accumulated.",
ge=1,
)
# uses BaseModel because some global attributes are not available at the camera level # uses BaseModel because some global attributes are not available at the camera level
class GenAICameraConfig(BaseModel): class GenAICameraConfig(BaseModel):
enabled: bool = Field(default=False, title="Enable GenAI for camera.") enabled: bool = Field(default=False, title="Enable GenAI for camera.")
@ -42,6 +53,10 @@ class GenAICameraConfig(BaseModel):
default=False, default=False,
title="Save thumbnails sent to generative AI for debugging purposes.", title="Save thumbnails sent to generative AI for debugging purposes.",
) )
send_triggers: GenAISendTriggersConfig = Field(
default_factory=GenAISendTriggersConfig,
title="What triggers to use to send frames to generative AI for a tracked object.",
)
@field_validator("required_zones", mode="before") @field_validator("required_zones", mode="before")
@classmethod @classmethod

View File

@ -516,6 +516,7 @@ class FrigateConfig(FrigateBaseModel):
camera_config.detect.stationary.interval = stationary_threshold camera_config.detect.stationary.interval = stationary_threshold
# set config pre-value # set config pre-value
camera_config.enabled_in_config = camera_config.enabled
camera_config.audio.enabled_in_config = camera_config.audio.enabled camera_config.audio.enabled_in_config = camera_config.audio.enabled
camera_config.record.enabled_in_config = camera_config.record.enabled camera_config.record.enabled_in_config = camera_config.record.enabled
camera_config.notifications.enabled_in_config = ( camera_config.notifications.enabled_in_config = (

View File

@ -37,3 +37,5 @@ class LoggerConfig(FrigateBaseModel):
for log, level in log_levels.items(): for log, level in log_levels.items():
logging.getLogger(log).setLevel(level.value.upper()) logging.getLogger(log).setLevel(level.value.upper())
return self

View File

@ -12,6 +12,10 @@ class HeaderMappingConfig(FrigateBaseModel):
user: str = Field( user: str = Field(
default=None, title="Header name from upstream proxy to identify user." default=None, title="Header name from upstream proxy to identify user."
) )
role: str = Field(
default=None,
title="Header name from upstream proxy to identify user role.",
)
class ProxyConfig(FrigateBaseModel): class ProxyConfig(FrigateBaseModel):

View File

@ -8,12 +8,11 @@ from typing import List, Optional, Tuple
import cv2 import cv2
import numpy as np import numpy as np
import requests
from Levenshtein import distance from Levenshtein import distance
from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
from shapely.geometry import Polygon from shapely.geometry import Polygon
from frigate.const import FRIGATE_LOCALHOST from frigate.comms.event_metadata_updater import EventMetadataTypeEnum
from frigate.util.image import area from frigate.util.image import area
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -34,10 +33,10 @@ class LicensePlateProcessingMixin:
self.batch_size = 6 self.batch_size = 6
# Detection specific parameters # Detection specific parameters
self.min_size = 3 self.min_size = 8
self.max_size = 960 self.max_size = 960
self.box_thresh = 0.8 self.box_thresh = 0.6
self.mask_thresh = 0.8 self.mask_thresh = 0.6
def _detect(self, image: np.ndarray) -> List[np.ndarray]: def _detect(self, image: np.ndarray) -> List[np.ndarray]:
""" """
@ -158,47 +157,40 @@ class LicensePlateProcessingMixin:
logger.debug("Model runners not loaded") logger.debug("Model runners not loaded")
return [], [], [] return [], [], []
plate_points = self._detect(image) boxes = self._detect(image)
if len(plate_points) == 0: if len(boxes) == 0:
logger.debug("No points found by OCR detector model") logger.debug("No boxes found by OCR detector model")
return [], [], [] return [], [], []
plate_points = self._sort_polygon(list(plate_points)) boxes = self._sort_boxes(list(boxes))
plate_images = [self._crop_license_plate(image, x) for x in plate_points] plate_images = [self._crop_license_plate(image, x) for x in boxes]
rotated_images, _ = self._classify(plate_images)
# debug rotated and classification result
if WRITE_DEBUG_IMAGES: if WRITE_DEBUG_IMAGES:
current_time = int(datetime.datetime.now().timestamp()) current_time = int(datetime.datetime.now().timestamp())
for i, img in enumerate(plate_images): for i, img in enumerate(plate_images):
cv2.imwrite( cv2.imwrite(
f"debug/frames/license_plate_rotated_{current_time}_{i + 1}.jpg", f"debug/frames/license_plate_cropped_{current_time}_{i + 1}.jpg",
img,
)
for i, img in enumerate(rotated_images):
cv2.imwrite(
f"debug/frames/license_plate_classified_{current_time}_{i + 1}.jpg",
img, img,
) )
# keep track of the index of each image for correct area calc later # keep track of the index of each image for correct area calc later
sorted_indices = np.argsort([x.shape[1] / x.shape[0] for x in rotated_images]) sorted_indices = np.argsort([x.shape[1] / x.shape[0] for x in plate_images])
reverse_mapping = { reverse_mapping = {
idx: original_idx for original_idx, idx in enumerate(sorted_indices) idx: original_idx for original_idx, idx in enumerate(sorted_indices)
} }
results, confidences = self._recognize(rotated_images) results, confidences = self._recognize(plate_images)
if results: if results:
license_plates = [""] * len(rotated_images) license_plates = [""] * len(plate_images)
average_confidences = [[0.0]] * len(rotated_images) average_confidences = [[0.0]] * len(plate_images)
areas = [0] * len(rotated_images) areas = [0] * len(plate_images)
# map results back to original image order # map results back to original image order
for i, (plate, conf) in enumerate(zip(results, confidences)): for i, (plate, conf) in enumerate(zip(results, confidences)):
original_idx = reverse_mapping[i] original_idx = reverse_mapping[i]
height, width = rotated_images[original_idx].shape[:2] height, width = plate_images[original_idx].shape[:2]
area = height * width area = height * width
average_confidence = conf average_confidence = conf
@ -206,7 +198,7 @@ class LicensePlateProcessingMixin:
# set to True to write each cropped image for debugging # set to True to write each cropped image for debugging
if False: if False:
save_image = cv2.cvtColor( save_image = cv2.cvtColor(
rotated_images[original_idx], cv2.COLOR_RGB2BGR plate_images[original_idx], cv2.COLOR_RGB2BGR
) )
filename = f"debug/frames/plate_{original_idx}_{plate}_{area}.jpg" filename = f"debug/frames/plate_{original_idx}_{plate}_{area}.jpg"
cv2.imwrite(filename, save_image) cv2.imwrite(filename, save_image)
@ -328,7 +320,7 @@ class LicensePlateProcessingMixin:
# Use pyclipper to shrink the polygon slightly based on the computed distance. # Use pyclipper to shrink the polygon slightly based on the computed distance.
offset = PyclipperOffset() offset = PyclipperOffset()
offset.AddPath(points, JT_ROUND, ET_CLOSEDPOLYGON) offset.AddPath(points, JT_ROUND, ET_CLOSEDPOLYGON)
points = np.array(offset.Execute(distance * 1.5)).reshape((-1, 1, 2)) points = np.array(offset.Execute(distance * 1.75)).reshape((-1, 1, 2))
# get the minimum bounding box around the shrunken polygon. # get the minimum bounding box around the shrunken polygon.
box, min_side = self._get_min_boxes(points) box, min_side = self._get_min_boxes(points)
@ -453,46 +445,64 @@ class LicensePlateProcessingMixin:
) )
@staticmethod @staticmethod
def _clockwise_order(point: np.ndarray) -> np.ndarray: def _clockwise_order(pts: np.ndarray) -> np.ndarray:
""" """
Arrange the points of a polygon in clockwise order based on their angular positions Arrange the points of a polygon in order: top-left, top-right, bottom-right, bottom-left.
around the polygon's center. taken from https://github.com/PyImageSearch/imutils/blob/master/imutils/perspective.py
Args: Args:
point (np.ndarray): Array of points of the polygon. pts (np.ndarray): Array of points of the polygon.
Returns: Returns:
np.ndarray: Points ordered in clockwise direction. np.ndarray: Points ordered clockwise starting from top-left.
""" """
center = point.mean(axis=0) # Sort the points based on their x-coordinates
return point[ x_sorted = pts[np.argsort(pts[:, 0]), :]
np.argsort(np.arctan2(point[:, 1] - center[1], point[:, 0] - center[0]))
] # Separate the left-most and right-most points
left_most = x_sorted[:2, :]
right_most = x_sorted[2:, :]
# Sort the left-most coordinates by y-coordinates
left_most = left_most[np.argsort(left_most[:, 1]), :]
(tl, bl) = left_most # Top-left and bottom-left
# Use the top-left as an anchor to calculate distances to right points
# The further point will be the bottom-right
distances = np.sqrt(
((tl[0] - right_most[:, 0]) ** 2) + ((tl[1] - right_most[:, 1]) ** 2)
)
# Sort right points by distance (descending)
right_idx = np.argsort(distances)[::-1]
(br, tr) = right_most[right_idx, :] # Bottom-right and top-right
return np.array([tl, tr, br, bl])
@staticmethod @staticmethod
def _sort_polygon(points): def _sort_boxes(boxes):
""" """
Sort polygons based on their position in the image. If polygons are close in vertical Sort polygons based on their position in the image. If boxes are close in vertical
position (within 5 pixels), sort them by horizontal position. position (within 5 pixels), sort them by horizontal position.
Args: Args:
points: List of polygons to sort. points: detected text boxes with shape [4, 2]
Returns: Returns:
List: Sorted list of polygons. List: sorted boxes(array) with shape [4, 2]
""" """
points.sort(key=lambda x: (x[0][1], x[0][0])) boxes.sort(key=lambda x: (x[0][1], x[0][0]))
for i in range(len(points) - 1): for i in range(len(boxes) - 1):
for j in range(i, -1, -1): for j in range(i, -1, -1):
if abs(points[j + 1][0][1] - points[j][0][1]) < 5 and ( if abs(boxes[j + 1][0][1] - boxes[j][0][1]) < 5 and (
points[j + 1][0][0] < points[j][0][0] boxes[j + 1][0][0] < boxes[j][0][0]
): ):
temp = points[j] temp = boxes[j]
points[j] = points[j + 1] boxes[j] = boxes[j + 1]
points[j + 1] = temp boxes[j + 1] = temp
else: else:
break break
return points return boxes
@staticmethod @staticmethod
def _zero_pad(image: np.ndarray) -> np.ndarray: def _zero_pad(image: np.ndarray) -> np.ndarray:
@ -583,9 +593,11 @@ class LicensePlateProcessingMixin:
for j in range(len(outputs)): for j in range(len(outputs)):
label, score = outputs[j] label, score = outputs[j]
results[indices[i + j]] = [label, score] results[indices[i + j]] = [label, score]
# make sure we have high confidence if we need to flip a box, this will be rare in lpr # make sure we have high confidence if we need to flip a box
if "180" in label and score >= 0.9: if "180" in label and score >= 0.7:
images[indices[i + j]] = cv2.rotate(images[indices[i + j]], 1) images[indices[i + j]] = cv2.rotate(
images[indices[i + j]], cv2.ROTATE_180
)
return images, results return images, results
@ -682,7 +694,7 @@ class LicensePlateProcessingMixin:
) )
height, width = image.shape[0:2] height, width = image.shape[0:2]
if height * 1.0 / width >= 1.5: if height * 1.0 / width >= 1.5:
image = np.rot90(image, k=3) image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
return image return image
def _detect_license_plate(self, input: np.ndarray) -> tuple[int, int, int, int]: def _detect_license_plate(self, input: np.ndarray) -> tuple[int, int, int, int]:
@ -942,9 +954,23 @@ class LicensePlateProcessingMixin:
return return
license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420) license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
# Expand the license_plate_box by 30%
box_array = np.array(license_plate_box)
expansion = (box_array[2:] - box_array[:2]) * 0.30
expanded_box = np.array(
[
license_plate_box[0] - expansion[0],
license_plate_box[1] - expansion[1],
license_plate_box[2] + expansion[0],
license_plate_box[3] + expansion[1],
]
).clip(0, [license_plate_frame.shape[1], license_plate_frame.shape[0]] * 2)
# Crop using the expanded box
license_plate_frame = license_plate_frame[ license_plate_frame = license_plate_frame[
license_plate_box[1] : license_plate_box[3], int(expanded_box[1]) : int(expanded_box[3]),
license_plate_box[0] : license_plate_box[2], int(expanded_box[0]) : int(expanded_box[2]),
] ]
# double the size of the license plate frame for better OCR # double the size of the license plate frame for better OCR
@ -1032,22 +1058,15 @@ class LicensePlateProcessingMixin:
) )
# Send the result to the API # Send the result to the API
resp = requests.post( self.sub_label_publisher.publish(
f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label", EventMetadataTypeEnum.sub_label, (id, sub_label, avg_confidence)
json={
"camera": obj_data.get("camera"),
"subLabel": sub_label,
"subLabelScore": avg_confidence,
},
) )
self.detected_license_plates[id] = {
if resp.status_code == 200: "plate": top_plate,
self.detected_license_plates[id] = { "char_confidences": top_char_confidences,
"plate": top_plate, "area": top_area,
"char_confidences": top_char_confidences, "obj_data": obj_data,
"area": top_area, }
"obj_data": obj_data,
}
def handle_request(self, topic, request_data) -> dict[str, any] | None: def handle_request(self, topic, request_data) -> dict[str, any] | None:
return return

View File

@ -8,6 +8,7 @@ import numpy as np
from peewee import DoesNotExist from peewee import DoesNotExist
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
from frigate.comms.event_metadata_updater import EventMetadataPublisher
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.data_processing.common.license_plate.mixin import ( from frigate.data_processing.common.license_plate.mixin import (
WRITE_DEBUG_IMAGES, WRITE_DEBUG_IMAGES,
@ -30,6 +31,7 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi):
def __init__( def __init__(
self, self,
config: FrigateConfig, config: FrigateConfig,
sub_label_publisher: EventMetadataPublisher,
metrics: DataProcessorMetrics, metrics: DataProcessorMetrics,
model_runner: LicensePlateModelRunner, model_runner: LicensePlateModelRunner,
detected_license_plates: dict[str, dict[str, any]], detected_license_plates: dict[str, dict[str, any]],
@ -38,6 +40,7 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi):
self.model_runner = model_runner self.model_runner = model_runner
self.lpr_config = config.lpr self.lpr_config = config.lpr
self.config = config self.config = config
self.sub_label_publisher = sub_label_publisher
super().__init__(config, metrics, model_runner) super().__init__(config, metrics, model_runner)
def process_data( def process_data(

View File

@ -5,10 +5,13 @@ import os
import cv2 import cv2
import numpy as np import numpy as np
import requests
from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
EventMetadataTypeEnum,
)
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.const import FRIGATE_LOCALHOST, MODEL_CACHE_DIR from frigate.const import MODEL_CACHE_DIR
from frigate.util.object import calculate_region from frigate.util.object import calculate_region
from ..types import DataProcessorMetrics from ..types import DataProcessorMetrics
@ -23,9 +26,15 @@ logger = logging.getLogger(__name__)
class BirdRealTimeProcessor(RealTimeProcessorApi): class BirdRealTimeProcessor(RealTimeProcessorApi):
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics): def __init__(
self,
config: FrigateConfig,
sub_label_publisher: EventMetadataPublisher,
metrics: DataProcessorMetrics,
):
super().__init__(config, metrics) super().__init__(config, metrics)
self.interpreter: Interpreter = None self.interpreter: Interpreter = None
self.sub_label_publisher = sub_label_publisher
self.tensor_input_details: dict[str, any] = None self.tensor_input_details: dict[str, any] = None
self.tensor_output_details: dict[str, any] = None self.tensor_output_details: dict[str, any] = None
self.detected_birds: dict[str, float] = {} self.detected_birds: dict[str, float] = {}
@ -134,17 +143,10 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
logger.debug(f"Score {score} is worse than previous score {previous_score}") logger.debug(f"Score {score} is worse than previous score {previous_score}")
return return
resp = requests.post( self.sub_label_publisher.publish(
f"{FRIGATE_LOCALHOST}/api/events/{obj_data['id']}/sub_label", EventMetadataTypeEnum.sub_label, (id, self.labelmap[best_id], score)
json={
"camera": obj_data.get("camera"),
"subLabel": self.labelmap[best_id],
"subLabelScore": score,
},
) )
self.detected_birds[obj_data["id"]] = score
if resp.status_code == 200:
self.detected_birds[obj_data["id"]] = score
def handle_request(self, topic, request_data): def handle_request(self, topic, request_data):
return None return None

View File

@ -11,11 +11,14 @@ from typing import Optional
import cv2 import cv2
import numpy as np import numpy as np
import requests
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
EventMetadataTypeEnum,
)
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.const import FACE_DIR, FRIGATE_LOCALHOST, MODEL_CACHE_DIR from frigate.const import FACE_DIR, MODEL_CACHE_DIR
from frigate.util.image import area from frigate.util.image import area
from ..types import DataProcessorMetrics from ..types import DataProcessorMetrics
@ -28,9 +31,15 @@ MIN_MATCHING_FACES = 2
class FaceRealTimeProcessor(RealTimeProcessorApi): class FaceRealTimeProcessor(RealTimeProcessorApi):
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics): def __init__(
self,
config: FrigateConfig,
sub_label_publisher: EventMetadataPublisher,
metrics: DataProcessorMetrics,
):
super().__init__(config, metrics) super().__init__(config, metrics)
self.face_config = config.face_recognition self.face_config = config.face_recognition
self.sub_label_publisher = sub_label_publisher
self.face_detector: cv2.FaceDetectorYN = None self.face_detector: cv2.FaceDetectorYN = None
self.landmark_detector: cv2.face.FacemarkLBF = None self.landmark_detector: cv2.face.FacemarkLBF = None
self.recognizer: cv2.face.LBPHFaceRecognizer = None self.recognizer: cv2.face.LBPHFaceRecognizer = None
@ -349,18 +358,10 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
self.__update_metrics(datetime.datetime.now().timestamp() - start) self.__update_metrics(datetime.datetime.now().timestamp() - start)
return return
resp = requests.post( self.sub_label_publisher.publish(
f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label", EventMetadataTypeEnum.sub_label, (id, sub_label, score)
json={
"camera": obj_data.get("camera"),
"subLabel": sub_label,
"subLabelScore": score,
},
) )
self.detected_faces[id] = face_score
if resp.status_code == 200:
self.detected_faces[id] = face_score
self.__update_metrics(datetime.datetime.now().timestamp() - start) self.__update_metrics(datetime.datetime.now().timestamp() - start)
def handle_request(self, topic, request_data) -> dict[str, any] | None: def handle_request(self, topic, request_data) -> dict[str, any] | None:

View File

@ -4,6 +4,7 @@ import logging
import numpy as np import numpy as np
from frigate.comms.event_metadata_updater import EventMetadataPublisher
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.data_processing.common.license_plate.mixin import ( from frigate.data_processing.common.license_plate.mixin import (
LicensePlateProcessingMixin, LicensePlateProcessingMixin,
@ -22,6 +23,7 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess
def __init__( def __init__(
self, self,
config: FrigateConfig, config: FrigateConfig,
sub_label_publisher: EventMetadataPublisher,
metrics: DataProcessorMetrics, metrics: DataProcessorMetrics,
model_runner: LicensePlateModelRunner, model_runner: LicensePlateModelRunner,
detected_license_plates: dict[str, dict[str, any]], detected_license_plates: dict[str, dict[str, any]],
@ -30,6 +32,7 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess
self.model_runner = model_runner self.model_runner = model_runner
self.lpr_config = config.lpr self.lpr_config = config.lpr
self.config = config self.config = config
self.sub_label_publisher = sub_label_publisher
super().__init__(config, metrics) super().__init__(config, metrics)
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray): def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):

View File

@ -99,5 +99,5 @@ class ONNXDetector(DetectionApi):
return post_process_yolov9(predictions, self.w, self.h) return post_process_yolov9(predictions, self.w, self.h)
else: else:
raise Exception( raise Exception(
f"{self.onnx_model_type} is currently not supported for rocm. See the docs for more info on supported models." f"{self.onnx_model_type} is currently not supported for onnx. See the docs for more info on supported models."
) )

View File

@ -1,170 +0,0 @@
import ctypes
import logging
import os
import subprocess
import sys
import cv2
import numpy as np
from pydantic import Field
from typing_extensions import Literal
from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
BaseDetectorConfig,
ModelTypeEnum,
PixelFormatEnum,
)
logger = logging.getLogger(__name__)
DETECTOR_KEY = "rocm"
def detect_gfx_version():
return subprocess.getoutput(
"unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo | grep gfx |head -1|awk '{print $2}'"
)
def auto_override_gfx_version():
# If environment variable already in place, do not override
gfx_version = detect_gfx_version()
old_override = os.getenv("HSA_OVERRIDE_GFX_VERSION")
if old_override not in (None, ""):
logger.warning(
f"AMD/ROCm: detected {gfx_version} but HSA_OVERRIDE_GFX_VERSION already present ({old_override}), not overriding!"
)
return old_override
mapping = {
"gfx90c": "9.0.0",
"gfx1031": "10.3.0",
"gfx1103": "11.0.0",
}
override = mapping.get(gfx_version)
if override is not None:
logger.warning(
f"AMD/ROCm: detected {gfx_version}, overriding HSA_OVERRIDE_GFX_VERSION={override}"
)
os.putenv("HSA_OVERRIDE_GFX_VERSION", override)
return override
return ""
class ROCmDetectorConfig(BaseDetectorConfig):
type: Literal[DETECTOR_KEY]
conserve_cpu: bool = Field(
default=True,
title="Conserve CPU at the expense of latency (and reduced max throughput)",
)
auto_override_gfx: bool = Field(
default=True, title="Automatically detect and override gfx version"
)
class ROCmDetector(DetectionApi):
type_key = DETECTOR_KEY
def __init__(self, detector_config: ROCmDetectorConfig):
if detector_config.auto_override_gfx:
auto_override_gfx_version()
try:
sys.path.append("/opt/rocm/lib")
import migraphx
logger.info("AMD/ROCm: loaded migraphx module")
except ModuleNotFoundError:
logger.error("AMD/ROCm: module loading failed, missing ROCm environment?")
raise
if detector_config.conserve_cpu:
logger.info("AMD/ROCm: switching HIP to blocking mode to conserve CPU")
ctypes.CDLL("/opt/rocm/lib/libamdhip64.so").hipSetDeviceFlags(4)
self.h = detector_config.model.height
self.w = detector_config.model.width
self.rocm_model_type = detector_config.model.model_type
self.rocm_model_px = detector_config.model.input_pixel_format
path = detector_config.model.path
mxr_path = os.path.splitext(path)[0] + ".mxr"
if path.endswith(".mxr"):
logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}")
self.model = migraphx.load(mxr_path)
elif os.path.exists(mxr_path):
logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}")
self.model = migraphx.load(mxr_path)
else:
logger.info(f"AMD/ROCm: loading model from {path}")
if (
path.endswith(".tf")
or path.endswith(".tf2")
or path.endswith(".tflite")
):
# untested
self.model = migraphx.parse_tf(path)
else:
self.model = migraphx.parse_onnx(path)
logger.info("AMD/ROCm: compiling the model")
self.model.compile(
migraphx.get_target("gpu"), offload_copy=True, fast_math=True
)
logger.info(f"AMD/ROCm: saving parsed model into {mxr_path}")
os.makedirs(os.path.join(MODEL_CACHE_DIR, "rocm"), exist_ok=True)
migraphx.save(self.model, mxr_path)
logger.info("AMD/ROCm: model loaded")
def detect_raw(self, tensor_input):
model_input_name = self.model.get_parameter_names()[0]
model_input_shape = tuple(
self.model.get_parameter_shapes()[model_input_name].lens()
)
tensor_input = cv2.dnn.blobFromImage(
tensor_input[0],
1.0,
(model_input_shape[3], model_input_shape[2]),
None,
swapRB=self.rocm_model_px == PixelFormatEnum.bgr,
).astype(np.uint8)
detector_result = self.model.run({model_input_name: tensor_input})[0]
addr = ctypes.cast(detector_result.data_ptr(), ctypes.POINTER(ctypes.c_float))
tensor_output = np.ctypeslib.as_array(
addr, shape=detector_result.get_shape().lens()
)
if self.rocm_model_type == ModelTypeEnum.yolonas:
predictions = tensor_output
detections = np.zeros((20, 6), np.float32)
for i, prediction in enumerate(predictions):
if i == 20:
break
(_, x_min, y_min, x_max, y_max, confidence, class_id) = prediction
# when running in GPU mode, empty predictions in the output have class_id of -1
if class_id < 0:
break
detections[i] = [
class_id,
confidence,
y_min / self.h,
x_min / self.w,
y_max / self.h,
x_max / self.w,
]
return detections
else:
raise Exception(
f"{self.rocm_model_type} is currently not supported for rocm. See the docs for more info on supported models."
)

View File

@ -15,6 +15,7 @@ from playhouse.sqliteq import SqliteQueueDatabase
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsResponder from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsResponder
from frigate.comms.event_metadata_updater import ( from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
EventMetadataSubscriber, EventMetadataSubscriber,
EventMetadataTypeEnum, EventMetadataTypeEnum,
) )
@ -43,12 +44,16 @@ from frigate.data_processing.real_time.license_plate import (
LicensePlateRealTimeProcessor, LicensePlateRealTimeProcessor,
) )
from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataEnum from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataEnum
from frigate.events.types import EventTypeEnum from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
from frigate.genai import get_genai_client from frigate.genai import get_genai_client
from frigate.models import Event from frigate.models import Event
from frigate.types import TrackedObjectUpdateTypesEnum from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import serialize from frigate.util.builtin import serialize
from frigate.util.image import SharedMemoryFrameManager, calculate_region from frigate.util.image import (
SharedMemoryFrameManager,
calculate_region,
ensure_jpeg_bytes,
)
from frigate.util.path import get_event_thumbnail_bytes from frigate.util.path import get_event_thumbnail_bytes
from .embeddings import Embeddings from .embeddings import Embeddings
@ -85,6 +90,7 @@ class EmbeddingMaintainer(threading.Thread):
self.event_subscriber = EventUpdateSubscriber() self.event_subscriber = EventUpdateSubscriber()
self.event_end_subscriber = EventEndSubscriber() self.event_end_subscriber = EventEndSubscriber()
self.event_metadata_publisher = EventMetadataPublisher()
self.event_metadata_subscriber = EventMetadataSubscriber( self.event_metadata_subscriber = EventMetadataSubscriber(
EventMetadataTypeEnum.regenerate_description EventMetadataTypeEnum.regenerate_description
) )
@ -104,15 +110,27 @@ class EmbeddingMaintainer(threading.Thread):
self.realtime_processors: list[RealTimeProcessorApi] = [] self.realtime_processors: list[RealTimeProcessorApi] = []
if self.config.face_recognition.enabled: if self.config.face_recognition.enabled:
self.realtime_processors.append(FaceRealTimeProcessor(self.config, metrics)) self.realtime_processors.append(
FaceRealTimeProcessor(
self.config, self.event_metadata_publisher, metrics
)
)
if self.config.classification.bird.enabled: if self.config.classification.bird.enabled:
self.realtime_processors.append(BirdRealTimeProcessor(self.config, metrics)) self.realtime_processors.append(
BirdRealTimeProcessor(
self.config, self.event_metadata_publisher, metrics
)
)
if self.config.lpr.enabled: if self.config.lpr.enabled:
self.realtime_processors.append( self.realtime_processors.append(
LicensePlateRealTimeProcessor( LicensePlateRealTimeProcessor(
self.config, metrics, lpr_model_runner, self.detected_license_plates self.config,
self.event_metadata_publisher,
metrics,
lpr_model_runner,
self.detected_license_plates,
) )
) )
@ -122,12 +140,17 @@ class EmbeddingMaintainer(threading.Thread):
if self.config.lpr.enabled: if self.config.lpr.enabled:
self.post_processors.append( self.post_processors.append(
LicensePlatePostProcessor( LicensePlatePostProcessor(
self.config, metrics, lpr_model_runner, self.detected_license_plates self.config,
self.event_metadata_publisher,
metrics,
lpr_model_runner,
self.detected_license_plates,
) )
) )
self.stop_event = stop_event self.stop_event = stop_event
self.tracked_events: dict[str, list[any]] = {} self.tracked_events: dict[str, list[any]] = {}
self.early_request_sent: dict[str, bool] = {}
self.genai_client = get_genai_client(config) self.genai_client = get_genai_client(config)
# recordings data # recordings data
@ -145,6 +168,7 @@ class EmbeddingMaintainer(threading.Thread):
self.event_subscriber.stop() self.event_subscriber.stop()
self.event_end_subscriber.stop() self.event_end_subscriber.stop()
self.recordings_subscriber.stop() self.recordings_subscriber.stop()
self.event_metadata_publisher.stop()
self.event_metadata_subscriber.stop() self.event_metadata_subscriber.stop()
self.embeddings_responder.stop() self.embeddings_responder.stop()
self.requestor.stop() self.requestor.stop()
@ -236,6 +260,43 @@ class EmbeddingMaintainer(threading.Thread):
self.tracked_events[data["id"]].append(data) self.tracked_events[data["id"]].append(data)
# check if we're configured to send an early request after a minimum number of updates received
if (
self.genai_client is not None
and camera_config.genai.send_triggers.after_significant_updates
):
if (
len(self.tracked_events.get(data["id"], []))
>= camera_config.genai.send_triggers.after_significant_updates
and data["id"] not in self.early_request_sent
):
if data["has_clip"] and data["has_snapshot"]:
event: Event = Event.get(Event.id == data["id"])
if (
not camera_config.genai.objects
or event.label in camera_config.genai.objects
) and (
not camera_config.genai.required_zones
or set(data["entered_zones"])
& set(camera_config.genai.required_zones)
):
logger.debug(f"{camera} sending early request to GenAI")
self.early_request_sent[data["id"]] = True
threading.Thread(
target=self._genai_embed_description,
name=f"_genai_embed_description_{event.id}",
daemon=True,
args=(
event,
[
data["thumbnail"]
for data in self.tracked_events[data["id"]]
],
),
).start()
self.frame_manager.close(frame_name) self.frame_manager.close(frame_name)
def _process_finalized(self) -> None: def _process_finalized(self) -> None:
@ -293,10 +354,11 @@ class EmbeddingMaintainer(threading.Thread):
# Embed the thumbnail # Embed the thumbnail
self._embed_thumbnail(event_id, thumbnail) self._embed_thumbnail(event_id, thumbnail)
# Run GenAI
if ( if (
camera_config.genai.enabled camera_config.genai.enabled
and camera_config.genai.send_triggers.tracked_object_end
and self.genai_client is not None and self.genai_client is not None
and event.data.get("description") is None
and ( and (
not camera_config.genai.objects not camera_config.genai.objects
or event.label in camera_config.genai.objects or event.label in camera_config.genai.objects
@ -306,82 +368,7 @@ class EmbeddingMaintainer(threading.Thread):
or set(event.zones) & set(camera_config.genai.required_zones) or set(event.zones) & set(camera_config.genai.required_zones)
) )
): ):
if event.has_snapshot and camera_config.genai.use_snapshot: self._process_genai_description(event, camera_config, thumbnail)
with open(
os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"),
"rb",
) as image_file:
snapshot_image = image_file.read()
img = cv2.imdecode(
np.frombuffer(snapshot_image, dtype=np.int8),
cv2.IMREAD_COLOR,
)
# crop snapshot based on region before sending off to genai
height, width = img.shape[:2]
x1_rel, y1_rel, width_rel, height_rel = event.data["region"]
x1, y1 = int(x1_rel * width), int(y1_rel * height)
cropped_image = img[
y1 : y1 + int(height_rel * height),
x1 : x1 + int(width_rel * width),
]
_, buffer = cv2.imencode(".jpg", cropped_image)
snapshot_image = buffer.tobytes()
num_thumbnails = len(self.tracked_events.get(event_id, []))
embed_image = (
[snapshot_image]
if event.has_snapshot and camera_config.genai.use_snapshot
else (
[
data["thumbnail"]
for data in self.tracked_events[event_id]
]
if num_thumbnails > 0
else [thumbnail]
)
)
if camera_config.genai.debug_save_thumbnails and num_thumbnails > 0:
logger.debug(
f"Saving {num_thumbnails} thumbnails for event {event.id}"
)
Path(
os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")
).mkdir(parents=True, exist_ok=True)
for idx, data in enumerate(self.tracked_events[event_id], 1):
jpg_bytes: bytes = data["thumbnail"]
if jpg_bytes is None:
logger.warning(
f"Unable to save thumbnail {idx} for {event.id}."
)
else:
with open(
os.path.join(
CLIPS_DIR,
f"genai-requests/{event.id}/{idx}.jpg",
),
"wb",
) as j:
j.write(jpg_bytes)
# Generate the description. Call happens in a thread since it is network bound.
threading.Thread(
target=self._embed_description,
name=f"_embed_description_{event.id}",
daemon=True,
args=(
event,
embed_image,
),
).start()
# Delete tracked events based on the event_id # Delete tracked events based on the event_id
if event_id in self.tracked_events: if event_id in self.tracked_events:
@ -407,15 +394,17 @@ class EmbeddingMaintainer(threading.Thread):
def _process_event_metadata(self): def _process_event_metadata(self):
# Check for regenerate description requests # Check for regenerate description requests
(topic, event_id, source) = self.event_metadata_subscriber.check_for_update( (topic, payload) = self.event_metadata_subscriber.check_for_update(timeout=0.01)
timeout=0.01
)
if topic is None: if topic is None:
return return
event_id, source = payload
if event_id: if event_id:
self.handle_regenerate_description(event_id, source) self.handle_regenerate_description(
event_id, RegenerateDescriptionEnum(source)
)
def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]: def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]:
"""Return jpg thumbnail of a region of the frame.""" """Return jpg thumbnail of a region of the frame."""
@ -440,7 +429,61 @@ class EmbeddingMaintainer(threading.Thread):
self.embeddings.embed_thumbnail(event_id, thumbnail) self.embeddings.embed_thumbnail(event_id, thumbnail)
def _embed_description(self, event: Event, thumbnails: list[bytes]) -> None: def _process_genai_description(self, event, camera_config, thumbnail) -> None:
if event.has_snapshot and camera_config.genai.use_snapshot:
snapshot_image = self._read_and_crop_snapshot(event, camera_config)
if not snapshot_image:
return
num_thumbnails = len(self.tracked_events.get(event.id, []))
# ensure we have a jpeg to pass to the model
thumbnail = ensure_jpeg_bytes(thumbnail)
embed_image = (
[snapshot_image]
if event.has_snapshot and camera_config.genai.use_snapshot
else (
[data["thumbnail"] for data in self.tracked_events[event.id]]
if num_thumbnails > 0
else [thumbnail]
)
)
if camera_config.genai.debug_save_thumbnails and num_thumbnails > 0:
logger.debug(f"Saving {num_thumbnails} thumbnails for event {event.id}")
Path(os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")).mkdir(
parents=True, exist_ok=True
)
for idx, data in enumerate(self.tracked_events[event.id], 1):
jpg_bytes: bytes = data["thumbnail"]
if jpg_bytes is None:
logger.warning(f"Unable to save thumbnail {idx} for {event.id}.")
else:
with open(
os.path.join(
CLIPS_DIR,
f"genai-requests/{event.id}/{idx}.jpg",
),
"wb",
) as j:
j.write(jpg_bytes)
# Generate the description. Call happens in a thread since it is network bound.
threading.Thread(
target=self._genai_embed_description,
name=f"_genai_embed_description_{event.id}",
daemon=True,
args=(
event,
embed_image,
),
).start()
def _genai_embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
"""Embed the description for an event.""" """Embed the description for an event."""
camera_config = self.config.cameras[event.camera] camera_config = self.config.cameras[event.camera]
@ -473,6 +516,45 @@ class EmbeddingMaintainer(threading.Thread):
description, description,
) )
def _read_and_crop_snapshot(self, event: Event, camera_config) -> bytes | None:
"""Read, decode, and crop the snapshot image."""
snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg")
if not os.path.isfile(snapshot_file):
logger.error(
f"Cannot load snapshot for {event.id}, file not found: {snapshot_file}"
)
return None
try:
with open(snapshot_file, "rb") as image_file:
snapshot_image = image_file.read()
img = cv2.imdecode(
np.frombuffer(snapshot_image, dtype=np.int8),
cv2.IMREAD_COLOR,
)
# Crop snapshot based on region
# provide full image if region doesn't exist (manual events)
height, width = img.shape[:2]
x1_rel, y1_rel, width_rel, height_rel = event.data.get(
"region", [0, 0, 1, 1]
)
x1, y1 = int(x1_rel * width), int(y1_rel * height)
cropped_image = img[
y1 : y1 + int(height_rel * height),
x1 : x1 + int(width_rel * width),
]
_, buffer = cv2.imencode(".jpg", cropped_image)
return buffer.tobytes()
except Exception:
return None
def handle_regenerate_description(self, event_id: str, source: str) -> None: def handle_regenerate_description(self, event_id: str, source: str) -> None:
try: try:
event: Event = Event.get(Event.id == event_id) event: Event = Event.get(Event.id == event_id)
@ -487,39 +569,18 @@ class EmbeddingMaintainer(threading.Thread):
thumbnail = get_event_thumbnail_bytes(event) thumbnail = get_event_thumbnail_bytes(event)
# ensure we have a jpeg to pass to the model
thumbnail = ensure_jpeg_bytes(thumbnail)
logger.debug( logger.debug(
f"Trying {source} regeneration for {event}, has_snapshot: {event.has_snapshot}" f"Trying {source} regeneration for {event}, has_snapshot: {event.has_snapshot}"
) )
if event.has_snapshot and source == "snapshot": if event.has_snapshot and source == "snapshot":
snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg") snapshot_image = self._read_and_crop_snapshot(event, camera_config)
if not snapshot_image:
if not os.path.isfile(snapshot_file):
logger.error(
f"Cannot regenerate description for {event.id}, snapshot file not found: {snapshot_file}"
)
return return
with open(snapshot_file, "rb") as image_file:
snapshot_image = image_file.read()
img = cv2.imdecode(
np.frombuffer(snapshot_image, dtype=np.int8), cv2.IMREAD_COLOR
)
# crop snapshot based on region before sending off to genai
# provide full image if region doesn't exist (manual events)
region = event.data.get("region", [0, 0, 1, 1])
height, width = img.shape[:2]
x1_rel, y1_rel, width_rel, height_rel = region
x1, y1 = int(x1_rel * width), int(y1_rel * height)
cropped_image = img[
y1 : y1 + int(height_rel * height), x1 : x1 + int(width_rel * width)
]
_, buffer = cv2.imencode(".jpg", cropped_image)
snapshot_image = buffer.tobytes()
embed_image = ( embed_image = (
[snapshot_image] [snapshot_image]
if event.has_snapshot and source == "snapshot" if event.has_snapshot and source == "snapshot"
@ -530,4 +591,4 @@ class EmbeddingMaintainer(threading.Thread):
) )
) )
self._embed_description(event, embed_image) self._genai_embed_description(event, embed_image)

View File

@ -135,8 +135,13 @@ class AudioEventMaintainer(threading.Thread):
# create communication for audio detections # create communication for audio detections
self.requestor = InterProcessRequestor() self.requestor = InterProcessRequestor()
self.config_subscriber = ConfigSubscriber(f"config/audio/{camera.name}") self.config_subscriber = ConfigSubscriber(f"config/audio/{camera.name}")
self.enabled_subscriber = ConfigSubscriber(
f"config/enabled/{camera.name}", True
)
self.detection_publisher = DetectionPublisher(DetectionTypeEnum.audio) self.detection_publisher = DetectionPublisher(DetectionTypeEnum.audio)
self.was_enabled = camera.enabled
def detect_audio(self, audio) -> None: def detect_audio(self, audio) -> None:
if not self.config.audio.enabled or self.stop_event.is_set(): if not self.config.audio.enabled or self.stop_event.is_set():
return return
@ -248,6 +253,23 @@ class AudioEventMaintainer(threading.Thread):
f"Failed to end audio event {detection['id']} with status code {resp.status_code}" f"Failed to end audio event {detection['id']} with status code {resp.status_code}"
) )
def expire_all_detections(self) -> None:
"""Immediately end all current detections"""
now = datetime.datetime.now().timestamp()
for label, detection in list(self.detections.items()):
if detection:
self.requestor.send_data(f"{self.config.name}/audio/{label}", "OFF")
resp = requests.put(
f"{FRIGATE_LOCALHOST}/api/events/{detection['id']}/end",
json={"end_time": now},
)
if resp.status_code == 200:
self.detections[label] = None
else:
self.logger.warning(
f"Failed to end audio event {detection['id']} with status code {resp.status_code}"
)
def start_or_restart_ffmpeg(self) -> None: def start_or_restart_ffmpeg(self) -> None:
self.audio_listener = start_or_restart_ffmpeg( self.audio_listener = start_or_restart_ffmpeg(
self.ffmpeg_cmd, self.ffmpeg_cmd,
@ -283,10 +305,41 @@ class AudioEventMaintainer(threading.Thread):
self.logger.error(f"Error reading audio data from ffmpeg process: {e}") self.logger.error(f"Error reading audio data from ffmpeg process: {e}")
log_and_restart() log_and_restart()
def _update_enabled_state(self) -> bool:
"""Fetch the latest config and update enabled state."""
_, config_data = self.enabled_subscriber.check_for_update()
if config_data:
self.config.enabled = config_data.enabled
return config_data.enabled
return self.config.enabled
def run(self) -> None: def run(self) -> None:
self.start_or_restart_ffmpeg() if self._update_enabled_state():
self.start_or_restart_ffmpeg()
while not self.stop_event.is_set(): while not self.stop_event.is_set():
enabled = self._update_enabled_state()
if enabled != self.was_enabled:
if enabled:
self.logger.debug(
f"Enabling audio detections for {self.config.name}"
)
self.start_or_restart_ffmpeg()
else:
self.logger.debug(
f"Disabling audio detections for {self.config.name}, ending events"
)
self.expire_all_detections()
stop_ffmpeg(self.audio_listener, self.logger)
self.audio_listener = None
self.was_enabled = enabled
continue
if not enabled:
time.sleep(0.1)
continue
# check if there is an updated config # check if there is an updated config
( (
updated_topic, updated_topic,
@ -298,10 +351,12 @@ class AudioEventMaintainer(threading.Thread):
self.read_audio() self.read_audio()
stop_ffmpeg(self.audio_listener, self.logger) if self.audio_listener:
stop_ffmpeg(self.audio_listener, self.logger)
self.logpipe.close() self.logpipe.close()
self.requestor.stop() self.requestor.stop()
self.config_subscriber.stop() self.config_subscriber.stop()
self.enabled_subscriber.stop()
self.detection_publisher.stop() self.detection_publisher.stop()

View File

@ -26,23 +26,30 @@ class OpenAIClient(GenAIClient):
def _send(self, prompt: str, images: list[bytes]) -> Optional[str]: def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
"""Submit a request to OpenAI.""" """Submit a request to OpenAI."""
encoded_images = [base64.b64encode(image).decode("utf-8") for image in images] encoded_images = [base64.b64encode(image).decode("utf-8") for image in images]
messages_content = []
for image in encoded_images:
messages_content.append(
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{image}",
"detail": "low",
},
}
)
messages_content.append(
{
"type": "text",
"text": prompt,
}
)
try: try:
result = self.provider.chat.completions.create( result = self.provider.chat.completions.create(
model=self.genai_config.model, model=self.genai_config.model,
messages=[ messages=[
{ {
"role": "user", "role": "user",
"content": [ "content": messages_content,
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{image}",
"detail": "low",
},
}
for image in encoded_images
]
+ [prompt],
}, },
], ],
timeout=self.timeout, timeout=self.timeout,

View File

@ -117,5 +117,9 @@ class RecordingsToDelete(Model): # type: ignore[misc]
class User(Model): # type: ignore[misc] class User(Model): # type: ignore[misc]
username = CharField(null=False, primary_key=True, max_length=30) username = CharField(null=False, primary_key=True, max_length=30)
role = CharField(
max_length=20,
default="admin",
)
password_hash = CharField(null=False, max_length=120) password_hash = CharField(null=False, max_length=120)
notification_tokens = JSONField() notification_tokens = JSONField()

View File

@ -17,7 +17,6 @@ from frigate.detectors.detector_config import (
InputDTypeEnum, InputDTypeEnum,
InputTensorEnum, InputTensorEnum,
) )
from frigate.detectors.plugins.rocm import DETECTOR_KEY as ROCM_DETECTOR_KEY
from frigate.util.builtin import EventsPerSecond, load_labels from frigate.util.builtin import EventsPerSecond, load_labels
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
from frigate.util.services import listen from frigate.util.services import listen
@ -52,13 +51,7 @@ class LocalObjectDetector(ObjectDetector):
self.labels = load_labels(labels) self.labels = load_labels(labels)
if detector_config: if detector_config:
if detector_config.type == ROCM_DETECTOR_KEY: self.input_transform = tensor_transform(detector_config.model.input_tensor)
# ROCm requires NHWC as input
self.input_transform = None
else:
self.input_transform = tensor_transform(
detector_config.model.input_tensor
)
self.dtype = detector_config.model.input_dtype self.dtype = detector_config.model.input_dtype
else: else:

View File

@ -9,9 +9,15 @@ from typing import Callable, Optional
import cv2 import cv2
import numpy as np import numpy as np
from peewee import DoesNotExist
from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
from frigate.comms.dispatcher import Dispatcher from frigate.comms.dispatcher import Dispatcher
from frigate.comms.event_metadata_updater import (
EventMetadataSubscriber,
EventMetadataTypeEnum,
)
from frigate.comms.events_updater import EventEndSubscriber, EventUpdatePublisher from frigate.comms.events_updater import EventEndSubscriber, EventUpdatePublisher
from frigate.comms.inter_process import InterProcessRequestor from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import ( from frigate.config import (
@ -23,6 +29,7 @@ from frigate.config import (
) )
from frigate.const import UPDATE_CAMERA_ACTIVITY from frigate.const import UPDATE_CAMERA_ACTIVITY
from frigate.events.types import EventStateEnum, EventTypeEnum from frigate.events.types import EventStateEnum, EventTypeEnum
from frigate.models import Event, Timeline
from frigate.ptz.autotrack import PtzAutoTrackerThread from frigate.ptz.autotrack import PtzAutoTrackerThread
from frigate.track.tracked_object import TrackedObject from frigate.track.tracked_object import TrackedObject
from frigate.util.image import ( from frigate.util.image import (
@ -61,6 +68,7 @@ class CameraState:
self.previous_frame_id = None self.previous_frame_id = None
self.callbacks = defaultdict(list) self.callbacks = defaultdict(list)
self.ptz_autotracker_thread = ptz_autotracker_thread self.ptz_autotracker_thread = ptz_autotracker_thread
self.prev_enabled = self.camera_config.enabled
def get_current_frame(self, draw_options={}): def get_current_frame(self, draw_options={}):
with self.current_frame_lock: with self.current_frame_lock:
@ -310,6 +318,7 @@ class CameraState:
# TODO: can i switch to looking this up and only changing when an event ends? # TODO: can i switch to looking this up and only changing when an event ends?
# maintain best objects # maintain best objects
camera_activity: dict[str, list[any]] = { camera_activity: dict[str, list[any]] = {
"enabled": True,
"motion": len(motion_boxes) > 0, "motion": len(motion_boxes) > 0,
"objects": [], "objects": [],
} }
@ -437,10 +446,15 @@ class TrackedObjectProcessor(threading.Thread):
self.last_motion_detected: dict[str, float] = {} self.last_motion_detected: dict[str, float] = {}
self.ptz_autotracker_thread = ptz_autotracker_thread self.ptz_autotracker_thread = ptz_autotracker_thread
self.config_enabled_subscriber = ConfigSubscriber("config/enabled/")
self.requestor = InterProcessRequestor() self.requestor = InterProcessRequestor()
self.detection_publisher = DetectionPublisher(DetectionTypeEnum.video) self.detection_publisher = DetectionPublisher(DetectionTypeEnum.video)
self.event_sender = EventUpdatePublisher() self.event_sender = EventUpdatePublisher()
self.event_end_subscriber = EventEndSubscriber() self.event_end_subscriber = EventEndSubscriber()
self.sub_label_subscriber = EventMetadataSubscriber(
EventMetadataTypeEnum.sub_label
)
self.camera_activity: dict[str, dict[str, any]] = {} self.camera_activity: dict[str, dict[str, any]] = {}
@ -679,8 +693,115 @@ class TrackedObjectProcessor(threading.Thread):
"""Returns the latest frame time for a given camera.""" """Returns the latest frame time for a given camera."""
return self.camera_states[camera].current_frame_time return self.camera_states[camera].current_frame_time
def set_sub_label(
self, event_id: str, sub_label: str | None, score: float | None
) -> None:
"""Update sub label for given event id."""
tracked_obj: TrackedObject = None
for state in self.camera_states.values():
tracked_obj = state.tracked_objects.get(event_id)
if tracked_obj is not None:
break
try:
event: Event = Event.get(Event.id == event_id)
except DoesNotExist:
event = None
if not tracked_obj and not event:
return
if tracked_obj:
tracked_obj.obj_data["sub_label"] = (sub_label, score)
if event:
event.sub_label = sub_label
data = event.data
if sub_label is None:
data["sub_label_score"] = None
elif score is not None:
data["sub_label_score"] = score
event.data = data
event.save()
# update timeline items
Timeline.update(
data=Timeline.data.update({"sub_label": (sub_label, score)})
).where(Timeline.source_id == event_id).execute()
return True
def force_end_all_events(self, camera: str, camera_state: CameraState):
"""Ends all active events on camera when disabling."""
last_frame_name = camera_state.previous_frame_id
for obj_id, obj in list(camera_state.tracked_objects.items()):
if "end_time" not in obj.obj_data:
logger.debug(f"Camera {camera} disabled, ending active event {obj_id}")
obj.obj_data["end_time"] = datetime.datetime.now().timestamp()
# end callbacks
for callback in camera_state.callbacks["end"]:
callback(camera, obj, last_frame_name)
# camera activity callbacks
for callback in camera_state.callbacks["camera_activity"]:
callback(
camera,
{"enabled": False, "motion": 0, "objects": []},
)
def run(self): def run(self):
while not self.stop_event.is_set(): while not self.stop_event.is_set():
# check for config updates
while True:
(
updated_enabled_topic,
updated_enabled_config,
) = self.config_enabled_subscriber.check_for_update()
if not updated_enabled_topic:
break
camera_name = updated_enabled_topic.rpartition("/")[-1]
self.config.cameras[
camera_name
].enabled = updated_enabled_config.enabled
if self.camera_states[camera_name].prev_enabled is None:
self.camera_states[
camera_name
].prev_enabled = updated_enabled_config.enabled
# manage camera disabled state
for camera, config in self.config.cameras.items():
if not config.enabled_in_config:
continue
current_enabled = config.enabled
camera_state = self.camera_states[camera]
if camera_state.prev_enabled and not current_enabled:
logger.debug(f"Not processing objects for disabled camera {camera}")
self.force_end_all_events(camera, camera_state)
camera_state.prev_enabled = current_enabled
if not current_enabled:
continue
# check for sub label updates
while True:
(topic, payload) = self.sub_label_subscriber.check_for_update(
timeout=0.1
)
if not topic:
break
(event_id, sub_label, score) = payload
self.set_sub_label(event_id, sub_label, score)
try: try:
( (
camera, camera,
@ -693,6 +814,10 @@ class TrackedObjectProcessor(threading.Thread):
except queue.Empty: except queue.Empty:
continue continue
if not self.config.cameras[camera].enabled:
logger.debug(f"Camera {camera} disabled, skipping update")
continue
camera_state = self.camera_states[camera] camera_state = self.camera_states[camera]
camera_state.update( camera_state.update(
@ -735,4 +860,7 @@ class TrackedObjectProcessor(threading.Thread):
self.detection_publisher.stop() self.detection_publisher.stop()
self.event_sender.stop() self.event_sender.stop()
self.event_end_subscriber.stop() self.event_end_subscriber.stop()
self.sub_label_subscriber.stop()
self.config_enabled_subscriber.stop()
logger.info("Exiting object processor...") logger.info("Exiting object processor...")

View File

@ -10,6 +10,7 @@ import queue
import subprocess as sp import subprocess as sp
import threading import threading
import traceback import traceback
from typing import Optional
import cv2 import cv2
import numpy as np import numpy as np
@ -380,8 +381,11 @@ class BirdsEyeFrameManager:
if mode == BirdseyeModeEnum.objects and object_box_count > 0: if mode == BirdseyeModeEnum.objects and object_box_count > 0:
return True return True
def update_frame(self, frame: np.ndarray): def update_frame(self, frame: Optional[np.ndarray] = None) -> bool:
"""Update to a new frame for birdseye.""" """
Update birdseye, optionally with a new frame.
When no frame is passed, check the layout and update for any disabled cameras.
"""
# determine how many cameras are tracking objects within the last inactivity_threshold seconds # determine how many cameras are tracking objects within the last inactivity_threshold seconds
active_cameras: set[str] = set( active_cameras: set[str] = set(
@ -389,11 +393,14 @@ class BirdsEyeFrameManager:
cam cam
for cam, cam_data in self.cameras.items() for cam, cam_data in self.cameras.items()
if self.config.cameras[cam].birdseye.enabled if self.config.cameras[cam].birdseye.enabled
and self.config.cameras[cam].enabled_in_config
and self.config.cameras[cam].enabled
and cam_data["last_active_frame"] > 0 and cam_data["last_active_frame"] > 0
and cam_data["current_frame_time"] - cam_data["last_active_frame"] and cam_data["current_frame_time"] - cam_data["last_active_frame"]
< self.inactivity_threshold < self.inactivity_threshold
] ]
) )
logger.debug(f"Active cameras: {active_cameras}")
max_cameras = self.config.birdseye.layout.max_cameras max_cameras = self.config.birdseye.layout.max_cameras
max_camera_refresh = False max_camera_refresh = False
@ -411,118 +418,125 @@ class BirdsEyeFrameManager:
- self.cameras[active_camera]["last_active_frame"] - self.cameras[active_camera]["last_active_frame"]
), ),
) )
active_cameras = limited_active_cameras[ active_cameras = limited_active_cameras[:max_cameras]
: self.config.birdseye.layout.max_cameras
]
max_camera_refresh = True max_camera_refresh = True
self.last_refresh_time = now self.last_refresh_time = now
# if there are no active cameras # Track if the frame changes
frame_changed = False
# If no active cameras and layout is already empty, no update needed
if len(active_cameras) == 0: if len(active_cameras) == 0:
# if the layout is already cleared # if the layout is already cleared
if len(self.camera_layout) == 0: if len(self.camera_layout) == 0:
return False return False
# if the layout needs to be cleared # if the layout needs to be cleared
else: self.camera_layout = []
self.camera_layout = [] self.active_cameras = set()
self.active_cameras = set()
self.clear_frame()
return True
# check if we need to reset the layout because there is a different number of cameras
if len(self.active_cameras) - len(active_cameras) == 0:
if len(self.active_cameras) == 1 and self.active_cameras != active_cameras:
reset_layout = True
elif max_camera_refresh:
reset_layout = True
else:
reset_layout = False
else:
reset_layout = True
# reset the layout if it needs to be different
if reset_layout:
logger.debug("Added new cameras, resetting layout...")
self.clear_frame() self.clear_frame()
self.active_cameras = active_cameras frame_changed = True
else:
# this also converts added_cameras from a set to a list since we need # Determine if layout needs resetting
# to pop elements in order if len(self.active_cameras) - len(active_cameras) == 0:
active_cameras_to_add = sorted( if (
active_cameras, len(self.active_cameras) == 1
# sort cameras by order and by name if the order is the same and self.active_cameras != active_cameras
key=lambda active_camera: ( ):
self.config.cameras[active_camera].birdseye.order, reset_layout = True
active_camera, elif max_camera_refresh:
), reset_layout = True
)
if len(active_cameras) == 1:
# show single camera as fullscreen
camera = active_cameras_to_add[0]
camera_dims = self.cameras[camera]["dimensions"].copy()
scaled_width = int(self.canvas.height * camera_dims[0] / camera_dims[1])
# center camera view in canvas and ensure that it fits
if scaled_width < self.canvas.width:
coefficient = 1
x_offset = int((self.canvas.width - scaled_width) / 2)
else: else:
coefficient = self.canvas.width / scaled_width reset_layout = False
x_offset = int(
(self.canvas.width - (scaled_width * coefficient)) / 2
)
self.camera_layout = [
[
(
camera,
(
x_offset,
0,
int(scaled_width * coefficient),
int(self.canvas.height * coefficient),
),
)
]
]
else: else:
# calculate optimal layout reset_layout = True
coefficient = self.canvas.get_coefficient(len(active_cameras))
calculating = True
# decrease scaling coefficient until height of all cameras can fit into the birdseye canvas if reset_layout:
while calculating: logger.debug("Resetting Birdseye layout...")
if self.stop_event.is_set(): self.clear_frame()
return self.active_cameras = active_cameras
layout_candidate = self.calculate_layout( # this also converts added_cameras from a set to a list since we need
active_cameras_to_add, # to pop elements in order
coefficient, active_cameras_to_add = sorted(
active_cameras,
# sort cameras by order and by name if the order is the same
key=lambda active_camera: (
self.config.cameras[active_camera].birdseye.order,
active_camera,
),
)
if len(active_cameras) == 1:
# show single camera as fullscreen
camera = active_cameras_to_add[0]
camera_dims = self.cameras[camera]["dimensions"].copy()
scaled_width = int(
self.canvas.height * camera_dims[0] / camera_dims[1]
) )
if not layout_candidate: # center camera view in canvas and ensure that it fits
if coefficient < 10: if scaled_width < self.canvas.width:
coefficient += 1 coefficient = 1
continue x_offset = int((self.canvas.width - scaled_width) / 2)
else: else:
logger.error("Error finding appropriate birdseye layout") coefficient = self.canvas.width / scaled_width
x_offset = int(
(self.canvas.width - (scaled_width * coefficient)) / 2
)
self.camera_layout = [
[
(
camera,
(
x_offset,
0,
int(scaled_width * coefficient),
int(self.canvas.height * coefficient),
),
)
]
]
else:
# calculate optimal layout
coefficient = self.canvas.get_coefficient(len(active_cameras))
calculating = True
# decrease scaling coefficient until height of all cameras can fit into the birdseye canvas
while calculating:
if self.stop_event.is_set():
return return
calculating = False layout_candidate = self.calculate_layout(
self.canvas.set_coefficient(len(active_cameras), coefficient) active_cameras_to_add, coefficient
)
self.camera_layout = layout_candidate if not layout_candidate:
if coefficient < 10:
coefficient += 1
continue
else:
logger.error(
"Error finding appropriate birdseye layout"
)
return
calculating = False
self.canvas.set_coefficient(len(active_cameras), coefficient)
for row in self.camera_layout: self.camera_layout = layout_candidate
for position in row: frame_changed = True
self.copy_to_position(
position[1],
position[0],
self.cameras[position[0]]["current_frame"],
)
return True # Draw the layout
for row in self.camera_layout:
for position in row:
src_frame = self.cameras[position[0]]["current_frame"]
if src_frame is None or src_frame.size == 0:
logger.debug(f"Skipping invalid frame for {position[0]}")
continue
self.copy_to_position(position[1], position[0], src_frame)
if frame is not None: # Frame presence indicates a potential change
frame_changed = True
return frame_changed
def calculate_layout( def calculate_layout(
self, self,
@ -676,30 +690,29 @@ class BirdsEyeFrameManager:
frame: np.ndarray, frame: np.ndarray,
) -> bool: ) -> bool:
# don't process if birdseye is disabled for this camera # don't process if birdseye is disabled for this camera
camera_config = self.config.cameras[camera].birdseye camera_config = self.config.cameras[camera]
force_update = False
if not camera_config.enabled:
return False
# disabling birdseye is a little tricky # disabling birdseye is a little tricky
if not camera_config.enabled: if not camera_config.birdseye.enabled or not camera_config.enabled:
# if we've rendered a frame (we have a value for last_active_frame) # if we've rendered a frame (we have a value for last_active_frame)
# then we need to set it to zero # then we need to set it to zero
if self.cameras[camera]["last_active_frame"] > 0: if self.cameras[camera]["last_active_frame"] > 0:
self.cameras[camera]["last_active_frame"] = 0 self.cameras[camera]["last_active_frame"] = 0
force_update = True
return False else:
return False
# update the last active frame for the camera # update the last active frame for the camera
self.cameras[camera]["current_frame"] = frame.copy() self.cameras[camera]["current_frame"] = frame.copy()
self.cameras[camera]["current_frame_time"] = frame_time self.cameras[camera]["current_frame_time"] = frame_time
if self.camera_active(camera_config.mode, object_count, motion_count): if self.camera_active(camera_config.birdseye.mode, object_count, motion_count):
self.cameras[camera]["last_active_frame"] = frame_time self.cameras[camera]["last_active_frame"] = frame_time
now = datetime.datetime.now().timestamp() now = datetime.datetime.now().timestamp()
# limit output to 10 fps # limit output to 10 fps
if (now - self.last_output_time) < 1 / 10: if not force_update and (now - self.last_output_time) < 1 / 10:
return False return False
try: try:
@ -711,7 +724,7 @@ class BirdsEyeFrameManager:
print(traceback.format_exc()) print(traceback.format_exc())
# if the frame was updated or the fps is too low, send frame # if the frame was updated or the fps is too low, send frame
if updated_frame or (now - self.last_output_time) > 1: if force_update or updated_frame or (now - self.last_output_time) > 1:
self.last_output_time = now self.last_output_time = now
return True return True
return False return False
@ -741,8 +754,10 @@ class Birdseye:
"birdseye", self.converter, websocket_server, stop_event "birdseye", self.converter, websocket_server, stop_event
) )
self.birdseye_manager = BirdsEyeFrameManager(config, stop_event) self.birdseye_manager = BirdsEyeFrameManager(config, stop_event)
self.config_subscriber = ConfigSubscriber("config/birdseye/") self.config_enabled_subscriber = ConfigSubscriber("config/enabled/")
self.birdseye_subscriber = ConfigSubscriber("config/birdseye/")
self.frame_manager = SharedMemoryFrameManager() self.frame_manager = SharedMemoryFrameManager()
self.stop_event = stop_event
if config.birdseye.restream: if config.birdseye.restream:
self.birdseye_buffer = self.frame_manager.create( self.birdseye_buffer = self.frame_manager.create(
@ -753,6 +768,22 @@ class Birdseye:
self.converter.start() self.converter.start()
self.broadcaster.start() self.broadcaster.start()
def __send_new_frame(self) -> None:
frame_bytes = self.birdseye_manager.frame.tobytes()
if self.config.birdseye.restream:
self.birdseye_buffer[:] = frame_bytes
try:
self.input.put_nowait(frame_bytes)
except queue.Full:
# drop frames if queue is full
pass
def all_cameras_disabled(self) -> None:
self.birdseye_manager.clear_frame()
self.__send_new_frame()
def write_data( def write_data(
self, self,
camera: str, camera: str,
@ -764,15 +795,27 @@ class Birdseye:
# check if there is an updated config # check if there is an updated config
while True: while True:
( (
updated_topic, updated_birdseye_topic,
updated_birdseye_config, updated_birdseye_config,
) = self.config_subscriber.check_for_update() ) = self.birdseye_subscriber.check_for_update()
if not updated_topic: (
updated_enabled_topic,
updated_enabled_config,
) = self.config_enabled_subscriber.check_for_update()
if not updated_birdseye_topic and not updated_enabled_topic:
break break
camera_name = updated_topic.rpartition("/")[-1] if updated_birdseye_config:
self.config.cameras[camera_name].birdseye = updated_birdseye_config camera_name = updated_birdseye_topic.rpartition("/")[-1]
self.config.cameras[camera_name].birdseye = updated_birdseye_config
if updated_enabled_config:
camera_name = updated_enabled_topic.rpartition("/")[-1]
self.config.cameras[
camera_name
].enabled = updated_enabled_config.enabled
if self.birdseye_manager.update( if self.birdseye_manager.update(
camera, camera,
@ -781,18 +824,10 @@ class Birdseye:
frame_time, frame_time,
frame, frame,
): ):
frame_bytes = self.birdseye_manager.frame.tobytes() self.__send_new_frame()
if self.config.birdseye.restream:
self.birdseye_buffer[:] = frame_bytes
try:
self.input.put_nowait(frame_bytes)
except queue.Full:
# drop frames if queue is full
pass
def stop(self) -> None: def stop(self) -> None:
self.config_subscriber.stop() self.birdseye_subscriber.stop()
self.config_enabled_subscriber.stop()
self.converter.join() self.converter.join()
self.broadcaster.join() self.broadcaster.join()

View File

@ -1,12 +1,12 @@
"""Handle outputting raw frigate frames""" """Handle outputting raw frigate frames"""
import datetime
import logging import logging
import multiprocessing as mp import multiprocessing as mp
import os import os
import shutil import shutil
import signal import signal
import threading import threading
from typing import Optional
from wsgiref.simple_server import make_server from wsgiref.simple_server import make_server
from setproctitle import setproctitle from setproctitle import setproctitle
@ -17,6 +17,7 @@ from ws4py.server.wsgirefserver import (
) )
from ws4py.server.wsgiutils import WebSocketWSGIApplication from ws4py.server.wsgiutils import WebSocketWSGIApplication
from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
from frigate.comms.ws import WebSocket from frigate.comms.ws import WebSocket
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
@ -24,11 +25,51 @@ from frigate.const import CACHE_DIR, CLIPS_DIR
from frigate.output.birdseye import Birdseye from frigate.output.birdseye import Birdseye
from frigate.output.camera import JsmpegCamera from frigate.output.camera import JsmpegCamera
from frigate.output.preview import PreviewRecorder from frigate.output.preview import PreviewRecorder
from frigate.util.image import SharedMemoryFrameManager from frigate.util.image import SharedMemoryFrameManager, get_blank_yuv_frame
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def check_disabled_camera_update(
config: FrigateConfig,
birdseye: Birdseye | None,
previews: dict[str, PreviewRecorder],
write_times: dict[str, float],
) -> None:
"""Check if camera is disabled / offline and needs an update."""
now = datetime.datetime.now().timestamp()
has_enabled_camera = False
for camera, last_update in write_times.items():
offline_time = now - last_update
if config.cameras[camera].enabled:
has_enabled_camera = True
else:
# flag camera as offline when it is disabled
previews[camera].flag_offline(now)
if offline_time > 1:
# last camera update was more than 1 second ago
# need to send empty data to birdseye because current
# frame is now out of date
if birdseye and offline_time < 10:
# we only need to send blank frames to birdseye at the beginning of a camera being offline
birdseye.write_data(
camera,
[],
[],
now,
get_blank_yuv_frame(
config.cameras[camera].detect.width,
config.cameras[camera].detect.height,
),
)
if not has_enabled_camera and birdseye:
birdseye.all_cameras_disabled()
def output_frames( def output_frames(
config: FrigateConfig, config: FrigateConfig,
): ):
@ -59,11 +100,18 @@ def output_frames(
detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video) detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video)
enabled_subscribers = {
camera: ConfigSubscriber(f"config/enabled/{camera}", True)
for camera in config.cameras.keys()
if config.cameras[camera].enabled_in_config
}
jsmpeg_cameras: dict[str, JsmpegCamera] = {} jsmpeg_cameras: dict[str, JsmpegCamera] = {}
birdseye: Optional[Birdseye] = None birdseye: Birdseye | None = None
preview_recorders: dict[str, PreviewRecorder] = {} preview_recorders: dict[str, PreviewRecorder] = {}
preview_write_times: dict[str, float] = {} preview_write_times: dict[str, float] = {}
failed_frame_requests: dict[str, int] = {} failed_frame_requests: dict[str, int] = {}
last_disabled_cam_check = datetime.datetime.now().timestamp()
move_preview_frames("cache") move_preview_frames("cache")
@ -80,8 +128,25 @@ def output_frames(
websocket_thread.start() websocket_thread.start()
def get_enabled_state(camera: str) -> bool:
_, config_data = enabled_subscribers[camera].check_for_update()
if config_data:
config.cameras[camera].enabled = config_data.enabled
return config_data.enabled
return config.cameras[camera].enabled
while not stop_event.is_set(): while not stop_event.is_set():
(topic, data) = detection_subscriber.check_for_update(timeout=1) (topic, data) = detection_subscriber.check_for_update(timeout=1)
now = datetime.datetime.now().timestamp()
if now - last_disabled_cam_check > 5:
# check disabled cameras every 5 seconds
last_disabled_cam_check = now
check_disabled_camera_update(
config, birdseye, preview_recorders, preview_write_times
)
if not topic: if not topic:
continue continue
@ -95,6 +160,9 @@ def output_frames(
_, _,
) = data ) = data
if not get_enabled_state(camera):
continue
frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv) frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv)
if frame is None: if frame is None:
@ -110,6 +178,12 @@ def output_frames(
else: else:
failed_frame_requests[camera] = 0 failed_frame_requests[camera] = 0
# send frames for low fps recording
preview_recorders[camera].write_data(
current_tracked_objects, motion_boxes, frame_time, frame
)
preview_write_times[camera] = frame_time
# send camera frame to ffmpeg process if websockets are connected # send camera frame to ffmpeg process if websockets are connected
if any( if any(
ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
@ -133,24 +207,6 @@ def output_frames(
frame, frame,
) )
# send frames for low fps recording
generated_preview = preview_recorders[camera].write_data(
current_tracked_objects, motion_boxes, frame_time, frame
)
preview_write_times[camera] = frame_time
# if another camera generated a preview,
# check for any cameras that are currently offline
# and need to generate a preview
if generated_preview:
logger.debug(
"Checking for offline cameras because another camera generated a preview."
)
for camera, time in preview_write_times.copy().items():
if time != 0 and frame_time - time > 10:
preview_recorders[camera].flag_offline(frame_time)
preview_write_times[camera] = frame_time
frame_manager.close(frame_name) frame_manager.close(frame_name)
move_preview_frames("clips") move_preview_frames("clips")
@ -184,6 +240,9 @@ def output_frames(
if birdseye is not None: if birdseye is not None:
birdseye.stop() birdseye.stop()
for subscriber in enabled_subscribers.values():
subscriber.stop()
websocket_server.manager.close_all() websocket_server.manager.close_all()
websocket_server.manager.stop() websocket_server.manager.stop()
websocket_server.manager.join() websocket_server.manager.join()

View File

@ -23,7 +23,7 @@ from frigate.ffmpeg_presets import (
) )
from frigate.models import Previews from frigate.models import Previews
from frigate.object_processing import TrackedObject from frigate.object_processing import TrackedObject
from frigate.util.image import copy_yuv_to_position, get_yuv_crop from frigate.util.image import copy_yuv_to_position, get_blank_yuv_frame, get_yuv_crop
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -153,6 +153,7 @@ class PreviewRecorder:
self.config = config self.config = config
self.start_time = 0 self.start_time = 0
self.last_output_time = 0 self.last_output_time = 0
self.offline = False
self.output_frames = [] self.output_frames = []
if config.detect.width > config.detect.height: if config.detect.width > config.detect.height:
@ -241,6 +242,17 @@ class PreviewRecorder:
self.last_output_time = ts self.last_output_time = ts
self.output_frames.append(ts) self.output_frames.append(ts)
def reset_frame_cache(self, frame_time: float) -> None:
self.segment_end = (
(datetime.datetime.now() + datetime.timedelta(hours=1))
.astimezone(datetime.timezone.utc)
.replace(minute=0, second=0, microsecond=0)
.timestamp()
)
self.start_time = frame_time
self.last_output_time = frame_time
self.output_frames: list[float] = []
def should_write_frame( def should_write_frame(
self, self,
current_tracked_objects: list[dict[str, any]], current_tracked_objects: list[dict[str, any]],
@ -307,7 +319,9 @@ class PreviewRecorder:
motion_boxes: list[list[int]], motion_boxes: list[list[int]],
frame_time: float, frame_time: float,
frame: np.ndarray, frame: np.ndarray,
) -> bool: ) -> None:
self.offline = False
# check for updated record config # check for updated record config
_, updated_record_config = self.config_subscriber.check_for_update() _, updated_record_config = self.config_subscriber.check_for_update()
@ -319,7 +333,7 @@ class PreviewRecorder:
self.start_time = frame_time self.start_time = frame_time
self.output_frames.append(frame_time) self.output_frames.append(frame_time)
self.write_frame_to_cache(frame_time, frame) self.write_frame_to_cache(frame_time, frame)
return False return
# check if PREVIEW clip should be generated and cached frames reset # check if PREVIEW clip should be generated and cached frames reset
if frame_time >= self.segment_end: if frame_time >= self.segment_end:
@ -340,32 +354,35 @@ class PreviewRecorder:
f"Not saving preview for {self.config.name} because there are no saved frames." f"Not saving preview for {self.config.name} because there are no saved frames."
) )
# reset frame cache self.reset_frame_cache(frame_time)
self.segment_end = (
(datetime.datetime.now() + datetime.timedelta(hours=1))
.astimezone(datetime.timezone.utc)
.replace(minute=0, second=0, microsecond=0)
.timestamp()
)
self.start_time = frame_time
self.last_output_time = frame_time
self.output_frames: list[float] = []
# include first frame to ensure consistent duration # include first frame to ensure consistent duration
if self.config.record.enabled: if self.config.record.enabled:
self.output_frames.append(frame_time) self.output_frames.append(frame_time)
self.write_frame_to_cache(frame_time, frame) self.write_frame_to_cache(frame_time, frame)
return True return
elif self.should_write_frame(current_tracked_objects, motion_boxes, frame_time): elif self.should_write_frame(current_tracked_objects, motion_boxes, frame_time):
self.output_frames.append(frame_time) self.output_frames.append(frame_time)
self.write_frame_to_cache(frame_time, frame) self.write_frame_to_cache(frame_time, frame)
return False return
def flag_offline(self, frame_time: float) -> None: def flag_offline(self, frame_time: float) -> None:
if not self.offline:
self.write_frame_to_cache(
frame_time,
get_blank_yuv_frame(
self.config.detect.width, self.config.detect.height
),
)
self.offline = True
# check if PREVIEW clip should be generated and cached frames reset # check if PREVIEW clip should be generated and cached frames reset
if frame_time >= self.segment_end: if frame_time >= self.segment_end:
if len(self.output_frames) == 0: if len(self.output_frames) == 0:
# camera has been offline for entire hour
# we have no preview to create
self.reset_frame_cache(frame_time)
return return
old_frame_path = get_cache_image_name( old_frame_path = get_cache_image_name(
@ -382,16 +399,7 @@ class PreviewRecorder:
self.requestor, self.requestor,
).start() ).start()
# reset frame cache self.reset_frame_cache(frame_time)
self.segment_end = (
(datetime.datetime.now() + datetime.timedelta(hours=1))
.astimezone(datetime.timezone.utc)
.replace(minute=0, second=0, microsecond=0)
.timestamp()
)
self.start_time = frame_time
self.last_output_time = frame_time
self.output_frames = []
def stop(self) -> None: def stop(self) -> None:
self.requestor.stop() self.requestor.stop()

View File

@ -150,6 +150,7 @@ class ReviewSegmentMaintainer(threading.Thread):
self.requestor = InterProcessRequestor() self.requestor = InterProcessRequestor()
self.record_config_subscriber = ConfigSubscriber("config/record/") self.record_config_subscriber = ConfigSubscriber("config/record/")
self.review_config_subscriber = ConfigSubscriber("config/review/") self.review_config_subscriber = ConfigSubscriber("config/review/")
self.enabled_config_subscriber = ConfigSubscriber("config/enabled/")
self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all) self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all)
# manual events # manual events
@ -450,7 +451,16 @@ class ReviewSegmentMaintainer(threading.Thread):
updated_review_config, updated_review_config,
) = self.review_config_subscriber.check_for_update() ) = self.review_config_subscriber.check_for_update()
if not updated_record_topic and not updated_review_topic: (
updated_enabled_topic,
updated_enabled_config,
) = self.enabled_config_subscriber.check_for_update()
if (
not updated_record_topic
and not updated_review_topic
and not updated_enabled_topic
):
break break
if updated_record_topic: if updated_record_topic:
@ -461,6 +471,12 @@ class ReviewSegmentMaintainer(threading.Thread):
camera_name = updated_review_topic.rpartition("/")[-1] camera_name = updated_review_topic.rpartition("/")[-1]
self.config.cameras[camera_name].review = updated_review_config self.config.cameras[camera_name].review = updated_review_config
if updated_enabled_config:
camera_name = updated_enabled_topic.rpartition("/")[-1]
self.config.cameras[
camera_name
].enabled = updated_enabled_config.enabled
(topic, data) = self.detection_subscriber.check_for_update(timeout=1) (topic, data) = self.detection_subscriber.check_for_update(timeout=1)
if not topic: if not topic:
@ -494,7 +510,10 @@ class ReviewSegmentMaintainer(threading.Thread):
current_segment = self.active_review_segments.get(camera) current_segment = self.active_review_segments.get(camera)
if not self.config.cameras[camera].record.enabled: if (
not self.config.cameras[camera].enabled
or not self.config.cameras[camera].record.enabled
):
if current_segment: if current_segment:
self.end_segment(camera) self.end_segment(camera)
continue continue

View File

@ -504,7 +504,7 @@ class TestHttpReview(BaseTestHttp):
def test_post_reviews_delete_no_body(self): def test_post_reviews_delete_no_body(self):
with TestClient(self.app) as client: with TestClient(self.app) as client:
super().insert_mock_review_segment("123456.random") super().insert_mock_review_segment("123456.random")
response = client.post("/reviews/delete") response = client.post("/reviews/delete", headers={"remote-role": "admin"})
# Missing ids # Missing ids
assert response.status_code == 422 assert response.status_code == 422
@ -512,7 +512,9 @@ class TestHttpReview(BaseTestHttp):
with TestClient(self.app) as client: with TestClient(self.app) as client:
super().insert_mock_review_segment("123456.random") super().insert_mock_review_segment("123456.random")
body = {"ids": [""]} body = {"ids": [""]}
response = client.post("/reviews/delete", json=body) response = client.post(
"/reviews/delete", json=body, headers={"remote-role": "admin"}
)
# Missing ids # Missing ids
assert response.status_code == 422 assert response.status_code == 422
@ -521,7 +523,9 @@ class TestHttpReview(BaseTestHttp):
id = "123456.random" id = "123456.random"
super().insert_mock_review_segment(id) super().insert_mock_review_segment(id)
body = {"ids": ["1"]} body = {"ids": ["1"]}
response = client.post("/reviews/delete", json=body) response = client.post(
"/reviews/delete", json=body, headers={"remote-role": "admin"}
)
assert response.status_code == 200 assert response.status_code == 200
response_json = response.json() response_json = response.json()
assert response_json["success"] == True assert response_json["success"] == True
@ -536,7 +540,9 @@ class TestHttpReview(BaseTestHttp):
id = "123456.random" id = "123456.random"
super().insert_mock_review_segment(id) super().insert_mock_review_segment(id)
body = {"ids": [id]} body = {"ids": [id]}
response = client.post("/reviews/delete", json=body) response = client.post(
"/reviews/delete", json=body, headers={"remote-role": "admin"}
)
assert response.status_code == 200 assert response.status_code == 200
response_json = response.json() response_json = response.json()
assert response_json["success"] == True assert response_json["success"] == True
@ -558,7 +564,9 @@ class TestHttpReview(BaseTestHttp):
assert len(recordings_ids_in_db_before) == 2 assert len(recordings_ids_in_db_before) == 2
body = {"ids": ids} body = {"ids": ids}
response = client.post("/reviews/delete", json=body) response = client.post(
"/reviews/delete", json=body, headers={"remote-role": "admin"}
)
assert response.status_code == 200 assert response.status_code == 200
response_json = response.json() response_json = response.json()
assert response_json["success"] == True assert response_json["success"] == True

View File

@ -2,6 +2,7 @@ import datetime
import logging import logging
import os import os
import unittest import unittest
from unittest.mock import Mock
from fastapi.testclient import TestClient from fastapi.testclient import TestClient
from peewee_migrate import Router from peewee_migrate import Router
@ -10,6 +11,7 @@ from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase from playhouse.sqliteq import SqliteQueueDatabase
from frigate.api.fastapi_app import create_fastapi_app from frigate.api.fastapi_app import create_fastapi_app
from frigate.comms.event_metadata_updater import EventMetadataPublisher
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.const import BASE_DIR, CACHE_DIR from frigate.const import BASE_DIR, CACHE_DIR
from frigate.models import Event, Recordings, Timeline from frigate.models import Event, Recordings, Timeline
@ -172,7 +174,7 @@ class TestHttp(unittest.TestCase):
event = client.get(f"/events/{id}").json() event = client.get(f"/events/{id}").json()
assert event assert event
assert event["id"] == id assert event["id"] == id
client.delete(f"/events/{id}") client.delete(f"/events/{id}", headers={"remote-role": "admin"})
event = client.get(f"/events/{id}").json() event = client.get(f"/events/{id}").json()
assert event == "Event not found" assert event == "Event not found"
@ -192,12 +194,12 @@ class TestHttp(unittest.TestCase):
with TestClient(app) as client: with TestClient(app) as client:
_insert_mock_event(id) _insert_mock_event(id)
client.post(f"/events/{id}/retain") client.post(f"/events/{id}/retain", headers={"remote-role": "admin"})
event = client.get(f"/events/{id}").json() event = client.get(f"/events/{id}").json()
assert event assert event
assert event["id"] == id assert event["id"] == id
assert event["retain_indefinitely"] is True assert event["retain_indefinitely"] is True
client.delete(f"/events/{id}/retain") client.delete(f"/events/{id}/retain", headers={"remote-role": "admin"})
event = client.get(f"/events/{id}").json() event = client.get(f"/events/{id}").json()
assert event assert event
assert event["id"] == id assert event["id"] == id
@ -243,6 +245,7 @@ class TestHttp(unittest.TestCase):
assert len(events) == 1 assert len(events) == 1
def test_set_delete_sub_label(self): def test_set_delete_sub_label(self):
mock_event_updater = Mock(spec=EventMetadataPublisher)
app = create_fastapi_app( app = create_fastapi_app(
FrigateConfig(**self.minimal_config), FrigateConfig(**self.minimal_config),
self.db, self.db,
@ -252,16 +255,24 @@ class TestHttp(unittest.TestCase):
None, None,
None, None,
None, None,
None, mock_event_updater,
) )
id = "123456.random" id = "123456.random"
sub_label = "sub" sub_label = "sub"
def update_event(topic, payload):
event = Event.get(id=id)
event.sub_label = payload[1]
event.save()
mock_event_updater.publish.side_effect = update_event
with TestClient(app) as client: with TestClient(app) as client:
_insert_mock_event(id) _insert_mock_event(id)
new_sub_label_response = client.post( new_sub_label_response = client.post(
f"/events/{id}/sub_label", f"/events/{id}/sub_label",
json={"subLabel": sub_label}, json={"subLabel": sub_label},
headers={"remote-role": "admin"},
) )
assert new_sub_label_response.status_code == 200 assert new_sub_label_response.status_code == 200
event = client.get(f"/events/{id}").json() event = client.get(f"/events/{id}").json()
@ -271,6 +282,7 @@ class TestHttp(unittest.TestCase):
empty_sub_label_response = client.post( empty_sub_label_response = client.post(
f"/events/{id}/sub_label", f"/events/{id}/sub_label",
json={"subLabel": ""}, json={"subLabel": ""},
headers={"remote-role": "admin"},
) )
assert empty_sub_label_response.status_code == 200 assert empty_sub_label_response.status_code == 200
event = client.get(f"/events/{id}").json() event = client.get(f"/events/{id}").json()
@ -279,6 +291,7 @@ class TestHttp(unittest.TestCase):
assert event["sub_label"] == None assert event["sub_label"] == None
def test_sub_label_list(self): def test_sub_label_list(self):
mock_event_updater = Mock(spec=EventMetadataPublisher)
app = create_fastapi_app( app = create_fastapi_app(
FrigateConfig(**self.minimal_config), FrigateConfig(**self.minimal_config),
self.db, self.db,
@ -288,16 +301,24 @@ class TestHttp(unittest.TestCase):
None, None,
None, None,
None, None,
None, mock_event_updater,
) )
id = "123456.random" id = "123456.random"
sub_label = "sub" sub_label = "sub"
def update_event(topic, payload):
event = Event.get(id=id)
event.sub_label = payload[1]
event.save()
mock_event_updater.publish.side_effect = update_event
with TestClient(app) as client: with TestClient(app) as client:
_insert_mock_event(id) _insert_mock_event(id)
client.post( client.post(
f"/events/{id}/sub_label", f"/events/{id}/sub_label",
json={"subLabel": sub_label}, json={"subLabel": sub_label},
headers={"remote-role": "admin"},
) )
sub_labels = client.get("/sub_labels").json() sub_labels = client.get("/sub_labels").json()
assert sub_labels assert sub_labels

View File

@ -300,6 +300,12 @@ def migrate_016_0(config: dict[str, dict[str, any]]) -> dict[str, dict[str, any]
"""Handle migrating frigate config to 0.16-0""" """Handle migrating frigate config to 0.16-0"""
new_config = config.copy() new_config = config.copy()
# migrate config that does not have detect -> enabled explicitly set to have it enabled
if new_config.get("detect", {}).get("enabled") is None:
detect_config = new_config.get("detect", {})
detect_config["enabled"] = True
new_config["detect"] = detect_config
for name, camera in config.get("cameras", {}).items(): for name, camera in config.get("cameras", {}).items():
camera_config: dict[str, dict[str, any]] = camera.copy() camera_config: dict[str, dict[str, any]] = camera.copy()

View File

@ -632,6 +632,22 @@ def copy_yuv_to_position(
) )
def get_blank_yuv_frame(width: int, height: int) -> np.ndarray:
"""Creates a black YUV 4:2:0 frame."""
yuv_height = height * 3 // 2
yuv_frame = np.zeros((yuv_height, width), dtype=np.uint8)
uv_height = height // 2
# The U and V planes are stored after the Y plane.
u_start = height # U plane starts right after Y plane
v_start = u_start + uv_height // 2 # V plane starts after U plane
yuv_frame[u_start : u_start + uv_height, :width] = 128
yuv_frame[v_start : v_start + uv_height, :width] = 128
return yuv_frame
def yuv_region_2_yuv(frame, region): def yuv_region_2_yuv(frame, region):
try: try:
# TODO: does this copy the numpy array? # TODO: does this copy the numpy array?
@ -959,3 +975,22 @@ def get_histogram(image, x_min, y_min, x_max, y_max):
[image_bgr], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256] [image_bgr], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256]
) )
return cv2.normalize(hist, hist).flatten() return cv2.normalize(hist, hist).flatten()
def ensure_jpeg_bytes(image_data):
"""Ensure image data is jpeg bytes for genai"""
try:
img_array = np.frombuffer(image_data, dtype=np.uint8)
img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
if img is None:
return image_data
success, encoded_img = cv2.imencode(".jpg", img)
if success:
return encoded_img.tobytes()
except Exception as e:
logger.warning(f"Error when converting thumbnail to jpeg for genai: {e}")
return image_data

View File

@ -362,7 +362,7 @@ def get_intel_gpu_stats(sriov: bool) -> dict[str, str]:
if video_frame is not None: if video_frame is not None:
video[key].append(float(video_frame)) video[key].append(float(video_frame))
if render["global"]: if render["global"] and video["global"]:
results["gpu"] = ( results["gpu"] = (
f"{round(((sum(render['global']) / len(render['global'])) + (sum(video['global']) / len(video['global']))) / 2, 2)}%" f"{round(((sum(render['global']) / len(render['global'])) + (sum(video['global']) / len(video['global']))) / 2, 2)}%"
) )

View File

@ -108,8 +108,20 @@ def capture_frames(
frame_rate.start() frame_rate.start()
skipped_eps = EventsPerSecond() skipped_eps = EventsPerSecond()
skipped_eps.start() skipped_eps.start()
config_subscriber = ConfigSubscriber(f"config/enabled/{config.name}", True)
def get_enabled_state():
"""Fetch the latest enabled state from ZMQ."""
_, config_data = config_subscriber.check_for_update()
if config_data:
return config_data.enabled
return config.enabled
while not stop_event.is_set():
if not get_enabled_state():
logger.debug(f"Stopping capture thread for disabled {config.name}")
break
while True:
fps.value = frame_rate.eps() fps.value = frame_rate.eps()
skipped_fps.value = skipped_eps.eps() skipped_fps.value = skipped_eps.eps()
current_frame.value = datetime.datetime.now().timestamp() current_frame.value = datetime.datetime.now().timestamp()
@ -178,26 +190,38 @@ class CameraWatchdog(threading.Thread):
self.stop_event = stop_event self.stop_event = stop_event
self.sleeptime = self.config.ffmpeg.retry_interval self.sleeptime = self.config.ffmpeg.retry_interval
def run(self): self.config_subscriber = ConfigSubscriber(f"config/enabled/{camera_name}", True)
self.start_ffmpeg_detect() self.was_enabled = self.config.enabled
for c in self.config.ffmpeg_cmds: def _update_enabled_state(self) -> bool:
if "detect" in c["roles"]: """Fetch the latest config and update enabled state."""
continue _, config_data = self.config_subscriber.check_for_update()
logpipe = LogPipe( if config_data:
f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}" self.config.enabled = config_data.enabled
) return config_data.enabled
self.ffmpeg_other_processes.append(
{ return self.config.enabled
"cmd": c["cmd"],
"roles": c["roles"], def run(self):
"logpipe": logpipe, if self._update_enabled_state():
"process": start_or_restart_ffmpeg(c["cmd"], self.logger, logpipe), self.start_all_ffmpeg()
}
)
time.sleep(self.sleeptime) time.sleep(self.sleeptime)
while not self.stop_event.wait(self.sleeptime): while not self.stop_event.wait(self.sleeptime):
enabled = self._update_enabled_state()
if enabled != self.was_enabled:
if enabled:
self.logger.debug(f"Enabling camera {self.camera_name}")
self.start_all_ffmpeg()
else:
self.logger.debug(f"Disabling camera {self.camera_name}")
self.stop_all_ffmpeg()
self.was_enabled = enabled
continue
if not enabled:
continue
now = datetime.datetime.now().timestamp() now = datetime.datetime.now().timestamp()
if not self.capture_thread.is_alive(): if not self.capture_thread.is_alive():
@ -279,11 +303,9 @@ class CameraWatchdog(threading.Thread):
p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"] p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"]
) )
stop_ffmpeg(self.ffmpeg_detect_process, self.logger) self.stop_all_ffmpeg()
for p in self.ffmpeg_other_processes:
stop_ffmpeg(p["process"], self.logger)
p["logpipe"].close()
self.logpipe.close() self.logpipe.close()
self.config_subscriber.stop()
def start_ffmpeg_detect(self): def start_ffmpeg_detect(self):
ffmpeg_cmd = [ ffmpeg_cmd = [
@ -306,6 +328,43 @@ class CameraWatchdog(threading.Thread):
) )
self.capture_thread.start() self.capture_thread.start()
def start_all_ffmpeg(self):
"""Start all ffmpeg processes (detection and others)."""
logger.debug(f"Starting all ffmpeg processes for {self.camera_name}")
self.start_ffmpeg_detect()
for c in self.config.ffmpeg_cmds:
if "detect" in c["roles"]:
continue
logpipe = LogPipe(
f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}"
)
self.ffmpeg_other_processes.append(
{
"cmd": c["cmd"],
"roles": c["roles"],
"logpipe": logpipe,
"process": start_or_restart_ffmpeg(c["cmd"], self.logger, logpipe),
}
)
def stop_all_ffmpeg(self):
"""Stop all ffmpeg processes (detection and others)."""
logger.debug(f"Stopping all ffmpeg processes for {self.camera_name}")
if self.capture_thread is not None and self.capture_thread.is_alive():
self.capture_thread.join(timeout=5)
if self.capture_thread.is_alive():
self.logger.warning(
f"Capture thread for {self.camera_name} did not stop gracefully."
)
if self.ffmpeg_detect_process is not None:
stop_ffmpeg(self.ffmpeg_detect_process, self.logger)
self.ffmpeg_detect_process = None
for p in self.ffmpeg_other_processes[:]:
if p["process"] is not None:
stop_ffmpeg(p["process"], self.logger)
p["logpipe"].close()
self.ffmpeg_other_processes.clear()
def get_latest_segment_datetime(self, latest_segment: datetime.datetime) -> int: def get_latest_segment_datetime(self, latest_segment: datetime.datetime) -> int:
"""Checks if ffmpeg is still writing recording segments to cache.""" """Checks if ffmpeg is still writing recording segments to cache."""
cache_files = sorted( cache_files = sorted(
@ -539,7 +598,8 @@ def process_frames(
exit_on_empty: bool = False, exit_on_empty: bool = False,
): ):
next_region_update = get_tomorrow_at_time(2) next_region_update = get_tomorrow_at_time(2)
config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}", True) detect_config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}", True)
enabled_config_subscriber = ConfigSubscriber(f"config/enabled/{camera_name}", True)
fps_tracker = EventsPerSecond() fps_tracker = EventsPerSecond()
fps_tracker.start() fps_tracker.start()
@ -549,9 +609,43 @@ def process_frames(
region_min_size = get_min_region_size(model_config) region_min_size = get_min_region_size(model_config)
prev_enabled = None
while not stop_event.is_set(): while not stop_event.is_set():
_, enabled_config = enabled_config_subscriber.check_for_update()
current_enabled = (
enabled_config.enabled
if enabled_config
else (prev_enabled if prev_enabled is not None else True)
)
if prev_enabled is None:
prev_enabled = current_enabled
if prev_enabled and not current_enabled and camera_metrics.frame_queue.empty():
logger.debug(f"Camera {camera_name} disabled, clearing tracked objects")
# Clear norfair's dictionaries
object_tracker.tracked_objects.clear()
object_tracker.disappeared.clear()
object_tracker.stationary_box_history.clear()
object_tracker.positions.clear()
object_tracker.track_id_map.clear()
# Clear internal norfair states
for trackers_by_type in object_tracker.trackers.values():
for tracker in trackers_by_type.values():
tracker.tracked_objects = []
for tracker in object_tracker.default_tracker.values():
tracker.tracked_objects = []
prev_enabled = current_enabled
if not current_enabled:
time.sleep(0.1)
continue
# check for updated detect config # check for updated detect config
_, updated_detect_config = config_subscriber.check_for_update() _, updated_detect_config = detect_config_subscriber.check_for_update()
if updated_detect_config: if updated_detect_config:
detect_config = updated_detect_config detect_config = updated_detect_config
@ -845,4 +939,5 @@ def process_frames(
motion_detector.stop() motion_detector.stop()
requestor.stop() requestor.stop()
config_subscriber.stop() detect_config_subscriber.stop()
enabled_config_subscriber.stop()

View File

@ -0,0 +1,37 @@
"""Peewee migrations -- 029_add_user_role.py.
Some examples (model - class or model name)::
> Model = migrator.orm['model_name'] # Return model in current state by name
> migrator.sql(sql) # Run custom SQL
> migrator.python(func, *args, **kwargs) # Run python code
> migrator.create_model(Model) # Create a model (could be used as decorator)
> migrator.remove_model(model, cascade=True) # Remove a model
> migrator.add_fields(model, **fields) # Add fields to a model
> migrator.change_fields(model, **fields) # Change fields
> migrator.remove_fields(model, *field_names, cascade=True)
> migrator.rename_field(model, old_field_name, new_field_name)
> migrator.rename_table(model, new_table_name)
> migrator.add_index(model, *col_names, unique=False)
> migrator.drop_index(model, *col_names)
> migrator.add_not_null(model, *field_names)
> migrator.drop_not_null(model, *field_names)
> migrator.add_default(model, field_name, default)
"""
import peewee as pw
SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
migrator.sql(
'ALTER TABLE "user" ADD COLUMN "role" VARCHAR(20) NOT NULL DEFAULT \'admin\''
)
migrator.sql('UPDATE "user" SET "role" = \'admin\' WHERE "role" IS NULL')
def rollback(migrator, database, fake=False, **kwargs):
migrator.sql('ALTER TABLE "user" DROP COLUMN "role"')

2446
web/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -14,45 +14,45 @@
"coverage": "vitest run --coverage" "coverage": "vitest run --coverage"
}, },
"dependencies": { "dependencies": {
"@cycjimmy/jsmpeg-player": "^6.1.1", "@cycjimmy/jsmpeg-player": "^6.1.2",
"@hookform/resolvers": "^3.9.0", "@hookform/resolvers": "^3.9.0",
"@melloware/react-logviewer": "^6.1.2", "@melloware/react-logviewer": "^6.1.2",
"@radix-ui/react-alert-dialog": "^1.1.2", "@radix-ui/react-alert-dialog": "^1.1.6",
"@radix-ui/react-aspect-ratio": "^1.1.0", "@radix-ui/react-aspect-ratio": "^1.1.2",
"@radix-ui/react-checkbox": "^1.1.2", "@radix-ui/react-checkbox": "^1.1.4",
"@radix-ui/react-context-menu": "^2.2.2", "@radix-ui/react-context-menu": "^2.2.6",
"@radix-ui/react-dialog": "^1.1.2", "@radix-ui/react-dialog": "^1.1.6",
"@radix-ui/react-dropdown-menu": "^2.1.2", "@radix-ui/react-dropdown-menu": "^2.1.6",
"@radix-ui/react-hover-card": "^1.1.2", "@radix-ui/react-hover-card": "^1.1.6",
"@radix-ui/react-label": "^2.1.0", "@radix-ui/react-label": "^2.1.2",
"@radix-ui/react-popover": "^1.1.2", "@radix-ui/react-popover": "^1.1.6",
"@radix-ui/react-radio-group": "^1.2.1", "@radix-ui/react-radio-group": "^1.2.3",
"@radix-ui/react-scroll-area": "^1.2.0", "@radix-ui/react-scroll-area": "^1.2.3",
"@radix-ui/react-select": "^2.1.2", "@radix-ui/react-select": "^2.1.6",
"@radix-ui/react-separator": "^1.1.0", "@radix-ui/react-separator": "^1.1.2",
"@radix-ui/react-slider": "^1.2.1", "@radix-ui/react-slider": "^1.2.3",
"@radix-ui/react-slot": "^1.1.2", "@radix-ui/react-slot": "^1.1.2",
"@radix-ui/react-switch": "^1.1.1", "@radix-ui/react-switch": "^1.1.3",
"@radix-ui/react-tabs": "^1.1.1", "@radix-ui/react-tabs": "^1.1.3",
"@radix-ui/react-toggle": "^1.1.0", "@radix-ui/react-toggle": "^1.1.2",
"@radix-ui/react-toggle-group": "^1.1.0", "@radix-ui/react-toggle-group": "^1.1.2",
"@radix-ui/react-tooltip": "^1.1.3", "@radix-ui/react-tooltip": "^1.1.8",
"apexcharts": "^3.52.0", "apexcharts": "^3.52.0",
"axios": "^1.7.7", "axios": "^1.7.7",
"class-variance-authority": "^0.7.0", "class-variance-authority": "^0.7.1",
"clsx": "^2.1.1", "clsx": "^2.1.1",
"cmdk": "^1.0.0", "cmdk": "^1.0.0",
"copy-to-clipboard": "^3.3.3", "copy-to-clipboard": "^3.3.3",
"date-fns": "^3.6.0", "date-fns": "^3.6.0",
"embla-carousel-react": "^8.2.0", "embla-carousel-react": "^8.2.0",
"framer-motion": "^11.5.4", "framer-motion": "^11.5.4",
"hls.js": "^1.5.17", "hls.js": "^1.5.20",
"idb-keyval": "^6.2.1", "idb-keyval": "^6.2.1",
"immer": "^10.1.1", "immer": "^10.1.1",
"konva": "^9.3.16", "konva": "^9.3.18",
"lodash": "^4.17.21", "lodash": "^4.17.21",
"lucide-react": "^0.407.0", "lucide-react": "^0.477.0",
"monaco-yaml": "^5.2.2", "monaco-yaml": "^5.3.1",
"next-themes": "^0.3.0", "next-themes": "^0.3.0",
"nosleep.js": "^0.12.0", "nosleep.js": "^0.12.0",
"react": "^18.3.1", "react": "^18.3.1",
@ -62,10 +62,10 @@
"react-dom": "^18.3.1", "react-dom": "^18.3.1",
"react-grid-layout": "^1.5.0", "react-grid-layout": "^1.5.0",
"react-hook-form": "^7.52.1", "react-hook-form": "^7.52.1",
"react-icons": "^5.2.1", "react-icons": "^5.5.0",
"react-konva": "^18.2.10", "react-konva": "^18.2.10",
"react-router-dom": "^6.26.0", "react-router-dom": "^6.26.0",
"react-swipeable": "^7.0.1", "react-swipeable": "^7.0.2",
"react-tracked": "^2.0.1", "react-tracked": "^2.0.1",
"react-transition-group": "^4.4.5", "react-transition-group": "^4.4.5",
"react-use-websocket": "^4.8.1", "react-use-websocket": "^4.8.1",
@ -75,7 +75,7 @@
"sonner": "^1.5.0", "sonner": "^1.5.0",
"sort-by": "^1.2.0", "sort-by": "^1.2.0",
"strftime": "^0.10.3", "strftime": "^0.10.3",
"swr": "^2.2.5", "swr": "^2.3.2",
"tailwind-merge": "^2.4.0", "tailwind-merge": "^2.4.0",
"tailwind-scrollbar": "^3.1.0", "tailwind-scrollbar": "^3.1.0",
"tailwindcss-animate": "^1.0.7", "tailwindcss-animate": "^1.0.7",
@ -97,8 +97,8 @@
"@types/strftime": "^0.9.8", "@types/strftime": "^0.9.8",
"@typescript-eslint/eslint-plugin": "^7.5.0", "@typescript-eslint/eslint-plugin": "^7.5.0",
"@typescript-eslint/parser": "^7.5.0", "@typescript-eslint/parser": "^7.5.0",
"@vitejs/plugin-react-swc": "^3.7.1", "@vitejs/plugin-react-swc": "^3.8.0",
"@vitest/coverage-v8": "^2.0.5", "@vitest/coverage-v8": "^3.0.7",
"autoprefixer": "^10.4.20", "autoprefixer": "^10.4.20",
"eslint": "^8.57.0", "eslint": "^8.57.0",
"eslint-config-prettier": "^9.1.0", "eslint-config-prettier": "^9.1.0",
@ -115,8 +115,8 @@
"prettier": "^3.3.3", "prettier": "^3.3.3",
"prettier-plugin-tailwindcss": "^0.6.5", "prettier-plugin-tailwindcss": "^0.6.5",
"tailwindcss": "^3.4.9", "tailwindcss": "^3.4.9",
"typescript": "^5.5.4", "typescript": "^5.8.2",
"vite": "^5.4.0", "vite": "^6.2.0",
"vitest": "^2.0.5" "vitest": "^3.0.7"
} }
} }

View File

@ -10,6 +10,8 @@ import { Suspense, lazy } from "react";
import { Redirect } from "./components/navigation/Redirect"; import { Redirect } from "./components/navigation/Redirect";
import { cn } from "./lib/utils"; import { cn } from "./lib/utils";
import { isPWA } from "./utils/isPWA"; import { isPWA } from "./utils/isPWA";
import ProtectedRoute from "@/components/auth/ProtectedRoute";
import { AuthProvider } from "@/context/auth-context";
const Live = lazy(() => import("@/pages/Live")); const Live = lazy(() => import("@/pages/Live"));
const Events = lazy(() => import("@/pages/Events")); const Events = lazy(() => import("@/pages/Events"));
@ -21,45 +23,58 @@ const Settings = lazy(() => import("@/pages/Settings"));
const UIPlayground = lazy(() => import("@/pages/UIPlayground")); const UIPlayground = lazy(() => import("@/pages/UIPlayground"));
const FaceLibrary = lazy(() => import("@/pages/FaceLibrary")); const FaceLibrary = lazy(() => import("@/pages/FaceLibrary"));
const Logs = lazy(() => import("@/pages/Logs")); const Logs = lazy(() => import("@/pages/Logs"));
const AccessDenied = lazy(() => import("@/pages/AccessDenied"));
function App() { function App() {
return ( return (
<Providers> <Providers>
<BrowserRouter basename={window.baseUrl}> <AuthProvider>
<Wrapper> <BrowserRouter basename={window.baseUrl}>
<div className="size-full overflow-hidden"> <Wrapper>
{isDesktop && <Sidebar />} <div className="size-full overflow-hidden">
{isDesktop && <Statusbar />} {isDesktop && <Sidebar />}
{isMobile && <Bottombar />} {isDesktop && <Statusbar />}
<div {isMobile && <Bottombar />}
id="pageRoot" <div
className={cn( id="pageRoot"
"absolute right-0 top-0 overflow-hidden", className={cn(
isMobile "absolute right-0 top-0 overflow-hidden",
? `bottom-${isPWA ? 16 : 12} left-0 md:bottom-16 landscape:bottom-14 landscape:md:bottom-16` isMobile
: "bottom-8 left-[52px]", ? `bottom-${isPWA ? 16 : 12} left-0 md:bottom-16 landscape:bottom-14 landscape:md:bottom-16`
)} : "bottom-8 left-[52px]",
> )}
<Suspense> >
<Routes> <Suspense>
<Route index element={<Live />} /> <Routes>
<Route path="/events" element={<Redirect to="/review" />} /> <Route
<Route path="/review" element={<Events />} /> element={
<Route path="/explore" element={<Explore />} /> <ProtectedRoute requiredRoles={["viewer", "admin"]} />
<Route path="/export" element={<Exports />} /> }
<Route path="/system" element={<System />} /> >
<Route path="/settings" element={<Settings />} /> <Route index element={<Live />} />
<Route path="/config" element={<ConfigEditor />} /> <Route path="/review" element={<Events />} />
<Route path="/logs" element={<Logs />} /> <Route path="/explore" element={<Explore />} />
<Route path="/playground" element={<UIPlayground />} /> <Route path="/export" element={<Exports />} />
<Route path="/faces" element={<FaceLibrary />} /> <Route path="/settings" element={<Settings />} />
<Route path="*" element={<Redirect to="/" />} /> </Route>
</Routes> <Route
</Suspense> element={<ProtectedRoute requiredRoles={["admin"]} />}
>
<Route path="/system" element={<System />} />
<Route path="/config" element={<ConfigEditor />} />
<Route path="/logs" element={<Logs />} />
<Route path="/faces" element={<FaceLibrary />} />
<Route path="/playground" element={<UIPlayground />} />
</Route>
<Route path="/unauthorized" element={<AccessDenied />} />
<Route path="*" element={<Redirect to="/" />} />
</Routes>
</Suspense>
</div>
</div> </div>
</div> </Wrapper>
</Wrapper> </BrowserRouter>
</BrowserRouter> </AuthProvider>
</Providers> </Providers>
); );
} }

View File

@ -56,6 +56,7 @@ function useValue(): useValueReturn {
const { const {
record, record,
detect, detect,
enabled,
snapshots, snapshots,
audio, audio,
notifications, notifications,
@ -67,6 +68,7 @@ function useValue(): useValueReturn {
// @ts-expect-error we know this is correct // @ts-expect-error we know this is correct
state["config"]; state["config"];
cameraStates[`${name}/recordings/state`] = record ? "ON" : "OFF"; cameraStates[`${name}/recordings/state`] = record ? "ON" : "OFF";
cameraStates[`${name}/enabled/state`] = enabled ? "ON" : "OFF";
cameraStates[`${name}/detect/state`] = detect ? "ON" : "OFF"; cameraStates[`${name}/detect/state`] = detect ? "ON" : "OFF";
cameraStates[`${name}/snapshots/state`] = snapshots ? "ON" : "OFF"; cameraStates[`${name}/snapshots/state`] = snapshots ? "ON" : "OFF";
cameraStates[`${name}/audio/state`] = audio ? "ON" : "OFF"; cameraStates[`${name}/audio/state`] = audio ? "ON" : "OFF";
@ -164,6 +166,17 @@ export function useWs(watchTopic: string, publishTopic: string) {
return { value, send }; return { value, send };
} }
export function useEnabledState(camera: string): {
payload: ToggleableSetting;
send: (payload: ToggleableSetting, retain?: boolean) => void;
} {
const {
value: { payload },
send,
} = useWs(`${camera}/enabled/state`, `${camera}/enabled/set`);
return { payload: (payload ?? "ON") as ToggleableSetting, send };
}
export function useDetectState(camera: string): { export function useDetectState(camera: string): {
payload: ToggleableSetting; payload: ToggleableSetting;
send: (payload: ToggleableSetting, retain?: boolean) => void; send: (payload: ToggleableSetting, retain?: boolean) => void;

View File

@ -20,24 +20,23 @@ import {
import { useForm } from "react-hook-form"; import { useForm } from "react-hook-form";
import { zodResolver } from "@hookform/resolvers/zod"; import { zodResolver } from "@hookform/resolvers/zod";
import { z } from "zod"; import { z } from "zod";
import { AuthContext } from "@/context/auth-context";
interface UserAuthFormProps extends React.HTMLAttributes<HTMLDivElement> {} interface UserAuthFormProps extends React.HTMLAttributes<HTMLDivElement> {}
export function UserAuthForm({ className, ...props }: UserAuthFormProps) { export function UserAuthForm({ className, ...props }: UserAuthFormProps) {
const [isLoading, setIsLoading] = React.useState<boolean>(false); const [isLoading, setIsLoading] = React.useState<boolean>(false);
const { login } = React.useContext(AuthContext);
const formSchema = z.object({ const formSchema = z.object({
user: z.string(), user: z.string().min(1, "Username is required"),
password: z.string(), password: z.string().min(1, "Password is required"),
}); });
const form = useForm<z.infer<typeof formSchema>>({ const form = useForm<z.infer<typeof formSchema>>({
resolver: zodResolver(formSchema), resolver: zodResolver(formSchema),
mode: "onChange", mode: "onChange",
defaultValues: { defaultValues: { user: "", password: "" },
user: "",
password: "",
},
}); });
const onSubmit = async (values: z.infer<typeof formSchema>) => { const onSubmit = async (values: z.infer<typeof formSchema>) => {
@ -50,11 +49,14 @@ export function UserAuthForm({ className, ...props }: UserAuthFormProps) {
password: values.password, password: values.password,
}, },
{ {
headers: { headers: { "X-CSRF-TOKEN": 1 },
"X-CSRF-TOKEN": 1,
},
}, },
); );
const profileRes = await axios.get("/profile", { withCredentials: true });
login({
username: profileRes.data.username,
role: profileRes.data.role || "viewer",
});
window.location.href = baseUrl; window.location.href = baseUrl;
} catch (error) { } catch (error) {
if (axios.isAxiosError(error)) { if (axios.isAxiosError(error)) {
@ -85,7 +87,7 @@ export function UserAuthForm({ className, ...props }: UserAuthFormProps) {
return ( return (
<div className={cn("grid gap-6", className)} {...props}> <div className={cn("grid gap-6", className)} {...props}>
<Form {...form}> <Form {...form}>
<form onSubmit={form.handleSubmit(onSubmit)}> <form onSubmit={form.handleSubmit(onSubmit)} className="space-y-4">
<FormField <FormField
name="user" name="user"
render={({ field }) => ( render={({ field }) => (

View File

@ -0,0 +1,40 @@
import { useContext } from "react";
import { Navigate, Outlet } from "react-router-dom";
import { AuthContext } from "@/context/auth-context";
import ActivityIndicator from "../indicators/activity-indicator";
export default function ProtectedRoute({
requiredRoles,
}: {
requiredRoles: ("admin" | "viewer")[];
}) {
const { auth } = useContext(AuthContext);
if (auth.isLoading) {
return (
<ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
);
}
// Unauthenticated mode
if (!auth.isAuthenticated) {
return <Outlet />;
}
// Authenticated mode (8971): require login
if (!auth.user) {
return <Navigate to="/login" replace />;
}
// If role is null (shouldnt happen if isAuthenticated, but type safety), fallback
// though isAuthenticated should catch this
if (auth.user.role === null) {
return <Outlet />;
}
if (!requiredRoles.includes(auth.user.role)) {
return <Navigate to="/unauthorized" replace />;
}
return <Outlet />;
}

View File

@ -5,6 +5,7 @@ import ActivityIndicator from "../indicators/activity-indicator";
import { useResizeObserver } from "@/hooks/resize-observer"; import { useResizeObserver } from "@/hooks/resize-observer";
import { isDesktop } from "react-device-detect"; import { isDesktop } from "react-device-detect";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import { useEnabledState } from "@/api/ws";
type CameraImageProps = { type CameraImageProps = {
className?: string; className?: string;
@ -26,7 +27,8 @@ export default function CameraImage({
const imgRef = useRef<HTMLImageElement | null>(null); const imgRef = useRef<HTMLImageElement | null>(null);
const { name } = config ? config.cameras[camera] : ""; const { name } = config ? config.cameras[camera] : "";
const enabled = config ? config.cameras[camera].enabled : "True"; const { payload: enabledState } = useEnabledState(camera);
const enabled = enabledState === "ON" || enabledState === undefined;
const [{ width: containerWidth, height: containerHeight }] = const [{ width: containerWidth, height: containerHeight }] =
useResizeObserver(containerRef); useResizeObserver(containerRef);
@ -96,9 +98,7 @@ export default function CameraImage({
loading="lazy" loading="lazy"
/> />
) : ( ) : (
<div className="pt-6 text-center"> <div className="size-full rounded-lg border-2 border-muted bg-background_alt text-center md:rounded-2xl" />
Camera is disabled in config, no stream or snapshot available!
</div>
)} )}
{!imageLoaded && enabled ? ( {!imageLoaded && enabled ? (
<div className="absolute bottom-0 left-0 right-0 top-0 flex items-center justify-center"> <div className="absolute bottom-0 left-0 right-0 top-0 flex items-center justify-center">

View File

@ -108,9 +108,7 @@ export default function CameraImage({
width={scaledWidth} width={scaledWidth}
/> />
) : ( ) : (
<div className="pt-6 text-center"> <div className="pt-6 text-center">Camera is disabled.</div>
Camera is disabled in config, no stream or snapshot available!
</div>
)} )}
{!hasLoaded && enabled ? ( {!hasLoaded && enabled ? (
<div <div

View File

@ -11,11 +11,15 @@ const variants = {
primary: { primary: {
active: "font-bold text-white bg-selected rounded-lg", active: "font-bold text-white bg-selected rounded-lg",
inactive: "text-secondary-foreground bg-secondary rounded-lg", inactive: "text-secondary-foreground bg-secondary rounded-lg",
disabled:
"text-secondary-foreground bg-secondary rounded-lg cursor-not-allowed opacity-50",
}, },
overlay: { overlay: {
active: "font-bold text-white bg-selected rounded-full", active: "font-bold text-white bg-selected rounded-full",
inactive: inactive:
"text-primary rounded-full bg-gradient-to-br from-gray-400 to-gray-500 bg-gray-500", "text-primary rounded-full bg-gradient-to-br from-gray-400 to-gray-500 bg-gray-500",
disabled:
"bg-gradient-to-br from-gray-400 to-gray-500 bg-gray-500 rounded-full cursor-not-allowed opacity-50",
}, },
}; };
@ -26,6 +30,7 @@ type CameraFeatureToggleProps = {
Icon: IconType; Icon: IconType;
title: string; title: string;
onClick?: () => void; onClick?: () => void;
disabled?: boolean; // New prop for disabling
}; };
export default function CameraFeatureToggle({ export default function CameraFeatureToggle({
@ -35,18 +40,28 @@ export default function CameraFeatureToggle({
Icon, Icon,
title, title,
onClick, onClick,
disabled = false, // Default to false
}: CameraFeatureToggleProps) { }: CameraFeatureToggleProps) {
const content = ( const content = (
<div <div
onClick={onClick} onClick={disabled ? undefined : onClick}
className={cn( className={cn(
"flex flex-col items-center justify-center", "flex flex-col items-center justify-center",
variants[variant][isActive ? "active" : "inactive"], disabled
? variants[variant].disabled
: variants[variant][isActive ? "active" : "inactive"],
className, className,
)} )}
> >
<Icon <Icon
className={`size-5 md:m-[6px] ${isActive ? "text-white" : "text-secondary-foreground"}`} className={cn(
"size-5 md:m-[6px]",
disabled
? "text-gray-400"
: isActive
? "text-white"
: "text-secondary-foreground",
)}
/> />
</div> </div>
); );
@ -54,7 +69,7 @@ export default function CameraFeatureToggle({
if (isDesktop) { if (isDesktop) {
return ( return (
<Tooltip> <Tooltip>
<TooltipTrigger>{content}</TooltipTrigger> <TooltipTrigger disabled={disabled}>{content}</TooltipTrigger>
<TooltipContent side="bottom"> <TooltipContent side="bottom">
<p>{title}</p> <p>{title}</p>
</TooltipContent> </TooltipContent>

View File

@ -281,10 +281,13 @@ function NewGroupDialog({
.catch((error) => { .catch((error) => {
setOpen(false); setOpen(false);
setEditState("none"); setEditState("none");
toast.error( const errorMessage =
`Failed to save config changes: ${error.response.data.message}`, error.response?.data?.message ||
{ position: "top-center" }, error.response?.data?.detail ||
); "Unknown error";
toast.error(`Failed to save config changes: ${errorMessage}`, {
position: "top-center",
});
}) })
.finally(() => { .finally(() => {
setIsLoading(false); setIsLoading(false);
@ -725,10 +728,13 @@ export function CameraGroupEdit({
} }
}) })
.catch((error) => { .catch((error) => {
toast.error( const errorMessage =
`Failed to save config changes: ${error.response.data.message}`, error.response?.data?.message ||
{ position: "top-center" }, error.response?.data?.detail ||
); "Unknown error";
toast.error(`Failed to save config changes: ${errorMessage}`, {
position: "top-center",
});
}) })
.finally(() => { .finally(() => {
setIsLoading(false); setIsLoading(false);

View File

@ -18,6 +18,7 @@ type CameraFilterButtonProps = {
groups: [string, CameraGroupConfig][]; groups: [string, CameraGroupConfig][];
selectedCameras: string[] | undefined; selectedCameras: string[] | undefined;
hideText?: boolean; hideText?: boolean;
mainCamera?: string;
updateCameraFilter: (cameras: string[] | undefined) => void; updateCameraFilter: (cameras: string[] | undefined) => void;
}; };
export function CamerasFilterButton({ export function CamerasFilterButton({
@ -25,6 +26,7 @@ export function CamerasFilterButton({
groups, groups,
selectedCameras, selectedCameras,
hideText = isMobile, hideText = isMobile,
mainCamera,
updateCameraFilter, updateCameraFilter,
}: CameraFilterButtonProps) { }: CameraFilterButtonProps) {
const [open, setOpen] = useState(false); const [open, setOpen] = useState(false);
@ -74,6 +76,7 @@ export function CamerasFilterButton({
allCameras={allCameras} allCameras={allCameras}
groups={groups} groups={groups}
currentCameras={currentCameras} currentCameras={currentCameras}
mainCamera={mainCamera}
setCurrentCameras={setCurrentCameras} setCurrentCameras={setCurrentCameras}
setOpen={setOpen} setOpen={setOpen}
updateCameraFilter={updateCameraFilter} updateCameraFilter={updateCameraFilter}
@ -120,6 +123,7 @@ export function CamerasFilterButton({
type CamerasFilterContentProps = { type CamerasFilterContentProps = {
allCameras: string[]; allCameras: string[];
currentCameras: string[] | undefined; currentCameras: string[] | undefined;
mainCamera?: string;
groups: [string, CameraGroupConfig][]; groups: [string, CameraGroupConfig][];
setCurrentCameras: (cameras: string[] | undefined) => void; setCurrentCameras: (cameras: string[] | undefined) => void;
setOpen: (open: boolean) => void; setOpen: (open: boolean) => void;
@ -128,6 +132,7 @@ type CamerasFilterContentProps = {
export function CamerasFilterContent({ export function CamerasFilterContent({
allCameras, allCameras,
currentCameras, currentCameras,
mainCamera,
groups, groups,
setCurrentCameras, setCurrentCameras,
setOpen, setOpen,
@ -178,12 +183,29 @@ export function CamerasFilterContent({
key={item} key={item}
isChecked={currentCameras?.includes(item) ?? false} isChecked={currentCameras?.includes(item) ?? false}
label={item.replaceAll("_", " ")} label={item.replaceAll("_", " ")}
disabled={
mainCamera !== undefined &&
currentCameras !== undefined &&
item === mainCamera
} // Disable only if mainCamera exists and cameras are filtered
onCheckedChange={(isChecked) => { onCheckedChange={(isChecked) => {
if (
mainCamera !== undefined && // Only enforce if mainCamera is defined
item === mainCamera &&
!isChecked &&
currentCameras !== undefined
) {
return; // Prevent deselecting mainCamera when filtered and mainCamera is defined
}
if (isChecked) { if (isChecked) {
const updatedCameras = currentCameras const updatedCameras = currentCameras
? [...currentCameras] ? [...currentCameras]
: []; : mainCamera !== undefined && item !== mainCamera // If mainCamera exists and this isnt it
updatedCameras.push(item); ? [mainCamera] // Start with mainCamera when transitioning from undefined
: []; // Otherwise start empty
if (!updatedCameras.includes(item)) {
updatedCameras.push(item);
}
setCurrentCameras(updatedCameras); setCurrentCameras(updatedCameras);
} else { } else {
const updatedCameras = currentCameras const updatedCameras = currentCameras

View File

@ -49,6 +49,7 @@ type ReviewFilterGroupProps = {
motionOnly: boolean; motionOnly: boolean;
filterList?: FilterList; filterList?: FilterList;
showReviewed: boolean; showReviewed: boolean;
mainCamera?: string;
setShowReviewed: (show: boolean) => void; setShowReviewed: (show: boolean) => void;
onUpdateFilter: (filter: ReviewFilter) => void; onUpdateFilter: (filter: ReviewFilter) => void;
setMotionOnly: React.Dispatch<React.SetStateAction<boolean>>; setMotionOnly: React.Dispatch<React.SetStateAction<boolean>>;
@ -63,6 +64,7 @@ export default function ReviewFilterGroup({
motionOnly, motionOnly,
filterList, filterList,
showReviewed, showReviewed,
mainCamera,
setShowReviewed, setShowReviewed,
onUpdateFilter, onUpdateFilter,
setMotionOnly, setMotionOnly,
@ -185,6 +187,7 @@ export default function ReviewFilterGroup({
allCameras={filterValues.cameras} allCameras={filterValues.cameras}
groups={groups} groups={groups}
selectedCameras={filter?.cameras} selectedCameras={filter?.cameras}
mainCamera={mainCamera}
updateCameraFilter={(newCameras) => { updateCameraFilter={(newCameras) => {
onUpdateFilter({ ...filter, cameras: newCameras }); onUpdateFilter({ ...filter, cameras: newCameras });
}} }}

View File

@ -44,8 +44,12 @@ export default function SearchActionGroup({
pullLatestData(); pullLatestData();
} }
}) })
.catch(() => { .catch((error) => {
toast.error("Failed to delete tracked objects.", { const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(`Failed to delete tracked objects.: ${errorMessage}`, {
position: "top-center", position: "top-center",
}); });
}); });

View File

@ -16,7 +16,7 @@ import {
PopoverTrigger, PopoverTrigger,
} from "@/components/ui/popover"; } from "@/components/ui/popover";
import { getUnitSize } from "@/utils/storageUtil"; import { getUnitSize } from "@/utils/storageUtil";
import { LuAlertCircle } from "react-icons/lu"; import { CiCircleAlert } from "react-icons/ci";
type CameraStorage = { type CameraStorage = {
[key: string]: { [key: string]: {
@ -199,7 +199,7 @@ export function CombinedStorageGraph({
className="focus:outline-none" className="focus:outline-none"
aria-label="Unused Storage Information" aria-label="Unused Storage Information"
> >
<LuAlertCircle <CiCircleAlert
className="size-5" className="size-5"
aria-label="Unused Storage Information" aria-label="Unused Storage Information"
/> />

View File

@ -1,5 +1,5 @@
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import { LuLoader2 } from "react-icons/lu"; import { AiOutlineLoading3Quarters } from "react-icons/ai";
export default function ActivityIndicator({ className = "w-full", size = 30 }) { export default function ActivityIndicator({ className = "w-full", size = 30 }) {
return ( return (
@ -7,7 +7,7 @@ export default function ActivityIndicator({ className = "w-full", size = 30 }) {
className={cn("flex items-center justify-center", className)} className={cn("flex items-center justify-center", className)}
aria-label="Loading…" aria-label="Loading…"
> >
<LuLoader2 className="animate-spin" size={size} /> <AiOutlineLoading3Quarters className="animate-spin" size={size} />
</div> </div>
); );
} }

View File

@ -18,22 +18,52 @@ import {
} from "../ui/dropdown-menu"; } from "../ui/dropdown-menu";
import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer"; import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
import { DialogClose } from "../ui/dialog"; import { DialogClose } from "../ui/dialog";
import { LuLogOut } from "react-icons/lu"; import { LuLogOut, LuSquarePen } from "react-icons/lu";
import useSWR from "swr"; import useSWR from "swr";
import { useState } from "react";
import axios from "axios";
import { toast } from "sonner";
import SetPasswordDialog from "../overlay/SetPasswordDialog";
type AccountSettingsProps = { type AccountSettingsProps = {
className?: string; className?: string;
}; };
export default function AccountSettings({ className }: AccountSettingsProps) { export default function AccountSettings({ className }: AccountSettingsProps) {
const { data: profile } = useSWR("profile"); const { data: profile } = useSWR("profile");
const { data: config } = useSWR("config"); const { data: config } = useSWR("config");
const logoutUrl = config?.proxy?.logout_url || `${baseUrl}api/logout`; const logoutUrl = config?.proxy?.logout_url || `${baseUrl}api/logout`;
const [passwordDialogOpen, setPasswordDialogOpen] = useState(false);
const Container = isDesktop ? DropdownMenu : Drawer; const Container = isDesktop ? DropdownMenu : Drawer;
const Trigger = isDesktop ? DropdownMenuTrigger : DrawerTrigger; const Trigger = isDesktop ? DropdownMenuTrigger : DrawerTrigger;
const Content = isDesktop ? DropdownMenuContent : DrawerContent; const Content = isDesktop ? DropdownMenuContent : DrawerContent;
const MenuItem = isDesktop ? DropdownMenuItem : DialogClose; const MenuItem = isDesktop ? DropdownMenuItem : DialogClose;
const handlePasswordSave = async (password: string) => {
if (!profile?.username || profile.username === "anonymous") return;
axios
.put(`users/${profile.username}/password`, { password })
.then((response) => {
if (response.status === 200) {
setPasswordDialogOpen(false);
toast.success("Password updated successfully.", {
position: "top-center",
});
}
})
.catch((error) => {
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(`Error setting password: ${errorMessage}`, {
position: "top-center",
});
});
};
return ( return (
<Container modal={!isDesktop}> <Container modal={!isDesktop}>
<Trigger> <Trigger>
@ -65,9 +95,22 @@ export default function AccountSettings({ className }: AccountSettingsProps) {
> >
<div className="scrollbar-container w-full flex-col overflow-y-auto overflow-x-hidden"> <div className="scrollbar-container w-full flex-col overflow-y-auto overflow-x-hidden">
<DropdownMenuLabel> <DropdownMenuLabel>
Current User: {profile?.username || "anonymous"} Current User: {profile?.username || "anonymous"}{" "}
{profile?.role && `(${profile.role})`}
</DropdownMenuLabel> </DropdownMenuLabel>
<DropdownMenuSeparator className={isDesktop ? "mt-3" : "mt-1"} /> <DropdownMenuSeparator className={isDesktop ? "mt-3" : "mt-1"} />
{profile?.username && profile.username !== "anonymous" && (
<MenuItem
className={
isDesktop ? "cursor-pointer" : "flex items-center p-2 text-sm"
}
aria-label="Set Password"
onClick={() => setPasswordDialogOpen(true)}
>
<LuSquarePen className="mr-2 size-4" />
<span>Set Password</span>
</MenuItem>
)}
<MenuItem <MenuItem
className={ className={
isDesktop ? "cursor-pointer" : "flex items-center p-2 text-sm" isDesktop ? "cursor-pointer" : "flex items-center p-2 text-sm"
@ -81,6 +124,12 @@ export default function AccountSettings({ className }: AccountSettingsProps) {
</MenuItem> </MenuItem>
</div> </div>
</Content> </Content>
<SetPasswordDialog
show={passwordDialogOpen}
onSave={handlePasswordSave}
onCancel={() => setPasswordDialogOpen(false)}
username={profile?.username}
/>
</Container> </Container>
); );
} }

View File

@ -5,7 +5,7 @@ import {
LuList, LuList,
LuLogOut, LuLogOut,
LuMoon, LuMoon,
LuPenSquare, LuSquarePen,
LuRotateCw, LuRotateCw,
LuSettings, LuSettings,
LuSun, LuSun,
@ -24,7 +24,6 @@ import {
DropdownMenuSubTrigger, DropdownMenuSubTrigger,
DropdownMenuTrigger, DropdownMenuTrigger,
} from "../ui/dropdown-menu"; } from "../ui/dropdown-menu";
import { Link } from "react-router-dom"; import { Link } from "react-router-dom";
import { CgDarkMode } from "react-icons/cg"; import { CgDarkMode } from "react-icons/cg";
import { import {
@ -33,10 +32,8 @@ import {
useTheme, useTheme,
} from "@/context/theme-provider"; } from "@/context/theme-provider";
import { IoColorPalette } from "react-icons/io5"; import { IoColorPalette } from "react-icons/io5";
import { useState } from "react"; import { useState } from "react";
import { useRestart } from "@/api/ws"; import { useRestart } from "@/api/ws";
import { import {
Tooltip, Tooltip,
TooltipContent, TooltipContent,
@ -55,21 +52,27 @@ import { TooltipPortal } from "@radix-ui/react-tooltip";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import useSWR from "swr"; import useSWR from "swr";
import RestartDialog from "../overlay/dialog/RestartDialog"; import RestartDialog from "../overlay/dialog/RestartDialog";
import { useIsAdmin } from "@/hooks/use-is-admin";
import SetPasswordDialog from "../overlay/SetPasswordDialog";
import { toast } from "sonner";
import axios from "axios";
type GeneralSettingsProps = { type GeneralSettingsProps = {
className?: string; className?: string;
}; };
export default function GeneralSettings({ className }: GeneralSettingsProps) { export default function GeneralSettings({ className }: GeneralSettingsProps) {
const { data: profile } = useSWR("profile"); const { data: profile } = useSWR("profile");
const { data: config } = useSWR("config"); const { data: config } = useSWR("config");
const logoutUrl = config?.proxy?.logout_url || "/api/logout"; const logoutUrl = config?.proxy?.logout_url || "/api/logout";
// settings
const { theme, colorScheme, setTheme, setColorScheme } = useTheme(); const { theme, colorScheme, setTheme, setColorScheme } = useTheme();
const [restartDialogOpen, setRestartDialogOpen] = useState(false); const [restartDialogOpen, setRestartDialogOpen] = useState(false);
const [passwordDialogOpen, setPasswordDialogOpen] = useState(false);
const { send: sendRestart } = useRestart(); const { send: sendRestart } = useRestart();
const isAdmin = useIsAdmin();
const Container = isDesktop ? DropdownMenu : Drawer; const Container = isDesktop ? DropdownMenu : Drawer;
const Trigger = isDesktop ? DropdownMenuTrigger : DrawerTrigger; const Trigger = isDesktop ? DropdownMenuTrigger : DrawerTrigger;
const Content = isDesktop ? DropdownMenuContent : DrawerContent; const Content = isDesktop ? DropdownMenuContent : DrawerContent;
@ -79,6 +82,29 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
const SubItemContent = isDesktop ? DropdownMenuSubContent : DialogContent; const SubItemContent = isDesktop ? DropdownMenuSubContent : DialogContent;
const Portal = isDesktop ? DropdownMenuPortal : DialogPortal; const Portal = isDesktop ? DropdownMenuPortal : DialogPortal;
const handlePasswordSave = async (password: string) => {
if (!profile?.username || profile.username === "anonymous") return;
axios
.put(`users/${profile.username}/password`, { password })
.then((response) => {
if (response.status === 200) {
setPasswordDialogOpen(false);
toast.success("Password updated successfully.", {
position: "top-center",
});
}
})
.catch((error) => {
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(`Error setting password: ${errorMessage}`, {
position: "top-center",
});
});
};
return ( return (
<> <>
<Container modal={!isDesktop}> <Container modal={!isDesktop}>
@ -121,13 +147,28 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
> >
<div className="scrollbar-container w-full flex-col overflow-y-auto overflow-x-hidden"> <div className="scrollbar-container w-full flex-col overflow-y-auto overflow-x-hidden">
{isMobile && ( {isMobile && (
<> <div className="mb-2">
<DropdownMenuLabel> <DropdownMenuLabel>
Current User: {profile?.username || "anonymous"} Current User: {profile?.username || "anonymous"}{" "}
{profile?.role && `(${profile.role})`}
</DropdownMenuLabel> </DropdownMenuLabel>
<DropdownMenuSeparator <DropdownMenuSeparator
className={isDesktop ? "mt-3" : "mt-1"} className={isDesktop ? "mt-3" : "mt-1"}
/> />
{profile?.username && profile.username !== "anonymous" && (
<MenuItem
className={
isDesktop
? "cursor-pointer"
: "flex items-center p-2 text-sm"
}
aria-label="Set Password"
onClick={() => setPasswordDialogOpen(true)}
>
<LuSquarePen className="mr-2 size-4" />
<span>Set Password</span>
</MenuItem>
)}
<MenuItem <MenuItem
className={ className={
isDesktop isDesktop
@ -141,39 +182,45 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
<span>Logout</span> <span>Logout</span>
</a> </a>
</MenuItem> </MenuItem>
</div>
)}
{isAdmin && (
<>
<DropdownMenuLabel>System</DropdownMenuLabel>
<DropdownMenuSeparator />
<DropdownMenuGroup className={isDesktop ? "" : "flex flex-col"}>
<Link to="/system#general">
<MenuItem
className={
isDesktop
? "cursor-pointer"
: "flex w-full items-center p-2 text-sm"
}
aria-label="System metrics"
>
<LuActivity className="mr-2 size-4" />
<span>System metrics</span>
</MenuItem>
</Link>
<Link to="/logs">
<MenuItem
className={
isDesktop
? "cursor-pointer"
: "flex w-full items-center p-2 text-sm"
}
aria-label="System logs"
>
<LuList className="mr-2 size-4" />
<span>System logs</span>
</MenuItem>
</Link>
</DropdownMenuGroup>
</> </>
)} )}
<DropdownMenuLabel>System</DropdownMenuLabel> <DropdownMenuLabel
<DropdownMenuSeparator /> className={isDesktop && isAdmin ? "mt-3" : "mt-1"}
<DropdownMenuGroup className={isDesktop ? "" : "flex flex-col"}> >
<Link to="/system#general">
<MenuItem
className={
isDesktop
? "cursor-pointer"
: "flex w-full items-center p-2 text-sm"
}
aria-label="System metrics"
>
<LuActivity className="mr-2 size-4" />
<span>System metrics</span>
</MenuItem>
</Link>
<Link to="/logs">
<MenuItem
className={
isDesktop
? "cursor-pointer"
: "flex w-full items-center p-2 text-sm"
}
aria-label="System logs"
>
<LuList className="mr-2 size-4" />
<span>System logs</span>
</MenuItem>
</Link>
</DropdownMenuGroup>
<DropdownMenuLabel className={isDesktop ? "mt-3" : "mt-1"}>
Configuration Configuration
</DropdownMenuLabel> </DropdownMenuLabel>
<DropdownMenuSeparator /> <DropdownMenuSeparator />
@ -191,143 +238,143 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
<span>Settings</span> <span>Settings</span>
</MenuItem> </MenuItem>
</Link> </Link>
<Link to="/config"> {isAdmin && (
<MenuItem <>
className={ <Link to="/config">
isDesktop
? "cursor-pointer"
: "flex w-full items-center p-2 text-sm"
}
aria-label="Configuration editor"
>
<LuPenSquare className="mr-2 size-4" />
<span>Configuration editor</span>
</MenuItem>
</Link>
<DropdownMenuLabel className={isDesktop ? "mt-3" : "mt-1"}>
Appearance
</DropdownMenuLabel>
<DropdownMenuSeparator />
<SubItem>
<SubItemTrigger
className={
isDesktop
? "cursor-pointer"
: "flex items-center p-2 text-sm"
}
>
<LuSunMoon className="mr-2 size-4" />
<span>Dark Mode</span>
</SubItemTrigger>
<Portal>
<SubItemContent
className={
isDesktop ? "" : "w-[92%] rounded-lg md:rounded-2xl"
}
>
<span tabIndex={0} className="sr-only" />
<MenuItem <MenuItem
className={ className={
isDesktop isDesktop
? "cursor-pointer" ? "cursor-pointer"
: "flex items-center p-2 text-sm" : "flex w-full items-center p-2 text-sm"
} }
aria-label="Light mode" aria-label="Configuration editor"
onClick={() => setTheme("light")}
> >
{theme === "light" ? ( <LuSquarePen className="mr-2 size-4" />
<> <span>Configuration editor</span>
<LuSun className="mr-2 size-4 rotate-0 scale-100 transition-all dark:-rotate-90 dark:scale-0" />
Light
</>
) : (
<span className="ml-6 mr-2">Light</span>
)}
</MenuItem> </MenuItem>
<MenuItem </Link>
className={ </>
isDesktop )}
? "cursor-pointer"
: "flex items-center p-2 text-sm"
}
aria-label="Dark mode"
onClick={() => setTheme("dark")}
>
{theme === "dark" ? (
<>
<LuMoon className="mr-2 size-4 rotate-90 scale-0 transition-all dark:rotate-0 dark:scale-100" />
Dark
</>
) : (
<span className="ml-6 mr-2">Dark</span>
)}
</MenuItem>
<MenuItem
className={
isDesktop
? "cursor-pointer"
: "flex items-center p-2 text-sm"
}
aria-label="Use the system settings for light or dark mode"
onClick={() => setTheme("system")}
>
{theme === "system" ? (
<>
<CgDarkMode className="mr-2 size-4 scale-100 transition-all" />
System
</>
) : (
<span className="ml-6 mr-2">System</span>
)}
</MenuItem>
</SubItemContent>
</Portal>
</SubItem>
<SubItem>
<SubItemTrigger
className={
isDesktop
? "cursor-pointer"
: "flex items-center p-2 text-sm"
}
>
<LuSunMoon className="mr-2 size-4" />
<span>Theme</span>
</SubItemTrigger>
<Portal>
<SubItemContent
className={
isDesktop ? "" : "w-[92%] rounded-lg md:rounded-2xl"
}
>
<span tabIndex={0} className="sr-only" />
{colorSchemes.map((scheme) => (
<MenuItem
key={scheme}
className={
isDesktop
? "cursor-pointer"
: "flex items-center p-2 text-sm"
}
aria-label={`Color scheme - ${scheme}`}
onClick={() => setColorScheme(scheme)}
>
{scheme === colorScheme ? (
<>
<IoColorPalette className="mr-2 size-4 rotate-0 scale-100 transition-all" />
{friendlyColorSchemeName(scheme)}
</>
) : (
<span className="ml-6 mr-2">
{friendlyColorSchemeName(scheme)}
</span>
)}
</MenuItem>
))}
</SubItemContent>
</Portal>
</SubItem>
</DropdownMenuGroup> </DropdownMenuGroup>
<DropdownMenuLabel className={isDesktop ? "mt-3" : "mt-1"}>
Appearance
</DropdownMenuLabel>
<DropdownMenuSeparator />
<SubItem>
<SubItemTrigger
className={
isDesktop ? "cursor-pointer" : "flex items-center p-2 text-sm"
}
>
<LuSunMoon className="mr-2 size-4" />
<span>Dark Mode</span>
</SubItemTrigger>
<Portal>
<SubItemContent
className={
isDesktop ? "" : "w-[92%] rounded-lg md:rounded-2xl"
}
>
<span tabIndex={0} className="sr-only" />
<MenuItem
className={
isDesktop
? "cursor-pointer"
: "flex items-center p-2 text-sm"
}
aria-label="Light mode"
onClick={() => setTheme("light")}
>
{theme === "light" ? (
<>
<LuSun className="mr-2 size-4 rotate-0 scale-100 transition-all dark:-rotate-90 dark:scale-0" />
Light
</>
) : (
<span className="ml-6 mr-2">Light</span>
)}
</MenuItem>
<MenuItem
className={
isDesktop
? "cursor-pointer"
: "flex items-center p-2 text-sm"
}
aria-label="Dark mode"
onClick={() => setTheme("dark")}
>
{theme === "dark" ? (
<>
<LuMoon className="mr-2 size-4 rotate-90 scale-0 transition-all dark:rotate-0 dark:scale-100" />
Dark
</>
) : (
<span className="ml-6 mr-2">Dark</span>
)}
</MenuItem>
<MenuItem
className={
isDesktop
? "cursor-pointer"
: "flex items-center p-2 text-sm"
}
aria-label="Use the system settings for light or dark mode"
onClick={() => setTheme("system")}
>
{theme === "system" ? (
<>
<CgDarkMode className="mr-2 size-4 scale-100 transition-all" />
System
</>
) : (
<span className="ml-6 mr-2">System</span>
)}
</MenuItem>
</SubItemContent>
</Portal>
</SubItem>
<SubItem>
<SubItemTrigger
className={
isDesktop ? "cursor-pointer" : "flex items-center p-2 text-sm"
}
>
<LuSunMoon className="mr-2 size-4" />
<span>Theme</span>
</SubItemTrigger>
<Portal>
<SubItemContent
className={
isDesktop ? "" : "w-[92%] rounded-lg md:rounded-2xl"
}
>
<span tabIndex={0} className="sr-only" />
{colorSchemes.map((scheme) => (
<MenuItem
key={scheme}
className={
isDesktop
? "cursor-pointer"
: "flex items-center p-2 text-sm"
}
aria-label={`Color scheme - ${scheme}`}
onClick={() => setColorScheme(scheme)}
>
{scheme === colorScheme ? (
<>
<IoColorPalette className="mr-2 size-4 rotate-0 scale-100 transition-all" />
{friendlyColorSchemeName(scheme)}
</>
) : (
<span className="ml-6 mr-2">
{friendlyColorSchemeName(scheme)}
</span>
)}
</MenuItem>
))}
</SubItemContent>
</Portal>
</SubItem>
<DropdownMenuLabel className={isDesktop ? "mt-3" : "mt-1"}> <DropdownMenuLabel className={isDesktop ? "mt-3" : "mt-1"}>
Help Help
</DropdownMenuLabel> </DropdownMenuLabel>
@ -357,17 +404,25 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
<span>GitHub</span> <span>GitHub</span>
</MenuItem> </MenuItem>
</a> </a>
<DropdownMenuSeparator className={isDesktop ? "mt-3" : "mt-1"} /> {isAdmin && (
<MenuItem <>
className={ <DropdownMenuSeparator
isDesktop ? "cursor-pointer" : "flex items-center p-2 text-sm" className={isDesktop ? "mt-3" : "mt-1"}
} />
aria-label="Restart Frigate" <MenuItem
onClick={() => setRestartDialogOpen(true)} className={
> isDesktop
<LuRotateCw className="mr-2 size-4" /> ? "cursor-pointer"
<span>Restart Frigate</span> : "flex items-center p-2 text-sm"
</MenuItem> }
aria-label="Restart Frigate"
onClick={() => setRestartDialogOpen(true)}
>
<LuRotateCw className="mr-2 size-4" />
<span>Restart Frigate</span>
</MenuItem>
</>
)}
</div> </div>
</Content> </Content>
</Container> </Container>
@ -376,6 +431,12 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) {
onClose={() => setRestartDialogOpen(false)} onClose={() => setRestartDialogOpen(false)}
onRestart={() => sendRestart("restart")} onRestart={() => sendRestart("restart")}
/> />
<SetPasswordDialog
show={passwordDialogOpen}
onSave={handlePasswordSave}
onCancel={() => setPasswordDialogOpen(false)}
username={profile?.username}
/>
</> </>
); );
} }

View File

@ -39,7 +39,11 @@ import {
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import { useNavigate } from "react-router-dom"; import { useNavigate } from "react-router-dom";
import { formatUnixTimestampToDateTime } from "@/utils/dateUtil"; import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
import { useNotifications, useNotificationSuspend } from "@/api/ws"; import {
useEnabledState,
useNotifications,
useNotificationSuspend,
} from "@/api/ws";
type LiveContextMenuProps = { type LiveContextMenuProps = {
className?: string; className?: string;
@ -83,6 +87,11 @@ export default function LiveContextMenu({
}: LiveContextMenuProps) { }: LiveContextMenuProps) {
const [showSettings, setShowSettings] = useState(false); const [showSettings, setShowSettings] = useState(false);
// camera enabled
const { payload: enabledState, send: sendEnabled } = useEnabledState(camera);
const isEnabled = enabledState === "ON";
// streaming settings // streaming settings
const { allGroupsStreamingSettings, setAllGroupsStreamingSettings } = const { allGroupsStreamingSettings, setAllGroupsStreamingSettings } =
@ -263,7 +272,7 @@ export default function LiveContextMenu({
onClick={handleVolumeIconClick} onClick={handleVolumeIconClick}
/> />
<VolumeSlider <VolumeSlider
disabled={!audioState} disabled={!audioState || !isEnabled}
className="my-3 ml-0.5 rounded-lg bg-background/60" className="my-3 ml-0.5 rounded-lg bg-background/60"
value={[volumeState ?? 0]} value={[volumeState ?? 0]}
min={0} min={0}
@ -280,34 +289,49 @@ export default function LiveContextMenu({
<ContextMenuItem> <ContextMenuItem>
<div <div
className="flex w-full cursor-pointer items-center justify-start gap-2" className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={muteAll} onClick={() => sendEnabled(isEnabled ? "OFF" : "ON")}
>
<div className="text-primary">
{isEnabled ? "Disable" : "Enable"} Camera
</div>
</div>
</ContextMenuItem>
<ContextMenuSeparator />
<ContextMenuItem disabled={!isEnabled}>
<div
className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={isEnabled ? muteAll : undefined}
> >
<div className="text-primary">Mute All Cameras</div> <div className="text-primary">Mute All Cameras</div>
</div> </div>
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem> <ContextMenuItem disabled={!isEnabled}>
<div <div
className="flex w-full cursor-pointer items-center justify-start gap-2" className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={unmuteAll} onClick={isEnabled ? unmuteAll : undefined}
> >
<div className="text-primary">Unmute All Cameras</div> <div className="text-primary">Unmute All Cameras</div>
</div> </div>
</ContextMenuItem> </ContextMenuItem>
<ContextMenuSeparator /> <ContextMenuSeparator />
<ContextMenuItem> <ContextMenuItem disabled={!isEnabled}>
<div <div
className="flex w-full cursor-pointer items-center justify-start gap-2" className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={toggleStats} onClick={isEnabled ? toggleStats : undefined}
> >
<div className="text-primary"> <div className="text-primary">
{statsState ? "Hide" : "Show"} Stream Stats {statsState ? "Hide" : "Show"} Stream Stats
</div> </div>
</div> </div>
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem> <ContextMenuItem disabled={!isEnabled}>
<div <div
className="flex w-full cursor-pointer items-center justify-start gap-2" className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={() => navigate(`/settings?page=debug&camera=${camera}`)} onClick={
isEnabled
? () => navigate(`/settings?page=debug&camera=${camera}`)
: undefined
}
> >
<div className="text-primary">Debug View</div> <div className="text-primary">Debug View</div>
</div> </div>
@ -315,10 +339,10 @@ export default function LiveContextMenu({
{cameraGroup && cameraGroup !== "default" && ( {cameraGroup && cameraGroup !== "default" && (
<> <>
<ContextMenuSeparator /> <ContextMenuSeparator />
<ContextMenuItem> <ContextMenuItem disabled={!isEnabled}>
<div <div
className="flex w-full cursor-pointer items-center justify-start gap-2" className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={() => setShowSettings(true)} onClick={isEnabled ? () => setShowSettings(true) : undefined}
> >
<div className="text-primary">Streaming Settings</div> <div className="text-primary">Streaming Settings</div>
</div> </div>
@ -328,10 +352,10 @@ export default function LiveContextMenu({
{preferredLiveMode == "jsmpeg" && isRestreamed && ( {preferredLiveMode == "jsmpeg" && isRestreamed && (
<> <>
<ContextMenuSeparator /> <ContextMenuSeparator />
<ContextMenuItem> <ContextMenuItem disabled={!isEnabled}>
<div <div
className="flex w-full cursor-pointer items-center justify-start gap-2" className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={resetPreferredLiveMode} onClick={isEnabled ? resetPreferredLiveMode : undefined}
> >
<div className="text-primary">Reset</div> <div className="text-primary">Reset</div>
</div> </div>
@ -342,7 +366,7 @@ export default function LiveContextMenu({
<> <>
<ContextMenuSeparator /> <ContextMenuSeparator />
<ContextMenuSub> <ContextMenuSub>
<ContextMenuSubTrigger> <ContextMenuSubTrigger disabled={!isEnabled}>
<div className="flex items-center gap-2"> <div className="flex items-center gap-2">
<span>Notifications</span> <span>Notifications</span>
</div> </div>
@ -382,10 +406,15 @@ export default function LiveContextMenu({
<> <>
<ContextMenuSeparator /> <ContextMenuSeparator />
<ContextMenuItem <ContextMenuItem
onClick={() => { disabled={!isEnabled}
sendNotification("ON"); onClick={
sendNotificationSuspend(0); isEnabled
}} ? () => {
sendNotification("ON");
sendNotificationSuspend(0);
}
: undefined
}
> >
<div className="flex w-full flex-col gap-2"> <div className="flex w-full flex-col gap-2">
{notificationState === "ON" ? ( {notificationState === "ON" ? (
@ -405,36 +434,71 @@ export default function LiveContextMenu({
Suspend for: Suspend for:
</p> </p>
<div className="space-y-1"> <div className="space-y-1">
<ContextMenuItem onClick={() => handleSuspend("5")}> <ContextMenuItem
disabled={!isEnabled}
onClick={
isEnabled ? () => handleSuspend("5") : undefined
}
>
5 minutes 5 minutes
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem <ContextMenuItem
onClick={() => handleSuspend("10")} disabled={!isEnabled}
onClick={
isEnabled
? () => handleSuspend("10")
: undefined
}
> >
10 minutes 10 minutes
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem <ContextMenuItem
onClick={() => handleSuspend("30")} disabled={!isEnabled}
onClick={
isEnabled
? () => handleSuspend("30")
: undefined
}
> >
30 minutes 30 minutes
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem <ContextMenuItem
onClick={() => handleSuspend("60")} disabled={!isEnabled}
onClick={
isEnabled
? () => handleSuspend("60")
: undefined
}
> >
1 hour 1 hour
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem <ContextMenuItem
onClick={() => handleSuspend("840")} disabled={!isEnabled}
onClick={
isEnabled
? () => handleSuspend("840")
: undefined
}
> >
12 hours 12 hours
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem <ContextMenuItem
onClick={() => handleSuspend("1440")} disabled={!isEnabled}
onClick={
isEnabled
? () => handleSuspend("1440")
: undefined
}
> >
24 hours 24 hours
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem <ContextMenuItem
onClick={() => handleSuspend("off")} disabled={!isEnabled}
onClick={
isEnabled
? () => handleSuspend("off")
: undefined
}
> >
Until restart Until restart
</ContextMenuItem> </ContextMenuItem>

View File

@ -4,7 +4,8 @@ import { FrigateConfig } from "@/types/frigateConfig";
import { baseUrl } from "@/api/baseUrl"; import { baseUrl } from "@/api/baseUrl";
import { toast } from "sonner"; import { toast } from "sonner";
import axios from "axios"; import axios from "axios";
import { LuCamera, LuDownload, LuMoreVertical, LuTrash2 } from "react-icons/lu"; import { LuCamera, LuDownload, LuTrash2 } from "react-icons/lu";
import { FiMoreVertical } from "react-icons/fi";
import { FaArrowsRotate } from "react-icons/fa6"; import { FaArrowsRotate } from "react-icons/fa6";
import { MdImageSearch } from "react-icons/md"; import { MdImageSearch } from "react-icons/md";
import FrigatePlusIcon from "@/components/icons/FrigatePlusIcon"; import FrigatePlusIcon from "@/components/icons/FrigatePlusIcon";
@ -73,8 +74,12 @@ export default function SearchResultActions({
refreshResults(); refreshResults();
} }
}) })
.catch(() => { .catch((error) => {
toast.error("Failed to delete tracked object.", { const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(`Failed to delete tracked object: ${errorMessage}`, {
position: "top-center", position: "top-center",
}); });
}); });
@ -216,7 +221,7 @@ export default function SearchResultActions({
<DropdownMenu> <DropdownMenu>
<DropdownMenuTrigger> <DropdownMenuTrigger>
<LuMoreVertical className="size-5 cursor-pointer text-primary-variant hover:text-primary" /> <FiMoreVertical className="size-5 cursor-pointer text-primary-variant hover:text-primary" />
</DropdownMenuTrigger> </DropdownMenuTrigger>
<DropdownMenuContent align="end">{menuItems}</DropdownMenuContent> <DropdownMenuContent align="end">{menuItems}</DropdownMenuContent>
</DropdownMenu> </DropdownMenu>

View File

@ -2,6 +2,7 @@ import { Button } from "../ui/button";
import { import {
Form, Form,
FormControl, FormControl,
FormDescription,
FormField, FormField,
FormItem, FormItem,
FormLabel, FormLabel,
@ -12,20 +13,31 @@ import { zodResolver } from "@hookform/resolvers/zod";
import { useForm } from "react-hook-form"; import { useForm } from "react-hook-form";
import { z } from "zod"; import { z } from "zod";
import ActivityIndicator from "../indicators/activity-indicator"; import ActivityIndicator from "../indicators/activity-indicator";
import { useState } from "react"; import { useEffect, useState } from "react";
import { import {
Dialog, Dialog,
DialogContent, DialogContent,
DialogDescription,
DialogFooter, DialogFooter,
DialogHeader, DialogHeader,
DialogTitle, DialogTitle,
} from "../ui/dialog"; } from "../ui/dialog";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "../ui/select";
import { Shield, User } from "lucide-react";
import { LuCheck, LuX } from "react-icons/lu";
type CreateUserOverlayProps = { type CreateUserOverlayProps = {
show: boolean; show: boolean;
onCreate: (user: string, password: string) => void; onCreate: (user: string, password: string, role: "admin" | "viewer") => void;
onCancel: () => void; onCancel: () => void;
}; };
export default function CreateUserDialog({ export default function CreateUserDialog({
show, show,
onCreate, onCreate,
@ -33,15 +45,22 @@ export default function CreateUserDialog({
}: CreateUserOverlayProps) { }: CreateUserOverlayProps) {
const [isLoading, setIsLoading] = useState<boolean>(false); const [isLoading, setIsLoading] = useState<boolean>(false);
const formSchema = z.object({ const formSchema = z
user: z .object({
.string() user: z
.min(1) .string()
.regex(/^[A-Za-z0-9._]+$/, { .min(1, "Username is required")
message: "Username may only include letters, numbers, . or _", .regex(/^[A-Za-z0-9._]+$/, {
}), message: "Username may only include letters, numbers, . or _",
password: z.string(), }),
}); password: z.string().min(1, "Password is required"),
confirmPassword: z.string().min(1, "Please confirm your password"),
role: z.enum(["admin", "viewer"]),
})
.refine((data) => data.password === data.confirmPassword, {
message: "Passwords don't match",
path: ["confirmPassword"],
});
const form = useForm<z.infer<typeof formSchema>>({ const form = useForm<z.infer<typeof formSchema>>({
resolver: zodResolver(formSchema), resolver: zodResolver(formSchema),
@ -49,32 +68,93 @@ export default function CreateUserDialog({
defaultValues: { defaultValues: {
user: "", user: "",
password: "", password: "",
confirmPassword: "",
role: "viewer",
}, },
}); });
const onSubmit = async (values: z.infer<typeof formSchema>) => { const onSubmit = async (values: z.infer<typeof formSchema>) => {
setIsLoading(true); setIsLoading(true);
await onCreate(values.user, values.password); await onCreate(values.user, values.password, values.role);
form.reset(); form.reset();
setIsLoading(false); setIsLoading(false);
}; };
// Check if passwords match for real-time feedback
const password = form.watch("password");
const confirmPassword = form.watch("confirmPassword");
const passwordsMatch = password === confirmPassword;
const showMatchIndicator = password && confirmPassword;
useEffect(() => {
if (!show) {
form.reset({
user: "",
password: "",
role: "viewer",
});
}
}, [show, form]);
const handleCancel = () => {
form.reset({
user: "",
password: "",
role: "viewer",
});
onCancel();
};
return ( return (
<Dialog open={show} onOpenChange={onCancel}> <Dialog open={show} onOpenChange={onCancel}>
<DialogContent> <DialogContent className="sm:max-w-[425px]">
<DialogHeader> <DialogHeader>
<DialogTitle>Create User</DialogTitle> <DialogTitle>Create New User</DialogTitle>
<DialogDescription>
Add a new user account and specify an role for access to areas of
the Frigate UI.
</DialogDescription>
</DialogHeader> </DialogHeader>
<Form {...form}> <Form {...form}>
<form onSubmit={form.handleSubmit(onSubmit)}> <form
onSubmit={form.handleSubmit(onSubmit)}
className="space-y-5 py-4"
>
<FormField <FormField
name="user" name="user"
render={({ field }) => ( render={({ field }) => (
<FormItem> <FormItem>
<FormLabel>User</FormLabel> <FormLabel className="text-sm font-medium">
Username
</FormLabel>
<FormControl> <FormControl>
<Input <Input
className="text-md w-full border border-input bg-background p-2 hover:bg-accent hover:text-accent-foreground dark:[color-scheme:dark]" placeholder="Enter username"
className="h-10"
{...field}
/>
</FormControl>
<FormDescription className="text-xs text-muted-foreground">
Only letters, numbers, periods and underscores allowed.
</FormDescription>
<FormMessage />
</FormItem>
)}
/>
<FormField
name="password"
render={({ field }) => (
<FormItem>
<FormLabel className="text-sm font-medium">
Password
</FormLabel>
<FormControl>
<Input
placeholder="Enter password"
type="password"
className="h-10"
{...field} {...field}
/> />
</FormControl> </FormControl>
@ -82,30 +162,121 @@ export default function CreateUserDialog({
</FormItem> </FormItem>
)} )}
/> />
<FormField <FormField
name="password" name="confirmPassword"
render={({ field }) => ( render={({ field }) => (
<FormItem> <FormItem>
<FormLabel>Password</FormLabel> <FormLabel className="text-sm font-medium">
Confirm Password
</FormLabel>
<FormControl> <FormControl>
<Input <Input
className="text-md w-full border border-input bg-background p-2 hover:bg-accent hover:text-accent-foreground dark:[color-scheme:dark]" placeholder="Confirm password"
type="password" type="password"
className="h-10"
{...field} {...field}
/> />
</FormControl> </FormControl>
{showMatchIndicator && (
<div className="mt-1 flex items-center gap-1.5 text-xs">
{passwordsMatch ? (
<>
<LuCheck className="size-3.5 text-green-500" />
<span className="text-green-600">
Passwords match
</span>
</>
) : (
<>
<LuX className="size-3.5 text-red-500" />
<span className="text-red-600">
Passwords don't match
</span>
</>
)}
</div>
)}
<FormMessage />
</FormItem> </FormItem>
)} )}
/> />
<DialogFooter className="mt-4">
<Button <FormField
variant="select" name="role"
aria-label="Create user" render={({ field }) => (
disabled={isLoading} <FormItem>
> <FormLabel className="text-sm font-medium">Role</FormLabel>
{isLoading && <ActivityIndicator className="mr-2 h-4 w-4" />} <Select
Create User onValueChange={field.onChange}
</Button> defaultValue={field.value}
>
<FormControl>
<SelectTrigger className="h-10">
<SelectValue placeholder="Select a role" />
</SelectTrigger>
</FormControl>
<SelectContent>
<SelectItem
value="admin"
className="flex items-center gap-2"
>
<div className="flex items-center gap-2">
<Shield className="h-4 w-4 text-primary" />
<span>Admin</span>
</div>
</SelectItem>
<SelectItem
value="viewer"
className="flex items-center gap-2"
>
<div className="flex items-center gap-2">
<User className="h-4 w-4 text-muted-foreground" />
<span>Viewer</span>
</div>
</SelectItem>
</SelectContent>
</Select>
<FormDescription className="text-xs text-muted-foreground">
Admins have full access to all features in the Frigate UI.
Viewers are limited to viewing cameras, review items, and
historical footage in the UI.
</FormDescription>
<FormMessage />
</FormItem>
)}
/>
<DialogFooter className="flex gap-2 pt-2 sm:justify-end">
<div className="flex flex-1 flex-col justify-end">
<div className="flex flex-row gap-2 pt-5">
<Button
className="flex flex-1"
aria-label="Cancel"
disabled={isLoading}
onClick={handleCancel}
type="button"
>
Cancel
</Button>
<Button
variant="select"
aria-label="Save"
disabled={isLoading || !form.formState.isValid}
className="flex flex-1"
type="submit"
>
{isLoading ? (
<div className="flex flex-row items-center gap-2">
<ActivityIndicator />
<span>Saving...</span>
</div>
) : (
"Save"
)}
</Button>
</div>
</div>
</DialogFooter> </DialogFooter>
</form> </form>
</Form> </Form>

View File

@ -6,34 +6,61 @@ import {
DialogHeader, DialogHeader,
DialogTitle, DialogTitle,
} from "../ui/dialog"; } from "../ui/dialog";
import { DialogDescription } from "@radix-ui/react-dialog";
type SetPasswordProps = { type DeleteUserDialogProps = {
show: boolean; show: boolean;
username?: string;
onDelete: () => void; onDelete: () => void;
onCancel: () => void; onCancel: () => void;
}; };
export default function DeleteUserDialog({ export default function DeleteUserDialog({
show, show,
username,
onDelete, onDelete,
onCancel, onCancel,
}: SetPasswordProps) { }: DeleteUserDialogProps) {
return ( return (
<Dialog open={show} onOpenChange={onCancel}> <Dialog open={show} onOpenChange={onCancel}>
<DialogContent> <DialogContent className="sm:max-w-[425px]">
<DialogHeader> <DialogHeader className="flex flex-col items-center gap-2 sm:items-start">
<DialogTitle>Delete User</DialogTitle> <div className="space-y-1 text-center sm:text-left">
<DialogTitle>Delete User</DialogTitle>
<DialogDescription>
This action cannot be undone. This will permanently delete the
user account and remove all associated data.
</DialogDescription>
</div>
</DialogHeader> </DialogHeader>
<div>Are you sure?</div>
<DialogFooter> <div className="my-4 rounded-md border border-destructive/20 bg-destructive/5 p-4 text-center text-sm">
<Button <p className="font-medium text-destructive">
className="flex items-center gap-1" Are you sure you want to delete{" "}
aria-label="Confirm delete" <span className="font-bold">{username}</span>?
variant="destructive" </p>
size="sm" </div>
onClick={onDelete}
> <DialogFooter className="flex flex-col-reverse gap-2 sm:flex-row sm:justify-end">
Delete <div className="flex flex-1 flex-col justify-end">
</Button> <div className="flex flex-row gap-2 pt-5">
<Button
className="flex flex-1"
aria-label="Cancel"
onClick={onCancel}
type="button"
>
Cancel
</Button>
<Button
variant="destructive"
aria-label="Delete"
className="flex flex-1"
onClick={onDelete}
>
Delete User
</Button>
</div>
</div>
</DialogFooter> </DialogFooter>
</DialogContent> </DialogContent>
</Dialog> </Dialog>

View File

@ -99,16 +99,13 @@ export default function ExportDialog({
} }
}) })
.catch((error) => { .catch((error) => {
if (error.response?.data?.message) { const errorMessage =
toast.error( error.response?.data?.message ||
`Failed to start export: ${error.response.data.message}`, error.response?.data?.detail ||
{ position: "top-center" }, "Unknown error";
); toast.error(`Failed to start export: ${errorMessage}`, {
} else { position: "top-center",
toast.error(`Failed to start export: ${error.message}`, { });
position: "top-center",
});
}
}); });
}, [camera, name, range, setRange, setName, setMode]); }, [camera, name, range, setRange, setName, setMode]);

View File

@ -106,16 +106,13 @@ export default function MobileReviewSettingsDrawer({
} }
}) })
.catch((error) => { .catch((error) => {
if (error.response?.data?.message) { const errorMessage =
toast.error( error.response?.data?.message ||
`Failed to start export: ${error.response.data.message}`, error.response?.data?.detail ||
{ position: "top-center" }, "Unknown error";
); toast.error(`Failed to start export: ${errorMessage}`, {
} else { position: "top-center",
toast.error(`Failed to start export: ${error.message}`, { });
position: "top-center",
});
}
}); });
}, [camera, name, range, setRange, setName, setMode]); }, [camera, name, range, setRange, setName, setMode]);

View File

@ -0,0 +1,119 @@
import { Button } from "../ui/button";
import {
  Dialog,
  DialogContent,
  DialogDescription,
  DialogFooter,
  DialogHeader,
  DialogTitle,
} from "../ui/dialog";
import {
  Select,
  SelectContent,
  SelectItem,
  SelectTrigger,
  SelectValue,
} from "../ui/select";
import { useEffect, useState } from "react";
import { LuShield, LuUser } from "react-icons/lu";
type RoleChangeDialogProps = {
  // controls dialog visibility
  show: boolean;
  // user whose role is being edited (display only)
  username: string;
  // the user's role as currently persisted
  currentRole: "admin" | "viewer";
  // called with the newly selected role when the user confirms
  onSave: (role: "admin" | "viewer") => void;
  // called when the dialog is dismissed without saving
  onCancel: () => void;
};

/**
 * Dialog for switching a user's role between "admin" and "viewer".
 * Purely presentational: the parent owns persistence via onSave/onCancel.
 */
export default function RoleChangeDialog({
  show,
  username,
  currentRole,
  onSave,
  onCancel,
}: RoleChangeDialogProps) {
  const [selectedRole, setSelectedRole] = useState<"admin" | "viewer">(
    currentRole,
  );

  // Re-sync the selection whenever the dialog is (re)opened or the target
  // user's role changes. useState's initializer only runs on the first
  // mount, so without this a dialog that stays mounted would show a stale
  // role when reopened for a different user.
  useEffect(() => {
    if (show) {
      setSelectedRole(currentRole);
    }
  }, [show, currentRole]);

  return (
    <Dialog open={show} onOpenChange={onCancel}>
      <DialogContent className="sm:max-w-[425px]">
        <DialogHeader>
          <DialogTitle className="text-xl font-semibold">
            Change User Role
          </DialogTitle>
          <DialogDescription>
            Update permissions for{" "}
            <span className="font-medium">{username}</span>
          </DialogDescription>
        </DialogHeader>
        <div className="py-6">
          <div className="mb-4 text-sm text-muted-foreground">
            <p>Select the appropriate role for this user:</p>
            <ul className="mt-2 space-y-1 pl-5">
              <li>
                <span className="font-medium">Admin:</span> Full access to all
                features.
              </li>
              <li>
                <span className="font-medium">Viewer:</span> Limited to Live
                dashboards, Review, Explore, and Exports only.
              </li>
            </ul>
          </div>
          <Select
            value={selectedRole}
            onValueChange={(value) =>
              setSelectedRole(value as "admin" | "viewer")
            }
          >
            <SelectTrigger className="w-full">
              <SelectValue placeholder="Select a role" />
            </SelectTrigger>
            <SelectContent>
              <SelectItem value="admin" className="flex items-center gap-2">
                <div className="flex items-center gap-2">
                  <LuShield className="size-4 text-primary" />
                  <span>Admin</span>
                </div>
              </SelectItem>
              <SelectItem value="viewer" className="flex items-center gap-2">
                <div className="flex items-center gap-2">
                  <LuUser className="size-4 text-primary" />
                  <span>Viewer</span>
                </div>
              </SelectItem>
            </SelectContent>
          </Select>
        </div>
        <DialogFooter className="flex gap-3 sm:justify-end">
          <div className="flex flex-1 flex-col justify-end">
            <div className="flex flex-row gap-2 pt-5">
              <Button
                className="flex flex-1"
                aria-label="Cancel"
                onClick={onCancel}
                type="button"
              >
                Cancel
              </Button>
              <Button
                variant="select"
                aria-label="Save"
                className="flex flex-1"
                onClick={() => onSave(selectedRole)}
                // no-op save is disabled; only enabled when the role changed
                disabled={selectedRole === currentRole}
              >
                Save
              </Button>
            </div>
          </div>
        </DialogFooter>
      </DialogContent>
    </Dialog>
  );
}

View File

@ -1,50 +1,202 @@
"use client";
import { Button } from "../ui/button"; import { Button } from "../ui/button";
import { Input } from "../ui/input"; import { Input } from "../ui/input";
import { useState } from "react"; import { useState, useEffect } from "react";
import { import {
Dialog, Dialog,
DialogContent, DialogContent,
DialogDescription,
DialogFooter, DialogFooter,
DialogHeader, DialogHeader,
DialogTitle, DialogTitle,
} from "../ui/dialog"; } from "../ui/dialog";
import { Label } from "../ui/label";
import { LuCheck, LuX } from "react-icons/lu";
type SetPasswordProps = { type SetPasswordProps = {
show: boolean; show: boolean;
onSave: (password: string) => void; onSave: (password: string) => void;
onCancel: () => void; onCancel: () => void;
username?: string;
}; };
export default function SetPasswordDialog({ export default function SetPasswordDialog({
show, show,
onSave, onSave,
onCancel, onCancel,
username,
}: SetPasswordProps) { }: SetPasswordProps) {
const [password, setPassword] = useState<string>(); const [password, setPassword] = useState<string>("");
const [confirmPassword, setConfirmPassword] = useState<string>("");
const [passwordStrength, setPasswordStrength] = useState<number>(0);
const [error, setError] = useState<string | null>(null);
// Reset state when dialog opens/closes
useEffect(() => {
if (show) {
setPassword("");
setConfirmPassword("");
setError(null);
}
}, [show]);
// Simple password strength calculation
useEffect(() => {
if (!password) {
setPasswordStrength(0);
return;
}
let strength = 0;
// Length check
if (password.length >= 8) strength += 1;
// Contains number
if (/\d/.test(password)) strength += 1;
// Contains special char
if (/[!@#$%^&*(),.?":{}|<>]/.test(password)) strength += 1;
// Contains uppercase
if (/[A-Z]/.test(password)) strength += 1;
setPasswordStrength(strength);
}, [password]);
const handleSave = () => {
if (!password) {
setError("Password cannot be empty");
return;
}
if (password !== confirmPassword) {
setError("Passwords do not match");
return;
}
onSave(password);
};
const getStrengthLabel = () => {
if (!password) return "";
if (passwordStrength <= 1) return "Weak";
if (passwordStrength === 2) return "Medium";
if (passwordStrength === 3) return "Strong";
return "Very Strong";
};
const getStrengthColor = () => {
if (!password) return "bg-gray-200";
if (passwordStrength <= 1) return "bg-red-500";
if (passwordStrength === 2) return "bg-yellow-500";
if (passwordStrength === 3) return "bg-green-500";
return "bg-green-600";
};
return ( return (
<Dialog open={show} onOpenChange={onCancel}> <Dialog open={show} onOpenChange={onCancel}>
<DialogContent onOpenAutoFocus={(e) => e.preventDefault()}> <DialogContent className="sm:max-w-[425px]">
<DialogHeader> <DialogHeader className="space-y-2">
<DialogTitle>Set Password</DialogTitle> <DialogTitle>
{username ? `Update Password for ${username}` : "Set Password"}
</DialogTitle>
<DialogDescription>
Create a strong password to secure this account.
</DialogDescription>
</DialogHeader> </DialogHeader>
<Input
className="text-md w-full border border-input bg-background p-2 hover:bg-accent hover:text-accent-foreground dark:[color-scheme:dark]" <div className="space-y-4 py-4">
type="password" <div className="space-y-2">
value={password} <Label htmlFor="password">New Password</Label>
onChange={(event) => setPassword(event.target.value)} <Input
/> id="password"
<DialogFooter> className="h-10"
<Button type="password"
className="flex items-center gap-1" value={password}
aria-label="Save Password" onChange={(event) => {
variant="select" setPassword(event.target.value);
size="sm" setError(null);
onClick={() => { }}
onSave(password!); placeholder="Enter new password"
}} autoFocus
> />
Save
</Button> {/* Password strength indicator */}
{password && (
<div className="mt-2 space-y-1">
<div className="flex h-1.5 w-full overflow-hidden rounded-full bg-secondary-foreground">
<div
className={`${getStrengthColor()} transition-all duration-300`}
style={{ width: `${(passwordStrength / 3) * 100}%` }}
/>
</div>
<p className="text-xs text-muted-foreground">
Password strength:{" "}
<span className="font-medium">{getStrengthLabel()}</span>
</p>
</div>
)}
</div>
<div className="space-y-2">
<Label htmlFor="confirm-password">Confirm Password</Label>
<Input
id="confirm-password"
className="h-10"
type="password"
value={confirmPassword}
onChange={(event) => {
setConfirmPassword(event.target.value);
setError(null);
}}
placeholder="Confirm new password"
/>
{/* Password match indicator */}
{password && confirmPassword && (
<div className="mt-1 flex items-center gap-1.5 text-xs">
{password === confirmPassword ? (
<>
<LuCheck className="size-3.5 text-green-500" />
<span className="text-green-600">Passwords match</span>
</>
) : (
<>
<LuX className="size-3.5 text-red-500" />
<span className="text-red-600">Passwords don't match</span>
</>
)}
</div>
)}
</div>
{error && (
<div className="rounded-md bg-destructive/10 p-3 text-sm text-destructive">
{error}
</div>
)}
</div>
<DialogFooter className="flex flex-col-reverse gap-2 sm:flex-row sm:justify-end">
<div className="flex flex-1 flex-col justify-end">
<div className="flex flex-row gap-2 pt-5">
<Button
className="flex flex-1"
aria-label="Cancel"
onClick={onCancel}
type="button"
>
Cancel
</Button>
<Button
variant="select"
aria-label="Save"
className="flex flex-1"
onClick={handleSave}
disabled={!password || password !== confirmPassword}
>
Save
</Button>
</div>
</div>
</DialogFooter> </DialogFooter>
</DialogContent> </DialogContent>
</Dialog> </Dialog>

View File

@ -87,10 +87,13 @@ export function AnnotationSettingsPane({
} }
}) })
.catch((error) => { .catch((error) => {
toast.error( const errorMessage =
`Failed to save config changes: ${error.response.data.message}`, error.response?.data?.message ||
{ position: "top-center" }, error.response?.data?.detail ||
); "Unknown error";
toast.error(`Failed to save config changes: ${errorMessage}`, {
position: "top-center",
});
}) })
.finally(() => { .finally(() => {
setIsLoading(false); setIsLoading(false);

View File

@ -23,7 +23,6 @@ import {
LuEar, LuEar,
LuFolderX, LuFolderX,
LuPlay, LuPlay,
LuPlayCircle,
LuSettings, LuSettings,
LuTruck, LuTruck,
} from "react-icons/lu"; } from "react-icons/lu";
@ -54,6 +53,7 @@ import {
import { useNavigate } from "react-router-dom"; import { useNavigate } from "react-router-dom";
import { ObjectPath } from "./ObjectPath"; import { ObjectPath } from "./ObjectPath";
import { getLifecycleItemDescription } from "@/utils/lifecycleUtil"; import { getLifecycleItemDescription } from "@/utils/lifecycleUtil";
import { IoPlayCircleOutline } from "react-icons/io5";
type ObjectLifecycleProps = { type ObjectLifecycleProps = {
className?: string; className?: string;
@ -733,7 +733,7 @@ export function LifecycleIcon({
case "gone": case "gone":
return <IoMdExit className={cn(className)} />; return <IoMdExit className={cn(className)} />;
case "active": case "active":
return <LuPlayCircle className={cn(className)} />; return <IoPlayCircleOutline className={cn(className)} />;
case "stationary": case "stationary":
return <LuCircle className={cn(className)} />; return <LuCircle className={cn(className)} />;
case "entered_zone": case "entered_zone":

View File

@ -394,8 +394,12 @@ function ObjectDetailsTab({
}, },
); );
}) })
.catch(() => { .catch((error) => {
toast.error("Failed to update the description", { const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(`Failed to update the description: ${errorMessage}`, {
position: "top-center", position: "top-center",
}); });
setDesc(search.data.description); setDesc(search.data.description);
@ -422,11 +426,13 @@ function ObjectDetailsTab({
} }
}) })
.catch((error) => { .catch((error) => {
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error( toast.error(
`Failed to call ${capitalizeAll(config?.genai.provider.replaceAll("_", " ") ?? "Generative AI")} for a new description: ${error.response.data.message}`, `Failed to call ${capitalizeAll(config?.genai.provider.replaceAll("_", " ") ?? "Generative AI")} for a new description: ${errorMessage}`,
{ { position: "top-center" },
position: "top-center",
},
); );
}); });
}, },
@ -492,8 +498,12 @@ function ObjectDetailsTab({
setIsSubLabelDialogOpen(false); setIsSubLabelDialogOpen(false);
} }
}) })
.catch(() => { .catch((error) => {
toast.error("Failed to update sub label.", { const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(`Failed to update sub label: ${errorMessage}`, {
position: "top-center", position: "top-center",
}); });
}); });

View File

@ -22,6 +22,7 @@ import { TbExclamationCircle } from "react-icons/tb";
import { TooltipPortal } from "@radix-ui/react-tooltip"; import { TooltipPortal } from "@radix-ui/react-tooltip";
import { baseUrl } from "@/api/baseUrl"; import { baseUrl } from "@/api/baseUrl";
import { PlayerStats } from "./PlayerStats"; import { PlayerStats } from "./PlayerStats";
import { LuVideoOff } from "react-icons/lu";
type LivePlayerProps = { type LivePlayerProps = {
cameraRef?: (ref: HTMLDivElement | null) => void; cameraRef?: (ref: HTMLDivElement | null) => void;
@ -86,8 +87,13 @@ export default function LivePlayer({
// camera activity // camera activity
const { activeMotion, activeTracking, objects, offline } = const {
useCameraActivity(cameraConfig); enabled: cameraEnabled,
activeMotion,
activeTracking,
objects,
offline,
} = useCameraActivity(cameraConfig);
const cameraActive = useMemo( const cameraActive = useMemo(
() => () =>
@ -191,12 +197,40 @@ export default function LivePlayer({
setLiveReady(true); setLiveReady(true);
}, []); }, []);
// enabled states
const [isReEnabling, setIsReEnabling] = useState(false);
const prevCameraEnabledRef = useRef(cameraEnabled ?? true);
useEffect(() => {
if (cameraEnabled == undefined) {
return;
}
if (!prevCameraEnabledRef.current && cameraEnabled) {
// Camera enabled
setLiveReady(false);
setIsReEnabling(true);
setKey((prevKey) => prevKey + 1);
} else if (prevCameraEnabledRef.current && !cameraEnabled) {
// Camera disabled
setLiveReady(false);
setKey((prevKey) => prevKey + 1);
}
prevCameraEnabledRef.current = cameraEnabled;
}, [cameraEnabled]);
useEffect(() => {
if (liveReady && isReEnabling) {
setIsReEnabling(false);
}
}, [liveReady, isReEnabling]);
if (!cameraConfig) { if (!cameraConfig) {
return <ActivityIndicator />; return <ActivityIndicator />;
} }
let player; let player;
if (!autoLive || !streamName) { if (!autoLive || !streamName || !cameraEnabled) {
player = null; player = null;
} else if (preferredLiveMode == "webrtc") { } else if (preferredLiveMode == "webrtc") {
player = ( player = (
@ -267,6 +301,22 @@ export default function LivePlayer({
player = <ActivityIndicator />; player = <ActivityIndicator />;
} }
// if (cameraConfig.name == "lpr")
// console.log(
// cameraConfig.name,
// "enabled",
// cameraEnabled,
// "prev enabled",
// prevCameraEnabledRef.current,
// "offline",
// offline,
// "show still",
// showStillWithoutActivity,
// "live ready",
// liveReady,
// player,
// );
return ( return (
<div <div
ref={cameraRef ?? internalContainerRef} ref={cameraRef ?? internalContainerRef}
@ -287,16 +337,18 @@ export default function LivePlayer({
} }
}} }}
> >
{((showStillWithoutActivity && !liveReady) || liveReady) && ( {cameraEnabled &&
<> ((showStillWithoutActivity && !liveReady) || liveReady) && (
<div className="pointer-events-none absolute inset-x-0 top-0 z-10 h-[30%] w-full rounded-lg bg-gradient-to-b from-black/20 to-transparent md:rounded-2xl"></div> <>
<div className="pointer-events-none absolute inset-x-0 bottom-0 z-10 h-[10%] w-full rounded-lg bg-gradient-to-t from-black/20 to-transparent md:rounded-2xl"></div> <div className="pointer-events-none absolute inset-x-0 top-0 z-10 h-[30%] w-full rounded-lg bg-gradient-to-b from-black/20 to-transparent md:rounded-2xl"></div>
</> <div className="pointer-events-none absolute inset-x-0 bottom-0 z-10 h-[10%] w-full rounded-lg bg-gradient-to-t from-black/20 to-transparent md:rounded-2xl"></div>
)} </>
)}
{player} {player}
{!offline && !showStillWithoutActivity && !liveReady && ( {cameraEnabled &&
<ActivityIndicator /> !offline &&
)} (!showStillWithoutActivity || isReEnabling) &&
!liveReady && <ActivityIndicator />}
{((showStillWithoutActivity && !liveReady) || liveReady) && {((showStillWithoutActivity && !liveReady) || liveReady) &&
objects.length > 0 && ( objects.length > 0 && (
@ -344,7 +396,9 @@ export default function LivePlayer({
<div <div
className={cn( className={cn(
"absolute inset-0 w-full", "absolute inset-0 w-full",
showStillWithoutActivity && !liveReady ? "visible" : "invisible", showStillWithoutActivity && !liveReady && !isReEnabling
? "visible"
: "invisible",
)} )}
> >
<AutoUpdatingCameraImage <AutoUpdatingCameraImage
@ -371,6 +425,17 @@ export default function LivePlayer({
</div> </div>
)} )}
{!cameraEnabled && (
<div className="relative flex h-full w-full items-center justify-center">
<div className="flex h-32 flex-col items-center justify-center rounded-lg p-4 md:h-48 md:w-48">
<LuVideoOff className="mb-2 size-8 md:size-10" />
<p className="max-w-32 text-center text-sm md:max-w-40 md:text-base">
Camera is disabled
</p>
</div>
</div>
)}
<div className="absolute right-2 top-2"> <div className="absolute right-2 top-2">
{autoLive && {autoLive &&
!offline && !offline &&
@ -378,7 +443,7 @@ export default function LivePlayer({
((showStillWithoutActivity && !liveReady) || liveReady) && ( ((showStillWithoutActivity && !liveReady) || liveReady) && (
<MdCircle className="mr-2 size-2 animate-pulse text-danger shadow-danger drop-shadow-md" /> <MdCircle className="mr-2 size-2 animate-pulse text-danger shadow-danger drop-shadow-md" />
)} )}
{offline && showStillWithoutActivity && ( {((offline && showStillWithoutActivity) || !cameraEnabled) && (
<Chip <Chip
className={`z-0 flex items-start justify-between space-x-1 bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 text-xs capitalize`} className={`z-0 flex items-start justify-between space-x-1 bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 text-xs capitalize`}
> >

View File

@ -176,10 +176,13 @@ export default function MotionMaskEditPane({
} }
}) })
.catch((error) => { .catch((error) => {
toast.error( const errorMessage =
`Failed to save config changes: ${error.response.data.message}`, error.response?.data?.message ||
{ position: "top-center" }, error.response?.data?.detail ||
); "Unknown error";
toast.error(`Failed to save config changes: ${errorMessage}`, {
position: "top-center",
});
}) })
.finally(() => { .finally(() => {
setIsLoading(false); setIsLoading(false);

View File

@ -208,10 +208,13 @@ export default function ObjectMaskEditPane({
} }
}) })
.catch((error) => { .catch((error) => {
toast.error( const errorMessage =
`Failed to save config changes: ${error.response.data.message}`, error.response?.data?.message ||
{ position: "top-center" }, error.response?.data?.detail ||
); "Unknown error";
toast.error(`Failed to save config changes: ${errorMessage}`, {
position: "top-center",
});
}) })
.finally(() => { .finally(() => {
setIsLoading(false); setIsLoading(false);

View File

@ -186,10 +186,13 @@ export default function PolygonItem({
} }
}) })
.catch((error) => { .catch((error) => {
toast.error( const errorMessage =
`Failed to save config changes: ${error.response.data.message}`, error.response?.data?.message ||
{ position: "top-center" }, error.response?.data?.detail ||
); "Unknown error";
toast.error(`Failed to save config changes: ${errorMessage}`, {
position: "top-center",
});
}) })
.finally(() => { .finally(() => {
setIsLoading(false); setIsLoading(false);

View File

@ -68,7 +68,7 @@ export default function ZoneEditPane({
} }
return Object.values(config.cameras) return Object.values(config.cameras)
.filter((conf) => conf.ui.dashboard && conf.enabled) .filter((conf) => conf.ui.dashboard && conf.enabled_in_config)
.sort((aConf, bConf) => aConf.ui.order - bConf.ui.order); .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order);
}, [config]); }, [config]);
@ -414,10 +414,13 @@ export default function ZoneEditPane({
} }
}) })
.catch((error) => { .catch((error) => {
toast.error( const errorMessage =
`Failed to save config changes: ${error.response.data.message}`, error.response?.data?.message ||
{ position: "top-center" }, error.response?.data?.detail ||
); "Unknown error";
toast.error(`Failed to save config changes: ${errorMessage}`, {
position: "top-center",
});
}) })
.finally(() => { .finally(() => {
setIsLoading(false); setIsLoading(false);

View File

@ -0,0 +1,74 @@
import axios from "axios";
import { createContext, useEffect, useState } from "react";
import useSWR from "swr";
// Snapshot of the current authentication status exposed to consumers.
interface AuthState {
  // Logged-in user, or null when anonymous / auth disabled / still loading.
  // role is "admin" | "viewer"; null when the backend did not report one.
  user: { username: string; role: "admin" | "viewer" | null } | null;
  // true until the initial /profile request has resolved
  isLoading: boolean;
  isAuthenticated: boolean; // true if auth is required
}

// Context value: the auth snapshot plus imperative login/logout helpers.
interface AuthContextType {
  auth: AuthState;
  login: (user: AuthState["user"]) => void;
  logout: () => void;
}

// Default value is only seen by consumers rendered outside an AuthProvider;
// login/logout are intentionally no-ops in that case.
export const AuthContext = createContext<AuthContextType>({
  auth: { user: null, isLoading: true, isAuthenticated: false },
  login: () => {},
  logout: () => {},
});
/**
 * Provides authentication state to the app by fetching /profile once and
 * deriving whether auth is enabled and who is logged in.
 *
 * - 401 from /profile  -> auth required, nobody logged in
 * - profile w/ username -> logged-in user (role defaults to "viewer")
 * - "anonymous" profile -> auth disabled (unauthenticated mode)
 */
export function AuthProvider({ children }: { children: React.ReactNode }) {
  const [auth, setAuth] = useState<AuthState>({
    user: null,
    isLoading: true,
    isAuthenticated: false,
  });

  const { data: profile, error } = useSWR("/profile", {
    revalidateOnFocus: false,
    revalidateOnReconnect: true,
    fetcher: (url: string) =>
      axios.get(url, { withCredentials: true }).then((res) => res.data),
  });

  useEffect(() => {
    if (error) {
      if (axios.isAxiosError(error) && error.response?.status === 401) {
        // auth required but not logged in
        setAuth({ user: null, isLoading: false, isAuthenticated: true });
      }
      // NOTE(review): non-401 errors (network failure, 5xx) leave
      // isLoading true indefinitely — confirm whether consumers should
      // instead be unblocked with an "unknown" auth state here.
      return;
    }

    if (profile) {
      if (profile.username && profile.username !== "anonymous") {
        const newUser = {
          username: profile.username,
          role: profile.role || "viewer",
        };
        setAuth({ user: newUser, isLoading: false, isAuthenticated: true });
      } else {
        // Unauthenticated mode (anonymous)
        setAuth({ user: null, isLoading: false, isAuthenticated: false });
      }
    }
  }, [profile, error]);

  const login = (user: AuthState["user"]) => {
    setAuth({ user, isLoading: false, isAuthenticated: true });
  };

  const logout = () => {
    // Clear local auth state first, then notify the backend. The request is
    // fire-and-forget; swallow rejections so a network failure does not
    // surface as an unhandled promise rejection.
    setAuth({ user: null, isLoading: false, isAuthenticated: true });
    axios.get("/logout", { withCredentials: true }).catch(() => {});
  };

  return (
    <AuthContext.Provider value={{ auth, login, logout }}>
      {children}
    </AuthContext.Provider>
  );
}

View File

@ -6,6 +6,7 @@ import { IconContext } from "react-icons";
import { TooltipProvider } from "@/components/ui/tooltip"; import { TooltipProvider } from "@/components/ui/tooltip";
import { StatusBarMessagesProvider } from "@/context/statusbar-provider"; import { StatusBarMessagesProvider } from "@/context/statusbar-provider";
import { StreamingSettingsProvider } from "./streaming-settings-provider"; import { StreamingSettingsProvider } from "./streaming-settings-provider";
import { AuthProvider } from "./auth-context";
type TProvidersProps = { type TProvidersProps = {
children: ReactNode; children: ReactNode;
@ -14,19 +15,21 @@ type TProvidersProps = {
function providers({ children }: TProvidersProps) { function providers({ children }: TProvidersProps) {
return ( return (
<RecoilRoot> <RecoilRoot>
<ApiProvider> <AuthProvider>
<ThemeProvider defaultTheme="system" storageKey="frigate-ui-theme"> <ApiProvider>
<TooltipProvider> <ThemeProvider defaultTheme="system" storageKey="frigate-ui-theme">
<IconContext.Provider value={{ size: "20" }}> <TooltipProvider>
<StatusBarMessagesProvider> <IconContext.Provider value={{ size: "20" }}>
<StreamingSettingsProvider> <StatusBarMessagesProvider>
{children} <StreamingSettingsProvider>
</StreamingSettingsProvider> {children}
</StatusBarMessagesProvider> </StreamingSettingsProvider>
</IconContext.Provider> </StatusBarMessagesProvider>
</TooltipProvider> </IconContext.Provider>
</ThemeProvider> </TooltipProvider>
</ApiProvider> </ThemeProvider>
</ApiProvider>
</AuthProvider>
</RecoilRoot> </RecoilRoot>
); );
} }

Some files were not shown because too many files have changed in this diff Show More