Add more i18n key again

Merge branch 'dev' of https://github.com/ZhaiSoul/frigate into dev
This commit is contained in:
ZhaiSoul 2025-03-05 00:03:42 +08:00
commit d0d0756496
55 changed files with 1355 additions and 1412 deletions

View File

@ -175,6 +175,7 @@ jobs:
files: docker/rocm/rocm.hcl files: docker/rocm/rocm.hcl
set: | set: |
rocm.tags=${{ steps.setup.outputs.image-name }}-rocm rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-rocm,mode=max
*.cache-from=type=gha *.cache-from=type=gha
arm64_extra_builds: arm64_extra_builds:
runs-on: ubuntu-22.04-arm runs-on: ubuntu-22.04-arm

View File

@ -39,10 +39,7 @@ ARG DEBIAN_FRONTEND
ENV CCACHE_DIR /root/.ccache ENV CCACHE_DIR /root/.ccache
ENV CCACHE_MAXSIZE 2G ENV CCACHE_MAXSIZE 2G
# bind /var/cache/apt to tmpfs to speed up nginx build RUN --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \
RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
--mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \
--mount=type=cache,target=/root/.ccache \
/deps/build_nginx.sh /deps/build_nginx.sh
FROM wget AS sqlite-vec FROM wget AS sqlite-vec
@ -225,6 +222,9 @@ ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html # Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
ENV OPENCV_FFMPEG_LOGLEVEL=8 ENV OPENCV_FFMPEG_LOGLEVEL=8
# Set HailoRT to disable logging
ENV HAILORT_LOGGER_PATH=NONE
ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}" ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
# Install dependencies # Install dependencies

View File

@ -2,79 +2,49 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable # https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive ARG DEBIAN_FRONTEND=noninteractive
ARG ROCM=5.7.3 ARG ROCM=6.3.3
ARG AMDGPU=gfx900 ARG AMDGPU=gfx900
ARG HSA_OVERRIDE_GFX_VERSION ARG HSA_OVERRIDE_GFX_VERSION
ARG HSA_OVERRIDE ARG HSA_OVERRIDE
####################################################################### #######################################################################
FROM ubuntu:focal as rocm FROM wget AS rocm
ARG ROCM ARG ROCM
ARG AMDGPU
RUN apt-get update && apt-get -y upgrade RUN apt update && \
RUN apt-get -y install gnupg wget apt install -y wget gpg && \
wget -O rocm.deb https://repo.radeon.com/amdgpu-install/$ROCM/ubuntu/jammy/amdgpu-install_6.3.60303-1_all.deb && \
RUN mkdir --parents --mode=0755 /etc/apt/keyrings apt install -y ./rocm.deb && \
apt update && \
RUN wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor | tee /etc/apt/keyrings/rocm.gpg > /dev/null apt install -y rocm
COPY docker/rocm/rocm.list /etc/apt/sources.list.d/
COPY docker/rocm/rocm-pin-600 /etc/apt/preferences.d/
RUN apt-get update
RUN apt-get -y install --no-install-recommends migraphx hipfft roctracer
RUN apt-get -y install --no-install-recommends migraphx-dev
RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib
RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ RUN cd /opt/rocm-$ROCM/lib && \
cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \
mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib && \
cp -dpr migraphx/lib/* /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib
RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm
RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/ RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/
RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf RUN echo /opt/rocm/lib|tee /opt/rocm-dist/etc/ld.so.conf.d/rocm.conf
#######################################################################
FROM --platform=linux/amd64 debian:12 as debian-base
RUN apt-get update && apt-get -y upgrade
RUN apt-get -y install --no-install-recommends libelf1 libdrm2 libdrm-amdgpu1 libnuma1 kmod
RUN apt-get -y install python3
#######################################################################
# ROCm does not come with migraphx wrappers for python 3.9, so we build it here
FROM debian-base as debian-build
ARG ROCM
COPY --from=rocm /opt/rocm-$ROCM /opt/rocm-$ROCM
RUN ln -s /opt/rocm-$ROCM /opt/rocm
RUN apt-get -y install g++ cmake
RUN apt-get -y install python3-pybind11 python3-distutils python3-dev
WORKDIR /opt/build
COPY docker/rocm/migraphx .
RUN mkdir build && cd build && cmake .. && make install
####################################################################### #######################################################################
FROM deps AS deps-prelim FROM deps AS deps-prelim
# need this to install libnuma1 RUN apt-get update && apt-get install -y libnuma1
RUN apt-get update
# no ugprade?!?!
RUN apt-get -y install libnuma1
WORKDIR /opt/frigate/ WORKDIR /opt/frigate
COPY --from=rootfs / / COPY --from=rootfs / /
# Temporarily disabled to see if a new wheel can be built to support py3.11 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
#COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt && python3 get-pip.py "pip" --break-system-packages
#RUN python3 -m pip install --upgrade pip \ RUN python3 -m pip config set global.break-system-packages true
# && pip3 uninstall -y onnxruntime-openvino \
# && pip3 install -r /requirements.txt COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
RUN pip3 uninstall -y onnxruntime-openvino \
&& pip3 install -r /requirements.txt
####################################################################### #######################################################################
FROM scratch AS rocm-dist FROM scratch AS rocm-dist
@ -87,12 +57,11 @@ COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/ COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/ COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-dist/ / COPY --from=rocm /opt/rocm-dist/ /
COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-311-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/
####################################################################### #######################################################################
FROM deps-prelim AS rocm-prelim-hsa-override0 FROM deps-prelim AS rocm-prelim-hsa-override0
\ ENV HSA_ENABLE_SDMA=0
ENV HSA_ENABLE_SDMA=0 ENV MIGRAPHX_ENABLE_NHWC=1
COPY --from=rocm-dist / / COPY --from=rocm-dist / /

View File

@ -1,26 +0,0 @@
cmake_minimum_required(VERSION 3.1)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif()
SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
project(migraphx_py)
include_directories(/opt/rocm/include)
find_package(pybind11 REQUIRED)
pybind11_add_module(migraphx migraphx_py.cpp)
target_link_libraries(migraphx PRIVATE /opt/rocm/lib/libmigraphx.so /opt/rocm/lib/libmigraphx_tf.so /opt/rocm/lib/libmigraphx_onnx.so)
install(TARGETS migraphx
COMPONENT python
LIBRARY DESTINATION /opt/rocm/lib
)

View File

@ -1,582 +0,0 @@
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <migraphx/program.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/operation.hpp>
#include <migraphx/quantization.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/ref/target.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/tf.hpp>
#include <migraphx/onnx.hpp>
#include <migraphx/load_save.hpp>
#include <migraphx/register_target.hpp>
#include <migraphx/json.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp>
#ifdef HAVE_GPU
#include <migraphx/gpu/hip.hpp>
#endif
using half = half_float::half;
namespace py = pybind11;
#ifdef __clang__
#define MIGRAPHX_PUSH_UNUSED_WARNING \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"")
#define MIGRAPHX_POP_WARNING _Pragma("clang diagnostic pop")
#else
#define MIGRAPHX_PUSH_UNUSED_WARNING
#define MIGRAPHX_POP_WARNING
#endif
#define MIGRAPHX_PYBIND11_MODULE(...) \
MIGRAPHX_PUSH_UNUSED_WARNING \
PYBIND11_MODULE(__VA_ARGS__) \
MIGRAPHX_POP_WARNING
#define MIGRAPHX_PYTHON_GENERATE_SHAPE_ENUM(x, t) .value(#x, migraphx::shape::type_t::x)
namespace migraphx {
migraphx::value to_value(py::kwargs kwargs);
migraphx::value to_value(py::list lst);
template <class T, class F>
void visit_py(T x, F f)
{
if(py::isinstance<py::kwargs>(x))
{
f(to_value(x.template cast<py::kwargs>()));
}
else if(py::isinstance<py::list>(x))
{
f(to_value(x.template cast<py::list>()));
}
else if(py::isinstance<py::bool_>(x))
{
f(x.template cast<bool>());
}
else if(py::isinstance<py::int_>(x) or py::hasattr(x, "__index__"))
{
f(x.template cast<int>());
}
else if(py::isinstance<py::float_>(x))
{
f(x.template cast<float>());
}
else if(py::isinstance<py::str>(x))
{
f(x.template cast<std::string>());
}
else if(py::isinstance<migraphx::shape::dynamic_dimension>(x))
{
f(migraphx::to_value(x.template cast<migraphx::shape::dynamic_dimension>()));
}
else
{
MIGRAPHX_THROW("VISIT_PY: Unsupported data type!");
}
}
migraphx::value to_value(py::list lst)
{
migraphx::value v = migraphx::value::array{};
for(auto val : lst)
{
visit_py(val, [&](auto py_val) { v.push_back(py_val); });
}
return v;
}
migraphx::value to_value(py::kwargs kwargs)
{
migraphx::value v = migraphx::value::object{};
for(auto arg : kwargs)
{
auto&& key = py::str(arg.first);
auto&& val = arg.second;
visit_py(val, [&](auto py_val) { v[key] = py_val; });
}
return v;
}
} // namespace migraphx
namespace pybind11 {
namespace detail {
template <>
struct npy_format_descriptor<half>
{
static std::string format()
{
// following: https://docs.python.org/3/library/struct.html#format-characters
return "e";
}
static constexpr auto name() { return _("half"); }
};
} // namespace detail
} // namespace pybind11
template <class F>
void visit_type(const migraphx::shape& s, F f)
{
s.visit_type(f);
}
template <class T, class F>
void visit(const migraphx::raw_data<T>& x, F f)
{
x.visit(f);
}
template <class F>
void visit_types(F f)
{
migraphx::shape::visit_types(f);
}
template <class T>
py::buffer_info to_buffer_info(T& x)
{
migraphx::shape s = x.get_shape();
assert(s.type() != migraphx::shape::tuple_type);
if(s.dynamic())
MIGRAPHX_THROW("MIGRAPHX PYTHON: dynamic shape argument passed to to_buffer_info");
auto strides = s.strides();
std::transform(
strides.begin(), strides.end(), strides.begin(), [&](auto i) { return i * s.type_size(); });
py::buffer_info b;
visit_type(s, [&](auto as) {
// migraphx use int8_t data to store bool type, we need to
// explicitly specify the data type as bool for python
if(s.type() == migraphx::shape::bool_type)
{
b = py::buffer_info(x.data(),
as.size(),
py::format_descriptor<bool>::format(),
s.ndim(),
s.lens(),
strides);
}
else
{
b = py::buffer_info(x.data(),
as.size(),
py::format_descriptor<decltype(as())>::format(),
s.ndim(),
s.lens(),
strides);
}
});
return b;
}
migraphx::shape to_shape(const py::buffer_info& info)
{
migraphx::shape::type_t t;
std::size_t n = 0;
visit_types([&](auto as) {
if(info.format == py::format_descriptor<decltype(as())>::format() or
(info.format == "l" and py::format_descriptor<decltype(as())>::format() == "q") or
(info.format == "L" and py::format_descriptor<decltype(as())>::format() == "Q"))
{
t = as.type_enum();
n = sizeof(as());
}
else if(info.format == "?" and py::format_descriptor<decltype(as())>::format() == "b")
{
t = migraphx::shape::bool_type;
n = sizeof(bool);
}
});
if(n == 0)
{
MIGRAPHX_THROW("MIGRAPHX PYTHON: Unsupported data type " + info.format);
}
auto strides = info.strides;
std::transform(strides.begin(), strides.end(), strides.begin(), [&](auto i) -> std::size_t {
return n > 0 ? i / n : 0;
});
// scalar support
if(info.shape.empty())
{
return migraphx::shape{t};
}
else
{
return migraphx::shape{t, info.shape, strides};
}
}
MIGRAPHX_PYBIND11_MODULE(migraphx, m)
{
py::class_<migraphx::shape> shape_cls(m, "shape");
shape_cls
.def(py::init([](py::kwargs kwargs) {
auto v = migraphx::to_value(kwargs);
auto t = migraphx::shape::parse_type(v.get("type", "float"));
if(v.contains("dyn_dims"))
{
auto dyn_dims =
migraphx::from_value<std::vector<migraphx::shape::dynamic_dimension>>(
v.at("dyn_dims"));
return migraphx::shape(t, dyn_dims);
}
auto lens = v.get<std::size_t>("lens", {1});
if(v.contains("strides"))
return migraphx::shape(t, lens, v.at("strides").to_vector<std::size_t>());
else
return migraphx::shape(t, lens);
}))
.def("type", &migraphx::shape::type)
.def("lens", &migraphx::shape::lens)
.def("strides", &migraphx::shape::strides)
.def("ndim", &migraphx::shape::ndim)
.def("elements", &migraphx::shape::elements)
.def("bytes", &migraphx::shape::bytes)
.def("type_string", &migraphx::shape::type_string)
.def("type_size", &migraphx::shape::type_size)
.def("dyn_dims", &migraphx::shape::dyn_dims)
.def("packed", &migraphx::shape::packed)
.def("transposed", &migraphx::shape::transposed)
.def("broadcasted", &migraphx::shape::broadcasted)
.def("standard", &migraphx::shape::standard)
.def("scalar", &migraphx::shape::scalar)
.def("dynamic", &migraphx::shape::dynamic)
.def("__eq__", std::equal_to<migraphx::shape>{})
.def("__ne__", std::not_equal_to<migraphx::shape>{})
.def("__repr__", [](const migraphx::shape& s) { return migraphx::to_string(s); });
py::enum_<migraphx::shape::type_t>(shape_cls, "type_t")
MIGRAPHX_SHAPE_VISIT_TYPES(MIGRAPHX_PYTHON_GENERATE_SHAPE_ENUM);
py::class_<migraphx::shape::dynamic_dimension>(shape_cls, "dynamic_dimension")
.def(py::init<>())
.def(py::init<std::size_t, std::size_t>())
.def(py::init<std::size_t, std::size_t, std::set<std::size_t>>())
.def_readwrite("min", &migraphx::shape::dynamic_dimension::min)
.def_readwrite("max", &migraphx::shape::dynamic_dimension::max)
.def_readwrite("optimals", &migraphx::shape::dynamic_dimension::optimals)
.def("is_fixed", &migraphx::shape::dynamic_dimension::is_fixed);
py::class_<migraphx::argument>(m, "argument", py::buffer_protocol())
.def_buffer([](migraphx::argument& x) -> py::buffer_info { return to_buffer_info(x); })
.def(py::init([](py::buffer b) {
py::buffer_info info = b.request();
return migraphx::argument(to_shape(info), info.ptr);
}))
.def("get_shape", &migraphx::argument::get_shape)
.def("data_ptr",
[](migraphx::argument& x) { return reinterpret_cast<std::uintptr_t>(x.data()); })
.def("tolist",
[](migraphx::argument& x) {
py::list l{x.get_shape().elements()};
visit(x, [&](auto data) { l = py::cast(data.to_vector()); });
return l;
})
.def("__eq__", std::equal_to<migraphx::argument>{})
.def("__ne__", std::not_equal_to<migraphx::argument>{})
.def("__repr__", [](const migraphx::argument& x) { return migraphx::to_string(x); });
py::class_<migraphx::target>(m, "target");
py::class_<migraphx::instruction_ref>(m, "instruction_ref")
.def("shape", [](migraphx::instruction_ref i) { return i->get_shape(); })
.def("op", [](migraphx::instruction_ref i) { return i->get_operator(); });
py::class_<migraphx::module, std::unique_ptr<migraphx::module, py::nodelete>>(m, "module")
.def("print", [](const migraphx::module& mm) { std::cout << mm << std::endl; })
.def(
"add_instruction",
[](migraphx::module& mm,
const migraphx::operation& op,
std::vector<migraphx::instruction_ref>& args,
std::vector<migraphx::module*>& mod_args) {
return mm.add_instruction(op, args, mod_args);
},
py::arg("op"),
py::arg("args"),
py::arg("mod_args") = std::vector<migraphx::module*>{})
.def(
"add_literal",
[](migraphx::module& mm, py::buffer data) {
py::buffer_info info = data.request();
auto literal_shape = to_shape(info);
return mm.add_literal(literal_shape, reinterpret_cast<char*>(info.ptr));
},
py::arg("data"))
.def(
"add_parameter",
[](migraphx::module& mm, const std::string& name, const migraphx::shape shape) {
return mm.add_parameter(name, shape);
},
py::arg("name"),
py::arg("shape"))
.def(
"add_return",
[](migraphx::module& mm, std::vector<migraphx::instruction_ref>& args) {
return mm.add_return(args);
},
py::arg("args"))
.def("__repr__", [](const migraphx::module& mm) { return migraphx::to_string(mm); });
py::class_<migraphx::program>(m, "program")
.def(py::init([]() { return migraphx::program(); }))
.def("get_parameter_names", &migraphx::program::get_parameter_names)
.def("get_parameter_shapes", &migraphx::program::get_parameter_shapes)
.def("get_output_shapes", &migraphx::program::get_output_shapes)
.def("is_compiled", &migraphx::program::is_compiled)
.def(
"compile",
[](migraphx::program& p,
const migraphx::target& t,
bool offload_copy,
bool fast_math,
bool exhaustive_tune) {
migraphx::compile_options options;
options.offload_copy = offload_copy;
options.fast_math = fast_math;
options.exhaustive_tune = exhaustive_tune;
p.compile(t, options);
},
py::arg("t"),
py::arg("offload_copy") = true,
py::arg("fast_math") = true,
py::arg("exhaustive_tune") = false)
.def("get_main_module", [](const migraphx::program& p) { return p.get_main_module(); })
.def(
"create_module",
[](migraphx::program& p, const std::string& name) { return p.create_module(name); },
py::arg("name"))
.def("run",
[](migraphx::program& p, py::dict params) {
migraphx::parameter_map pm;
for(auto x : params)
{
std::string key = x.first.cast<std::string>();
py::buffer b = x.second.cast<py::buffer>();
py::buffer_info info = b.request();
pm[key] = migraphx::argument(to_shape(info), info.ptr);
}
return p.eval(pm);
})
.def("run_async",
[](migraphx::program& p,
py::dict params,
std::uintptr_t stream,
std::string stream_name) {
migraphx::parameter_map pm;
for(auto x : params)
{
std::string key = x.first.cast<std::string>();
py::buffer b = x.second.cast<py::buffer>();
py::buffer_info info = b.request();
pm[key] = migraphx::argument(to_shape(info), info.ptr);
}
migraphx::execution_environment exec_env{
migraphx::any_ptr(reinterpret_cast<void*>(stream), stream_name), true};
return p.eval(pm, exec_env);
})
.def("sort", &migraphx::program::sort)
.def("print", [](const migraphx::program& p) { std::cout << p << std::endl; })
.def("__eq__", std::equal_to<migraphx::program>{})
.def("__ne__", std::not_equal_to<migraphx::program>{})
.def("__repr__", [](const migraphx::program& p) { return migraphx::to_string(p); });
py::class_<migraphx::operation> op(m, "op");
op.def(py::init([](const std::string& name, py::kwargs kwargs) {
migraphx::value v = migraphx::value::object{};
if(kwargs)
{
v = migraphx::to_value(kwargs);
}
return migraphx::make_op(name, v);
}))
.def("name", &migraphx::operation::name);
py::enum_<migraphx::op::pooling_mode>(op, "pooling_mode")
.value("average", migraphx::op::pooling_mode::average)
.value("max", migraphx::op::pooling_mode::max)
.value("lpnorm", migraphx::op::pooling_mode::lpnorm);
py::enum_<migraphx::op::rnn_direction>(op, "rnn_direction")
.value("forward", migraphx::op::rnn_direction::forward)
.value("reverse", migraphx::op::rnn_direction::reverse)
.value("bidirectional", migraphx::op::rnn_direction::bidirectional);
m.def(
"argument_from_pointer",
[](const migraphx::shape shape, const int64_t address) {
return migraphx::argument(shape, reinterpret_cast<void*>(address));
},
py::arg("shape"),
py::arg("address"));
m.def(
"parse_tf",
[](const std::string& filename,
bool is_nhwc,
unsigned int batch_size,
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
std::vector<std::string> output_names) {
return migraphx::parse_tf(
filename, migraphx::tf_options{is_nhwc, batch_size, map_input_dims, output_names});
},
"Parse tf protobuf (default format is nhwc)",
py::arg("filename"),
py::arg("is_nhwc") = true,
py::arg("batch_size") = 1,
py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
py::arg("output_names") = std::vector<std::string>());
m.def(
"parse_onnx",
[](const std::string& filename,
unsigned int default_dim_value,
migraphx::shape::dynamic_dimension default_dyn_dim_value,
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>
map_dyn_input_dims,
bool skip_unknown_operators,
bool print_program_on_error,
int64_t max_loop_iterations) {
migraphx::onnx_options options;
options.default_dim_value = default_dim_value;
options.default_dyn_dim_value = default_dyn_dim_value;
options.map_input_dims = map_input_dims;
options.map_dyn_input_dims = map_dyn_input_dims;
options.skip_unknown_operators = skip_unknown_operators;
options.print_program_on_error = print_program_on_error;
options.max_loop_iterations = max_loop_iterations;
return migraphx::parse_onnx(filename, options);
},
"Parse onnx file",
py::arg("filename"),
py::arg("default_dim_value") = 0,
py::arg("default_dyn_dim_value") = migraphx::shape::dynamic_dimension{1, 1},
py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
py::arg("map_dyn_input_dims") =
std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>(),
py::arg("skip_unknown_operators") = false,
py::arg("print_program_on_error") = false,
py::arg("max_loop_iterations") = 10);
m.def(
"parse_onnx_buffer",
[](const std::string& onnx_buffer,
unsigned int default_dim_value,
migraphx::shape::dynamic_dimension default_dyn_dim_value,
std::unordered_map<std::string, std::vector<std::size_t>> map_input_dims,
std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>
map_dyn_input_dims,
bool skip_unknown_operators,
bool print_program_on_error) {
migraphx::onnx_options options;
options.default_dim_value = default_dim_value;
options.default_dyn_dim_value = default_dyn_dim_value;
options.map_input_dims = map_input_dims;
options.map_dyn_input_dims = map_dyn_input_dims;
options.skip_unknown_operators = skip_unknown_operators;
options.print_program_on_error = print_program_on_error;
return migraphx::parse_onnx_buffer(onnx_buffer, options);
},
"Parse onnx file",
py::arg("filename"),
py::arg("default_dim_value") = 0,
py::arg("default_dyn_dim_value") = migraphx::shape::dynamic_dimension{1, 1},
py::arg("map_input_dims") = std::unordered_map<std::string, std::vector<std::size_t>>(),
py::arg("map_dyn_input_dims") =
std::unordered_map<std::string, std::vector<migraphx::shape::dynamic_dimension>>(),
py::arg("skip_unknown_operators") = false,
py::arg("print_program_on_error") = false);
m.def(
"load",
[](const std::string& name, const std::string& format) {
migraphx::file_options options;
options.format = format;
return migraphx::load(name, options);
},
"Load MIGraphX program",
py::arg("filename"),
py::arg("format") = "msgpack");
m.def(
"save",
[](const migraphx::program& p, const std::string& name, const std::string& format) {
migraphx::file_options options;
options.format = format;
return migraphx::save(p, name, options);
},
"Save MIGraphX program",
py::arg("p"),
py::arg("filename"),
py::arg("format") = "msgpack");
m.def("get_target", &migraphx::make_target);
m.def("create_argument", [](const migraphx::shape& s, const std::vector<double>& values) {
if(values.size() != s.elements())
MIGRAPHX_THROW("Values and shape elements do not match");
migraphx::argument a{s};
a.fill(values.begin(), values.end());
return a;
});
m.def("generate_argument", &migraphx::generate_argument, py::arg("s"), py::arg("seed") = 0);
m.def("fill_argument", &migraphx::fill_argument, py::arg("s"), py::arg("value"));
m.def("quantize_fp16",
&migraphx::quantize_fp16,
py::arg("prog"),
py::arg("ins_names") = std::vector<std::string>{"all"});
m.def("quantize_int8",
&migraphx::quantize_int8,
py::arg("prog"),
py::arg("t"),
py::arg("calibration") = std::vector<migraphx::parameter_map>{},
py::arg("ins_names") = std::vector<std::string>{"dot", "convolution"});
#ifdef HAVE_GPU
m.def("allocate_gpu", &migraphx::gpu::allocate_gpu, py::arg("s"), py::arg("host") = false);
m.def("to_gpu", &migraphx::gpu::to_gpu, py::arg("arg"), py::arg("host") = false);
m.def("from_gpu", &migraphx::gpu::from_gpu);
m.def("gpu_sync", [] { migraphx::gpu::gpu_sync(); });
#endif
#ifdef VERSION_INFO
m.attr("__version__") = VERSION_INFO;
#else
m.attr("__version__") = "dev";
#endif
}

View File

@ -1 +1 @@
onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v1.0.0/onnxruntime_rocm-1.17.3-cp39-cp39-linux_x86_64.whl onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v6.3.3/onnxruntime_rocm-1.20.1-cp311-cp311-linux_x86_64.whl

View File

@ -1,3 +0,0 @@
Package: *
Pin: release o=repo.radeon.com
Pin-Priority: 600

View File

@ -2,7 +2,7 @@ variable "AMDGPU" {
default = "gfx900" default = "gfx900"
} }
variable "ROCM" { variable "ROCM" {
default = "5.7.3" default = "6.3.3"
} }
variable "HSA_OVERRIDE_GFX_VERSION" { variable "HSA_OVERRIDE_GFX_VERSION" {
default = "" default = ""
@ -10,6 +10,13 @@ variable "HSA_OVERRIDE_GFX_VERSION" {
variable "HSA_OVERRIDE" { variable "HSA_OVERRIDE" {
default = "1" default = "1"
} }
target wget {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/amd64"]
target = "wget"
}
target deps { target deps {
dockerfile = "docker/main/Dockerfile" dockerfile = "docker/main/Dockerfile"
platforms = ["linux/amd64"] platforms = ["linux/amd64"]
@ -26,6 +33,7 @@ target rocm {
dockerfile = "docker/rocm/Dockerfile" dockerfile = "docker/rocm/Dockerfile"
contexts = { contexts = {
deps = "target:deps", deps = "target:deps",
wget = "target:wget",
rootfs = "target:rootfs" rootfs = "target:rootfs"
} }
platforms = ["linux/amd64"] platforms = ["linux/amd64"]

View File

@ -1 +0,0 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/5.7.3 focal main

View File

@ -49,7 +49,7 @@ This does not affect using hardware for accelerating other tasks such as [semant
# Officially Supported Detectors # Officially Supported Detectors
Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `onnx`, `openvino`, `rknn`, `rocm`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `onnx`, `openvino`, `rknn`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
## Edge TPU Detector ## Edge TPU Detector
@ -367,7 +367,7 @@ model:
### Setup ### Setup
The `rocm` detector supports running YOLO-NAS models on AMD GPUs. Use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`. Support for AMD GPUs is provided using the [ONNX detector](#ONNX). In order to utilize the AMD GPU for object detection use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`.
### Docker settings for GPU access ### Docker settings for GPU access
@ -446,29 +446,9 @@ $ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/
### Supported Models ### Supported Models
There is no default model provided, the following formats are supported: See [ONNX supported models](#supported-models) for supported models, there are some caveats:
- D-FINE models are not supported
#### YOLO-NAS - YOLO-NAS models are known to not run well on integrated GPUs
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. See [the models section](#downloading-yolo-nas-model) for more information on downloading the YOLO-NAS model for use in Frigate.
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
detectors:
rocm:
type: rocm
model:
model_type: yolonas
width: 320 # <--- should match whatever was set in notebook
height: 320 # <--- should match whatever was set in notebook
input_pixel_format: bgr
path: /config/yolo_nas_s.onnx
labelmap_path: /labelmap/coco-80.txt
```
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## ONNX ## ONNX
@ -562,30 +542,15 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl
#### D-FINE #### D-FINE
[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. [D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
To export as ONNX: :::warning
1. Clone: https://github.com/Peterande/D-FINE and install all dependencies. D-FINE is currently not supported on OpenVINO
2. Select and download a checkpoint from the [readme](https://github.com/Peterande/D-FINE).
3. Modify line 58 of `tools/deployment/export_onnx.py` and change batch size to 1: `data = torch.rand(1, 3, 640, 640)`
4. Run the export, making sure you select the right config, for your checkpoint.
Example:
```
python3 tools/deployment/export_onnx.py -c configs/dfine/objects365/dfine_hgnetv2_m_obj2coco.yml -r output/dfine_m_obj2coco.pth
```
:::tip
Model export has only been tested on Linux (or WSL2). Not all dependencies are in `requirements.txt`. Some live in the deployment folder, and some are still missing entirely and must be installed manually.
Make sure you change the batch size to 1 before exporting.
::: :::
After placing the downloaded onnx model in your config folder, you can use the following configuration: After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration:
```yaml ```yaml
detectors: detectors:
@ -784,6 +749,29 @@ Some model types are not included in Frigate by default.
Here are some tips for getting different model types Here are some tips for getting different model types
### Downloading D-FINE Model
To export as ONNX:
1. Clone: https://github.com/Peterande/D-FINE and install all dependencies.
2. Select and download a checkpoint from the [readme](https://github.com/Peterande/D-FINE).
3. Modify line 58 of `tools/deployment/export_onnx.py` and change batch size to 1: `data = torch.rand(1, 3, 640, 640)`
4. Run the export, making sure you select the right config for your checkpoint.
Example:
```
python3 tools/deployment/export_onnx.py -c configs/dfine/objects365/dfine_hgnetv2_m_obj2coco.yml -r output/dfine_m_obj2coco.pth
```
:::tip
Model export has only been tested on Linux (or WSL2). Not all dependencies are in `requirements.txt`. Some live in the deployment folder, and some are still missing entirely and must be installed manually.
Make sure you change the batch size to 1 before exporting.
:::
### Downloading YOLO-NAS Model ### Downloading YOLO-NAS Model
You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb). You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).

View File

@ -222,6 +222,14 @@ Publishes the rms value for audio detected on this camera.
**NOTE:** Requires audio detection to be enabled **NOTE:** Requires audio detection to be enabled
### `frigate/<camera_name>/enabled/set`
Topic to turn Frigate's processing of a camera on and off. Expected values are `ON` and `OFF`.
### `frigate/<camera_name>/enabled/state`
Topic with current state of processing for a camera. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/detect/set` ### `frigate/<camera_name>/detect/set`
Topic to turn object detection for a camera on and off. Expected values are `ON` and `OFF`. Topic to turn object detection for a camera on and off. Expected values are `ON` and `OFF`.

View File

@ -28,11 +28,11 @@ Not all model types are supported by all detectors, so it's important to choose
## Supported detector types ## Supported detector types
Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), ONNX (`onnx`), and ROCm (`rocm`) detectors. Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVino (`openvino`), and ONNX (`onnx`) detectors.
:::warning :::warning
Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15 and later. Using Frigate+ models with `onnx` is only available with Frigate 0.15 and later.
::: :::
@ -42,7 +42,7 @@ Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` | | [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` |
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` | | [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` |
| [NVidia GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` | | [NVidia GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` |
| [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\* | `rocm` | `yolonas` | | [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\* | `onnx` | `yolonas` |
_\* Requires Frigate 0.15_ _\* Requires Frigate 0.15_

View File

@ -20,7 +20,7 @@ class CameraActivityManager:
self.all_zone_labels: dict[str, set[str]] = {} self.all_zone_labels: dict[str, set[str]] = {}
for camera_config in config.cameras.values(): for camera_config in config.cameras.values():
if not camera_config.enabled: if not camera_config.enabled_in_config:
continue continue
self.last_camera_activity[camera_config.name] = {} self.last_camera_activity[camera_config.name] = {}

View File

@ -55,6 +55,7 @@ class Dispatcher:
self._camera_settings_handlers: dict[str, Callable] = { self._camera_settings_handlers: dict[str, Callable] = {
"audio": self._on_audio_command, "audio": self._on_audio_command,
"detect": self._on_detect_command, "detect": self._on_detect_command,
"enabled": self._on_enabled_command,
"improve_contrast": self._on_motion_improve_contrast_command, "improve_contrast": self._on_motion_improve_contrast_command,
"ptz_autotracker": self._on_ptz_autotracker_command, "ptz_autotracker": self._on_ptz_autotracker_command,
"motion": self._on_motion_command, "motion": self._on_motion_command,
@ -167,6 +168,7 @@ class Dispatcher:
for camera in camera_status.keys(): for camera in camera_status.keys():
camera_status[camera]["config"] = { camera_status[camera]["config"] = {
"detect": self.config.cameras[camera].detect.enabled, "detect": self.config.cameras[camera].detect.enabled,
"enabled": self.config.cameras[camera].enabled,
"snapshots": self.config.cameras[camera].snapshots.enabled, "snapshots": self.config.cameras[camera].snapshots.enabled,
"record": self.config.cameras[camera].record.enabled, "record": self.config.cameras[camera].record.enabled,
"audio": self.config.cameras[camera].audio.enabled, "audio": self.config.cameras[camera].audio.enabled,
@ -278,6 +280,27 @@ class Dispatcher:
self.config_updater.publish(f"config/detect/{camera_name}", detect_settings) self.config_updater.publish(f"config/detect/{camera_name}", detect_settings)
self.publish(f"{camera_name}/detect/state", payload, retain=True) self.publish(f"{camera_name}/detect/state", payload, retain=True)
def _on_enabled_command(self, camera_name: str, payload: str) -> None:
    """Callback for camera topic.

    Handles frigate/<camera_name>/enabled/set: toggles runtime processing
    for the camera, then republishes the config update and the new state.
    """
    camera_settings = self.config.cameras[camera_name]

    # A camera disabled in the config file can never be re-enabled over MQTT.
    if payload == "ON" and not camera_settings.enabled_in_config:
        logger.error(
            "Camera must be enabled in the config to be turned on via MQTT."
        )
        return

    if payload == "ON" and not camera_settings.enabled:
        logger.info(f"Turning on camera {camera_name}")
        camera_settings.enabled = True
    elif payload == "OFF" and camera_settings.enabled:
        logger.info(f"Turning off camera {camera_name}")
        camera_settings.enabled = False

    # Broadcast the updated camera config internally and echo state to MQTT.
    self.config_updater.publish(f"config/enabled/{camera_name}", camera_settings)
    self.publish(f"{camera_name}/enabled/state", payload, retain=True)
def _on_motion_command(self, camera_name: str, payload: str) -> None: def _on_motion_command(self, camera_name: str, payload: str) -> None:
"""Callback for motion topic.""" """Callback for motion topic."""
detect_settings = self.config.cameras[camera_name].detect detect_settings = self.config.cameras[camera_name].detect

View File

@ -43,6 +43,11 @@ class MqttClient(Communicator): # type: ignore[misc]
def _set_initial_topics(self) -> None: def _set_initial_topics(self) -> None:
"""Set initial state topics.""" """Set initial state topics."""
for camera_name, camera in self.config.cameras.items(): for camera_name, camera in self.config.cameras.items():
self.publish(
f"{camera_name}/enabled/state",
"ON" if camera.enabled_in_config else "OFF",
retain=True,
)
self.publish( self.publish(
f"{camera_name}/recordings/state", f"{camera_name}/recordings/state",
"ON" if camera.record.enabled_in_config else "OFF", "ON" if camera.record.enabled_in_config else "OFF",
@ -196,6 +201,7 @@ class MqttClient(Communicator): # type: ignore[misc]
# register callbacks # register callbacks
callback_types = [ callback_types = [
"enabled",
"recordings", "recordings",
"snapshots", "snapshots",
"detect", "detect",

View File

@ -102,6 +102,9 @@ class CameraConfig(FrigateBaseModel):
zones: dict[str, ZoneConfig] = Field( zones: dict[str, ZoneConfig] = Field(
default_factory=dict, title="Zone configuration." default_factory=dict, title="Zone configuration."
) )
enabled_in_config: Optional[bool] = Field(
default=None, title="Keep track of original state of camera."
)
_ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr() _ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr()

View File

@ -516,6 +516,7 @@ class FrigateConfig(FrigateBaseModel):
camera_config.detect.stationary.interval = stationary_threshold camera_config.detect.stationary.interval = stationary_threshold
# set config pre-value # set config pre-value
camera_config.enabled_in_config = camera_config.enabled
camera_config.audio.enabled_in_config = camera_config.audio.enabled camera_config.audio.enabled_in_config = camera_config.audio.enabled
camera_config.record.enabled_in_config = camera_config.record.enabled camera_config.record.enabled_in_config = camera_config.record.enabled
camera_config.notifications.enabled_in_config = ( camera_config.notifications.enabled_in_config = (

View File

@ -99,5 +99,5 @@ class ONNXDetector(DetectionApi):
return post_process_yolov9(predictions, self.w, self.h) return post_process_yolov9(predictions, self.w, self.h)
else: else:
raise Exception( raise Exception(
f"{self.onnx_model_type} is currently not supported for rocm. See the docs for more info on supported models." f"{self.onnx_model_type} is currently not supported for onnx. See the docs for more info on supported models."
) )

View File

@ -1,170 +0,0 @@
import ctypes
import logging
import os
import subprocess
import sys
import cv2
import numpy as np
from pydantic import Field
from typing_extensions import Literal
from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
BaseDetectorConfig,
ModelTypeEnum,
PixelFormatEnum,
)
logger = logging.getLogger(__name__)
DETECTOR_KEY = "rocm"
def detect_gfx_version():
    # Probe the GPU's gfx target (e.g. "gfx1031") via rocminfo.
    # HSA_OVERRIDE_GFX_VERSION is unset first so the probe reports the real
    # hardware target rather than a previously applied override.
    # NOTE(review): subprocess.getoutput folds stderr into the result, so on
    # failure this returns the error text instead of a gfx name.
    return subprocess.getoutput(
        "unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo | grep gfx |head -1|awk '{print $2}'"
    )
def auto_override_gfx_version():
    """Apply HSA_OVERRIDE_GFX_VERSION for gfx targets known to need it.

    Returns the override string in effect after the call: the user's existing
    override (left untouched), a newly applied mapping for the detected gfx
    target, or "" when no override is required.
    """
    # If environment variable already in place, do not override
    gfx_version = detect_gfx_version()
    old_override = os.getenv("HSA_OVERRIDE_GFX_VERSION")
    if old_override not in (None, ""):
        logger.warning(
            f"AMD/ROCm: detected {gfx_version} but HSA_OVERRIDE_GFX_VERSION already present ({old_override}), not overriding!"
        )
        return old_override

    # gfx targets that require an override for the ROCm runtime to work.
    mapping = {
        "gfx90c": "9.0.0",
        "gfx1031": "10.3.0",
        "gfx1103": "11.0.0",
    }
    override = mapping.get(gfx_version)
    if override is not None:
        logger.warning(
            f"AMD/ROCm: detected {gfx_version}, overriding HSA_OVERRIDE_GFX_VERSION={override}"
        )
        # Assign via os.environ instead of os.putenv: putenv does not update
        # os.environ, so later os.getenv() calls in this process (including a
        # re-entry into this function) would not see the override. Direct
        # assignment updates both the mapping and the process environment.
        os.environ["HSA_OVERRIDE_GFX_VERSION"] = override
        return override

    return ""
class ROCmDetectorConfig(BaseDetectorConfig):
    # Detector discriminator; selects this plugin when config uses type: rocm.
    type: Literal[DETECTOR_KEY]
    # When True, HIP is put into blocking mode so the host thread sleeps while
    # waiting on the GPU instead of spin-polling (less CPU, more latency).
    conserve_cpu: bool = Field(
        default=True,
        title="Conserve CPU at the expense of latency (and reduced max throughput)",
    )
    # When True, probe rocminfo and set HSA_OVERRIDE_GFX_VERSION for known
    # gfx targets before the migraphx runtime is loaded.
    auto_override_gfx: bool = Field(
        default=True, title="Automatically detect and override gfx version"
    )
class ROCmDetector(DetectionApi):
    """Object detector backed by AMD ROCm / MIGraphX.

    Loads (or compiles and caches) a MIGraphX model and runs inference on
    frames handed in by Frigate's detection pipeline.
    """

    type_key = DETECTOR_KEY

    def __init__(self, detector_config: ROCmDetectorConfig):
        # Optionally work around unsupported gfx targets before loading ROCm.
        if detector_config.auto_override_gfx:
            auto_override_gfx_version()

        try:
            # migraphx's Python bindings live under the ROCm install prefix.
            sys.path.append("/opt/rocm/lib")
            import migraphx

            logger.info("AMD/ROCm: loaded migraphx module")
        except ModuleNotFoundError:
            logger.error("AMD/ROCm: module loading failed, missing ROCm environment?")
            raise

        if detector_config.conserve_cpu:
            logger.info("AMD/ROCm: switching HIP to blocking mode to conserve CPU")
            # 4 == hipDeviceScheduleBlockingSync: sleep instead of spin-wait.
            ctypes.CDLL("/opt/rocm/lib/libamdhip64.so").hipSetDeviceFlags(4)

        self.h = detector_config.model.height
        self.w = detector_config.model.width
        self.rocm_model_type = detector_config.model.model_type
        self.rocm_model_px = detector_config.model.input_pixel_format
        path = detector_config.model.path
        # Compiled models are cached next to the source model as .mxr so the
        # expensive migraphx compile only happens once.
        mxr_path = os.path.splitext(path)[0] + ".mxr"
        if path.endswith(".mxr"):
            logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}")
            self.model = migraphx.load(mxr_path)
        elif os.path.exists(mxr_path):
            # A previously compiled cache exists for this model; reuse it.
            logger.info(f"AMD/ROCm: loading parsed model from {mxr_path}")
            self.model = migraphx.load(mxr_path)
        else:
            logger.info(f"AMD/ROCm: loading model from {path}")
            if (
                path.endswith(".tf")
                or path.endswith(".tf2")
                or path.endswith(".tflite")
            ):
                # untested
                self.model = migraphx.parse_tf(path)
            else:
                self.model = migraphx.parse_onnx(path)
            logger.info("AMD/ROCm: compiling the model")
            self.model.compile(
                migraphx.get_target("gpu"), offload_copy=True, fast_math=True
            )
            # Persist the compiled model so future startups skip compilation.
            logger.info(f"AMD/ROCm: saving parsed model into {mxr_path}")
            os.makedirs(os.path.join(MODEL_CACHE_DIR, "rocm"), exist_ok=True)
            migraphx.save(self.model, mxr_path)
        logger.info("AMD/ROCm: model loaded")

    def detect_raw(self, tensor_input):
        """Run inference on one input tensor and return detections.

        Returns a (20, 6) float32 array of rows
        [class_id, confidence, y_min, x_min, y_max, x_max] with coordinates
        normalized by the configured model height/width.
        """
        model_input_name = self.model.get_parameter_names()[0]
        model_input_shape = tuple(
            self.model.get_parameter_shapes()[model_input_name].lens()
        )
        # Resize/arrange the frame into the model's NCHW input shape, swapping
        # R/B channels when the model expects BGR input.
        tensor_input = cv2.dnn.blobFromImage(
            tensor_input[0],
            1.0,
            (model_input_shape[3], model_input_shape[2]),
            None,
            swapRB=self.rocm_model_px == PixelFormatEnum.bgr,
        ).astype(np.uint8)
        detector_result = self.model.run({model_input_name: tensor_input})[0]
        # Wrap the result buffer as a numpy array without copying.
        addr = ctypes.cast(detector_result.data_ptr(), ctypes.POINTER(ctypes.c_float))
        tensor_output = np.ctypeslib.as_array(
            addr, shape=detector_result.get_shape().lens()
        )

        if self.rocm_model_type == ModelTypeEnum.yolonas:
            predictions = tensor_output

            # Frigate consumes at most 20 detections per frame.
            detections = np.zeros((20, 6), np.float32)
            for i, prediction in enumerate(predictions):
                if i == 20:
                    break
                (_, x_min, y_min, x_max, y_max, confidence, class_id) = prediction
                # when running in GPU mode, empty predictions in the output have class_id of -1
                if class_id < 0:
                    break
                detections[i] = [
                    class_id,
                    confidence,
                    y_min / self.h,
                    x_min / self.w,
                    y_max / self.h,
                    x_max / self.w,
                ]
            return detections
        else:
            raise Exception(
                f"{self.rocm_model_type} is currently not supported for rocm. See the docs for more info on supported models."
            )

View File

@ -293,6 +293,7 @@ class EmbeddingMaintainer(threading.Thread):
# Embed the thumbnail # Embed the thumbnail
self._embed_thumbnail(event_id, thumbnail) self._embed_thumbnail(event_id, thumbnail)
# Run GenAI
if ( if (
camera_config.genai.enabled camera_config.genai.enabled
and self.genai_client is not None and self.genai_client is not None
@ -306,82 +307,7 @@ class EmbeddingMaintainer(threading.Thread):
or set(event.zones) & set(camera_config.genai.required_zones) or set(event.zones) & set(camera_config.genai.required_zones)
) )
): ):
if event.has_snapshot and camera_config.genai.use_snapshot: self._process_genai_description(event, camera_config, thumbnail)
with open(
os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"),
"rb",
) as image_file:
snapshot_image = image_file.read()
img = cv2.imdecode(
np.frombuffer(snapshot_image, dtype=np.int8),
cv2.IMREAD_COLOR,
)
# crop snapshot based on region before sending off to genai
height, width = img.shape[:2]
x1_rel, y1_rel, width_rel, height_rel = event.data["region"]
x1, y1 = int(x1_rel * width), int(y1_rel * height)
cropped_image = img[
y1 : y1 + int(height_rel * height),
x1 : x1 + int(width_rel * width),
]
_, buffer = cv2.imencode(".jpg", cropped_image)
snapshot_image = buffer.tobytes()
num_thumbnails = len(self.tracked_events.get(event_id, []))
embed_image = (
[snapshot_image]
if event.has_snapshot and camera_config.genai.use_snapshot
else (
[
data["thumbnail"]
for data in self.tracked_events[event_id]
]
if num_thumbnails > 0
else [thumbnail]
)
)
if camera_config.genai.debug_save_thumbnails and num_thumbnails > 0:
logger.debug(
f"Saving {num_thumbnails} thumbnails for event {event.id}"
)
Path(
os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")
).mkdir(parents=True, exist_ok=True)
for idx, data in enumerate(self.tracked_events[event_id], 1):
jpg_bytes: bytes = data["thumbnail"]
if jpg_bytes is None:
logger.warning(
f"Unable to save thumbnail {idx} for {event.id}."
)
else:
with open(
os.path.join(
CLIPS_DIR,
f"genai-requests/{event.id}/{idx}.jpg",
),
"wb",
) as j:
j.write(jpg_bytes)
# Generate the description. Call happens in a thread since it is network bound.
threading.Thread(
target=self._embed_description,
name=f"_embed_description_{event.id}",
daemon=True,
args=(
event,
embed_image,
),
).start()
# Delete tracked events based on the event_id # Delete tracked events based on the event_id
if event_id in self.tracked_events: if event_id in self.tracked_events:
@ -440,7 +366,58 @@ class EmbeddingMaintainer(threading.Thread):
self.embeddings.embed_thumbnail(event_id, thumbnail) self.embeddings.embed_thumbnail(event_id, thumbnail)
def _embed_description(self, event: Event, thumbnails: list[bytes]) -> None: def _process_genai_description(self, event, camera_config, thumbnail) -> None:
if event.has_snapshot and camera_config.genai.use_snapshot:
snapshot_image = self._read_and_crop_snapshot(event, camera_config)
if not snapshot_image:
return
num_thumbnails = len(self.tracked_events.get(event.id, []))
embed_image = (
[snapshot_image]
if event.has_snapshot and camera_config.genai.use_snapshot
else (
[data["thumbnail"] for data in self.tracked_events[event.id]]
if num_thumbnails > 0
else [thumbnail]
)
)
if camera_config.genai.debug_save_thumbnails and num_thumbnails > 0:
logger.debug(f"Saving {num_thumbnails} thumbnails for event {event.id}")
Path(os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")).mkdir(
parents=True, exist_ok=True
)
for idx, data in enumerate(self.tracked_events[event.id], 1):
jpg_bytes: bytes = data["thumbnail"]
if jpg_bytes is None:
logger.warning(f"Unable to save thumbnail {idx} for {event.id}.")
else:
with open(
os.path.join(
CLIPS_DIR,
f"genai-requests/{event.id}/{idx}.jpg",
),
"wb",
) as j:
j.write(jpg_bytes)
# Generate the description. Call happens in a thread since it is network bound.
threading.Thread(
target=self._genai_embed_description,
name=f"_genai_embed_description_{event.id}",
daemon=True,
args=(
event,
embed_image,
),
).start()
def _genai_embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
"""Embed the description for an event.""" """Embed the description for an event."""
camera_config = self.config.cameras[event.camera] camera_config = self.config.cameras[event.camera]
@ -473,6 +450,45 @@ class EmbeddingMaintainer(threading.Thread):
description, description,
) )
def _read_and_crop_snapshot(self, event: Event, camera_config) -> bytes | None:
    """Read, decode, and crop the snapshot image.

    Returns the region-cropped snapshot re-encoded as JPEG bytes, or None
    when the snapshot file is missing or cannot be decoded/cropped.
    """
    snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg")

    if not os.path.isfile(snapshot_file):
        logger.error(
            f"Cannot load snapshot for {event.id}, file not found: {snapshot_file}"
        )
        return None

    try:
        with open(snapshot_file, "rb") as image_file:
            snapshot_image = image_file.read()

        img = cv2.imdecode(
            np.frombuffer(snapshot_image, dtype=np.int8),
            cv2.IMREAD_COLOR,
        )

        # imdecode returns None (it does not raise) on corrupt/unreadable
        # data; bail out explicitly instead of failing on img.shape below.
        if img is None:
            logger.error(
                f"Cannot decode snapshot for {event.id}: {snapshot_file}"
            )
            return None

        # Crop snapshot based on region
        # provide full image if region doesn't exist (manual events)
        height, width = img.shape[:2]
        x1_rel, y1_rel, width_rel, height_rel = event.data.get(
            "region", [0, 0, 1, 1]
        )
        x1, y1 = int(x1_rel * width), int(y1_rel * height)

        cropped_image = img[
            y1 : y1 + int(height_rel * height),
            x1 : x1 + int(width_rel * width),
        ]

        _, buffer = cv2.imencode(".jpg", cropped_image)
        return buffer.tobytes()
    except Exception:
        # Best-effort behavior is kept (callers handle None), but log the
        # failure instead of silently swallowing it.
        logger.exception(f"Unable to crop snapshot for {event.id}")
        return None
def handle_regenerate_description(self, event_id: str, source: str) -> None: def handle_regenerate_description(self, event_id: str, source: str) -> None:
try: try:
event: Event = Event.get(Event.id == event_id) event: Event = Event.get(Event.id == event_id)
@ -492,34 +508,10 @@ class EmbeddingMaintainer(threading.Thread):
) )
if event.has_snapshot and source == "snapshot": if event.has_snapshot and source == "snapshot":
snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg") snapshot_image = self._read_and_crop_snapshot(event, camera_config)
if not snapshot_image:
if not os.path.isfile(snapshot_file):
logger.error(
f"Cannot regenerate description for {event.id}, snapshot file not found: {snapshot_file}"
)
return return
with open(snapshot_file, "rb") as image_file:
snapshot_image = image_file.read()
img = cv2.imdecode(
np.frombuffer(snapshot_image, dtype=np.int8), cv2.IMREAD_COLOR
)
# crop snapshot based on region before sending off to genai
# provide full image if region doesn't exist (manual events)
region = event.data.get("region", [0, 0, 1, 1])
height, width = img.shape[:2]
x1_rel, y1_rel, width_rel, height_rel = region
x1, y1 = int(x1_rel * width), int(y1_rel * height)
cropped_image = img[
y1 : y1 + int(height_rel * height), x1 : x1 + int(width_rel * width)
]
_, buffer = cv2.imencode(".jpg", cropped_image)
snapshot_image = buffer.tobytes()
embed_image = ( embed_image = (
[snapshot_image] [snapshot_image]
if event.has_snapshot and source == "snapshot" if event.has_snapshot and source == "snapshot"
@ -530,4 +522,4 @@ class EmbeddingMaintainer(threading.Thread):
) )
) )
self._embed_description(event, embed_image) self._genai_embed_description(event, embed_image)

View File

@ -26,23 +26,30 @@ class OpenAIClient(GenAIClient):
def _send(self, prompt: str, images: list[bytes]) -> Optional[str]: def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
"""Submit a request to OpenAI.""" """Submit a request to OpenAI."""
encoded_images = [base64.b64encode(image).decode("utf-8") for image in images] encoded_images = [base64.b64encode(image).decode("utf-8") for image in images]
messages_content = []
for image in encoded_images:
messages_content.append(
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{image}",
"detail": "low",
},
}
)
messages_content.append(
{
"type": "text",
"text": prompt,
}
)
try: try:
result = self.provider.chat.completions.create( result = self.provider.chat.completions.create(
model=self.genai_config.model, model=self.genai_config.model,
messages=[ messages=[
{ {
"role": "user", "role": "user",
"content": [ "content": messages_content,
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{image}",
"detail": "low",
},
}
for image in encoded_images
]
+ [prompt],
}, },
], ],
timeout=self.timeout, timeout=self.timeout,

View File

@ -17,7 +17,6 @@ from frigate.detectors.detector_config import (
InputDTypeEnum, InputDTypeEnum,
InputTensorEnum, InputTensorEnum,
) )
from frigate.detectors.plugins.rocm import DETECTOR_KEY as ROCM_DETECTOR_KEY
from frigate.util.builtin import EventsPerSecond, load_labels from frigate.util.builtin import EventsPerSecond, load_labels
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
from frigate.util.services import listen from frigate.util.services import listen
@ -52,13 +51,7 @@ class LocalObjectDetector(ObjectDetector):
self.labels = load_labels(labels) self.labels = load_labels(labels)
if detector_config: if detector_config:
if detector_config.type == ROCM_DETECTOR_KEY: self.input_transform = tensor_transform(detector_config.model.input_tensor)
# ROCm requires NHWC as input
self.input_transform = None
else:
self.input_transform = tensor_transform(
detector_config.model.input_tensor
)
self.dtype = detector_config.model.input_dtype self.dtype = detector_config.model.input_dtype
else: else:

View File

@ -10,6 +10,7 @@ from typing import Callable, Optional
import cv2 import cv2
import numpy as np import numpy as np
from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
from frigate.comms.dispatcher import Dispatcher from frigate.comms.dispatcher import Dispatcher
from frigate.comms.events_updater import EventEndSubscriber, EventUpdatePublisher from frigate.comms.events_updater import EventEndSubscriber, EventUpdatePublisher
@ -61,6 +62,7 @@ class CameraState:
self.previous_frame_id = None self.previous_frame_id = None
self.callbacks = defaultdict(list) self.callbacks = defaultdict(list)
self.ptz_autotracker_thread = ptz_autotracker_thread self.ptz_autotracker_thread = ptz_autotracker_thread
self.prev_enabled = self.camera_config.enabled
def get_current_frame(self, draw_options={}): def get_current_frame(self, draw_options={}):
with self.current_frame_lock: with self.current_frame_lock:
@ -310,6 +312,7 @@ class CameraState:
# TODO: can i switch to looking this up and only changing when an event ends? # TODO: can i switch to looking this up and only changing when an event ends?
# maintain best objects # maintain best objects
camera_activity: dict[str, list[any]] = { camera_activity: dict[str, list[any]] = {
"enabled": True,
"motion": len(motion_boxes) > 0, "motion": len(motion_boxes) > 0,
"objects": [], "objects": [],
} }
@ -437,6 +440,11 @@ class TrackedObjectProcessor(threading.Thread):
self.last_motion_detected: dict[str, float] = {} self.last_motion_detected: dict[str, float] = {}
self.ptz_autotracker_thread = ptz_autotracker_thread self.ptz_autotracker_thread = ptz_autotracker_thread
self.enabled_subscribers = {
camera: ConfigSubscriber(f"config/enabled/{camera}", True)
for camera in config.cameras.keys()
}
self.requestor = InterProcessRequestor() self.requestor = InterProcessRequestor()
self.detection_publisher = DetectionPublisher(DetectionTypeEnum.video) self.detection_publisher = DetectionPublisher(DetectionTypeEnum.video)
self.event_sender = EventUpdatePublisher() self.event_sender = EventUpdatePublisher()
@ -679,8 +687,53 @@ class TrackedObjectProcessor(threading.Thread):
"""Returns the latest frame time for a given camera.""" """Returns the latest frame time for a given camera."""
return self.camera_states[camera].current_frame_time return self.camera_states[camera].current_frame_time
def force_end_all_events(self, camera: str, camera_state: CameraState):
    """Ends all active events on camera when disabling."""
    last_frame_name = camera_state.previous_frame_id

    # Iterate over a snapshot since end callbacks may mutate tracked_objects.
    for obj_id, obj in list(camera_state.tracked_objects.items()):
        # Objects that already carry an end_time are finished; skip them.
        if "end_time" in obj.obj_data:
            continue

        logger.debug(f"Camera {camera} disabled, ending active event {obj_id}")
        obj.obj_data["end_time"] = datetime.datetime.now().timestamp()

        # Notify end callbacks for each forcibly ended object.
        for callback in camera_state.callbacks["end"]:
            callback(camera, obj, last_frame_name)

    # Report the camera as disabled with no motion and no objects.
    for callback in camera_state.callbacks["camera_activity"]:
        callback(camera, {"enabled": False, "motion": 0, "objects": []})
def _get_enabled_state(self, camera: str) -> bool:
    """Return the camera's current enabled state, applying any pending update.

    Drains the latest config/enabled/<camera> message (if one arrived),
    syncs it into the in-memory camera config, and seeds the camera state's
    prev_enabled the first time a value is seen so enable/disable
    transitions can be detected in run().
    """
    _, config_data = self.enabled_subscribers[camera].check_for_update()

    if config_data:
        self.config.cameras[camera].enabled = config_data.enabled

        # Seed prev_enabled if it has not been set yet.
        if self.camera_states[camera].prev_enabled is None:
            self.camera_states[camera].prev_enabled = config_data.enabled

    return self.config.cameras[camera].enabled
def run(self): def run(self):
while not self.stop_event.is_set(): while not self.stop_event.is_set():
for camera, config in self.config.cameras.items():
if not config.enabled_in_config:
continue
current_enabled = self._get_enabled_state(camera)
camera_state = self.camera_states[camera]
if camera_state.prev_enabled and not current_enabled:
logger.debug(f"Not processing objects for disabled camera {camera}")
self.force_end_all_events(camera, camera_state)
camera_state.prev_enabled = current_enabled
if not current_enabled:
continue
try: try:
( (
camera, camera,
@ -693,6 +746,10 @@ class TrackedObjectProcessor(threading.Thread):
except queue.Empty: except queue.Empty:
continue continue
if not self._get_enabled_state(camera):
logger.debug(f"Camera {camera} disabled, skipping update")
continue
camera_state = self.camera_states[camera] camera_state = self.camera_states[camera]
camera_state.update( camera_state.update(
@ -735,4 +792,7 @@ class TrackedObjectProcessor(threading.Thread):
self.detection_publisher.stop() self.detection_publisher.stop()
self.event_sender.stop() self.event_sender.stop()
self.event_end_subscriber.stop() self.event_end_subscriber.stop()
for subscriber in self.enabled_subscribers.values():
subscriber.stop()
logger.info("Exiting object processor...") logger.info("Exiting object processor...")

View File

@ -10,6 +10,7 @@ import queue
import subprocess as sp import subprocess as sp
import threading import threading
import traceback import traceback
from typing import Optional
import cv2 import cv2
import numpy as np import numpy as np
@ -280,6 +281,12 @@ class BirdsEyeFrameManager:
self.stop_event = stop_event self.stop_event = stop_event
self.inactivity_threshold = config.birdseye.inactivity_threshold self.inactivity_threshold = config.birdseye.inactivity_threshold
self.enabled_subscribers = {
cam: ConfigSubscriber(f"config/enabled/{cam}", True)
for cam in config.cameras.keys()
if config.cameras[cam].enabled_in_config
}
if config.birdseye.layout.max_cameras: if config.birdseye.layout.max_cameras:
self.last_refresh_time = 0 self.last_refresh_time = 0
@ -380,8 +387,21 @@ class BirdsEyeFrameManager:
if mode == BirdseyeModeEnum.objects and object_box_count > 0: if mode == BirdseyeModeEnum.objects and object_box_count > 0:
return True return True
def update_frame(self, frame: np.ndarray): def _get_enabled_state(self, camera: str) -> bool:
"""Update to a new frame for birdseye.""" """Fetch the latest enabled state for a camera from ZMQ."""
_, config_data = self.enabled_subscribers[camera].check_for_update()
if config_data:
self.config.cameras[camera].enabled = config_data.enabled
return config_data.enabled
return self.config.cameras[camera].enabled
def update_frame(self, frame: Optional[np.ndarray] = None) -> bool:
"""
Update birdseye, optionally with a new frame.
When no frame is passed, check the layout and update for any disabled cameras.
"""
# determine how many cameras are tracking objects within the last inactivity_threshold seconds # determine how many cameras are tracking objects within the last inactivity_threshold seconds
active_cameras: set[str] = set( active_cameras: set[str] = set(
@ -389,11 +409,14 @@ class BirdsEyeFrameManager:
cam cam
for cam, cam_data in self.cameras.items() for cam, cam_data in self.cameras.items()
if self.config.cameras[cam].birdseye.enabled if self.config.cameras[cam].birdseye.enabled
and self.config.cameras[cam].enabled_in_config
and self._get_enabled_state(cam)
and cam_data["last_active_frame"] > 0 and cam_data["last_active_frame"] > 0
and cam_data["current_frame_time"] - cam_data["last_active_frame"] and cam_data["current_frame_time"] - cam_data["last_active_frame"]
< self.inactivity_threshold < self.inactivity_threshold
] ]
) )
logger.debug(f"Active cameras: {active_cameras}")
max_cameras = self.config.birdseye.layout.max_cameras max_cameras = self.config.birdseye.layout.max_cameras
max_camera_refresh = False max_camera_refresh = False
@ -411,118 +434,125 @@ class BirdsEyeFrameManager:
- self.cameras[active_camera]["last_active_frame"] - self.cameras[active_camera]["last_active_frame"]
), ),
) )
active_cameras = limited_active_cameras[ active_cameras = limited_active_cameras[:max_cameras]
: self.config.birdseye.layout.max_cameras
]
max_camera_refresh = True max_camera_refresh = True
self.last_refresh_time = now self.last_refresh_time = now
# if there are no active cameras # Track if the frame changes
frame_changed = False
# If no active cameras and layout is already empty, no update needed
if len(active_cameras) == 0: if len(active_cameras) == 0:
# if the layout is already cleared # if the layout is already cleared
if len(self.camera_layout) == 0: if len(self.camera_layout) == 0:
return False return False
# if the layout needs to be cleared # if the layout needs to be cleared
else: self.camera_layout = []
self.camera_layout = [] self.active_cameras = set()
self.active_cameras = set()
self.clear_frame()
return True
# check if we need to reset the layout because there is a different number of cameras
if len(self.active_cameras) - len(active_cameras) == 0:
if len(self.active_cameras) == 1 and self.active_cameras != active_cameras:
reset_layout = True
elif max_camera_refresh:
reset_layout = True
else:
reset_layout = False
else:
reset_layout = True
# reset the layout if it needs to be different
if reset_layout:
logger.debug("Added new cameras, resetting layout...")
self.clear_frame() self.clear_frame()
self.active_cameras = active_cameras frame_changed = True
else:
# this also converts added_cameras from a set to a list since we need # Determine if layout needs resetting
# to pop elements in order if len(self.active_cameras) - len(active_cameras) == 0:
active_cameras_to_add = sorted( if (
active_cameras, len(self.active_cameras) == 1
# sort cameras by order and by name if the order is the same and self.active_cameras != active_cameras
key=lambda active_camera: ( ):
self.config.cameras[active_camera].birdseye.order, reset_layout = True
active_camera, elif max_camera_refresh:
), reset_layout = True
)
if len(active_cameras) == 1:
# show single camera as fullscreen
camera = active_cameras_to_add[0]
camera_dims = self.cameras[camera]["dimensions"].copy()
scaled_width = int(self.canvas.height * camera_dims[0] / camera_dims[1])
# center camera view in canvas and ensure that it fits
if scaled_width < self.canvas.width:
coefficient = 1
x_offset = int((self.canvas.width - scaled_width) / 2)
else: else:
coefficient = self.canvas.width / scaled_width reset_layout = False
x_offset = int(
(self.canvas.width - (scaled_width * coefficient)) / 2
)
self.camera_layout = [
[
(
camera,
(
x_offset,
0,
int(scaled_width * coefficient),
int(self.canvas.height * coefficient),
),
)
]
]
else: else:
# calculate optimal layout reset_layout = True
coefficient = self.canvas.get_coefficient(len(active_cameras))
calculating = True
# decrease scaling coefficient until height of all cameras can fit into the birdseye canvas if reset_layout:
while calculating: logger.debug("Resetting Birdseye layout...")
if self.stop_event.is_set(): self.clear_frame()
return self.active_cameras = active_cameras
layout_candidate = self.calculate_layout( # this also converts added_cameras from a set to a list since we need
active_cameras_to_add, # to pop elements in order
coefficient, active_cameras_to_add = sorted(
active_cameras,
# sort cameras by order and by name if the order is the same
key=lambda active_camera: (
self.config.cameras[active_camera].birdseye.order,
active_camera,
),
)
if len(active_cameras) == 1:
# show single camera as fullscreen
camera = active_cameras_to_add[0]
camera_dims = self.cameras[camera]["dimensions"].copy()
scaled_width = int(
self.canvas.height * camera_dims[0] / camera_dims[1]
) )
if not layout_candidate: # center camera view in canvas and ensure that it fits
if coefficient < 10: if scaled_width < self.canvas.width:
coefficient += 1 coefficient = 1
continue x_offset = int((self.canvas.width - scaled_width) / 2)
else: else:
logger.error("Error finding appropriate birdseye layout") coefficient = self.canvas.width / scaled_width
x_offset = int(
(self.canvas.width - (scaled_width * coefficient)) / 2
)
self.camera_layout = [
[
(
camera,
(
x_offset,
0,
int(scaled_width * coefficient),
int(self.canvas.height * coefficient),
),
)
]
]
else:
# calculate optimal layout
coefficient = self.canvas.get_coefficient(len(active_cameras))
calculating = True
# decrease scaling coefficient until height of all cameras can fit into the birdseye canvas
while calculating:
if self.stop_event.is_set():
return return
calculating = False layout_candidate = self.calculate_layout(
self.canvas.set_coefficient(len(active_cameras), coefficient) active_cameras_to_add, coefficient
)
self.camera_layout = layout_candidate if not layout_candidate:
if coefficient < 10:
coefficient += 1
continue
else:
logger.error(
"Error finding appropriate birdseye layout"
)
return
calculating = False
self.canvas.set_coefficient(len(active_cameras), coefficient)
for row in self.camera_layout: self.camera_layout = layout_candidate
for position in row: frame_changed = True
self.copy_to_position(
position[1],
position[0],
self.cameras[position[0]]["current_frame"],
)
return True # Draw the layout
for row in self.camera_layout:
for position in row:
src_frame = self.cameras[position[0]]["current_frame"]
if src_frame is None or src_frame.size == 0:
logger.debug(f"Skipping invalid frame for {position[0]}")
continue
self.copy_to_position(position[1], position[0], src_frame)
if frame is not None: # Frame presence indicates a potential change
frame_changed = True
return frame_changed
def calculate_layout( def calculate_layout(
self, self,
@ -677,18 +707,17 @@ class BirdsEyeFrameManager:
) -> bool: ) -> bool:
# don't process if birdseye is disabled for this camera # don't process if birdseye is disabled for this camera
camera_config = self.config.cameras[camera].birdseye camera_config = self.config.cameras[camera].birdseye
force_update = False
if not camera_config.enabled:
return False
# disabling birdseye is a little tricky # disabling birdseye is a little tricky
if not camera_config.enabled: if not self._get_enabled_state(camera):
# if we've rendered a frame (we have a value for last_active_frame) # if we've rendered a frame (we have a value for last_active_frame)
# then we need to set it to zero # then we need to set it to zero
if self.cameras[camera]["last_active_frame"] > 0: if self.cameras[camera]["last_active_frame"] > 0:
self.cameras[camera]["last_active_frame"] = 0 self.cameras[camera]["last_active_frame"] = 0
force_update = True
return False else:
return False
# update the last active frame for the camera # update the last active frame for the camera
self.cameras[camera]["current_frame"] = frame.copy() self.cameras[camera]["current_frame"] = frame.copy()
@ -699,7 +728,7 @@ class BirdsEyeFrameManager:
now = datetime.datetime.now().timestamp() now = datetime.datetime.now().timestamp()
# limit output to 10 fps # limit output to 10 fps
if (now - self.last_output_time) < 1 / 10: if not force_update and (now - self.last_output_time) < 1 / 10:
return False return False
try: try:
@ -711,11 +740,16 @@ class BirdsEyeFrameManager:
print(traceback.format_exc()) print(traceback.format_exc())
# if the frame was updated or the fps is too low, send frame # if the frame was updated or the fps is too low, send frame
if updated_frame or (now - self.last_output_time) > 1: if force_update or updated_frame or (now - self.last_output_time) > 1:
self.last_output_time = now self.last_output_time = now
return True return True
return False return False
def stop(self):
"""Clean up subscribers when stopping."""
for subscriber in self.enabled_subscribers.values():
subscriber.stop()
class Birdseye: class Birdseye:
def __init__( def __init__(
@ -743,6 +777,7 @@ class Birdseye:
self.birdseye_manager = BirdsEyeFrameManager(config, stop_event) self.birdseye_manager = BirdsEyeFrameManager(config, stop_event)
self.config_subscriber = ConfigSubscriber("config/birdseye/") self.config_subscriber = ConfigSubscriber("config/birdseye/")
self.frame_manager = SharedMemoryFrameManager() self.frame_manager = SharedMemoryFrameManager()
self.stop_event = stop_event
if config.birdseye.restream: if config.birdseye.restream:
self.birdseye_buffer = self.frame_manager.create( self.birdseye_buffer = self.frame_manager.create(
@ -753,6 +788,22 @@ class Birdseye:
self.converter.start() self.converter.start()
self.broadcaster.start() self.broadcaster.start()
def __send_new_frame(self) -> None:
frame_bytes = self.birdseye_manager.frame.tobytes()
if self.config.birdseye.restream:
self.birdseye_buffer[:] = frame_bytes
try:
self.input.put_nowait(frame_bytes)
except queue.Full:
# drop frames if queue is full
pass
def all_cameras_disabled(self) -> None:
self.birdseye_manager.clear_frame()
self.__send_new_frame()
def write_data( def write_data(
self, self,
camera: str, camera: str,
@ -781,18 +832,10 @@ class Birdseye:
frame_time, frame_time,
frame, frame,
): ):
frame_bytes = self.birdseye_manager.frame.tobytes() self.__send_new_frame()
if self.config.birdseye.restream:
self.birdseye_buffer[:] = frame_bytes
try:
self.input.put_nowait(frame_bytes)
except queue.Full:
# drop frames if queue is full
pass
def stop(self) -> None: def stop(self) -> None:
self.config_subscriber.stop() self.config_subscriber.stop()
self.birdseye_manager.stop()
self.converter.join() self.converter.join()
self.broadcaster.join() self.broadcaster.join()

View File

@ -1,12 +1,12 @@
"""Handle outputting raw frigate frames""" """Handle outputting raw frigate frames"""
import datetime
import logging import logging
import multiprocessing as mp import multiprocessing as mp
import os import os
import shutil import shutil
import signal import signal
import threading import threading
from typing import Optional
from wsgiref.simple_server import make_server from wsgiref.simple_server import make_server
from setproctitle import setproctitle from setproctitle import setproctitle
@ -17,6 +17,7 @@ from ws4py.server.wsgirefserver import (
) )
from ws4py.server.wsgiutils import WebSocketWSGIApplication from ws4py.server.wsgiutils import WebSocketWSGIApplication
from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
from frigate.comms.ws import WebSocket from frigate.comms.ws import WebSocket
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
@ -24,11 +25,43 @@ from frigate.const import CACHE_DIR, CLIPS_DIR
from frigate.output.birdseye import Birdseye from frigate.output.birdseye import Birdseye
from frigate.output.camera import JsmpegCamera from frigate.output.camera import JsmpegCamera
from frigate.output.preview import PreviewRecorder from frigate.output.preview import PreviewRecorder
from frigate.util.image import SharedMemoryFrameManager from frigate.util.image import SharedMemoryFrameManager, get_blank_yuv_frame
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def check_disabled_camera_update(
config: FrigateConfig,
birdseye: Birdseye | None,
previews: dict[str, PreviewRecorder],
write_times: dict[str, float],
) -> None:
"""Check if camera is disabled / offline and needs an update."""
now = datetime.datetime.now().timestamp()
has_enabled_camera = False
for camera, last_update in write_times.items():
if config.cameras[camera].enabled:
has_enabled_camera = True
if now - last_update > 1:
# last camera update was more than one second ago
# need to send empty data to updaters because current
# frame is now out of date
frame = get_blank_yuv_frame(
config.cameras[camera].detect.width,
config.cameras[camera].detect.height,
)
if birdseye:
birdseye.write_data(camera, [], [], now, frame)
previews[camera].write_data([], [], now, frame)
if not has_enabled_camera and birdseye:
birdseye.all_cameras_disabled()
def output_frames( def output_frames(
config: FrigateConfig, config: FrigateConfig,
): ):
@ -59,11 +92,18 @@ def output_frames(
detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video) detection_subscriber = DetectionSubscriber(DetectionTypeEnum.video)
enabled_subscribers = {
camera: ConfigSubscriber(f"config/enabled/{camera}", True)
for camera in config.cameras.keys()
if config.cameras[camera].enabled_in_config
}
jsmpeg_cameras: dict[str, JsmpegCamera] = {} jsmpeg_cameras: dict[str, JsmpegCamera] = {}
birdseye: Optional[Birdseye] = None birdseye: Birdseye | None = None
preview_recorders: dict[str, PreviewRecorder] = {} preview_recorders: dict[str, PreviewRecorder] = {}
preview_write_times: dict[str, float] = {} preview_write_times: dict[str, float] = {}
failed_frame_requests: dict[str, int] = {} failed_frame_requests: dict[str, int] = {}
last_disabled_cam_check = datetime.datetime.now().timestamp()
move_preview_frames("cache") move_preview_frames("cache")
@ -80,8 +120,25 @@ def output_frames(
websocket_thread.start() websocket_thread.start()
def get_enabled_state(camera: str) -> bool:
_, config_data = enabled_subscribers[camera].check_for_update()
if config_data:
config.cameras[camera].enabled = config_data.enabled
return config_data.enabled
return config.cameras[camera].enabled
while not stop_event.is_set(): while not stop_event.is_set():
(topic, data) = detection_subscriber.check_for_update(timeout=1) (topic, data) = detection_subscriber.check_for_update(timeout=1)
now = datetime.datetime.now().timestamp()
if now - last_disabled_cam_check > 5:
# check disabled cameras every 5 seconds
last_disabled_cam_check = now
check_disabled_camera_update(
config, birdseye, preview_recorders, preview_write_times
)
if not topic: if not topic:
continue continue
@ -95,6 +152,9 @@ def output_frames(
_, _,
) = data ) = data
if not get_enabled_state(camera):
continue
frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv) frame = frame_manager.get(frame_name, config.cameras[camera].frame_shape_yuv)
if frame is None: if frame is None:
@ -134,23 +194,10 @@ def output_frames(
) )
# send frames for low fps recording # send frames for low fps recording
generated_preview = preview_recorders[camera].write_data( preview_recorders[camera].write_data(
current_tracked_objects, motion_boxes, frame_time, frame current_tracked_objects, motion_boxes, frame_time, frame
) )
preview_write_times[camera] = frame_time preview_write_times[camera] = frame_time
# if another camera generated a preview,
# check for any cameras that are currently offline
# and need to generate a preview
if generated_preview:
logger.debug(
"Checking for offline cameras because another camera generated a preview."
)
for camera, time in preview_write_times.copy().items():
if time != 0 and frame_time - time > 10:
preview_recorders[camera].flag_offline(frame_time)
preview_write_times[camera] = frame_time
frame_manager.close(frame_name) frame_manager.close(frame_name)
move_preview_frames("clips") move_preview_frames("clips")
@ -184,6 +231,9 @@ def output_frames(
if birdseye is not None: if birdseye is not None:
birdseye.stop() birdseye.stop()
for subscriber in enabled_subscribers.values():
subscriber.stop()
websocket_server.manager.close_all() websocket_server.manager.close_all()
websocket_server.manager.stop() websocket_server.manager.stop()
websocket_server.manager.join() websocket_server.manager.join()

View File

@ -632,6 +632,22 @@ def copy_yuv_to_position(
) )
def get_blank_yuv_frame(width: int, height: int) -> np.ndarray:
"""Creates a black YUV 4:2:0 frame."""
yuv_height = height * 3 // 2
yuv_frame = np.zeros((yuv_height, width), dtype=np.uint8)
uv_height = height // 2
# The U and V planes are stored after the Y plane.
u_start = height # U plane starts right after Y plane
v_start = u_start + uv_height // 2 # V plane starts after U plane
yuv_frame[u_start : u_start + uv_height, :width] = 128
yuv_frame[v_start : v_start + uv_height, :width] = 128
return yuv_frame
def yuv_region_2_yuv(frame, region): def yuv_region_2_yuv(frame, region):
try: try:
# TODO: does this copy the numpy array? # TODO: does this copy the numpy array?

View File

@ -108,8 +108,20 @@ def capture_frames(
frame_rate.start() frame_rate.start()
skipped_eps = EventsPerSecond() skipped_eps = EventsPerSecond()
skipped_eps.start() skipped_eps.start()
config_subscriber = ConfigSubscriber(f"config/enabled/{config.name}", True)
def get_enabled_state():
"""Fetch the latest enabled state from ZMQ."""
_, config_data = config_subscriber.check_for_update()
if config_data:
return config_data.enabled
return config.enabled
while not stop_event.is_set():
if not get_enabled_state():
logger.debug(f"Stopping capture thread for disabled {config.name}")
break
while True:
fps.value = frame_rate.eps() fps.value = frame_rate.eps()
skipped_fps.value = skipped_eps.eps() skipped_fps.value = skipped_eps.eps()
current_frame.value = datetime.datetime.now().timestamp() current_frame.value = datetime.datetime.now().timestamp()
@ -178,26 +190,37 @@ class CameraWatchdog(threading.Thread):
self.stop_event = stop_event self.stop_event = stop_event
self.sleeptime = self.config.ffmpeg.retry_interval self.sleeptime = self.config.ffmpeg.retry_interval
def run(self): self.config_subscriber = ConfigSubscriber(f"config/enabled/{camera_name}", True)
self.start_ffmpeg_detect() self.was_enabled = self.config.enabled
for c in self.config.ffmpeg_cmds: def _update_enabled_state(self) -> bool:
if "detect" in c["roles"]: """Fetch the latest config and update enabled state."""
continue _, config_data = self.config_subscriber.check_for_update()
logpipe = LogPipe( if config_data:
f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}" enabled = config_data.enabled
) return enabled
self.ffmpeg_other_processes.append( return self.was_enabled if self.was_enabled is not None else self.config.enabled
{
"cmd": c["cmd"], def run(self):
"roles": c["roles"], if self._update_enabled_state():
"logpipe": logpipe, self.start_all_ffmpeg()
"process": start_or_restart_ffmpeg(c["cmd"], self.logger, logpipe),
}
)
time.sleep(self.sleeptime) time.sleep(self.sleeptime)
while not self.stop_event.wait(self.sleeptime): while not self.stop_event.wait(self.sleeptime):
enabled = self._update_enabled_state()
if enabled != self.was_enabled:
if enabled:
self.logger.debug(f"Enabling camera {self.camera_name}")
self.start_all_ffmpeg()
else:
self.logger.debug(f"Disabling camera {self.camera_name}")
self.stop_all_ffmpeg()
self.was_enabled = enabled
continue
if not enabled:
continue
now = datetime.datetime.now().timestamp() now = datetime.datetime.now().timestamp()
if not self.capture_thread.is_alive(): if not self.capture_thread.is_alive():
@ -279,11 +302,9 @@ class CameraWatchdog(threading.Thread):
p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"] p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"]
) )
stop_ffmpeg(self.ffmpeg_detect_process, self.logger) self.stop_all_ffmpeg()
for p in self.ffmpeg_other_processes:
stop_ffmpeg(p["process"], self.logger)
p["logpipe"].close()
self.logpipe.close() self.logpipe.close()
self.config_subscriber.stop()
def start_ffmpeg_detect(self): def start_ffmpeg_detect(self):
ffmpeg_cmd = [ ffmpeg_cmd = [
@ -306,6 +327,43 @@ class CameraWatchdog(threading.Thread):
) )
self.capture_thread.start() self.capture_thread.start()
def start_all_ffmpeg(self):
"""Start all ffmpeg processes (detection and others)."""
logger.debug(f"Starting all ffmpeg processes for {self.camera_name}")
self.start_ffmpeg_detect()
for c in self.config.ffmpeg_cmds:
if "detect" in c["roles"]:
continue
logpipe = LogPipe(
f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}"
)
self.ffmpeg_other_processes.append(
{
"cmd": c["cmd"],
"roles": c["roles"],
"logpipe": logpipe,
"process": start_or_restart_ffmpeg(c["cmd"], self.logger, logpipe),
}
)
def stop_all_ffmpeg(self):
"""Stop all ffmpeg processes (detection and others)."""
logger.debug(f"Stopping all ffmpeg processes for {self.camera_name}")
if self.capture_thread is not None and self.capture_thread.is_alive():
self.capture_thread.join(timeout=5)
if self.capture_thread.is_alive():
self.logger.warning(
f"Capture thread for {self.camera_name} did not stop gracefully."
)
if self.ffmpeg_detect_process is not None:
stop_ffmpeg(self.ffmpeg_detect_process, self.logger)
self.ffmpeg_detect_process = None
for p in self.ffmpeg_other_processes[:]:
if p["process"] is not None:
stop_ffmpeg(p["process"], self.logger)
p["logpipe"].close()
self.ffmpeg_other_processes.clear()
def get_latest_segment_datetime(self, latest_segment: datetime.datetime) -> int: def get_latest_segment_datetime(self, latest_segment: datetime.datetime) -> int:
"""Checks if ffmpeg is still writing recording segments to cache.""" """Checks if ffmpeg is still writing recording segments to cache."""
cache_files = sorted( cache_files = sorted(
@ -539,7 +597,8 @@ def process_frames(
exit_on_empty: bool = False, exit_on_empty: bool = False,
): ):
next_region_update = get_tomorrow_at_time(2) next_region_update = get_tomorrow_at_time(2)
config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}", True) detect_config_subscriber = ConfigSubscriber(f"config/detect/{camera_name}", True)
enabled_config_subscriber = ConfigSubscriber(f"config/enabled/{camera_name}", True)
fps_tracker = EventsPerSecond() fps_tracker = EventsPerSecond()
fps_tracker.start() fps_tracker.start()
@ -549,9 +608,43 @@ def process_frames(
region_min_size = get_min_region_size(model_config) region_min_size = get_min_region_size(model_config)
prev_enabled = None
while not stop_event.is_set(): while not stop_event.is_set():
_, enabled_config = enabled_config_subscriber.check_for_update()
current_enabled = (
enabled_config.enabled
if enabled_config
else (prev_enabled if prev_enabled is not None else True)
)
if prev_enabled is None:
prev_enabled = current_enabled
if prev_enabled and not current_enabled and camera_metrics.frame_queue.empty():
logger.debug(f"Camera {camera_name} disabled, clearing tracked objects")
# Clear norfair's dictionaries
object_tracker.tracked_objects.clear()
object_tracker.disappeared.clear()
object_tracker.stationary_box_history.clear()
object_tracker.positions.clear()
object_tracker.track_id_map.clear()
# Clear internal norfair states
for trackers_by_type in object_tracker.trackers.values():
for tracker in trackers_by_type.values():
tracker.tracked_objects = []
for tracker in object_tracker.default_tracker.values():
tracker.tracked_objects = []
prev_enabled = current_enabled
if not current_enabled:
time.sleep(0.1)
continue
# check for updated detect config # check for updated detect config
_, updated_detect_config = config_subscriber.check_for_update() _, updated_detect_config = detect_config_subscriber.check_for_update()
if updated_detect_config: if updated_detect_config:
detect_config = updated_detect_config detect_config = updated_detect_config
@ -845,4 +938,5 @@ def process_frames(
motion_detector.stop() motion_detector.stop()
requestor.stop() requestor.stop()
config_subscriber.stop() detect_config_subscriber.stop()
enabled_config_subscriber.stop()

View File

@ -181,6 +181,13 @@
"ui.dialog.streaming.debugView": "Debug View", "ui.dialog.streaming.debugView": "Debug View",
"ui.dialog.search.saveSearch": "Save Search",
"ui.dialog.search.saveSearch.desc": "Provide a name for this saved search.",
"ui.dialog.search.saveSearch.placeholder": "Enter a name for your search",
"ui.dialog.search.saveSearch.overwrite": "{{searchName}} already exists. Saving will overwrite the existing value.",
"ui.dialog.search.saveSearch.success": "Search ({{searchName}}) has been saved.",
"ui.stats.ffmpegHighCpuUsage": "{{camera}} has high FFMPEG CPU usage ({{ffmpegAvg}}%)", "ui.stats.ffmpegHighCpuUsage": "{{camera}} has high FFMPEG CPU usage ({{ffmpegAvg}}%)",
"ui.stats.detectHighCpuUsage": "{{camera}} has high detect CPU usage ({{detectAvg}}%)", "ui.stats.detectHighCpuUsage": "{{camera}} has high detect CPU usage ({{detectAvg}}%)",
"ui.stats.healthy": "System is healthy", "ui.stats.healthy": "System is healthy",
@ -289,7 +296,6 @@
"ui.cameraGroup.cameras.desc": "Select cameras for this group.", "ui.cameraGroup.cameras.desc": "Select cameras for this group.",
"ui.cameraGroup.icon": "Icon", "ui.cameraGroup.icon": "Icon",
"ui.cameraGroup.success": "Camera group ({{name}}) has been saved.", "ui.cameraGroup.success": "Camera group ({{name}}) has been saved.",
"ui.cameraGroup.toast.error": "Failed to save config changes: {{error}}",
"ui.cameraGroup.camera.setting": "{{cameraName}} Streaming Settings", "ui.cameraGroup.camera.setting": "{{cameraName}} Streaming Settings",
"ui.cameraGroup.camera.setting.desc": "Change the live streaming options for this camera group's dashboard. <em>These settings are device/browser-specific.</em>", "ui.cameraGroup.camera.setting.desc": "Change the live streaming options for this camera group's dashboard. <em>These settings are device/browser-specific.</em>",
"ui.cameraGroup.camera.setting.audioIsAvailable": "Audio is available for this stream", "ui.cameraGroup.camera.setting.audioIsAvailable": "Audio is available for this stream",
@ -398,12 +404,27 @@
"ui.on": "ON", "ui.on": "ON",
"ui.off": "OFF", "ui.off": "OFF",
"ui.edit": "Edit", "ui.edit": "Edit",
"ui.copyCoordinates": "Copy coordinates",
"ui.delete": "Delete", "ui.delete": "Delete",
"ui.yes": "Yes", "ui.yes": "Yes",
"ui.no": "No", "ui.no": "No",
"ui.download": "Download", "ui.download": "Download",
"ui.info": "Info", "ui.info": "Info",
"ui.toast.save.error": "Failed to save config changes: {{errorMessage}}",
"ui.toast.save.error.noMessage": "Failed to save config changes",
"ui.form.message.polygonDrawing.error.mustBeFinished": "The polygon drawing must be finished before saving.",
"ui.form.message.zoneName.error.mustBeAtLeastTwoCharacters": "Zone name must be at least 2 characters.",
"ui.form.message.zoneName.error.mustNotBeSameWithCamera": "Zone name must not be the name of a camera.",
"ui.form.message.zoneName.error.alreadyExists": "Zone name already exists on this camera.",
"ui.form.message.zoneName.error.mustNotContainPeriod": "Zone name must not contain a period.",
"ui.form.message.zoneName.error.hasIllegalCharacter": "Zone name has an illegal character.",
"ui.form.message.distance.error": "Distance must be greater than or equal to 0.1",
"ui.form.message.distance.error.mustBeFilled": "All distance fields must be filled to use speed estimation.",
"ui.form.message.inertia.error.mustBeAboveZero": "Inertia must be above 0.",
"ui.form.message.loiteringTime.error.mustBeGreaterOrEqualZero": "Loitering time must be greater than or equal to 0.",
"ui.live.documentTitle": "Live - Frigate", "ui.live.documentTitle": "Live - Frigate",
"ui.live.documentTitle.withCamera": "{{camera}} - Live - Frigate", "ui.live.documentTitle.withCamera": "{{camera}} - Live - Frigate",
"ui.live.twoWayTalk.enable": "Enable Two Way Talk", "ui.live.twoWayTalk.enable": "Enable Two Way Talk",
@ -501,12 +522,16 @@
"ui.settingView.exploreSettings.semanticSearch.modelSize.large": "large", "ui.settingView.exploreSettings.semanticSearch.modelSize.large": "large",
"ui.settingView.exploreSettings.semanticSearch.modelSize.small.desc": "Using <em>small</em> employs a quantized version of the model that uses less RAM and runs faster on CPU with a very negligible difference in embedding quality.", "ui.settingView.exploreSettings.semanticSearch.modelSize.small.desc": "Using <em>small</em> employs a quantized version of the model that uses less RAM and runs faster on CPU with a very negligible difference in embedding quality.",
"ui.settingView.exploreSettings.semanticSearch.modelSize.large.desc": "Using <em>large</em> employs the full Jina model and will automatically run on the GPU if applicable.", "ui.settingView.exploreSettings.semanticSearch.modelSize.large.desc": "Using <em>large</em> employs the full Jina model and will automatically run on the GPU if applicable.",
"ui.settingView.exploreSettings.toast.success": "Explore settings have been saved.",
"ui.settingView.cameraSettings": "Camera Settings", "ui.settingView.cameraSettings": "Camera Settings",
"ui.settingView.cameraSettings.streams": "Streams",
"ui.settingView.cameraSettings.streams.desc": "Disabling a camera completely stops Frigate's processing of this camera's streams. Detection, recording, and debugging will be unavailable.<br /> <em>Note: This does not disable go2rtc restreams.</em>",
"ui.settingView.cameraSettings.review": "Review", "ui.settingView.cameraSettings.review": "Review",
"ui.settingView.cameraSettings.review.desc": "Enable/disable alerts and detections for this camera. When disabled, no new review items will be generated.", "ui.settingView.cameraSettings.review.desc": "Enable/disable alerts and detections for this camera. When disabled, no new review items will be generated.",
"ui.settingView.cameraSettings.review.alerts": "Alerts", "ui.settingView.cameraSettings.review.alerts": "Alerts ",
"ui.settingView.cameraSettings.review.detections": "Detections", "ui.settingView.cameraSettings.review.detections": "Detections ",
"ui.settingView.cameraSettings.reviewClassification": "Review Classification", "ui.settingView.cameraSettings.reviewClassification": "Review Classification",
"ui.settingView.cameraSettings.reviewClassification.desc": "Frigate categorizes review items as Alerts and Detections. By default, all <em>person</em> and <em>car</em> objects are considered Alerts. You can refine categorization of your review items by configuring required zones for them.", "ui.settingView.cameraSettings.reviewClassification.desc": "Frigate categorizes review items as Alerts and Detections. By default, all <em>person</em> and <em>car</em> objects are considered Alerts. You can refine categorization of your review items by configuring required zones for them.",
"ui.settingView.cameraSettings.reviewClassification.readTheDocumentation": "Read the Documentation", "ui.settingView.cameraSettings.reviewClassification.readTheDocumentation": "Read the Documentation",
@ -514,6 +539,9 @@
"ui.settingView.cameraSettings.reviewClassification.objectAlertsTips": "All {{alertsLabels}} objects on {{cameraName}} will be shown as Alerts.", "ui.settingView.cameraSettings.reviewClassification.objectAlertsTips": "All {{alertsLabels}} objects on {{cameraName}} will be shown as Alerts.",
"ui.settingView.cameraSettings.reviewClassification.zoneObjectAlertsTips": "All {{alertsLabels}} objects detected in {{zone}} on {{cameraName}} will be shown as Alerts.", "ui.settingView.cameraSettings.reviewClassification.zoneObjectAlertsTips": "All {{alertsLabels}} objects detected in {{zone}} on {{cameraName}} will be shown as Alerts.",
"ui.settingView.cameraSettings.reviewClassification.selectAlertsZones": "Select zones for Alerts", "ui.settingView.cameraSettings.reviewClassification.selectAlertsZones": "Select zones for Alerts",
"ui.settingView.cameraSettings.reviewClassification.selectDetectionsZones": "Select zones for Detections",
"ui.settingView.cameraSettings.reviewClassification.limitDetections": "Limit detections to specific zones",
"ui.settingView.cameraSettings.reviewClassification.toast.success": "Review classification configuration has been saved. Restart Frigate to apply changes.",
"ui.settingView.cameraSettings.reviewClassification.objectDetectionsTips": "All {{detectionsLabels}} objects <em>not classified as Alerts</em> on {{cameraName}} will be shown as Detections.", "ui.settingView.cameraSettings.reviewClassification.objectDetectionsTips": "All {{detectionsLabels}} objects <em>not classified as Alerts</em> on {{cameraName}} will be shown as Detections.",
"ui.settingView.cameraSettings.reviewClassification.zoneObjectDetectionsTips": "All {{detectionsLabels}} objects <em>not classified as Alerts</em> that are detected in {{zone}} on {{cameraName}} will be shown as Detections.", "ui.settingView.cameraSettings.reviewClassification.zoneObjectDetectionsTips": "All {{detectionsLabels}} objects <em>not classified as Alerts</em> that are detected in {{zone}} on {{cameraName}} will be shown as Detections.",
@ -521,29 +549,34 @@
"ui.settingView.cameraSettings.reviewClassification.zoneObjectDetectionsTips.regardlessOfZoneObjectDetectionsTips": "All {{detectionsLabels}} objects <em>not classified as Alerts</em> on {{cameraName}} will be shown as Detections, regardless of zone.", "ui.settingView.cameraSettings.reviewClassification.zoneObjectDetectionsTips.regardlessOfZoneObjectDetectionsTips": "All {{detectionsLabels}} objects <em>not classified as Alerts</em> on {{cameraName}} will be shown as Detections, regardless of zone.",
"ui.settingView.masksAndZonesSettings": "Masks / Zones", "ui.settingView.masksAndZonesSettings": "Masks / Zones",
"ui.settingView.masksAndZonesSettings.zone": "Zones", "ui.settingView.masksAndZonesSettings.filter.all": "All Masks and Zones",
"ui.settingView.masksAndZonesSettings.zone.documentTitle": "Edit Zone - Frigate",
"ui.settingView.masksAndZonesSettings.zone.desc": "Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.", "ui.settingView.masksAndZonesSettings.zones": "Zones",
"ui.settingView.masksAndZonesSettings.zone.desc.documentation": "Documentation", "ui.settingView.masksAndZonesSettings.zones.documentTitle": "Edit Zone - Frigate",
"ui.settingView.masksAndZonesSettings.zone.add": "Add Zone", "ui.settingView.masksAndZonesSettings.zones.desc": "Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",
"ui.settingView.masksAndZonesSettings.zone.edit": "Edit Zone", "ui.settingView.masksAndZonesSettings.zones.desc.documentation": "Documentation",
"ui.settingView.masksAndZonesSettings.zone.point_one": "{{count}} point", "ui.settingView.masksAndZonesSettings.zones.add": "Add Zone",
"ui.settingView.masksAndZonesSettings.zone.point_other": "{{count}} points", "ui.settingView.masksAndZonesSettings.zones.edit": "Edit Zone",
"ui.settingView.masksAndZonesSettings.zone.clickDrawPolygon": "Click to draw a polygon on the image.", "ui.settingView.masksAndZonesSettings.zones.point_one": "{{count}} point",
"ui.settingView.masksAndZonesSettings.zone.name": "Name", "ui.settingView.masksAndZonesSettings.zones.point_other": "{{count}} points",
"ui.settingView.masksAndZonesSettings.zone.name.inputPlaceHolder": "Enter a name...", "ui.settingView.masksAndZonesSettings.zones.clickDrawPolygon": "Click to draw a polygon on the image.",
"ui.settingView.masksAndZonesSettings.zone.name.tips": "Name must be at least 2 characters and must not be the name of a camera or another zone.", "ui.settingView.masksAndZonesSettings.zones.name": "Name",
"ui.settingView.masksAndZonesSettings.zone.inertia": "Inertia", "ui.settingView.masksAndZonesSettings.zones.name.inputPlaceHolder": "Enter a name...",
"ui.settingView.masksAndZonesSettings.zone.inertia.desc": "Specifies how many frames that an object must be in a zone before they are considered in the zone. <em>Default: 3</em>", "ui.settingView.masksAndZonesSettings.zones.name.tips": "Name must be at least 2 characters and must not be the name of a camera or another zone.",
"ui.settingView.masksAndZonesSettings.zone.loiteringTime": "Loitering Time", "ui.settingView.masksAndZonesSettings.zones.inertia": "Inertia",
"ui.settingView.masksAndZonesSettings.zone.loiteringTime.desc": "Sets a minimum amount of time in seconds that the object must be in the zone for it to activate. <em>Default: 0</em>", "ui.settingView.masksAndZonesSettings.zones.inertia.desc": "Specifies how many frames that an object must be in a zone before they are considered in the zone. <em>Default: 3</em>",
"ui.settingView.masksAndZonesSettings.zone.objects": "Objects", "ui.settingView.masksAndZonesSettings.zones.loiteringTime": "Loitering Time",
"ui.settingView.masksAndZonesSettings.zone.objects.desc": "List of objects that apply to this zone.", "ui.settingView.masksAndZonesSettings.zones.loiteringTime.desc": "Sets a minimum amount of time in seconds that the object must be in the zone for it to activate. <em>Default: 0</em>",
"ui.settingView.masksAndZonesSettings.zone.allObjects": "All Objects", "ui.settingView.masksAndZonesSettings.zones.objects": "Objects",
"ui.settingView.masksAndZonesSettings.zone.speedEstimation": "Speed Estimation", "ui.settingView.masksAndZonesSettings.zones.objects.desc": "List of objects that apply to this zone.",
"ui.settingView.masksAndZonesSettings.zone.speedEstimation.desc": "Enable speed estimation for objects in this zone. The zone must have exactly 4 points.", "ui.settingView.masksAndZonesSettings.zones.allObjects": "All Objects",
"ui.settingView.masksAndZonesSettings.zone.speedEstimation.pointLengthError": "Zones with speed estimation must have exactly 4 points.", "ui.settingView.masksAndZonesSettings.zones.speedEstimation": "Speed Estimation",
"ui.settingView.masksAndZonesSettings.zone.speedEstimation.loiteringTimeError": "Zones with loitering times greater than 0 should not be used with speed estimation.", "ui.settingView.masksAndZonesSettings.zones.speedEstimation.desc": "Enable speed estimation for objects in this zone. The zone must have exactly 4 points.",
"ui.settingView.masksAndZonesSettings.zones.speedThreshold": "Speed Threshold ({{unit}})",
"ui.settingView.masksAndZonesSettings.zones.speedThreshold.desc": "Specifies a minimum speed for objects to be considered in this zone.",
"ui.settingView.masksAndZonesSettings.zones.speedThreshold.toast.error.pointLengthError": "Speed estimation has been disabled for this zone. Zones with speed estimation must have exactly 4 points.",
"ui.settingView.masksAndZonesSettings.zones.speedThreshold.toast.error.loiteringTimeError": "Zones with loitering times greater than 0 should not be used with speed estimation.",
"ui.settingView.masksAndZonesSettings.zones.toast.success": "Zone ({{zoneName}}) has been saved. Restart Frigate to apply changes.",
"ui.settingView.masksAndZonesSettings.motionMasks": "Motion Mask", "ui.settingView.masksAndZonesSettings.motionMasks": "Motion Mask",
"ui.settingView.masksAndZonesSettings.motionMasks.documentTitle": "Edit Motion Mask - Frigate", "ui.settingView.masksAndZonesSettings.motionMasks.documentTitle": "Edit Motion Mask - Frigate",
@ -559,6 +592,8 @@
"ui.settingView.masksAndZonesSettings.motionMasks.polygonAreaTooLarge": "The motion mask is covering {{polygonArea}}% of the camera frame. Large motion masks are not recommended.", "ui.settingView.masksAndZonesSettings.motionMasks.polygonAreaTooLarge": "The motion mask is covering {{polygonArea}}% of the camera frame. Large motion masks are not recommended.",
"ui.settingView.masksAndZonesSettings.motionMasks.polygonAreaTooLarge.tips": "Motion masks do not prevent objects from being detected. You should use a required zone instead.", "ui.settingView.masksAndZonesSettings.motionMasks.polygonAreaTooLarge.tips": "Motion masks do not prevent objects from being detected. You should use a required zone instead.",
"ui.settingView.masksAndZonesSettings.motionMasks.polygonAreaTooLarge.documentation": "Read the documentation", "ui.settingView.masksAndZonesSettings.motionMasks.polygonAreaTooLarge.documentation": "Read the documentation",
"ui.settingView.masksAndZonesSettings.motionMasks.toast.success": "{{polygonName}} has been saved. Restart Frigate to apply changes.",
"ui.settingView.masksAndZonesSettings.motionMasks.toast.success.noName": "Motion Mask has been saved. Restart Frigate to apply changes.",
"ui.settingView.masksAndZonesSettings.objectMasks": "Object Masks", "ui.settingView.masksAndZonesSettings.objectMasks": "Object Masks",
"ui.settingView.masksAndZonesSettings.objectMasks.documentTitle": "Edit Object Mask - Frigate", "ui.settingView.masksAndZonesSettings.objectMasks.documentTitle": "Edit Object Mask - Frigate",
@ -573,6 +608,8 @@
"ui.settingView.masksAndZonesSettings.objectMasks.objects": "Objects", "ui.settingView.masksAndZonesSettings.objectMasks.objects": "Objects",
"ui.settingView.masksAndZonesSettings.objectMasks.objects.desc": "The object type that that applies to this object mask.", "ui.settingView.masksAndZonesSettings.objectMasks.objects.desc": "The object type that that applies to this object mask.",
"ui.settingView.masksAndZonesSettings.objectMasks.objects.allObjectTypes": "All object types", "ui.settingView.masksAndZonesSettings.objectMasks.objects.allObjectTypes": "All object types",
"ui.settingView.masksAndZonesSettings.objectMasks.toast.success": "{{polygonName}} has been saved. Restart Frigate to apply changes.",
"ui.settingView.masksAndZonesSettings.objectMasks.toast.success.noName": "Object Mask has been saved. Restart Frigate to apply changes.",
"ui.settingView.motionDetectionTuner": "Motion Detection Tuner", "ui.settingView.motionDetectionTuner": "Motion Detection Tuner",
@ -584,6 +621,7 @@
"ui.settingView.motionDetectionTuner.contourArea.desc": "The contour area value is used to decide which groups of changed pixels qualify as motion. <em>Default: 10</em>", "ui.settingView.motionDetectionTuner.contourArea.desc": "The contour area value is used to decide which groups of changed pixels qualify as motion. <em>Default: 10</em>",
"ui.settingView.motionDetectionTuner.improveContrast": "Improve Contrast", "ui.settingView.motionDetectionTuner.improveContrast": "Improve Contrast",
"ui.settingView.motionDetectionTuner.improveContrast.desc": "Improve contrast for darker scenes. <em>Default: ON</em>", "ui.settingView.motionDetectionTuner.improveContrast.desc": "Improve contrast for darker scenes. <em>Default: ON</em>",
"ui.settingView.motionDetectionTuner.toast.success": "Motion settings have been saved.",
"ui.settingView.debug": "Debug", "ui.settingView.debug": "Debug",
"ui.settingView.debug.detectorDesc": "Frigate uses your detectors ({{detectors}}) to detect objects in your camera's video stream.", "ui.settingView.debug.detectorDesc": "Frigate uses your detectors ({{detectors}}) to detect objects in your camera's video stream.",

View File

@ -181,6 +181,11 @@
"ui.dialog.streaming.debugView": "调试界面", "ui.dialog.streaming.debugView": "调试界面",
"ui.dialog.search.saveSearch": "保存搜索",
"ui.dialog.search.saveSearch.desc": "请为此已保存的搜索提供一个名称。",
"ui.dialog.search.saveSearch.placeholder": "请输入搜索名称",
"ui.dialog.search.saveSearch.overwrite": "{{searchName}} 已存在。保存将覆盖现有值。",
"ui.dialog.search.saveSearch.success": "搜索 ({{searchName}}) 已保存。",
"ui.stats.ffmpegHighCpuUsage": "{{camera}} 的 FFMPEG CPU 使用率较高({{ffmpegAvg}}%", "ui.stats.ffmpegHighCpuUsage": "{{camera}} 的 FFMPEG CPU 使用率较高({{ffmpegAvg}}%",
"ui.stats.detectHighCpuUsage": "{{camera}} 的 探测 CPU 使用率较高({{detectAvg}}%", "ui.stats.detectHighCpuUsage": "{{camera}} 的 探测 CPU 使用率较高({{detectAvg}}%",
@ -291,7 +296,6 @@
"ui.cameraGroup.cameras.desc": "选择添加至该组的摄像头。", "ui.cameraGroup.cameras.desc": "选择添加至该组的摄像头。",
"ui.cameraGroup.icon": "图标", "ui.cameraGroup.icon": "图标",
"ui.cameraGroup.toast.success": "摄像头组({{name}})保存成功。", "ui.cameraGroup.toast.success": "摄像头组({{name}})保存成功。",
"ui.cameraGroup.toast.error": "保存设置失败: {{error}}",
"ui.cameraGroup.camera.setting": "{{cameraName}} 视频流设置", "ui.cameraGroup.camera.setting": "{{cameraName}} 视频流设置",
"ui.cameraGroup.camera.setting.desc": "更改此摄像头组仪表板的实时视频流选项。<em>这些设置特定于设备/浏览器。</em>", "ui.cameraGroup.camera.setting.desc": "更改此摄像头组仪表板的实时视频流选项。<em>这些设置特定于设备/浏览器。</em>",
"ui.cameraGroup.camera.setting.audioIsAvailable": "此视频流支持音频", "ui.cameraGroup.camera.setting.audioIsAvailable": "此视频流支持音频",
@ -400,11 +404,28 @@
"ui.on": "开", "ui.on": "开",
"ui.off": "关", "ui.off": "关",
"ui.edit": "编辑", "ui.edit": "编辑",
"ui.copyCoordinates": "复制坐标",
"ui.delete": "删除", "ui.delete": "删除",
"ui.yes": "是", "ui.yes": "是",
"ui.no": "否", "ui.no": "否",
"ui.download": "下载", "ui.download": "下载",
"ui.info": "信息",
"ui.toast.save.error": "保存配置信息失败: {{errorMessage}}",
"ui.toast.save.error.noMessage": "保存配置信息失败",
"ui.form.message.polygonDrawing.error.mustBeFinished": "多边形绘制必须完成闭合后才能保存。",
"ui.form.message.zoneName.error.mustBeAtLeastTwoCharacters": "区域名称必须至少包含 2 个字符。",
"ui.form.message.zoneName.error.mustNotBeSameWithCamera": "区域名称不能与摄像头名称相同。",
"ui.form.message.zoneName.error.alreadyExists": "该摄像头已有相同的区域名称。",
"ui.form.message.zoneName.error.mustNotContainPeriod": "区域名称不能包含句点。",
"ui.form.message.zoneName.error.hasIllegalCharacter": "区域名称包含非法字符。",
"ui.form.message.distance.error": "距离必须大于或等于 0.1。",
"ui.form.message.distance.error.mustBeFilled": "所有距离字段必须填写才能使用速度估算。",
"ui.form.message.inertia.error.mustBeAboveZero": "惯性必须大于 0。",
"ui.form.message.loiteringTime.error.mustBeGreaterOrEqualZero": "徘徊时间必须大于或等于 0。",
"ui.live.documentTitle": "实时监控 - Frigate", "ui.live.documentTitle": "实时监控 - Frigate",
"ui.live.documentTitle.withCamera": "{{camera}} - 实时监控 - Frigate", "ui.live.documentTitle.withCamera": "{{camera}} - 实时监控 - Frigate",
"ui.live.twoWayTalk.enable": "开启双向对话", "ui.live.twoWayTalk.enable": "开启双向对话",
@ -503,12 +524,15 @@
"ui.settingView.exploreSettings.semanticSearch.modelSize.large": "大", "ui.settingView.exploreSettings.semanticSearch.modelSize.large": "大",
"ui.settingView.exploreSettings.semanticSearch.modelSize.small.desc": "使用 <strong>小</strong>模型。该模型将使用较少的内存在CPU上也能较快的运行。质量较好。", "ui.settingView.exploreSettings.semanticSearch.modelSize.small.desc": "使用 <strong>小</strong>模型。该模型将使用较少的内存在CPU上也能较快的运行。质量较好。",
"ui.settingView.exploreSettings.semanticSearch.modelSize.large.desc": "使用 <strong>大</strong>模型。该模型采用了完整的Jina模型并在适用的情况下使用GPU。", "ui.settingView.exploreSettings.semanticSearch.modelSize.large.desc": "使用 <strong>大</strong>模型。该模型采用了完整的Jina模型并在适用的情况下使用GPU。",
"ui.settingView.exploreSettings.toast.success": "探测设置已保存。",
"ui.settingView.cameraSettings": "摄像头设置", "ui.settingView.cameraSettings": "摄像头设置",
"ui.settingView.cameraSettings.streams": "视频流",
"ui.settingView.cameraSettings.streams.desc": "禁用摄像头将完全停止 Frigate 对该摄像头视频流的处理。检测、录制和调试功能都将不可用。<br /><em>注意:该选项不会禁用 go2rtc 转播。</em>",
"ui.settingView.cameraSettings.review": "预览", "ui.settingView.cameraSettings.review": "预览",
"ui.settingView.cameraSettings.review.desc": "启用/禁用摄像头的警报和检测。禁用后,不会生成新的预览项。", "ui.settingView.cameraSettings.review.desc": "启用/禁用摄像头的警报和检测。禁用后,不会生成新的预览项。",
"ui.settingView.cameraSettings.review.alerts": "警告", "ui.settingView.cameraSettings.review.alerts": "警告 ",
"ui.settingView.cameraSettings.review.detections": "检测", "ui.settingView.cameraSettings.review.detections": "检测 ",
"ui.settingView.cameraSettings.reviewClassification": "预览分级", "ui.settingView.cameraSettings.reviewClassification": "预览分级",
"ui.settingView.cameraSettings.reviewClassification.desc": "Frigate 将回放项目分为“警告”和“检测”。默认情况下,所有的 <em>人</em>、<em>汽车</em> 的对象都视为警告。你可以通过修改配置文件配置区域来细分。", "ui.settingView.cameraSettings.reviewClassification.desc": "Frigate 将回放项目分为“警告”和“检测”。默认情况下,所有的 <em>人</em>、<em>汽车</em> 的对象都视为警告。你可以通过修改配置文件配置区域来细分。",
"ui.settingView.cameraSettings.reviewClassification.readTheDocumentation": "阅读文档(英文)", "ui.settingView.cameraSettings.reviewClassification.readTheDocumentation": "阅读文档(英文)",
@ -516,32 +540,45 @@
"ui.settingView.cameraSettings.reviewClassification.objectAlertsTips": "所有的 {{alertsLabels}} 对象在 {{cameraName}} 都将显示为警告。", "ui.settingView.cameraSettings.reviewClassification.objectAlertsTips": "所有的 {{alertsLabels}} 对象在 {{cameraName}} 都将显示为警告。",
"ui.settingView.cameraSettings.reviewClassification.zoneObjectAlertsTips": "所有的 {{alertsLabels}} 对象在 {{cameraName}} 的 {{zone}} 区域都将显示为警告。", "ui.settingView.cameraSettings.reviewClassification.zoneObjectAlertsTips": "所有的 {{alertsLabels}} 对象在 {{cameraName}} 的 {{zone}} 区域都将显示为警告。",
"ui.settingView.cameraSettings.reviewClassification.selectAlertsZones": "选择要显示为警告的区域", "ui.settingView.cameraSettings.reviewClassification.selectAlertsZones": "选择要显示为警告的区域",
"ui.settingView.cameraSettings.reviewClassification.selectDetectionsZones": "选择检测区域",
"ui.settingView.cameraSettings.reviewClassification.limitDetections": "限制仅在特定区域内进行检测",
"ui.settingView.cameraSettings.reviewClassification.toast.success": "预览分级配置已保存。请重启 Frigate 以应用更改。",
"ui.settingView.cameraSettings.reviewClassification.objectDetectionsTips": "所有未在 {{cameraName}} 归类的 {{detectionsLabels}} 对象,无论它位于哪个区域,都将显示为检测。", "ui.settingView.cameraSettings.reviewClassification.objectDetectionsTips": "所有未在 {{cameraName}} 归类的 {{detectionsLabels}} 对象,无论它位于哪个区域,都将显示为检测。",
"ui.settingView.cameraSettings.reviewClassification.zoneObjectDetectionsTips": "所有未在 {{cameraName}} 上归类为 {{detectionsLabels}} 的对象在 {{zone}} 区域都将显示为检测。", "ui.settingView.cameraSettings.reviewClassification.zoneObjectDetectionsTips": "所有未在 {{cameraName}} 上归类为 {{detectionsLabels}} 的对象在 {{zone}} 区域都将显示为检测。",
"ui.settingView.cameraSettings.reviewClassification.zoneObjectDetectionsTips.notSelectDetections": "所有在 {{cameraName}} 的 {{zone}} 上检测到的未归类为警告的 {{detectionsLabels}} 对象,无论它位于哪个区域,都将显示为检测。", "ui.settingView.cameraSettings.reviewClassification.zoneObjectDetectionsTips.notSelectDetections": "所有在 {{cameraName}} 的 {{zone}} 上检测到的未归类为警告的 {{detectionsLabels}} 对象,无论它位于哪个区域,都将显示为检测。",
"ui.settingView.cameraSettings.reviewClassification.zoneObjectDetectionsTips.regardlessOfZoneObjectDetectionsTips": "所有未在 {{cameraName}} 归类的 {{detectionsLabels}} 对象,无论它位于哪个区域,都将显示为检测。", "ui.settingView.cameraSettings.reviewClassification.zoneObjectDetectionsTips.regardlessOfZoneObjectDetectionsTips": "所有未在 {{cameraName}} 归类的 {{detectionsLabels}} 对象,无论它位于哪个区域,都将显示为检测。",
"ui.settingView.masksAndZonesSettings": "屏罩 / 区域", "ui.settingView.masksAndZonesSettings": "遮罩/ 区域",
"ui.settingView.masksAndZonesSettings.zone": "区域", "ui.settingView.masksAndZonesSettings.filter.all": "所有遮罩和区域",
"ui.settingView.masksAndZonesSettings.zone.documentTitle": "编辑区域 - Frigate",
"ui.settingView.masksAndZonesSettings.zone.desc": "该功能允许你定义特定区域,以便你可以确定特定对象是否在该区域内。", "ui.settingView.masksAndZonesSettings.zones": "区域",
"ui.settingView.masksAndZonesSettings.zone.desc.documentation": "文档(英文)", "ui.settingView.masksAndZonesSettings.zones.documentTitle": "编辑区域 - Frigate",
"ui.settingView.masksAndZonesSettings.zone.add": "添加区域", "ui.settingView.masksAndZonesSettings.zones.desc": "该功能允许你定义特定区域,以便你可以确定特定对象是否在该区域内。",
"ui.settingView.masksAndZonesSettings.zone.edit": "编辑区域", "ui.settingView.masksAndZonesSettings.zones.desc.documentation": "文档(英文)",
"ui.settingView.masksAndZonesSettings.zone.point_one": "{{count}} 点", "ui.settingView.masksAndZonesSettings.zones.add": "添加区域",
"ui.settingView.masksAndZonesSettings.zone.point_other": "{{count}} 点", "ui.settingView.masksAndZonesSettings.zones.edit": "编辑区域",
"ui.settingView.masksAndZonesSettings.zone.clickDrawPolygon": "在图像上点击添加点绘制多边形区域。", "ui.settingView.masksAndZonesSettings.zones.point_one": "{{count}} 点",
"ui.settingView.masksAndZonesSettings.zone.name": "区域名称", "ui.settingView.masksAndZonesSettings.zones.point_other": "{{count}} 点",
"ui.settingView.masksAndZonesSettings.zone.name.inputPlaceHolder": "请输入名称", "ui.settingView.masksAndZonesSettings.zones.clickDrawPolygon": "在图像上点击添加点绘制多边形区域。",
"ui.settingView.masksAndZonesSettings.zone.name.tips": "名称至少包含两个字符,且不能和摄像头或其他区域同名。", "ui.settingView.masksAndZonesSettings.zones.name": "区域名称",
"ui.settingView.masksAndZonesSettings.zone.inertia": "区域名称", "ui.settingView.masksAndZonesSettings.zones.name.inputPlaceHolder": "请输入名称",
"ui.settingView.masksAndZonesSettings.zone.inertia.desc": "识别指定对象前该对象必须在这个区域内出现了多少帧。<em>默认值3</em>", "ui.settingView.masksAndZonesSettings.zones.name.tips": "名称至少包含两个字符,且不能和摄像头或其他区域同名。<br>当前仅支持英文与数字组合",
"ui.settingView.masksAndZonesSettings.zone.loiteringTime": "停留时间", "ui.settingView.masksAndZonesSettings.zones.inertia": "惯性",
"ui.settingView.masksAndZonesSettings.zone.loiteringTime.desc": "设置对象必须在区域中活动的最小时间(单位为秒)。<em>默认值0</em>", "ui.settingView.masksAndZonesSettings.zones.inertia.desc": "识别指定对象前该对象必须在这个区域内出现了多少帧。<em>默认值3</em>",
"ui.settingView.masksAndZonesSettings.zone.objects": "对象", "ui.settingView.masksAndZonesSettings.zones.loiteringTime": "停留时间",
"ui.settingView.masksAndZonesSettings.zone.objects.desc": "将在此区域应用的对象列表。", "ui.settingView.masksAndZonesSettings.zones.loiteringTime.desc": "设置对象必须在区域中活动的最小时间(单位为秒)。<em>默认值0</em>",
"ui.settingView.masksAndZonesSettings.zone.allObjects": "所有对象", "ui.settingView.masksAndZonesSettings.zones.objects": "对象",
"ui.settingView.masksAndZonesSettings.zones.objects.desc": "将在此区域应用的对象列表。",
"ui.settingView.masksAndZonesSettings.zones.allObjects": "所有对象",
"ui.settingView.masksAndZonesSettings.zones.speedEstimation": "速度估算",
"ui.settingView.masksAndZonesSettings.zones.speedEstimation.desc": "启用此区域内物体的速度估算。该区域必须恰好包含 4 个点。",
"ui.settingView.masksAndZonesSettings.zones.speedThreshold": "速度阈值 ({{unit}})",
"ui.settingView.masksAndZonesSettings.zones.speedThreshold.desc": "指定物体在此区域内被视为有效的最低速度。",
"ui.settingView.masksAndZonesSettings.zones.speedThreshold.toast.error.pointLengthError": "此区域的速度估算已禁用。启用速度估算的区域必须恰好包含 4 个点。",
"ui.settingView.masksAndZonesSettings.zones.speedThreshold.toast.error.loiteringTimeError": "徘徊时间大于 0 的区域不应与速度估算一起使用。",
"ui.settingView.masksAndZonesSettings.zones.toast.success": "区域 ({{zoneName}}) 已保存。请重启 Frigate 以应用更改。",
"ui.settingView.masksAndZonesSettings.motionMasks": "运动遮罩", "ui.settingView.masksAndZonesSettings.motionMasks": "运动遮罩",
"ui.settingView.masksAndZonesSettings.motionMasks.documentTitle": "编辑运动遮罩 - Frigate", "ui.settingView.masksAndZonesSettings.motionMasks.documentTitle": "编辑运动遮罩 - Frigate",
@ -557,7 +594,8 @@
"ui.settingView.masksAndZonesSettings.motionMasks.polygonAreaTooLarge": "运动遮罩的大小达到了摄像头画面的{{polygonArea}}%。不建议设置太大的运动遮罩。", "ui.settingView.masksAndZonesSettings.motionMasks.polygonAreaTooLarge": "运动遮罩的大小达到了摄像头画面的{{polygonArea}}%。不建议设置太大的运动遮罩。",
"ui.settingView.masksAndZonesSettings.motionMasks.polygonAreaTooLarge.tips": "运动遮罩不会阻止检测到对象,你应该使用区域来限制检测对象。", "ui.settingView.masksAndZonesSettings.motionMasks.polygonAreaTooLarge.tips": "运动遮罩不会阻止检测到对象,你应该使用区域来限制检测对象。",
"ui.settingView.masksAndZonesSettings.motionMasks.polygonAreaTooLarge.documentation": "阅读文档(英文)", "ui.settingView.masksAndZonesSettings.motionMasks.polygonAreaTooLarge.documentation": "阅读文档(英文)",
"ui.settingView.masksAndZonesSettings.motionMasks.toast.success": "{{polygonName}} 已保存。请重启 Frigate 以应用更改。",
"ui.settingView.masksAndZonesSettings.motionMasks.toast.success.noName": "运动遮罩已保存。请重启 Frigate 以应用更改。",
"ui.settingView.masksAndZonesSettings.objectMasks": "对象遮罩", "ui.settingView.masksAndZonesSettings.objectMasks": "对象遮罩",
"ui.settingView.masksAndZonesSettings.objectMasks.documentTitle": "编辑对象遮罩 - Frigate", "ui.settingView.masksAndZonesSettings.objectMasks.documentTitle": "编辑对象遮罩 - Frigate",
@ -572,6 +610,8 @@
"ui.settingView.masksAndZonesSettings.objectMasks.objects": "对象", "ui.settingView.masksAndZonesSettings.objectMasks.objects": "对象",
"ui.settingView.masksAndZonesSettings.objectMasks.objects.desc": "将应用于此对象遮罩的对象列表。", "ui.settingView.masksAndZonesSettings.objectMasks.objects.desc": "将应用于此对象遮罩的对象列表。",
"ui.settingView.masksAndZonesSettings.objectMasks.objects.allObjectTypes": "所有对象类型", "ui.settingView.masksAndZonesSettings.objectMasks.objects.allObjectTypes": "所有对象类型",
"ui.settingView.masksAndZonesSettings.objectMasks.toast.success": "{{polygonName}} 已保存。请重启 Frigate 以应用更改。",
"ui.settingView.masksAndZonesSettings.objectMasks.toast.success.noName": "对象遮罩已保存。请重启 Frigate 以应用更改。",
"ui.settingView.motionDetectionTuner": "运动检测调整器", "ui.settingView.motionDetectionTuner": "运动检测调整器",

View File

@ -56,6 +56,7 @@ function useValue(): useValueReturn {
const { const {
record, record,
detect, detect,
enabled,
snapshots, snapshots,
audio, audio,
notifications, notifications,
@ -67,6 +68,7 @@ function useValue(): useValueReturn {
// @ts-expect-error we know this is correct // @ts-expect-error we know this is correct
state["config"]; state["config"];
cameraStates[`${name}/recordings/state`] = record ? "ON" : "OFF"; cameraStates[`${name}/recordings/state`] = record ? "ON" : "OFF";
cameraStates[`${name}/enabled/state`] = enabled ? "ON" : "OFF";
cameraStates[`${name}/detect/state`] = detect ? "ON" : "OFF"; cameraStates[`${name}/detect/state`] = detect ? "ON" : "OFF";
cameraStates[`${name}/snapshots/state`] = snapshots ? "ON" : "OFF"; cameraStates[`${name}/snapshots/state`] = snapshots ? "ON" : "OFF";
cameraStates[`${name}/audio/state`] = audio ? "ON" : "OFF"; cameraStates[`${name}/audio/state`] = audio ? "ON" : "OFF";
@ -164,6 +166,17 @@ export function useWs(watchTopic: string, publishTopic: string) {
return { value, send }; return { value, send };
} }
export function useEnabledState(camera: string): {
payload: ToggleableSetting;
send: (payload: ToggleableSetting, retain?: boolean) => void;
} {
const {
value: { payload },
send,
} = useWs(`${camera}/enabled/state`, `${camera}/enabled/set`);
return { payload: (payload ?? "ON") as ToggleableSetting, send };
}
export function useDetectState(camera: string): { export function useDetectState(camera: string): {
payload: ToggleableSetting; payload: ToggleableSetting;
send: (payload: ToggleableSetting, retain?: boolean) => void; send: (payload: ToggleableSetting, retain?: boolean) => void;

View File

@ -5,6 +5,7 @@ import ActivityIndicator from "../indicators/activity-indicator";
import { useResizeObserver } from "@/hooks/resize-observer"; import { useResizeObserver } from "@/hooks/resize-observer";
import { isDesktop } from "react-device-detect"; import { isDesktop } from "react-device-detect";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import { useEnabledState } from "@/api/ws";
type CameraImageProps = { type CameraImageProps = {
className?: string; className?: string;
@ -26,7 +27,8 @@ export default function CameraImage({
const imgRef = useRef<HTMLImageElement | null>(null); const imgRef = useRef<HTMLImageElement | null>(null);
const { name } = config ? config.cameras[camera] : ""; const { name } = config ? config.cameras[camera] : "";
const enabled = config ? config.cameras[camera].enabled : "True"; const { payload: enabledState } = useEnabledState(camera);
const enabled = enabledState === "ON" || enabledState === undefined;
const [{ width: containerWidth, height: containerHeight }] = const [{ width: containerWidth, height: containerHeight }] =
useResizeObserver(containerRef); useResizeObserver(containerRef);
@ -96,9 +98,7 @@ export default function CameraImage({
loading="lazy" loading="lazy"
/> />
) : ( ) : (
<div className="pt-6 text-center"> <div className="size-full rounded-lg border-2 border-muted bg-background_alt text-center md:rounded-2xl" />
Camera is disabled in config, no stream or snapshot available!
</div>
)} )}
{!imageLoaded && enabled ? ( {!imageLoaded && enabled ? (
<div className="absolute bottom-0 left-0 right-0 top-0 flex items-center justify-center"> <div className="absolute bottom-0 left-0 right-0 top-0 flex items-center justify-center">

View File

@ -108,9 +108,7 @@ export default function CameraImage({
width={scaledWidth} width={scaledWidth}
/> />
) : ( ) : (
<div className="pt-6 text-center"> <div className="pt-6 text-center">Camera is disabled.</div>
Camera is disabled in config, no stream or snapshot available!
</div>
)} )}
{!hasLoaded && enabled ? ( {!hasLoaded && enabled ? (
<div <div

View File

@ -11,11 +11,15 @@ const variants = {
primary: { primary: {
active: "font-bold text-white bg-selected rounded-lg", active: "font-bold text-white bg-selected rounded-lg",
inactive: "text-secondary-foreground bg-secondary rounded-lg", inactive: "text-secondary-foreground bg-secondary rounded-lg",
disabled:
"text-secondary-foreground bg-secondary rounded-lg cursor-not-allowed opacity-50",
}, },
overlay: { overlay: {
active: "font-bold text-white bg-selected rounded-full", active: "font-bold text-white bg-selected rounded-full",
inactive: inactive:
"text-primary rounded-full bg-gradient-to-br from-gray-400 to-gray-500 bg-gray-500", "text-primary rounded-full bg-gradient-to-br from-gray-400 to-gray-500 bg-gray-500",
disabled:
"bg-gradient-to-br from-gray-400 to-gray-500 bg-gray-500 rounded-full cursor-not-allowed opacity-50",
}, },
}; };
@ -26,6 +30,7 @@ type CameraFeatureToggleProps = {
Icon: IconType; Icon: IconType;
title: string; title: string;
onClick?: () => void; onClick?: () => void;
disabled?: boolean; // New prop for disabling
}; };
export default function CameraFeatureToggle({ export default function CameraFeatureToggle({
@ -35,18 +40,28 @@ export default function CameraFeatureToggle({
Icon, Icon,
title, title,
onClick, onClick,
disabled = false, // Default to false
}: CameraFeatureToggleProps) { }: CameraFeatureToggleProps) {
const content = ( const content = (
<div <div
onClick={onClick} onClick={disabled ? undefined : onClick}
className={cn( className={cn(
"flex flex-col items-center justify-center", "flex flex-col items-center justify-center",
variants[variant][isActive ? "active" : "inactive"], disabled
? variants[variant].disabled
: variants[variant][isActive ? "active" : "inactive"],
className, className,
)} )}
> >
<Icon <Icon
className={`size-5 md:m-[6px] ${isActive ? "text-white" : "text-secondary-foreground"}`} className={cn(
"size-5 md:m-[6px]",
disabled
? "text-gray-400"
: isActive
? "text-white"
: "text-secondary-foreground",
)}
/> />
</div> </div>
); );
@ -54,7 +69,7 @@ export default function CameraFeatureToggle({
if (isDesktop) { if (isDesktop) {
return ( return (
<Tooltip> <Tooltip>
<TooltipTrigger>{content}</TooltipTrigger> <TooltipTrigger disabled={disabled}>{content}</TooltipTrigger>
<TooltipContent side="bottom"> <TooltipContent side="bottom">
<p>{title}</p> <p>{title}</p>
</TooltipContent> </TooltipContent>

View File

@ -744,7 +744,7 @@ export function CameraGroupEdit({
setAllGroupsStreamingSettings(updatedSettings); setAllGroupsStreamingSettings(updatedSettings);
} else { } else {
toast.error( toast.error(
t("ui.cameraGroup.toast.error", { error: res.statusText }), t("ui.toast.save.error", { errorMessage: res.statusText }),
{ {
position: "top-center", position: "top-center",
}, },
@ -753,8 +753,8 @@ export function CameraGroupEdit({
}) })
.catch((error) => { .catch((error) => {
toast.error( toast.error(
t("ui.cameraGroup.toast.error", { t("ui.toast.save.error", {
error: error.response.data.message, errorMessage: error.response.data.message,
}), }),
{ position: "top-center" }, { position: "top-center" },
); );

View File

@ -7,6 +7,7 @@ import { PolygonType } from "@/types/canvas";
import { Label } from "../ui/label"; import { Label } from "../ui/label";
import { Switch } from "../ui/switch"; import { Switch } from "../ui/switch";
import { DropdownMenuSeparator } from "../ui/dropdown-menu"; import { DropdownMenuSeparator } from "../ui/dropdown-menu";
import { Trans } from "react-i18next";
type ZoneMaskFilterButtonProps = { type ZoneMaskFilterButtonProps = {
selectedZoneMask?: PolygonType[]; selectedZoneMask?: PolygonType[];
@ -29,7 +30,7 @@ export function ZoneMaskFilterButton({
<div <div
className={`hidden md:block ${selectedZoneMask?.length ? "text-selected-foreground" : "text-primary"}`} className={`hidden md:block ${selectedZoneMask?.length ? "text-selected-foreground" : "text-primary"}`}
> >
Filter <Trans>ui.filter</Trans>
</div> </div>
</Button> </Button>
); );
@ -75,7 +76,7 @@ export function GeneralFilterContent({
className="mx-2 cursor-pointer text-primary" className="mx-2 cursor-pointer text-primary"
htmlFor="allLabels" htmlFor="allLabels"
> >
All Masks and Zones <Trans>ui.settingView.masksAndZonesSettings.filter.all</Trans>
</Label> </Label>
<Switch <Switch
className="ml-1" className="ml-1"
@ -96,9 +97,12 @@ export function GeneralFilterContent({
className="mx-2 w-full cursor-pointer capitalize text-primary" className="mx-2 w-full cursor-pointer capitalize text-primary"
htmlFor={item} htmlFor={item}
> >
{item <Trans>
.replace(/_/g, " ") ui.settingView.masksAndZonesSettings.
.replace(/\b\w/g, (char) => char.toUpperCase()) + "s"} {item.replace(/_([a-z])/g, (match, letter) =>
letter.toUpperCase(),
) + "s"}
</Trans>
</Label> </Label>
<Switch <Switch
key={item} key={item}

View File

@ -12,6 +12,8 @@ import { Input } from "@/components/ui/input";
import { useMemo, useState } from "react"; import { useMemo, useState } from "react";
import { isMobile } from "react-device-detect"; import { isMobile } from "react-device-detect";
import { toast } from "sonner"; import { toast } from "sonner";
import { Trans } from "react-i18next";
import { t } from "i18next";
type SaveSearchDialogProps = { type SaveSearchDialogProps = {
existingNames: string[]; existingNames: string[];
@ -32,9 +34,14 @@ export function SaveSearchDialog({
if (searchName.trim()) { if (searchName.trim()) {
onSave(searchName.trim()); onSave(searchName.trim());
setSearchName(""); setSearchName("");
toast.success(`Search (${searchName.trim()}) has been saved.`, { toast.success(
position: "top-center", t("ui.dialog.search.saveSearch.success", {
}); searchName: searchName.trim(),
}),
{
position: "top-center",
},
);
onClose(); onClose();
} }
}; };
@ -54,26 +61,29 @@ export function SaveSearchDialog({
}} }}
> >
<DialogHeader> <DialogHeader>
<DialogTitle>Save Search</DialogTitle> <DialogTitle>
<Trans>ui.dialog.search.saveSearch</Trans>
</DialogTitle>
<DialogDescription className="sr-only"> <DialogDescription className="sr-only">
Provide a name for this saved search. <Trans>ui.dialog.search.saveSearch.desc</Trans>
</DialogDescription> </DialogDescription>
</DialogHeader> </DialogHeader>
<Input <Input
value={searchName} value={searchName}
className="text-md" className="text-md"
onChange={(e) => setSearchName(e.target.value)} onChange={(e) => setSearchName(e.target.value)}
placeholder="Enter a name for your search" placeholder={t("ui.dialog.search.saveSearch.placeholder")}
/> />
{overwrite && ( {overwrite && (
<div className="ml-1 text-sm text-danger"> <div className="ml-1 text-sm text-danger">
{searchName} already exists. Saving will overwrite the existing <Trans values={{ searchName }}>
value. ui.dialog.search.saveSearch.overwrite
</Trans>
</div> </div>
)} )}
<DialogFooter> <DialogFooter>
<Button aria-label="Cancel" onClick={onClose}> <Button aria-label="Cancel" onClick={onClose}>
Cancel <Trans>ui.cancel</Trans>
</Button> </Button>
<Button <Button
onClick={handleSave} onClick={handleSave}
@ -81,7 +91,7 @@ export function SaveSearchDialog({
className="mb-2 md:mb-0" className="mb-2 md:mb-0"
aria-label="Save this search" aria-label="Save this search"
> >
Save <Trans>ui.save</Trans>
</Button> </Button>
</DialogFooter> </DialogFooter>
</DialogContent> </DialogContent>

View File

@ -39,7 +39,11 @@ import {
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import { useNavigate } from "react-router-dom"; import { useNavigate } from "react-router-dom";
import { formatUnixTimestampToDateTime } from "@/utils/dateUtil"; import { formatUnixTimestampToDateTime } from "@/utils/dateUtil";
import { useNotifications, useNotificationSuspend } from "@/api/ws"; import {
useEnabledState,
useNotifications,
useNotificationSuspend,
} from "@/api/ws";
type LiveContextMenuProps = { type LiveContextMenuProps = {
className?: string; className?: string;
@ -83,6 +87,11 @@ export default function LiveContextMenu({
}: LiveContextMenuProps) { }: LiveContextMenuProps) {
const [showSettings, setShowSettings] = useState(false); const [showSettings, setShowSettings] = useState(false);
// camera enabled
const { payload: enabledState, send: sendEnabled } = useEnabledState(camera);
const isEnabled = enabledState === "ON";
// streaming settings // streaming settings
const { allGroupsStreamingSettings, setAllGroupsStreamingSettings } = const { allGroupsStreamingSettings, setAllGroupsStreamingSettings } =
@ -263,7 +272,7 @@ export default function LiveContextMenu({
onClick={handleVolumeIconClick} onClick={handleVolumeIconClick}
/> />
<VolumeSlider <VolumeSlider
disabled={!audioState} disabled={!audioState || !isEnabled}
className="my-3 ml-0.5 rounded-lg bg-background/60" className="my-3 ml-0.5 rounded-lg bg-background/60"
value={[volumeState ?? 0]} value={[volumeState ?? 0]}
min={0} min={0}
@ -280,34 +289,49 @@ export default function LiveContextMenu({
<ContextMenuItem> <ContextMenuItem>
<div <div
className="flex w-full cursor-pointer items-center justify-start gap-2" className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={muteAll} onClick={() => sendEnabled(isEnabled ? "OFF" : "ON")}
>
<div className="text-primary">
{isEnabled ? "Disable" : "Enable"} Camera
</div>
</div>
</ContextMenuItem>
<ContextMenuSeparator />
<ContextMenuItem disabled={!isEnabled}>
<div
className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={isEnabled ? muteAll : undefined}
> >
<div className="text-primary">Mute All Cameras</div> <div className="text-primary">Mute All Cameras</div>
</div> </div>
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem> <ContextMenuItem disabled={!isEnabled}>
<div <div
className="flex w-full cursor-pointer items-center justify-start gap-2" className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={unmuteAll} onClick={isEnabled ? unmuteAll : undefined}
> >
<div className="text-primary">Unmute All Cameras</div> <div className="text-primary">Unmute All Cameras</div>
</div> </div>
</ContextMenuItem> </ContextMenuItem>
<ContextMenuSeparator /> <ContextMenuSeparator />
<ContextMenuItem> <ContextMenuItem disabled={!isEnabled}>
<div <div
className="flex w-full cursor-pointer items-center justify-start gap-2" className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={toggleStats} onClick={isEnabled ? toggleStats : undefined}
> >
<div className="text-primary"> <div className="text-primary">
{statsState ? "Hide" : "Show"} Stream Stats {statsState ? "Hide" : "Show"} Stream Stats
</div> </div>
</div> </div>
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem> <ContextMenuItem disabled={!isEnabled}>
<div <div
className="flex w-full cursor-pointer items-center justify-start gap-2" className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={() => navigate(`/settings?page=debug&camera=${camera}`)} onClick={
isEnabled
? () => navigate(`/settings?page=debug&camera=${camera}`)
: undefined
}
> >
<div className="text-primary">Debug View</div> <div className="text-primary">Debug View</div>
</div> </div>
@ -315,10 +339,10 @@ export default function LiveContextMenu({
{cameraGroup && cameraGroup !== "default" && ( {cameraGroup && cameraGroup !== "default" && (
<> <>
<ContextMenuSeparator /> <ContextMenuSeparator />
<ContextMenuItem> <ContextMenuItem disabled={!isEnabled}>
<div <div
className="flex w-full cursor-pointer items-center justify-start gap-2" className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={() => setShowSettings(true)} onClick={isEnabled ? () => setShowSettings(true) : undefined}
> >
<div className="text-primary">Streaming Settings</div> <div className="text-primary">Streaming Settings</div>
</div> </div>
@ -328,10 +352,10 @@ export default function LiveContextMenu({
{preferredLiveMode == "jsmpeg" && isRestreamed && ( {preferredLiveMode == "jsmpeg" && isRestreamed && (
<> <>
<ContextMenuSeparator /> <ContextMenuSeparator />
<ContextMenuItem> <ContextMenuItem disabled={!isEnabled}>
<div <div
className="flex w-full cursor-pointer items-center justify-start gap-2" className="flex w-full cursor-pointer items-center justify-start gap-2"
onClick={resetPreferredLiveMode} onClick={isEnabled ? resetPreferredLiveMode : undefined}
> >
<div className="text-primary">Reset</div> <div className="text-primary">Reset</div>
</div> </div>
@ -342,7 +366,7 @@ export default function LiveContextMenu({
<> <>
<ContextMenuSeparator /> <ContextMenuSeparator />
<ContextMenuSub> <ContextMenuSub>
<ContextMenuSubTrigger> <ContextMenuSubTrigger disabled={!isEnabled}>
<div className="flex items-center gap-2"> <div className="flex items-center gap-2">
<span>Notifications</span> <span>Notifications</span>
</div> </div>
@ -382,10 +406,15 @@ export default function LiveContextMenu({
<> <>
<ContextMenuSeparator /> <ContextMenuSeparator />
<ContextMenuItem <ContextMenuItem
onClick={() => { disabled={!isEnabled}
sendNotification("ON"); onClick={
sendNotificationSuspend(0); isEnabled
}} ? () => {
sendNotification("ON");
sendNotificationSuspend(0);
}
: undefined
}
> >
<div className="flex w-full flex-col gap-2"> <div className="flex w-full flex-col gap-2">
{notificationState === "ON" ? ( {notificationState === "ON" ? (
@ -405,36 +434,71 @@ export default function LiveContextMenu({
Suspend for: Suspend for:
</p> </p>
<div className="space-y-1"> <div className="space-y-1">
<ContextMenuItem onClick={() => handleSuspend("5")}> <ContextMenuItem
disabled={!isEnabled}
onClick={
isEnabled ? () => handleSuspend("5") : undefined
}
>
5 minutes 5 minutes
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem <ContextMenuItem
onClick={() => handleSuspend("10")} disabled={!isEnabled}
onClick={
isEnabled
? () => handleSuspend("10")
: undefined
}
> >
10 minutes 10 minutes
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem <ContextMenuItem
onClick={() => handleSuspend("30")} disabled={!isEnabled}
onClick={
isEnabled
? () => handleSuspend("30")
: undefined
}
> >
30 minutes 30 minutes
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem <ContextMenuItem
onClick={() => handleSuspend("60")} disabled={!isEnabled}
onClick={
isEnabled
? () => handleSuspend("60")
: undefined
}
> >
1 hour 1 hour
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem <ContextMenuItem
onClick={() => handleSuspend("840")} disabled={!isEnabled}
onClick={
isEnabled
? () => handleSuspend("840")
: undefined
}
> >
12 hours 12 hours
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem <ContextMenuItem
onClick={() => handleSuspend("1440")} disabled={!isEnabled}
onClick={
isEnabled
? () => handleSuspend("1440")
: undefined
}
> >
24 hours 24 hours
</ContextMenuItem> </ContextMenuItem>
<ContextMenuItem <ContextMenuItem
onClick={() => handleSuspend("off")} disabled={!isEnabled}
onClick={
isEnabled
? () => handleSuspend("off")
: undefined
}
> >
Until restart Until restart
</ContextMenuItem> </ContextMenuItem>

View File

@ -22,6 +22,7 @@ import { TbExclamationCircle } from "react-icons/tb";
import { TooltipPortal } from "@radix-ui/react-tooltip"; import { TooltipPortal } from "@radix-ui/react-tooltip";
import { baseUrl } from "@/api/baseUrl"; import { baseUrl } from "@/api/baseUrl";
import { PlayerStats } from "./PlayerStats"; import { PlayerStats } from "./PlayerStats";
import { LuVideoOff } from "react-icons/lu";
type LivePlayerProps = { type LivePlayerProps = {
cameraRef?: (ref: HTMLDivElement | null) => void; cameraRef?: (ref: HTMLDivElement | null) => void;
@ -86,8 +87,13 @@ export default function LivePlayer({
// camera activity // camera activity
const { activeMotion, activeTracking, objects, offline } = const {
useCameraActivity(cameraConfig); enabled: cameraEnabled,
activeMotion,
activeTracking,
objects,
offline,
} = useCameraActivity(cameraConfig);
const cameraActive = useMemo( const cameraActive = useMemo(
() => () =>
@ -191,12 +197,40 @@ export default function LivePlayer({
setLiveReady(true); setLiveReady(true);
}, []); }, []);
// enabled states
const [isReEnabling, setIsReEnabling] = useState(false);
const prevCameraEnabledRef = useRef(cameraEnabled ?? true);
useEffect(() => {
if (cameraEnabled == undefined) {
return;
}
if (!prevCameraEnabledRef.current && cameraEnabled) {
// Camera enabled
setLiveReady(false);
setIsReEnabling(true);
setKey((prevKey) => prevKey + 1);
} else if (prevCameraEnabledRef.current && !cameraEnabled) {
// Camera disabled
setLiveReady(false);
setKey((prevKey) => prevKey + 1);
}
prevCameraEnabledRef.current = cameraEnabled;
}, [cameraEnabled]);
useEffect(() => {
if (liveReady && isReEnabling) {
setIsReEnabling(false);
}
}, [liveReady, isReEnabling]);
if (!cameraConfig) { if (!cameraConfig) {
return <ActivityIndicator />; return <ActivityIndicator />;
} }
let player; let player;
if (!autoLive || !streamName) { if (!autoLive || !streamName || !cameraEnabled) {
player = null; player = null;
} else if (preferredLiveMode == "webrtc") { } else if (preferredLiveMode == "webrtc") {
player = ( player = (
@ -267,6 +301,22 @@ export default function LivePlayer({
player = <ActivityIndicator />; player = <ActivityIndicator />;
} }
// if (cameraConfig.name == "lpr")
// console.log(
// cameraConfig.name,
// "enabled",
// cameraEnabled,
// "prev enabled",
// prevCameraEnabledRef.current,
// "offline",
// offline,
// "show still",
// showStillWithoutActivity,
// "live ready",
// liveReady,
// player,
// );
return ( return (
<div <div
ref={cameraRef ?? internalContainerRef} ref={cameraRef ?? internalContainerRef}
@ -287,16 +337,18 @@ export default function LivePlayer({
} }
}} }}
> >
{((showStillWithoutActivity && !liveReady) || liveReady) && ( {cameraEnabled &&
<> ((showStillWithoutActivity && !liveReady) || liveReady) && (
<div className="pointer-events-none absolute inset-x-0 top-0 z-10 h-[30%] w-full rounded-lg bg-gradient-to-b from-black/20 to-transparent md:rounded-2xl"></div> <>
<div className="pointer-events-none absolute inset-x-0 bottom-0 z-10 h-[10%] w-full rounded-lg bg-gradient-to-t from-black/20 to-transparent md:rounded-2xl"></div> <div className="pointer-events-none absolute inset-x-0 top-0 z-10 h-[30%] w-full rounded-lg bg-gradient-to-b from-black/20 to-transparent md:rounded-2xl"></div>
</> <div className="pointer-events-none absolute inset-x-0 bottom-0 z-10 h-[10%] w-full rounded-lg bg-gradient-to-t from-black/20 to-transparent md:rounded-2xl"></div>
)} </>
)}
{player} {player}
{!offline && !showStillWithoutActivity && !liveReady && ( {cameraEnabled &&
<ActivityIndicator /> !offline &&
)} (!showStillWithoutActivity || isReEnabling) &&
!liveReady && <ActivityIndicator />}
{((showStillWithoutActivity && !liveReady) || liveReady) && {((showStillWithoutActivity && !liveReady) || liveReady) &&
objects.length > 0 && ( objects.length > 0 && (
@ -344,7 +396,9 @@ export default function LivePlayer({
<div <div
className={cn( className={cn(
"absolute inset-0 w-full", "absolute inset-0 w-full",
showStillWithoutActivity && !liveReady ? "visible" : "invisible", showStillWithoutActivity && !liveReady && !isReEnabling
? "visible"
: "invisible",
)} )}
> >
<AutoUpdatingCameraImage <AutoUpdatingCameraImage
@ -371,6 +425,17 @@ export default function LivePlayer({
</div> </div>
)} )}
{!cameraEnabled && (
<div className="relative flex h-full w-full items-center justify-center">
<div className="flex h-32 flex-col items-center justify-center rounded-lg p-4 md:h-48 md:w-48">
<LuVideoOff className="mb-2 size-8 md:size-10" />
<p className="max-w-32 text-center text-sm md:max-w-40 md:text-base">
Camera is disabled
</p>
</div>
</div>
)}
<div className="absolute right-2 top-2"> <div className="absolute right-2 top-2">
{autoLive && {autoLive &&
!offline && !offline &&
@ -378,7 +443,7 @@ export default function LivePlayer({
((showStillWithoutActivity && !liveReady) || liveReady) && ( ((showStillWithoutActivity && !liveReady) || liveReady) && (
<MdCircle className="mr-2 size-2 animate-pulse text-danger shadow-danger drop-shadow-md" /> <MdCircle className="mr-2 size-2 animate-pulse text-danger shadow-danger drop-shadow-md" />
)} )}
{offline && showStillWithoutActivity && ( {((offline && showStillWithoutActivity) || !cameraEnabled) && (
<Chip <Chip
className={`z-0 flex items-start justify-between space-x-1 bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 text-xs capitalize`} className={`z-0 flex items-start justify-between space-x-1 bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 text-xs capitalize`}
> >

View File

@ -107,7 +107,7 @@ export default function MotionMaskEditPane({
polygon: z.object({ name: z.string(), isFinished: z.boolean() }), polygon: z.object({ name: z.string(), isFinished: z.boolean() }),
}) })
.refine(() => polygon?.isFinished === true, { .refine(() => polygon?.isFinished === true, {
message: "The polygon drawing must be finished before saving.", message: t("ui.form.message.polygonDrawing.error.mustBeFinished"),
path: ["polygon.isFinished"], path: ["polygon.isFinished"],
}); });
@ -165,7 +165,16 @@ export default function MotionMaskEditPane({
.then((res) => { .then((res) => {
if (res.status === 200) { if (res.status === 200) {
toast.success( toast.success(
`${polygon.name || "Motion Mask"} has been saved. Restart Frigate to apply changes.`, polygon.name
? t(
"ui.settingView.masksAndZonesSettings.motionMasks.toast.success",
{
polygonName: polygon.name,
},
)
: t(
"ui.settingView.masksAndZonesSettings.motionMasks.toast.success.noName",
),
{ {
position: "top-center", position: "top-center",
}, },

View File

@ -109,7 +109,7 @@ export default function ObjectMaskEditPane({
polygon: z.object({ isFinished: z.boolean(), name: z.string() }), polygon: z.object({ isFinished: z.boolean(), name: z.string() }),
}) })
.refine(() => polygon?.isFinished === true, { .refine(() => polygon?.isFinished === true, {
message: "The polygon drawing must be finished before saving.", message: t("ui.form.message.polygonDrawing.error.mustBeFinished"),
path: ["polygon.isFinished"], path: ["polygon.isFinished"],
}); });
@ -197,21 +197,37 @@ export default function ObjectMaskEditPane({
.then((res) => { .then((res) => {
if (res.status === 200) { if (res.status === 200) {
toast.success( toast.success(
`${polygon.name || "Object Mask"} has been saved. Restart Frigate to apply changes.`, polygon.name
? t(
"ui.settingView.masksAndZonesSettings.objectMasks.toast.success",
{
polygonName: polygon.name,
},
)
: t(
"ui.settingView.masksAndZonesSettings.objectMasks.toast.success.noName",
),
{ {
position: "top-center", position: "top-center",
}, },
); );
updateConfig(); updateConfig();
} else { } else {
toast.error(`Failed to save config changes: ${res.statusText}`, { toast.error(
position: "top-center", t("ui.toast.save.error", {
}); errorMessage: res.statusText,
}),
{
position: "top-center",
},
);
} }
}) })
.catch((error) => { .catch((error) => {
toast.error( toast.error(
`Failed to save config changes: ${error.response.data.message}`, t("ui.toast.save.error", {
errorMessage: error.response.data.message,
}),
{ position: "top-center" }, { position: "top-center" },
); );
}) })
@ -290,7 +306,6 @@ export default function ObjectMaskEditPane({
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.objectMasks.clickDrawPolygon ui.settingView.masksAndZonesSettings.objectMasks.clickDrawPolygon
</Trans> </Trans>
Click to draw a polygon on the image.
</div> </div>
<Separator className="my-3 bg-secondary" /> <Separator className="my-3 bg-secondary" />
@ -360,7 +375,7 @@ export default function ObjectMaskEditPane({
aria-label="Cancel" aria-label="Cancel"
onClick={onCancel} onClick={onCancel}
> >
Cancel <Trans>ui.cancel</Trans>
</Button> </Button>
<Button <Button
variant="select" variant="select"
@ -372,10 +387,12 @@ export default function ObjectMaskEditPane({
{isLoading ? ( {isLoading ? (
<div className="flex flex-row items-center gap-2"> <div className="flex flex-row items-center gap-2">
<ActivityIndicator /> <ActivityIndicator />
<span>Saving...</span> <span>
<Trans>ui.saving</Trans>
</span>
</div> </div>
) : ( ) : (
"Save" <Trans>ui.save</Trans>
)} )}
</Button> </Button>
</div> </div>

View File

@ -36,6 +36,7 @@ import { reviewQueries } from "@/utils/zoneEdutUtil";
import IconWrapper from "../ui/icon-wrapper"; import IconWrapper from "../ui/icon-wrapper";
import { StatusBarMessagesContext } from "@/context/statusbar-provider"; import { StatusBarMessagesContext } from "@/context/statusbar-provider";
import { buttonVariants } from "../ui/button"; import { buttonVariants } from "../ui/button";
import { Trans } from "react-i18next";
type PolygonItemProps = { type PolygonItemProps = {
polygon: Polygon; polygon: Polygon;
@ -314,7 +315,9 @@ export default function PolygonItem({
}} }}
/> />
</TooltipTrigger> </TooltipTrigger>
<TooltipContent>Edit</TooltipContent> <TooltipContent>
<Trans>ui.edit</Trans>
</TooltipContent>
</Tooltip> </Tooltip>
<Tooltip> <Tooltip>
@ -327,7 +330,9 @@ export default function PolygonItem({
onClick={() => handleCopyCoordinates(index)} onClick={() => handleCopyCoordinates(index)}
/> />
</TooltipTrigger> </TooltipTrigger>
<TooltipContent>Copy coordinates</TooltipContent> <TooltipContent>
<Trans>ui.copyCoordinates</Trans>
</TooltipContent>
</Tooltip> </Tooltip>
<Tooltip> <Tooltip>
@ -341,7 +346,9 @@ export default function PolygonItem({
onClick={() => !isLoading && setDeleteDialogOpen(true)} onClick={() => !isLoading && setDeleteDialogOpen(true)}
/> />
</TooltipTrigger> </TooltipTrigger>
<TooltipContent>Delete</TooltipContent> <TooltipContent>
<Trans>ui.delete</Trans>
</TooltipContent>
</Tooltip> </Tooltip>
</div> </div>
)} )}

View File

@ -70,7 +70,7 @@ export default function ZoneEditPane({
} }
return Object.values(config.cameras) return Object.values(config.cameras)
.filter((conf) => conf.ui.dashboard && conf.enabled) .filter((conf) => conf.ui.dashboard && conf.enabled_in_config)
.sort((aConf, bConf) => aConf.ui.order - bConf.ui.order); .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order);
}, [config]); }, [config]);
@ -104,7 +104,9 @@ export default function ZoneEditPane({
name: z name: z
.string() .string()
.min(2, { .min(2, {
message: "Zone name must be at least 2 characters.", message: t(
"ui.form.message.zoneName.error.mustBeAtLeastTwoCharacters",
),
}) })
.transform((val: string) => val.trim().replace(/\s+/g, "_")) .transform((val: string) => val.trim().replace(/\s+/g, "_"))
.refine( .refine(
@ -112,7 +114,9 @@ export default function ZoneEditPane({
return !cameras.map((cam) => cam.name).includes(value); return !cameras.map((cam) => cam.name).includes(value);
}, },
{ {
message: "Zone name must not be the name of a camera.", message: t(
"ui.form.message.zoneName.error.mustNotBeSameWithCamera",
),
}, },
) )
.refine( .refine(
@ -125,7 +129,7 @@ export default function ZoneEditPane({
return !otherPolygonNames.includes(value); return !otherPolygonNames.includes(value);
}, },
{ {
message: "Zone name already exists on this camera.", message: t("ui.form.message.zoneName.error.alreadyExists"),
}, },
) )
.refine( .refine(
@ -133,27 +137,29 @@ export default function ZoneEditPane({
return !value.includes("."); return !value.includes(".");
}, },
{ {
message: "Zone name must not contain a period.", message: t("ui.form.message.zoneName.error.mustNotContainPeriod"),
}, },
) )
.refine((value: string) => /^[a-zA-Z0-9_-]+$/.test(value), { .refine((value: string) => /^[a-zA-Z0-9_-]+$/.test(value), {
message: "Zone name has an illegal character.", message: t("ui.form.message.zoneName.error.hasIllegalCharacter"),
}), }),
inertia: z.coerce inertia: z.coerce
.number() .number()
.min(1, { .min(1, {
message: "Inertia must be above 0.", message: t("ui.form.message.inertia.error.mustBeAboveZero"),
}) })
.or(z.literal("")), .or(z.literal("")),
loitering_time: z.coerce loitering_time: z.coerce
.number() .number()
.min(0, { .min(0, {
message: "Loitering time must be greater than or equal to 0.", message: t(
"ui.form.message.loiteringTime.error.mustBeGreaterOrEqualZero",
),
}) })
.optional() .optional()
.or(z.literal("")), .or(z.literal("")),
isFinished: z.boolean().refine(() => polygon?.isFinished === true, { isFinished: z.boolean().refine(() => polygon?.isFinished === true, {
message: "The polygon drawing must be finished before saving.", message: t("ui.form.message.polygonDrawing.error.mustBeFinished"),
}), }),
objects: z.array(z.string()).optional(), objects: z.array(z.string()).optional(),
review_alerts: z.boolean().default(false).optional(), review_alerts: z.boolean().default(false).optional(),
@ -162,28 +168,28 @@ export default function ZoneEditPane({
lineA: z.coerce lineA: z.coerce
.number() .number()
.min(0.1, { .min(0.1, {
message: "Distance must be greater than or equal to 0.1", message: t("ui.form.message.distance.error"),
}) })
.optional() .optional()
.or(z.literal("")), .or(z.literal("")),
lineB: z.coerce lineB: z.coerce
.number() .number()
.min(0.1, { .min(0.1, {
message: "Distance must be greater than or equal to 0.1", message: t("ui.form.message.distance.error"),
}) })
.optional() .optional()
.or(z.literal("")), .or(z.literal("")),
lineC: z.coerce lineC: z.coerce
.number() .number()
.min(0.1, { .min(0.1, {
message: "Distance must be greater than or equal to 0.1", message: t("ui.form.message.distance.error"),
}) })
.optional() .optional()
.or(z.literal("")), .or(z.literal("")),
lineD: z.coerce lineD: z.coerce
.number() .number()
.min(0.1, { .min(0.1, {
message: "Distance must be greater than or equal to 0.1", message: t("ui.form.message.distance.error"),
}) })
.optional() .optional()
.or(z.literal("")), .or(z.literal("")),
@ -203,7 +209,7 @@ export default function ZoneEditPane({
return true; return true;
}, },
{ {
message: "All distance fields must be filled to use speed estimation.", message: t("ui.form.message.distance.error.mustBeFilled"),
path: ["speedEstimation"], path: ["speedEstimation"],
}, },
) )
@ -217,8 +223,9 @@ export default function ZoneEditPane({
); );
}, },
{ {
message: message: t(
"Zones with loitering times greater than 0 should not be used with speed estimation.", "ui.settingView.masksAndZonesSettings.zones.speedThreshold.toast.error.loiteringTimeError",
),
path: ["loitering_time"], path: ["loitering_time"],
}, },
); );
@ -257,7 +264,9 @@ export default function ZoneEditPane({
polygon.points.length !== 4 polygon.points.length !== 4
) { ) {
toast.error( toast.error(
"Speed estimation has been disabled for this zone. Zones with speed estimation must have exactly 4 points.", t(
"ui.settingView.masksAndZonesSettings.zones.speedThreshold.toast.error.pointLengthError",
),
); );
form.setValue("speedEstimation", false); form.setValue("speedEstimation", false);
} }
@ -321,7 +330,7 @@ export default function ZoneEditPane({
// Wait for the config to be updated // Wait for the config to be updated
mutatedConfig = await updateConfig(); mutatedConfig = await updateConfig();
} catch (error) { } catch (error) {
toast.error(`Failed to save config changes.`, { toast.error(t("ui.toast.save.error.noMessage"), {
position: "top-center", position: "top-center",
}); });
return; return;
@ -403,21 +412,28 @@ export default function ZoneEditPane({
.then((res) => { .then((res) => {
if (res.status === 200) { if (res.status === 200) {
toast.success( toast.success(
`Zone (${zoneName}) has been saved. Restart Frigate to apply changes.`, t("ui.settingView.masksAndZonesSettings.zones.toast.success", {
zoneName,
}),
{ {
position: "top-center", position: "top-center",
}, },
); );
updateConfig(); updateConfig();
} else { } else {
toast.error(`Failed to save config changes: ${res.statusText}`, { toast.error(
position: "top-center", t("ui.toast.save.error", { errorMessage: res.statusText }),
}); {
position: "top-center",
},
);
} }
}) })
.catch((error) => { .catch((error) => {
toast.error( toast.error(
`Failed to save config changes: ${error.response.data.message}`, t("ui.toast.save.error", {
errorMessage: error.response.data.message,
}),
{ position: "top-center" }, { position: "top-center" },
); );
}) })
@ -454,7 +470,7 @@ export default function ZoneEditPane({
useEffect(() => { useEffect(() => {
document.title = t( document.title = t(
"ui.settingView.masksAndZonesSettings.zone.documentTitle", "ui.settingView.masksAndZonesSettings.zones.documentTitle",
); );
}, []); }, []);
@ -467,19 +483,19 @@ export default function ZoneEditPane({
<Toaster position="top-center" closeButton={true} /> <Toaster position="top-center" closeButton={true} />
<Heading as="h3" className="my-2"> <Heading as="h3" className="my-2">
{polygon.name.length {polygon.name.length
? t("ui.settingView.masksAndZonesSettings.zone.edit") ? t("ui.settingView.masksAndZonesSettings.zones.edit")
: t("ui.settingView.masksAndZonesSettings.zone.add")} : t("ui.settingView.masksAndZonesSettings.zones.add")}
</Heading> </Heading>
<div className="my-2 text-sm text-muted-foreground"> <div className="my-2 text-sm text-muted-foreground">
<p> <p>
<Trans>ui.settingView.masksAndZonesSettings.zone.desc</Trans> <Trans>ui.settingView.masksAndZonesSettings.zones.desc</Trans>
</p> </p>
</div> </div>
<Separator className="my-3 bg-secondary" /> <Separator className="my-3 bg-secondary" />
{polygons && activePolygonIndex !== undefined && ( {polygons && activePolygonIndex !== undefined && (
<div className="my-2 flex w-full flex-row justify-between text-sm"> <div className="my-2 flex w-full flex-row justify-between text-sm">
<div className="my-1 inline-flex"> <div className="my-1 inline-flex">
{t("ui.settingView.masksAndZonesSettings.zone.point", { {t("ui.settingView.masksAndZonesSettings.zones.point", {
count: polygons[activePolygonIndex].points.length, count: polygons[activePolygonIndex].points.length,
})} })}
@ -498,7 +514,7 @@ export default function ZoneEditPane({
)} )}
<div className="mb-3 text-sm text-muted-foreground"> <div className="mb-3 text-sm text-muted-foreground">
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone.clickDrawPolygon ui.settingView.masksAndZonesSettings.zones.clickDrawPolygon
</Trans> </Trans>
</div> </div>
@ -512,20 +528,20 @@ export default function ZoneEditPane({
render={({ field }) => ( render={({ field }) => (
<FormItem> <FormItem>
<FormLabel> <FormLabel>
<Trans>ui.settingView.masksAndZonesSettings.zone.name</Trans> <Trans>ui.settingView.masksAndZonesSettings.zones.name</Trans>
</FormLabel> </FormLabel>
<FormControl> <FormControl>
<Input <Input
className="text-md w-full border border-input bg-background p-2 hover:bg-accent hover:text-accent-foreground dark:[color-scheme:dark]" className="text-md w-full border border-input bg-background p-2 hover:bg-accent hover:text-accent-foreground dark:[color-scheme:dark]"
placeholder={t( placeholder={t(
"ui.settingView.masksAndZonesSettings.zone.name.inputPlaceHolder", "ui.settingView.masksAndZonesSettings.zones.name.inputPlaceHolder",
)} )}
{...field} {...field}
/> />
</FormControl> </FormControl>
<FormDescription> <FormDescription>
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone.name.tips ui.settingView.masksAndZonesSettings.zones.name.tips
</Trans> </Trans>
</FormDescription> </FormDescription>
<FormMessage /> <FormMessage />
@ -540,7 +556,7 @@ export default function ZoneEditPane({
<FormItem> <FormItem>
<FormLabel> <FormLabel>
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone.inertia ui.settingView.masksAndZonesSettings.zones.inertia
</Trans> </Trans>
</FormLabel> </FormLabel>
<FormControl> <FormControl>
@ -552,7 +568,7 @@ export default function ZoneEditPane({
</FormControl> </FormControl>
<FormDescription> <FormDescription>
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone.inertia.desc ui.settingView.masksAndZonesSettings.zones.inertia.desc
</Trans> </Trans>
</FormDescription> </FormDescription>
<FormMessage /> <FormMessage />
@ -567,7 +583,7 @@ export default function ZoneEditPane({
<FormItem> <FormItem>
<FormLabel> <FormLabel>
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone.loiteringTime ui.settingView.masksAndZonesSettings.zones.loiteringTime
</Trans> </Trans>
</FormLabel> </FormLabel>
<FormControl> <FormControl>
@ -579,7 +595,7 @@ export default function ZoneEditPane({
</FormControl> </FormControl>
<FormDescription> <FormDescription>
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone.loiteringTime.desc ui.settingView.masksAndZonesSettings.zones.loiteringTime.desc
</Trans> </Trans>
</FormDescription> </FormDescription>
<FormMessage /> <FormMessage />
@ -589,11 +605,11 @@ export default function ZoneEditPane({
<Separator className="my-2 flex bg-secondary" /> <Separator className="my-2 flex bg-secondary" />
<FormItem> <FormItem>
<FormLabel> <FormLabel>
<Trans>ui.settingView.masksAndZonesSettings.zone.objects</Trans> <Trans>ui.settingView.masksAndZonesSettings.zones.objects</Trans>
</FormLabel> </FormLabel>
<FormDescription> <FormDescription>
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone.objects.desc ui.settingView.masksAndZonesSettings.zones.objects.desc
</Trans> </Trans>
</FormDescription> </FormDescription>
<ZoneObjectSelector <ZoneObjectSelector
@ -629,7 +645,7 @@ export default function ZoneEditPane({
htmlFor="allLabels" htmlFor="allLabels"
> >
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone.speedEstimation ui.settingView.masksAndZonesSettings.zones.speedEstimation
</Trans> </Trans>
</FormLabel> </FormLabel>
<Switch <Switch
@ -643,7 +659,7 @@ export default function ZoneEditPane({
) { ) {
toast.error( toast.error(
t( t(
"ui.settingView.masksAndZonesSettings.zone.speedEstimation.pointLengthError", "ui.settingView.masksAndZonesSettings.zones.speedEstimation.pointLengthError",
), ),
); );
return; return;
@ -654,7 +670,7 @@ export default function ZoneEditPane({
if (checked && loiteringTime && loiteringTime > 0) { if (checked && loiteringTime && loiteringTime > 0) {
toast.error( toast.error(
t( t(
"ui.settingView.masksAndZonesSettings.zone.speedEstimation.loiteringTimeError", "ui.settingView.masksAndZonesSettings.zones.speedEstimation.loiteringTimeError",
), ),
); );
} }
@ -666,7 +682,7 @@ export default function ZoneEditPane({
</div> </div>
<FormDescription> <FormDescription>
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone.speedEstimation.desc ui.settingView.masksAndZonesSettings.zones.speedEstimation.desc
</Trans> </Trans>
</FormDescription> </FormDescription>
<FormMessage /> <FormMessage />
@ -779,8 +795,16 @@ export default function ZoneEditPane({
render={({ field }) => ( render={({ field }) => (
<FormItem> <FormItem>
<FormLabel> <FormLabel>
Speed Threshold ( <Trans
{config?.ui.unit_system == "imperial" ? "mph" : "kph"}) values={{
unit:
config?.ui.unit_system == "imperial"
? t("ui.unit.speed.mph")
: t("ui.unit.speed.kph"),
}}
>
ui.settingView.masksAndZonesSettings.zones.speedThreshold
</Trans>
</FormLabel> </FormLabel>
<FormControl> <FormControl>
<Input <Input
@ -789,8 +813,9 @@ export default function ZoneEditPane({
/> />
</FormControl> </FormControl>
<FormDescription> <FormDescription>
Specifies a minimum speed for objects to be considered <Trans>
in this zone. ui.settingView.masksAndZonesSettings.zones.speedThreshold.desc
</Trans>
</FormDescription> </FormDescription>
<FormMessage /> <FormMessage />
</FormItem> </FormItem>
@ -814,7 +839,7 @@ export default function ZoneEditPane({
aria-label="Cancel" aria-label="Cancel"
onClick={onCancel} onClick={onCancel}
> >
Cancel <Trans>ui.cancel</Trans>
</Button> </Button>
<Button <Button
variant="select" variant="select"
@ -826,10 +851,12 @@ export default function ZoneEditPane({
{isLoading ? ( {isLoading ? (
<div className="flex flex-row items-center gap-2"> <div className="flex flex-row items-center gap-2">
<ActivityIndicator /> <ActivityIndicator />
<span>Saving...</span> <span>
<Trans>ui.saving</Trans>
</span>
</div> </div>
) : ( ) : (
"Save" <Trans>ui.save</Trans>
)} )}
</Button> </Button>
</div> </div>
@ -909,7 +936,7 @@ export function ZoneObjectSelector({
<div className="scrollbar-container h-auto overflow-y-auto overflow-x-hidden"> <div className="scrollbar-container h-auto overflow-y-auto overflow-x-hidden">
<div className="my-2.5 flex items-center justify-between"> <div className="my-2.5 flex items-center justify-between">
<Label className="cursor-pointer text-primary" htmlFor="allLabels"> <Label className="cursor-pointer text-primary" htmlFor="allLabels">
<Trans>ui.settingView.masksAndZonesSettings.zone.allObjects</Trans> <Trans>ui.settingView.masksAndZonesSettings.zones.allObjects</Trans>
</Label> </Label>
<Switch <Switch
className="ml-1" className="ml-1"

View File

@ -1,4 +1,5 @@
import { import {
useEnabledState,
useFrigateEvents, useFrigateEvents,
useInitialCameraState, useInitialCameraState,
useMotionActivity, useMotionActivity,
@ -15,6 +16,7 @@ import useSWR from "swr";
import { getAttributeLabels } from "@/utils/iconUtil"; import { getAttributeLabels } from "@/utils/iconUtil";
type useCameraActivityReturn = { type useCameraActivityReturn = {
enabled?: boolean;
activeTracking: boolean; activeTracking: boolean;
activeMotion: boolean; activeMotion: boolean;
objects: ObjectType[]; objects: ObjectType[];
@ -56,6 +58,7 @@ export function useCameraActivity(
[objects], [objects],
); );
const { payload: cameraEnabled } = useEnabledState(camera.name);
const { payload: detectingMotion } = useMotionActivity(camera.name); const { payload: detectingMotion } = useMotionActivity(camera.name);
const { payload: event } = useFrigateEvents(); const { payload: event } = useFrigateEvents();
const updatedEvent = useDeepMemo(event); const updatedEvent = useDeepMemo(event);
@ -145,12 +148,17 @@ export function useCameraActivity(
return cameras[camera.name].camera_fps == 0 && stats["service"].uptime > 60; return cameras[camera.name].camera_fps == 0 && stats["service"].uptime > 60;
}, [camera, stats]); }, [camera, stats]);
const isCameraEnabled = cameraEnabled ? cameraEnabled === "ON" : undefined;
return { return {
activeTracking: hasActiveObjects, enabled: isCameraEnabled,
activeMotion: detectingMotion activeTracking: isCameraEnabled ? hasActiveObjects : false,
? detectingMotion === "ON" activeMotion: isCameraEnabled
: updatedCameraState?.motion === true, ? detectingMotion
objects, ? detectingMotion === "ON"
: updatedCameraState?.motion === true
: false,
objects: isCameraEnabled ? objects : [],
offline, offline,
}; };
} }

View File

@ -106,12 +106,14 @@ function Live() {
) { ) {
const group = config.camera_groups[cameraGroup]; const group = config.camera_groups[cameraGroup];
return Object.values(config.cameras) return Object.values(config.cameras)
.filter((conf) => conf.enabled && group.cameras.includes(conf.name)) .filter(
(conf) => conf.enabled_in_config && group.cameras.includes(conf.name),
)
.sort((aConf, bConf) => aConf.ui.order - bConf.ui.order); .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order);
} }
return Object.values(config.cameras) return Object.values(config.cameras)
.filter((conf) => conf.ui.dashboard && conf.enabled) .filter((conf) => conf.ui.dashboard && conf.enabled_in_config)
.sort((aConf, bConf) => aConf.ui.order - bConf.ui.order); .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order);
}, [config, cameraGroup]); }, [config, cameraGroup]);

View File

@ -40,6 +40,7 @@ import UiSettingsView from "@/views/settings/UiSettingsView";
import { t } from "i18next"; import { t } from "i18next";
import { useSearchEffect } from "@/hooks/use-overlay-state"; import { useSearchEffect } from "@/hooks/use-overlay-state";
import { useSearchParams } from "react-router-dom"; import { useSearchParams } from "react-router-dom";
import { useInitialCameraState } from "@/api/ws";
const allSettingsViews = [ const allSettingsViews = [
"uiSettings", "uiSettings",
@ -72,12 +73,33 @@ export default function Settings() {
} }
return Object.values(config.cameras) return Object.values(config.cameras)
.filter((conf) => conf.ui.dashboard && conf.enabled) .filter((conf) => conf.ui.dashboard && conf.enabled_in_config)
.sort((aConf, bConf) => aConf.ui.order - bConf.ui.order); .sort((aConf, bConf) => aConf.ui.order - bConf.ui.order);
}, [config]); }, [config]);
const [selectedCamera, setSelectedCamera] = useState<string>(""); const [selectedCamera, setSelectedCamera] = useState<string>("");
const { payload: allCameraStates } = useInitialCameraState(
cameras.length > 0 ? cameras[0].name : "",
true,
);
const cameraEnabledStates = useMemo(() => {
const states: Record<string, boolean> = {};
if (allCameraStates) {
Object.entries(allCameraStates).forEach(([camName, state]) => {
states[camName] = state.config?.enabled ?? false;
});
}
// fallback to config if ws data isnt available yet
cameras.forEach((cam) => {
if (!(cam.name in states)) {
states[cam.name] = cam.enabled;
}
});
return states;
}, [allCameraStates, cameras]);
const [filterZoneMask, setFilterZoneMask] = useState<PolygonType[]>(); const [filterZoneMask, setFilterZoneMask] = useState<PolygonType[]>();
const handleDialog = useCallback( const handleDialog = useCallback(
@ -92,10 +114,25 @@ export default function Settings() {
); );
useEffect(() => { useEffect(() => {
if (cameras.length > 0 && selectedCamera === "") { if (cameras.length > 0) {
setSelectedCamera(cameras[0].name); if (!selectedCamera) {
// Set to first enabled camera initially if no selection
const firstEnabledCamera =
cameras.find((cam) => cameraEnabledStates[cam.name]) || cameras[0];
setSelectedCamera(firstEnabledCamera.name);
} else if (
!cameraEnabledStates[selectedCamera] &&
page !== "camera settings"
) {
// Switch to first enabled camera if current one is disabled, unless on "camera settings" page
const firstEnabledCamera =
cameras.find((cam) => cameraEnabledStates[cam.name]) || cameras[0];
if (firstEnabledCamera.name !== selectedCamera) {
setSelectedCamera(firstEnabledCamera.name);
}
}
} }
}, [cameras, selectedCamera]); }, [cameras, selectedCamera, cameraEnabledStates, page]);
useEffect(() => { useEffect(() => {
if (tabsRef.current) { if (tabsRef.current) {
@ -180,6 +217,8 @@ export default function Settings() {
allCameras={cameras} allCameras={cameras}
selectedCamera={selectedCamera} selectedCamera={selectedCamera}
setSelectedCamera={setSelectedCamera} setSelectedCamera={setSelectedCamera}
cameraEnabledStates={cameraEnabledStates}
currentPage={page}
/> />
</div> </div>
)} )}
@ -247,17 +286,21 @@ type CameraSelectButtonProps = {
allCameras: CameraConfig[]; allCameras: CameraConfig[];
selectedCamera: string; selectedCamera: string;
setSelectedCamera: React.Dispatch<React.SetStateAction<string>>; setSelectedCamera: React.Dispatch<React.SetStateAction<string>>;
cameraEnabledStates: Record<string, boolean>;
currentPage: SettingsType;
}; };
function CameraSelectButton({ function CameraSelectButton({
allCameras, allCameras,
selectedCamera, selectedCamera,
setSelectedCamera, setSelectedCamera,
cameraEnabledStates,
currentPage,
}: CameraSelectButtonProps) { }: CameraSelectButtonProps) {
const [open, setOpen] = useState(false); const [open, setOpen] = useState(false);
if (!allCameras.length) { if (!allCameras.length) {
return; return null;
} }
const trigger = ( const trigger = (
@ -286,19 +329,24 @@ function CameraSelectButton({
)} )}
<div className="scrollbar-container mb-5 h-auto max-h-[80dvh] overflow-y-auto overflow-x-hidden p-4 md:mb-1"> <div className="scrollbar-container mb-5 h-auto max-h-[80dvh] overflow-y-auto overflow-x-hidden p-4 md:mb-1">
<div className="flex flex-col gap-2.5"> <div className="flex flex-col gap-2.5">
{allCameras.map((item) => ( {allCameras.map((item) => {
<FilterSwitch const isEnabled = cameraEnabledStates[item.name];
key={item.name} const isCameraSettingsPage = currentPage === "camera settings";
isChecked={item.name === selectedCamera} return (
label={item.name.replaceAll("_", " ")} <FilterSwitch
onCheckedChange={(isChecked) => { key={item.name}
if (isChecked) { isChecked={item.name === selectedCamera}
setSelectedCamera(item.name); label={item.name.replaceAll("_", " ")}
setOpen(false); onCheckedChange={(isChecked) => {
} if (isChecked && (isEnabled || isCameraSettingsPage)) {
}} setSelectedCamera(item.name);
/> setOpen(false);
))} }
}}
disabled={!isEnabled && !isCameraSettingsPage}
/>
);
})}
</div> </div>
</div> </div>
</> </>

View File

@ -57,6 +57,7 @@ export interface CameraConfig {
width: number; width: number;
}; };
enabled: boolean; enabled: boolean;
enabled_in_config: boolean;
ffmpeg: { ffmpeg: {
global_args: string[]; global_args: string[];
hwaccel_args: string; hwaccel_args: string;

View File

@ -52,6 +52,7 @@ export type ObjectType = {
}; };
export interface FrigateCameraState { export interface FrigateCameraState {
enabled: boolean;
motion: boolean; motion: boolean;
objects: ObjectType[]; objects: ObjectType[];
} }

View File

@ -397,10 +397,12 @@ export default function DraggableGridLayout({
const initialVolumeStates: VolumeState = {}; const initialVolumeStates: VolumeState = {};
Object.entries(allGroupsStreamingSettings).forEach(([_, groupSettings]) => { Object.entries(allGroupsStreamingSettings).forEach(([_, groupSettings]) => {
Object.entries(groupSettings).forEach(([camera, cameraSettings]) => { if (groupSettings) {
initialAudioStates[camera] = cameraSettings.playAudio ?? false; Object.entries(groupSettings).forEach(([camera, cameraSettings]) => {
initialVolumeStates[camera] = cameraSettings.volume ?? 1; initialAudioStates[camera] = cameraSettings.playAudio ?? false;
}); initialVolumeStates[camera] = cameraSettings.volume ?? 1;
});
}
}); });
setAudioStates(initialAudioStates); setAudioStates(initialAudioStates);

View File

@ -2,6 +2,7 @@ import {
useAudioState, useAudioState,
useAutotrackingState, useAutotrackingState,
useDetectState, useDetectState,
useEnabledState,
usePtzCommand, usePtzCommand,
useRecordingsState, useRecordingsState,
useSnapshotsState, useSnapshotsState,
@ -82,6 +83,8 @@ import {
LuHistory, LuHistory,
LuInfo, LuInfo,
LuPictureInPicture, LuPictureInPicture,
LuPower,
LuPowerOff,
LuVideo, LuVideo,
LuVideoOff, LuVideoOff,
LuX, LuX,
@ -187,6 +190,10 @@ export default function LiveCameraView({
); );
}, [cameraMetadata]); }, [cameraMetadata]);
// camera enabled state
const { payload: enabledState } = useEnabledState(camera.name);
const cameraEnabled = enabledState === "ON";
// click overlay for ptzs // click overlay for ptzs
const [clickOverlay, setClickOverlay] = useState(false); const [clickOverlay, setClickOverlay] = useState(false);
@ -482,6 +489,7 @@ export default function LiveCameraView({
setPip(false); setPip(false);
} }
}} }}
disabled={!cameraEnabled}
/> />
)} )}
{supports2WayTalk && ( {supports2WayTalk && (
@ -493,11 +501,11 @@ export default function LiveCameraView({
title={`${mic ? "Disable" : "Enable"} Two Way Talk`} title={`${mic ? "Disable" : "Enable"} Two Way Talk`}
onClick={() => { onClick={() => {
setMic(!mic); setMic(!mic);
// Turn on audio when enabling the mic if audio is currently off
if (!mic && !audio) { if (!mic && !audio) {
setAudio(true); setAudio(true);
} }
}} }}
disabled={!cameraEnabled}
/> />
)} )}
{supportsAudioOutput && preferredLiveMode != "jsmpeg" && ( {supportsAudioOutput && preferredLiveMode != "jsmpeg" && (
@ -508,6 +516,7 @@ export default function LiveCameraView({
isActive={audio ?? false} isActive={audio ?? false}
title={`${audio ? "Disable" : "Enable"} Camera Audio`} title={`${audio ? "Disable" : "Enable"} Camera Audio`}
onClick={() => setAudio(!audio)} onClick={() => setAudio(!audio)}
disabled={!cameraEnabled}
/> />
)} )}
<FrigateCameraFeatures <FrigateCameraFeatures
@ -529,6 +538,7 @@ export default function LiveCameraView({
setLowBandwidth={setLowBandwidth} setLowBandwidth={setLowBandwidth}
supportsAudioOutput={supportsAudioOutput} supportsAudioOutput={supportsAudioOutput}
supports2WayTalk={supports2WayTalk} supports2WayTalk={supports2WayTalk}
cameraEnabled={cameraEnabled}
/> />
</div> </div>
</TooltipProvider> </TooltipProvider>
@ -925,6 +935,7 @@ type FrigateCameraFeaturesProps = {
setLowBandwidth: React.Dispatch<React.SetStateAction<boolean>>; setLowBandwidth: React.Dispatch<React.SetStateAction<boolean>>;
supportsAudioOutput: boolean; supportsAudioOutput: boolean;
supports2WayTalk: boolean; supports2WayTalk: boolean;
cameraEnabled: boolean;
}; };
function FrigateCameraFeatures({ function FrigateCameraFeatures({
camera, camera,
@ -943,10 +954,14 @@ function FrigateCameraFeatures({
setLowBandwidth, setLowBandwidth,
supportsAudioOutput, supportsAudioOutput,
supports2WayTalk, supports2WayTalk,
cameraEnabled,
}: FrigateCameraFeaturesProps) { }: FrigateCameraFeaturesProps) {
const { payload: detectState, send: sendDetect } = useDetectState( const { payload: detectState, send: sendDetect } = useDetectState(
camera.name, camera.name,
); );
const { payload: enabledState, send: sendEnabled } = useEnabledState(
camera.name,
);
const { payload: recordState, send: sendRecord } = useRecordingsState( const { payload: recordState, send: sendRecord } = useRecordingsState(
camera.name, camera.name,
); );
@ -1054,6 +1069,15 @@ function FrigateCameraFeatures({
if (isDesktop || isTablet) { if (isDesktop || isTablet) {
return ( return (
<> <>
<CameraFeatureToggle
className="p-2 md:p-0"
variant={fullscreen ? "overlay" : "primary"}
Icon={enabledState == "ON" ? LuPower : LuPowerOff}
isActive={enabledState == "ON"}
title={`${enabledState == "ON" ? "Disable" : "Enable"} Camera`}
onClick={() => sendEnabled(enabledState == "ON" ? "OFF" : "ON")}
disabled={false}
/>
<CameraFeatureToggle <CameraFeatureToggle
className="p-2 md:p-0" className="p-2 md:p-0"
variant={fullscreen ? "overlay" : "primary"} variant={fullscreen ? "overlay" : "primary"}
@ -1065,6 +1089,7 @@ function FrigateCameraFeatures({
: t("ui.live.detect.enable") : t("ui.live.detect.enable")
} }
onClick={() => sendDetect(detectState == "ON" ? "OFF" : "ON")} onClick={() => sendDetect(detectState == "ON" ? "OFF" : "ON")}
disabled={!cameraEnabled}
/> />
<CameraFeatureToggle <CameraFeatureToggle
className="p-2 md:p-0" className="p-2 md:p-0"
@ -1077,6 +1102,7 @@ function FrigateCameraFeatures({
: t("ui.live.recording.enable") : t("ui.live.recording.enable")
} }
onClick={() => sendRecord(recordState == "ON" ? "OFF" : "ON")} onClick={() => sendRecord(recordState == "ON" ? "OFF" : "ON")}
disabled={!cameraEnabled}
/> />
<CameraFeatureToggle <CameraFeatureToggle
className="p-2 md:p-0" className="p-2 md:p-0"
@ -1089,6 +1115,7 @@ function FrigateCameraFeatures({
: t("ui.live.snapshots.enable") : t("ui.live.snapshots.enable")
} }
onClick={() => sendSnapshot(snapshotState == "ON" ? "OFF" : "ON")} onClick={() => sendSnapshot(snapshotState == "ON" ? "OFF" : "ON")}
disabled={!cameraEnabled}
/> />
{audioDetectEnabled && ( {audioDetectEnabled && (
<CameraFeatureToggle <CameraFeatureToggle
@ -1102,6 +1129,7 @@ function FrigateCameraFeatures({
: t("ui.live.audioDetect.enable") : t("ui.live.audioDetect.enable")
} }
onClick={() => sendAudio(audioState == "ON" ? "OFF" : "ON")} onClick={() => sendAudio(audioState == "ON" ? "OFF" : "ON")}
disabled={!cameraEnabled}
/> />
)} )}
{autotrackingEnabled && ( {autotrackingEnabled && (
@ -1118,6 +1146,7 @@ function FrigateCameraFeatures({
onClick={() => onClick={() =>
sendAutotracking(autotrackingState == "ON" ? "OFF" : "ON") sendAutotracking(autotrackingState == "ON" ? "OFF" : "ON")
} }
disabled={!cameraEnabled}
/> />
)} )}
<CameraFeatureToggle <CameraFeatureToggle
@ -1132,6 +1161,7 @@ function FrigateCameraFeatures({
"ui.live.manualRecording." + (isRecording ? "stop" : "start"), "ui.live.manualRecording." + (isRecording ? "stop" : "start"),
)} )}
onClick={handleEventButtonClick} onClick={handleEventButtonClick}
disabled={!cameraEnabled}
/> />
<DropdownMenu modal={false}> <DropdownMenu modal={false}>
@ -1406,6 +1436,13 @@ function FrigateCameraFeatures({
</DrawerTrigger> </DrawerTrigger>
<DrawerContent className="rounded-2xl px-2 py-4"> <DrawerContent className="rounded-2xl px-2 py-4">
<div className="mt-2 flex flex-col gap-2"> <div className="mt-2 flex flex-col gap-2">
<FilterSwitch
label="Camera Enabled"
isChecked={enabledState == "ON"}
onCheckedChange={() =>
sendEnabled(enabledState == "ON" ? "OFF" : "ON")
}
/>
<FilterSwitch <FilterSwitch
label="Object Detection" label="Object Detection"
isChecked={detectState == "ON"} isChecked={detectState == "ON"}

View File

@ -269,10 +269,12 @@ export default function LiveDashboardView({
const initialVolumeStates: VolumeState = {}; const initialVolumeStates: VolumeState = {};
Object.entries(allGroupsStreamingSettings).forEach(([_, groupSettings]) => { Object.entries(allGroupsStreamingSettings).forEach(([_, groupSettings]) => {
Object.entries(groupSettings).forEach(([camera, cameraSettings]) => { if (groupSettings) {
initialAudioStates[camera] = cameraSettings.playAudio ?? false; Object.entries(groupSettings).forEach(([camera, cameraSettings]) => {
initialVolumeStates[camera] = cameraSettings.volume ?? 1; initialAudioStates[camera] = cameraSettings.playAudio ?? false;
}); initialVolumeStates[camera] = cameraSettings.volume ?? 1;
});
}
}); });
setAudioStates(initialAudioStates); setAudioStates(initialAudioStates);

View File

@ -31,7 +31,7 @@ import { Trans } from "react-i18next";
import { t } from "i18next"; import { t } from "i18next";
import { Switch } from "@/components/ui/switch"; import { Switch } from "@/components/ui/switch";
import { Label } from "@/components/ui/label"; import { Label } from "@/components/ui/label";
import { useAlertsState, useDetectionsState } from "@/api/ws"; import { useAlertsState, useDetectionsState, useEnabledState } from "@/api/ws";
type CameraSettingsViewProps = { type CameraSettingsViewProps = {
selectedCamera: string; selectedCamera: string;
@ -110,6 +110,8 @@ export default function CameraSettingsView({
const watchedAlertsZones = form.watch("alerts_zones"); const watchedAlertsZones = form.watch("alerts_zones");
const watchedDetectionsZones = form.watch("detections_zones"); const watchedDetectionsZones = form.watch("detections_zones");
const { payload: enabledState, send: sendEnabled } =
useEnabledState(selectedCamera);
const { payload: alertsState, send: sendAlerts } = const { payload: alertsState, send: sendAlerts } =
useAlertsState(selectedCamera); useAlertsState(selectedCamera);
const { payload: detectionsState, send: sendDetections } = const { payload: detectionsState, send: sendDetections } =
@ -158,21 +160,28 @@ export default function CameraSettingsView({
.then((res) => { .then((res) => {
if (res.status === 200) { if (res.status === 200) {
toast.success( toast.success(
`Review classification configuration has been saved. Restart Frigate to apply changes.`, t(
"ui.settingView.cameraSettings.reviewClassification.toast.success",
),
{ {
position: "top-center", position: "top-center",
}, },
); );
updateConfig(); updateConfig();
} else { } else {
toast.error(`Failed to save config changes: ${res.statusText}`, { toast.error(
position: "top-center", t("ui.toast.save.error", { errorMessage: res.statusText }),
}); {
position: "top-center",
},
);
} }
}) })
.catch((error) => { .catch((error) => {
toast.error( toast.error(
`Failed to save config changes: ${error.response.data.message}`, t("ui.toast.save.error", {
errorMessage: error.response.data.message,
}),
{ position: "top-center" }, { position: "top-center" },
); );
}) })
@ -254,6 +263,30 @@ export default function CameraSettingsView({
<Separator className="my-2 flex bg-secondary" /> <Separator className="my-2 flex bg-secondary" />
<Heading as="h4" className="my-2">
<Trans>ui.settingView.cameraSettings.streams</Trans>
</Heading>
<div className="flex flex-row items-center">
<Switch
id="camera-enabled"
className="mr-3"
checked={enabledState === "ON"}
onCheckedChange={(isChecked) => {
sendEnabled(isChecked ? "ON" : "OFF");
}}
/>
<div className="space-y-0.5">
<Label htmlFor="camera-enabled">
<Trans>ui.enabled</Trans>
</Label>
</div>
</div>
<div className="mt-3 text-sm text-muted-foreground">
<Trans>ui.settingView.cameraSettings.streams.desc</Trans>
</div>
<Separator className="mb-2 mt-4 flex bg-secondary" />
<Heading as="h4" className="my-2"> <Heading as="h4" className="my-2">
<Trans>ui.settingView.cameraSettings.review</Trans> <Trans>ui.settingView.cameraSettings.review</Trans>
</Heading> </Heading>
@ -349,7 +382,9 @@ export default function CameraSettingsView({
<> <>
<div className="mb-2"> <div className="mb-2">
<FormLabel className="flex flex-row items-center text-base"> <FormLabel className="flex flex-row items-center text-base">
Alerts{" "} <Trans>
ui.settingView.cameraSettings.review.alerts
</Trans>
<MdCircle className="ml-3 size-2 text-severity_alert" /> <MdCircle className="ml-3 size-2 text-severity_alert" />
</FormLabel> </FormLabel>
<FormDescription> <FormDescription>
@ -452,12 +487,16 @@ export default function CameraSettingsView({
<> <>
<div className="mb-2"> <div className="mb-2">
<FormLabel className="flex flex-row items-center text-base"> <FormLabel className="flex flex-row items-center text-base">
Detections{" "} <Trans>
ui.settingView.cameraSettings.review.detections
</Trans>
<MdCircle className="ml-3 size-2 text-severity_detection" /> <MdCircle className="ml-3 size-2 text-severity_detection" />
</FormLabel> </FormLabel>
{selectDetections && ( {selectDetections && (
<FormDescription> <FormDescription>
Select zones for Detections <Trans>
ui.settingView.cameraSettings.reviewClassification.selectDetectionsZones
</Trans>
</FormDescription> </FormDescription>
)} )}
</div> </div>
@ -520,7 +559,9 @@ export default function CameraSettingsView({
htmlFor="select-detections" htmlFor="select-detections"
className="text-sm font-medium leading-none peer-disabled:cursor-not-allowed peer-disabled:opacity-70" className="text-sm font-medium leading-none peer-disabled:cursor-not-allowed peer-disabled:opacity-70"
> >
Limit detections to specific zones <Trans>
ui.settingView.cameraSettings.reviewClassification.limitDetections
</Trans>
</label> </label>
</div> </div>
</div> </div>

View File

@ -492,7 +492,7 @@ export default function MasksAndZonesView({
<HoverCardTrigger asChild> <HoverCardTrigger asChild>
<div className="text-md cursor-default"> <div className="text-md cursor-default">
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone ui.settingView.masksAndZonesSettings.zones
</Trans> </Trans>
</div> </div>
</HoverCardTrigger> </HoverCardTrigger>
@ -500,7 +500,7 @@ export default function MasksAndZonesView({
<div className="my-2 flex flex-col gap-2 text-sm text-primary-variant"> <div className="my-2 flex flex-col gap-2 text-sm text-primary-variant">
<p> <p>
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone.desc ui.settingView.masksAndZonesSettings.zones.desc
</Trans> </Trans>
</p> </p>
<div className="flex items-center text-primary"> <div className="flex items-center text-primary">
@ -511,7 +511,7 @@ export default function MasksAndZonesView({
className="inline" className="inline"
> >
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone.desc.documentation ui.settingView.masksAndZonesSettings.zones.desc.documentation
</Trans>{" "} </Trans>{" "}
<LuExternalLink className="ml-2 inline-flex size-3" /> <LuExternalLink className="ml-2 inline-flex size-3" />
</Link> </Link>
@ -535,7 +535,7 @@ export default function MasksAndZonesView({
</TooltipTrigger> </TooltipTrigger>
<TooltipContent> <TooltipContent>
<Trans> <Trans>
ui.settingView.masksAndZonesSettings.zone.add ui.settingView.masksAndZonesSettings.zones.add
</Trans> </Trans>
</TooltipContent> </TooltipContent>
</Tooltip> </Tooltip>

View File

@ -22,6 +22,7 @@ import { Link } from "react-router-dom";
import { LuExternalLink } from "react-icons/lu"; import { LuExternalLink } from "react-icons/lu";
import { StatusBarMessagesContext } from "@/context/statusbar-provider"; import { StatusBarMessagesContext } from "@/context/statusbar-provider";
import { Trans } from "react-i18next"; import { Trans } from "react-i18next";
import { t } from "i18next";
type MotionTunerViewProps = { type MotionTunerViewProps = {
selectedCamera: string; selectedCamera: string;
@ -118,20 +119,28 @@ export default function MotionTunerView({
) )
.then((res) => { .then((res) => {
if (res.status === 200) { if (res.status === 200) {
toast.success("Motion settings have been saved.", { toast.success(
position: "top-center", t("ui.settingView.motionDetectionTuner.toast.success"),
}); {
position: "top-center",
},
);
setChangedValue(false); setChangedValue(false);
updateConfig(); updateConfig();
} else { } else {
toast.error(`Failed to save config changes: ${res.statusText}`, { toast.error(
position: "top-center", t("ui.toast.save.error", { errorMessage: res.statusText }),
}); {
position: "top-center",
},
);
} }
}) })
.catch((error) => { .catch((error) => {
toast.error( toast.error(
`Failed to save config changes: ${error.response.data.message}`, t("ui.toast.save.error", {
errorMessage: error.response.data.message,
}),
{ position: "top-center" }, { position: "top-center" },
); );
}) })

View File

@ -82,7 +82,7 @@ export default function NotificationView({
return Object.values(config.cameras) return Object.values(config.cameras)
.filter( .filter(
(conf) => (conf) =>
conf.enabled && conf.enabled_in_config &&
conf.notifications && conf.notifications &&
conf.notifications.enabled_in_config, conf.notifications.enabled_in_config,
) )

View File

@ -94,20 +94,25 @@ export default function ExploreSettingsView({
) )
.then((res) => { .then((res) => {
if (res.status === 200) { if (res.status === 200) {
toast.success("Explore settings have been saved.", { toast.success(t("ui.settingView.exploreSettings.toast.success"), {
position: "top-center", position: "top-center",
}); });
setChangedValue(false); setChangedValue(false);
updateConfig(); updateConfig();
} else { } else {
toast.error(`Failed to save config changes: ${res.statusText}`, { toast.error(
position: "top-center", t("ui.toast.save.error", { errorMessage: res.statusText }),
}); {
position: "top-center",
},
);
} }
}) })
.catch((error) => { .catch((error) => {
toast.error( toast.error(
`Failed to save config changes: ${error.response.data.message}`, t("ui.toast.save.error", {
errorMessage: error.response.data.message,
}),
{ position: "top-center" }, { position: "top-center" },
); );
}) })