Compare commits


1 Commit

Author: dependabot[bot]
SHA1: 3be52f54a7
Message: Merge 806faf8c78 into c244e6582a
Date: 2026-04-22 19:40:06 +00:00
19 changed files with 82 additions and 402 deletions

View File

@@ -39,10 +39,6 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht
[Frigate telegram](https://github.com/OldTyT/frigate-telegram) makes it possible to send events from Frigate to Telegram. Events are sent as a message with a text description, video, and thumbnail.
## [kiosk-monitor](https://github.com/extremeshok/kiosk-monitor)
[kiosk-monitor](https://github.com/extremeshok/kiosk-monitor) is a Raspberry Pi watchdog that runs Chromium fullscreen on a Frigate dashboard (optionally with VLC on a second monitor for an RTSP camera stream), auto-restarts on frozen screens or unreachable URLs, and ships a Birdseye-aware Chromium helper that auto-sizes the grid to the display.
## [Periscope](https://github.com/maksz42/periscope)
[Periscope](https://github.com/maksz42/periscope) is a lightweight Android app that turns old devices into live viewers for Frigate. It works on Android 2.2 and above, including Android TV. It supports authentication and HTTPS.

View File

@@ -754,15 +754,6 @@ def events_search(
status_code=404,
)
if search_event.camera not in allowed_cameras:
return JSONResponse(
content={
"success": False,
"message": "Event not found",
},
status_code=404,
)
thumb_result = context.search_thumbnail(search_event)
thumb_ids = {result[0]: result[1] for result in thumb_result}
search_results = {

View File

@@ -35,7 +35,7 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.recordings])
@router.get("/recordings/storage", dependencies=[Depends(require_role(["admin"]))])
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
def get_recordings_storage_usage(request: Request):
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
"storage"

View File

@@ -549,14 +549,6 @@ class WebPushClient(Communicator):
logger.debug(f"Sending camera monitoring push notification for {camera_name}")
for user in self.web_pushers:
if not self._user_has_camera_access(user, camera):
logger.debug(
"Skipping notification for user %s - no access to camera %s",
user,
camera,
)
continue
self.send_push_notification(
user=user,
payload=payload,

View File

@@ -19,7 +19,6 @@ import numpy as np
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import BirdseyeModeEnum, FfmpegConfig, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE, INSTALL_DIR, UPDATE_BIRDSEYE_LAYOUT
from frigate.output.ws_auth import ws_has_camera_access
from frigate.util.image import (
SharedMemoryFrameManager,
copy_yuv_to_position,
@@ -237,14 +236,12 @@ class BroadcastThread(threading.Thread):
converter: FFMpegConverter,
websocket_server: Any,
stop_event: MpEvent,
config: FrigateConfig,
):
super().__init__()
self.camera = camera
self.converter = converter
self.websocket_server = websocket_server
self.stop_event = stop_event
self.config = config
def run(self) -> None:
while not self.stop_event.is_set():
@@ -259,7 +256,6 @@
if (
not ws.terminated
and ws.environ["PATH_INFO"] == f"/{self.camera}"
and ws_has_camera_access(ws, self.camera, self.config)
):
try:
ws.send(buf, binary=True)
@@ -810,11 +806,7 @@ class Birdseye:
config.birdseye.restream,
)
self.broadcaster = BroadcastThread(
"birdseye",
self.converter,
websocket_server,
stop_event,
config,
"birdseye", self.converter, websocket_server, stop_event
)
self.birdseye_manager = BirdsEyeFrameManager(self.config, stop_event)
self.frame_manager = SharedMemoryFrameManager()

View File

@@ -7,8 +7,7 @@ import threading
from multiprocessing.synchronize import Event as MpEvent
from typing import Any
from frigate.config import CameraConfig, FfmpegConfig, FrigateConfig
from frigate.output.ws_auth import ws_has_camera_access
from frigate.config import CameraConfig, FfmpegConfig
logger = logging.getLogger(__name__)
@@ -103,14 +102,12 @@ class BroadcastThread(threading.Thread):
converter: FFMpegConverter,
websocket_server: Any,
stop_event: MpEvent,
config: FrigateConfig,
):
super().__init__()
self.camera = camera
self.converter = converter
self.websocket_server = websocket_server
self.stop_event = stop_event
self.config = config
def run(self) -> None:
while not self.stop_event.is_set():
@@ -125,7 +122,6 @@
if (
not ws.terminated
and ws.environ["PATH_INFO"] == f"/{self.camera}"
and ws_has_camera_access(ws, self.camera, self.config)
):
try:
ws.send(buf, binary=True)
@@ -139,11 +135,7 @@
class JsmpegCamera:
def __init__(
self,
config: CameraConfig,
frigate_config: FrigateConfig,
stop_event: MpEvent,
websocket_server: Any,
self, config: CameraConfig, stop_event: MpEvent, websocket_server: Any
) -> None:
self.config = config
self.input: queue.Queue[bytes] = queue.Queue(maxsize=config.detect.fps)
@@ -162,11 +154,7 @@
config.live.quality,
)
self.broadcaster = BroadcastThread(
config.name or "",
self.converter,
websocket_server,
stop_event,
frigate_config,
config.name or "", self.converter, websocket_server, stop_event
)
self.converter.start()

View File

@@ -32,7 +32,6 @@ from frigate.const import (
from frigate.output.birdseye import Birdseye
from frigate.output.camera import JsmpegCamera
from frigate.output.preview import PreviewRecorder
from frigate.output.ws_auth import ws_has_camera_access
from frigate.util.image import SharedMemoryFrameManager, get_blank_yuv_frame
from frigate.util.process import FrigateProcess
@@ -103,7 +102,7 @@ class OutputProcess(FrigateProcess):
) -> None:
camera_config = self.config.cameras[camera]
jsmpeg_cameras[camera] = JsmpegCamera(
camera_config, self.config, self.stop_event, websocket_server
camera_config, self.stop_event, websocket_server
)
preview_recorders[camera] = PreviewRecorder(camera_config)
preview_write_times[camera] = 0
@@ -263,7 +262,6 @@ class OutputProcess(FrigateProcess):
# send camera frame to ffmpeg process if websockets are connected
if any(
ws.environ["PATH_INFO"].endswith(camera)
and ws_has_camera_access(ws, camera, self.config)
for ws in websocket_server.manager
):
# write to the converter for the camera if clients are listening to the specific camera
@@ -277,7 +275,6 @@
self.config.birdseye.restream
or any(
ws.environ["PATH_INFO"].endswith("birdseye")
and ws_has_camera_access(ws, "birdseye", self.config)
for ws in websocket_server.manager
)
)

View File

@@ -1,43 +0,0 @@
"""Authorization helpers for JSMPEG websocket clients."""
from typing import Any
from frigate.config import FrigateConfig
from frigate.models import User
def _get_valid_ws_roles(ws: Any, config: FrigateConfig) -> list[str]:
role_header = ws.environ.get("HTTP_REMOTE_ROLE", "")
roles = [
role.strip()
for role in role_header.split(config.proxy.separator)
if role.strip()
]
return [role for role in roles if role in config.auth.roles]
def ws_has_camera_access(ws: Any, camera_name: str, config: FrigateConfig) -> bool:
"""Return True when a websocket client is authorized for the camera path."""
roles = _get_valid_ws_roles(ws, config)
if not roles:
return False
roles_dict = config.auth.roles
# Birdseye is a composite stream, so only users with unrestricted access
# should receive it.
if camera_name == "birdseye":
return any(role == "admin" or not roles_dict.get(role) for role in roles)
all_camera_names = set(config.cameras.keys())
for role in roles:
if role == "admin" or not roles_dict.get(role):
return True
allowed_cameras = User.get_allowed_cameras(role, roles_dict, all_camera_names)
if camera_name in allowed_cameras:
return True
return False
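For context, the file deleted above gated JSMPEG websocket clients by the role the auth proxy injects into the connection headers. A minimal usage sketch, mirroring the deleted tests further down this page (the `SimpleNamespace` websocket stand-in is an assumption for illustration, and the module must still be importable):

```python
# Sketch only: assumes frigate.output.ws_auth (deleted in this diff) is available.
from types import SimpleNamespace

from frigate.config import FrigateConfig
from frigate.output.ws_auth import ws_has_camera_access

config = FrigateConfig(
    mqtt={"host": "mqtt"},
    auth={"roles": {"limited_user": ["front_door"]}},
    cameras={
        "front_door": {
            "ffmpeg": {
                "inputs": [{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}]
            },
            "detect": {"height": 1080, "width": 1920, "fps": 5},
        }
    },
)

# The auth proxy supplies the authenticated role via the remote-role header.
ws = SimpleNamespace(environ={"HTTP_REMOTE_ROLE": "limited_user"})

print(ws_has_camera_access(ws, "front_door", config))  # True: role lists this camera
print(ws_has_camera_access(ws, "birdseye", config))    # False: the composite stream needs unrestricted access
```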

View File

@@ -23,26 +23,6 @@ class TestHttpApp(BaseTestHttp):
response_json = response.json()
assert response_json == self.test_stats
def test_recordings_storage_requires_admin(self):
stats = Mock(spec=StatsEmitter)
stats.get_latest_stats.return_value = self.test_stats
app = super().create_app(stats)
app.storage_maintainer = Mock()
app.storage_maintainer.calculate_camera_usages.return_value = {
"front_door": {"usage": 2.0},
}
with AuthTestClient(app) as client:
response = client.get(
"/recordings/storage",
headers={"remote-user": "viewer", "remote-role": "viewer"},
)
assert response.status_code == 403
response = client.get("/recordings/storage")
assert response.status_code == 200
assert response.json()["front_door"]["usage_percent"] == 25.0
def test_config_set_in_memory_replaces_objects_track_list(self):
self.minimal_config["cameras"]["front_door"]["objects"] = {
"track": ["person", "car"],

View File

@@ -219,25 +219,6 @@ class TestHttpApp(BaseTestHttp):
assert len(events) == 1
assert events[0]["id"] == event_id
def test_similarity_search_hides_unauthorized_anchor_event(self):
mock_embeddings = Mock()
self.app.frigate_config.semantic_search.enabled = True
self.app.embeddings = mock_embeddings
with AuthTestClient(self.app) as client:
super().insert_mock_event("hidden.anchor", camera="back_door")
response = client.get(
"/events/search",
params={
"search_type": "similarity",
"event_id": "hidden.anchor",
},
)
assert response.status_code == 404
assert response.json()["message"] == "Event not found"
mock_embeddings.search_thumbnail.assert_not_called()
def test_get_good_event(self):
id = "123456.random"

View File

@@ -145,12 +145,9 @@ class TestExecuteFindSimilarObjects(unittest.TestCase):
embeddings=embeddings,
frigate_config=SimpleNamespace(
semantic_search=SimpleNamespace(enabled=semantic_enabled),
cameras={"driveway": object()},
auth=SimpleNamespace(roles={"admin": [], "viewer": ["driveway"]}),
proxy=SimpleNamespace(separator=","),
),
)
return SimpleNamespace(app=app, headers={})
return SimpleNamespace(app=app)
def test_semantic_search_disabled_returns_error(self):
req = self._make_request(semantic_enabled=False)
@@ -183,7 +180,7 @@
_execute_find_similar_objects(
req,
{"event_id": "anchor", "cameras": ["nonexistent_cam"]},
allowed_cameras=["driveway"],
allowed_cameras=["nonexistent_cam"],
)
)
self.assertEqual(result["results"], [])

View File

@@ -1,57 +0,0 @@
"""Tests for JSMPEG websocket authorization."""
import unittest
from types import SimpleNamespace
from frigate.config import FrigateConfig
from frigate.output.ws_auth import ws_has_camera_access
class TestWsHasCameraAccess(unittest.TestCase):
def setUp(self):
self.config = FrigateConfig(
mqtt={"host": "mqtt"},
auth={"roles": {"limited_user": ["front_door"]}},
cameras={
"front_door": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
"detect": {"height": 1080, "width": 1920, "fps": 5},
},
"back_door": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.2:554/video", "roles": ["detect"]}
]
},
"detect": {"height": 1080, "width": 1920, "fps": 5},
},
},
)
def _make_ws(self, role: str):
return SimpleNamespace(environ={"HTTP_REMOTE_ROLE": role})
def test_restricted_role_only_gets_allowed_camera(self):
ws = self._make_ws("limited_user")
self.assertTrue(ws_has_camera_access(ws, "front_door", self.config))
self.assertFalse(ws_has_camera_access(ws, "back_door", self.config))
def test_unrestricted_role_can_access_any_camera(self):
ws = self._make_ws("viewer")
self.assertTrue(ws_has_camera_access(ws, "front_door", self.config))
self.assertTrue(ws_has_camera_access(ws, "back_door", self.config))
def test_birdseye_requires_unrestricted_access(self):
self.assertTrue(
ws_has_camera_access(self._make_ws("admin"), "birdseye", self.config)
)
self.assertTrue(
ws_has_camera_access(self._make_ws("viewer"), "birdseye", self.config)
)
self.assertFalse(
ws_has_camera_access(self._make_ws("limited_user"), "birdseye", self.config)
)

View File

@@ -1,29 +0,0 @@
"""Tests for camera monitoring notification authorization."""
import unittest
from types import SimpleNamespace
from unittest.mock import MagicMock
from frigate.comms.webpush import WebPushClient
class TestCameraMonitoringNotifications(unittest.TestCase):
def test_send_camera_monitoring_filters_by_camera_access(self):
client = WebPushClient.__new__(WebPushClient)
client.config = SimpleNamespace(
cameras={"front_door": SimpleNamespace(friendly_name=None)}
)
client.web_pushers = {"allowed": [], "denied": []}
client.user_cameras = {"allowed": {"front_door"}, "denied": set()}
client.check_registrations = MagicMock()
client.cleanup_registrations = MagicMock()
client.send_push_notification = MagicMock()
client.send_camera_monitoring(
{"camera": "front_door", "message": "Monitoring condition met"}
)
self.assertEqual(client.send_push_notification.call_count, 1)
self.assertEqual(
client.send_push_notification.call_args.kwargs["user"], "allowed"
)

View File

@@ -24,12 +24,8 @@ from frigate.log import redirect_output_to_logger, suppress_stderr_during
from frigate.models import Event, Recordings, ReviewSegment
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
from frigate.util.file import get_event_thumbnail_bytes, load_event_snapshot_image
from frigate.util.image import (
calculate_region,
get_image_from_recording,
relative_box_to_absolute,
)
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import get_image_from_recording
from frigate.util.process import FrigateProcess
BATCH_SIZE = 16
@@ -717,7 +713,7 @@ def collect_object_classification_examples(
This function:
1. Queries events for the specified label
2. Selects 100 balanced events across different cameras and times
3. Crops each event's clean snapshot around the object bounding box
3. Retrieves thumbnails for selected events (with 33% center crop applied)
4. Selects 24 most visually distinct thumbnails
5. Saves to dataset directory
@@ -836,106 +832,66 @@
def _extract_event_thumbnails(events: list[Event], output_dir: str) -> list[str]:
"""
Extract a training image for each event.
Preferred path: load the full-frame clean snapshot and crop around the
stored bounding box with the same calculate_region(..., max(w, h), 1.0)
call the live ObjectClassificationProcessor uses, so wizard examples
are framed like inference-time inputs.
Fallback: if no clean snapshot exists (snapshots disabled, or only a
legacy annotated JPG is on disk), center-crop the stored thumbnail
using a step ladder sized from the box/region area ratio.
Extract thumbnails from events and save to disk.
Args:
events: List of Event objects
output_dir: Directory to save crops
output_dir: Directory to save thumbnails
Returns:
List of paths to successfully extracted images
List of paths to successfully extracted thumbnail images
"""
image_paths = []
thumbnail_paths = []
for idx, event in enumerate(events):
try:
img = _load_event_classification_crop(event)
if img is None:
continue
thumbnail_bytes = get_event_thumbnail_bytes(event)
resized = cv2.resize(img, (224, 224))
output_path = os.path.join(output_dir, f"thumbnail_{idx:04d}.jpg")
cv2.imwrite(output_path, resized)
image_paths.append(output_path)
if thumbnail_bytes:
nparr = np.frombuffer(thumbnail_bytes, np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
if img is not None:
height, width = img.shape[:2]
crop_size = 1.0
if event.data and "box" in event.data and "region" in event.data:
box = event.data["box"]
region = event.data["region"]
if len(box) == 4 and len(region) == 4:
box_w, box_h = box[2], box[3]
region_w, region_h = region[2], region[3]
box_area = (box_w * box_h) / (region_w * region_h)
if box_area < 0.05:
crop_size = 0.4
elif box_area < 0.10:
crop_size = 0.5
elif box_area < 0.20:
crop_size = 0.65
elif box_area < 0.35:
crop_size = 0.80
else:
crop_size = 0.95
crop_width = int(width * crop_size)
crop_height = int(height * crop_size)
x1 = (width - crop_width) // 2
y1 = (height - crop_height) // 2
x2 = x1 + crop_width
y2 = y1 + crop_height
cropped = img[y1:y2, x1:x2]
resized = cv2.resize(cropped, (224, 224))
output_path = os.path.join(output_dir, f"thumbnail_{idx:04d}.jpg")
cv2.imwrite(output_path, resized)
thumbnail_paths.append(output_path)
except Exception as e:
logger.debug(f"Failed to extract image for event {event.id}: {e}")
logger.debug(f"Failed to extract thumbnail for event {event.id}: {e}")
continue
return image_paths
def _load_event_classification_crop(event: Event) -> np.ndarray | None:
"""Prefer a snapshot-based object crop; fall back to a center-cropped thumbnail."""
if event.data and "box" in event.data:
snapshot, _ = load_event_snapshot_image(event, clean_only=True)
if snapshot is not None:
abs_box = relative_box_to_absolute(snapshot.shape, event.data["box"])
if abs_box is not None:
xmin, ymin, xmax, ymax = abs_box
box_w = xmax - xmin
box_h = ymax - ymin
if box_w > 0 and box_h > 0:
x1, y1, x2, y2 = calculate_region(
snapshot.shape,
xmin,
ymin,
xmax,
ymax,
max(box_w, box_h),
1.0,
)
cropped = snapshot[y1:y2, x1:x2]
if cropped.size > 0:
return cropped
thumbnail_bytes = get_event_thumbnail_bytes(event)
if not thumbnail_bytes:
return None
nparr = np.frombuffer(thumbnail_bytes, np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
if img is None or img.size == 0:
return None
height, width = img.shape[:2]
crop_size = 1.0
if event.data and "box" in event.data and "region" in event.data:
box = event.data["box"]
region = event.data["region"]
if len(box) == 4 and len(region) == 4:
box_w, box_h = box[2], box[3]
region_w, region_h = region[2], region[3]
box_area = (box_w * box_h) / (region_w * region_h)
if box_area < 0.05:
crop_size = 0.4
elif box_area < 0.10:
crop_size = 0.5
elif box_area < 0.20:
crop_size = 0.65
elif box_area < 0.35:
crop_size = 0.80
else:
crop_size = 0.95
crop_width = int(width * crop_size)
crop_height = int(height * crop_size)
x1 = (width - crop_width) // 2
y1 = (height - crop_height) // 2
cropped = img[y1 : y1 + crop_height, x1 : x1 + crop_width]
if cropped.size == 0:
return None
return cropped
return thumbnail_paths
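Both the removed snapshot-based path and the surviving thumbnail path share the same center-crop step ladder, which maps the box-to-region area ratio to the fraction of the image kept before resizing. A condensed sketch of that ladder (`center_crop_size` is a hypothetical name for illustration; the thresholds are taken directly from the hunk above):

```python
def center_crop_size(box: list[float], region: list[float]) -> float:
    """Map the box/region area ratio to the crop fraction used by the step ladder."""
    # box and region are [x, y, w, h]; compare areas via width * height.
    box_area = (box[2] * box[3]) / (region[2] * region[3])
    if box_area < 0.05:
        return 0.4
    if box_area < 0.10:
        return 0.5
    if box_area < 0.20:
        return 0.65
    if box_area < 0.35:
        return 0.80
    return 0.95


# A 40x40 box inside a 200x100 region covers 8% of it, so the middle
# 50% of the thumbnail is kept before the resize to 224x224.
assert center_crop_size([0, 0, 40, 40], [0, 0, 200, 100]) == 0.5
```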

View File

@@ -711,44 +711,23 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedPro
else:
format_entries = None
def run(rtsp_transport: Optional[str] = None) -> sp.CompletedProcess:
cmd = [ffmpeg.ffprobe_path]
if rtsp_transport:
cmd += ["-rtsp_transport", rtsp_transport]
cmd += [
"-timeout",
"1000000",
"-print_format",
"json",
"-show_entries",
f"stream={stream_entries}",
]
if detailed and format_entries:
cmd.extend(["-show_entries", f"format={format_entries}"])
cmd.extend(["-loglevel", "error", clean_path])
try:
return sp.run(cmd, capture_output=True, timeout=6)
except sp.TimeoutExpired as e:
logger.info(
"ffprobe timed out while probing %s (transport=%s)",
clean_camera_user_pass(path),
rtsp_transport or "default",
)
return sp.CompletedProcess(
args=cmd,
returncode=1,
stdout=e.stdout or b"",
stderr=(e.stderr or b"") + b"\nffprobe timed out",
)
ffprobe_cmd = [
ffmpeg.ffprobe_path,
"-timeout",
"1000000",
"-print_format",
"json",
"-show_entries",
f"stream={stream_entries}",
]
result = run()
# Add format entries for detailed mode
if detailed and format_entries:
ffprobe_cmd.extend(["-show_entries", f"format={format_entries}"])
# For RTSP: retry with explicit TCP transport if the first attempt failed
# (default UDP may be blocked)
if result.returncode != 0 and clean_path.startswith("rtsp://"):
result = run(rtsp_transport="tcp")
ffprobe_cmd.extend(["-loglevel", "error", clean_path])
return result
return sp.run(ffprobe_cmd, capture_output=True)
def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
@@ -845,23 +824,11 @@ async def get_video_properties(
"-show_streams",
url,
]
proc = None
try:
proc = await asyncio.create_subprocess_exec(
*cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
try:
stdout, _ = await asyncio.wait_for(proc.communicate(), timeout=6)
except asyncio.TimeoutError:
logger.info(
"ffprobe timed out while probing %s (transport=%s)",
clean_camera_user_pass(url),
rtsp_transport or "default",
)
proc.kill()
await proc.wait()
return False, 0, 0, None, -1
stdout, _ = await proc.communicate()
if proc.returncode != 0:
return False, 0, 0, None, -1
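The fuller variant in these hunks adds two safeguards that the shorter one drops: a hard timeout around ffprobe, and a one-shot retry over TCP for RTSP URLs whose default-transport probe fails (UDP may be blocked). A condensed sketch of that pattern (`probe_with_fallback` is a hypothetical name; the flags mirror the diff):

```python
import subprocess as sp
from typing import Optional


def probe_with_fallback(ffprobe_path: str, url: str) -> sp.CompletedProcess:
    """Probe a stream, retrying RTSP over TCP if the first attempt fails."""

    def run(rtsp_transport: Optional[str] = None) -> sp.CompletedProcess:
        cmd = [ffprobe_path]
        if rtsp_transport:
            cmd += ["-rtsp_transport", rtsp_transport]
        cmd += [
            "-timeout", "1000000",
            "-print_format", "json",
            "-show_streams",
            "-loglevel", "error",
            url,
        ]
        try:
            # Hard cap so a hung probe cannot stall the caller.
            return sp.run(cmd, capture_output=True, timeout=6)
        except sp.TimeoutExpired as e:
            return sp.CompletedProcess(
                args=cmd,
                returncode=1,
                stdout=e.stdout or b"",
                stderr=(e.stderr or b"") + b"\nffprobe timed out",
            )

    result = run()
    # The default RTSP transport may be blocked; retry once over TCP.
    if result.returncode != 0 and url.startswith("rtsp://"):
        result = run(rtsp_transport="tcp")
    return result
```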

web/package-lock.json (generated; 6 changed lines)
View File

@@ -9642,9 +9642,9 @@
"license": "MIT"
},
"node_modules/lodash-es": {
"version": "4.18.1",
"resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.18.1.tgz",
"integrity": "sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A==",
"version": "4.17.23",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
"integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
"license": "MIT"
},
"node_modules/lodash.merge": {

View File

@@ -415,7 +415,7 @@
"audioCodecGood": "Audio codec is {{codec}}.",
"resolutionHigh": "A resolution of {{resolution}} may cause increased resource usage.",
"resolutionLow": "A resolution of {{resolution}} may be too low for reliable detection of small objects.",
"resolutionUnknown": "The resolution of this stream could not be probed. You should manually set the detect resolution in Settings or your config.",
"resolutionUnknown": "The resolution of this stream could not be probed. This will cause issues on startup. You should manually set the detect resolution in Settings or your config.",
"noAudioWarning": "No audio detected for this stream, recordings will not have audio.",
"audioCodecRecordError": "The AAC audio codec is required to support audio in recordings.",
"audioCodecRequired": "An audio stream is required to support audio detection.",

View File

@@ -17,9 +17,6 @@ import { useUserPersistence } from "@/hooks/use-user-persistence";
import { Skeleton } from "../ui/skeleton";
import { Button } from "../ui/button";
import { FaCircleCheck } from "react-icons/fa6";
import { FaExclamationTriangle } from "react-icons/fa";
import { MdOutlinePersonSearch } from "react-icons/md";
import { ThreatLevel } from "@/types/review";
import { cn } from "@/lib/utils";
import { useTranslation } from "react-i18next";
import { getTranslatedLabel } from "@/utils/i18n";
@@ -130,11 +127,6 @@ export function AnimatedEventCard({
true,
);
const threatLevel = useMemo<ThreatLevel>(
() => (event.data.metadata?.potential_threat_level ?? 0) as ThreatLevel,
[event],
);
const aspectRatio = useMemo(() => {
if (
!config ||
@@ -160,15 +152,7 @@
<Tooltip>
<TooltipTrigger asChild>
<Button
className={cn(
"absolute left-2 top-1 z-40 transition-opacity",
threatLevel === ThreatLevel.SECURITY_CONCERN &&
"pointer-events-auto bg-severity_alert opacity-100 hover:bg-severity_alert",
threatLevel === ThreatLevel.NEEDS_REVIEW &&
"pointer-events-auto bg-severity_detection opacity-100 hover:bg-severity_detection",
threatLevel === ThreatLevel.NORMAL &&
"pointer-events-none bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 opacity-0 group-hover:pointer-events-auto group-hover:opacity-100",
)}
className="pointer-events-none absolute left-2 top-1 z-40 bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 opacity-0 transition-opacity group-hover:pointer-events-auto group-hover:opacity-100"
size="xs"
aria-label={t("markAsReviewed")}
onClick={async () => {
@@ -176,13 +160,7 @@
updateEvents();
}}
>
{threatLevel === ThreatLevel.SECURITY_CONCERN ? (
<FaExclamationTriangle className="size-3 text-white" />
) : threatLevel === ThreatLevel.NEEDS_REVIEW ? (
<MdOutlinePersonSearch className="size-3 text-white" />
) : (
<FaCircleCheck className="size-3 text-white" />
)}
<FaCircleCheck className="size-3 text-white" />
</Button>
</TooltipTrigger>
<TooltipContent>{t("markAsReviewed")}</TooltipContent>

View File

@@ -389,7 +389,7 @@ export default function LiveCameraView({
return "mse";
}, [lowBandwidth, mic, webRTC, isRestreamed]);
useKeyboardListener(["m", "Escape"], (key, modifiers) => {
useKeyboardListener(["m"], (key, modifiers) => {
if (!modifiers.down) {
return true;
}
@@ -407,12 +407,6 @@
return true;
}
break;
case "Escape":
if (!fullscreen) {
navigate(-1);
return true;
}
break;
}
return false;