Merge branch 'dev' of https://github.com/hawkeye217/frigate into lost-object-zoom

Josh Hawkins 2023-10-14 07:26:33 -05:00
commit 2722fe18b6
32 changed files with 375 additions and 118 deletions

View File

@@ -121,13 +121,15 @@ RUN apt-get -qq update \
     apt-transport-https \
     gnupg \
     wget \
-    && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 648ACFD622F3D138 \
-    && echo "deb http://deb.debian.org/debian bullseye main contrib non-free" | tee /etc/apt/sources.list.d/raspi.list \
+    # the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html
+    && wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \
+    gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \
+    && echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \
+    tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \
    && apt-get -qq update \
    && apt-get -qq install -y \
    python3.9 \
    python3.9-dev \
-    wget \
    # opencv dependencies
    build-essential cmake git pkg-config libgtk-3-dev \
    libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \

View File

@@ -13,8 +13,9 @@ Each role can only be assigned to one input per camera. The options for roles are:
 | Role     | Description |
 | -------- | ---------------------------------------------------------------------------------------- |
-| `detect` | Main feed for object detection |
+| `detect` | Main feed for object detection. [docs](object_detectors.md) |
 | `record` | Saves segments of the video feed based on configuration settings. [docs](record.md) |
+| `audio`  | Feed for audio based detection. [docs](audio_detectors.md) |
 | `rtmp`   | Deprecated: Broadcast as an RTMP feed for other services to consume. [docs](restream.md) |

 ```yaml

View File

@@ -9,11 +9,11 @@ Frigate has different live view options, some of which require the bundled `go2rtc`
 Live view options can be selected while viewing the live stream. The options are:

 | Source | Latency | Frame Rate                            | Resolution     | Audio                        | Requires go2rtc | Other Limitations                                |
-| ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | --------------- | -------------------------------------------- |
+| ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | --------------- | ------------------------------------------------- |
 | jsmpeg | low     | same as `detect -> fps`, capped at 10 | same as detect | no                           | no              | none                                             |
-| mse    | low     | native                                | native         | yes (depends on audio codec) | yes             | not supported on iOS, Firefox is h.264 only      |
+| mse    | low     | native                                | native         | yes (depends on audio codec) | yes             | iPhone requires iOS 17.1+, Firefox is h.264 only |
 | webrtc | lowest  | native                                | native         | yes (depends on audio codec) | yes             | requires extra config, doesn't support h.265     |

 ### Audio Support
@@ -37,12 +37,12 @@ There may be some cameras that you would prefer to use the sub stream for live view
 ```yaml
 go2rtc:
   streams:
-    rtsp_cam:
+    test_cam:
       - rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio.
-      - "ffmpeg:rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to opus
+      - "ffmpeg:test_cam#audio=opus" # <- copy of the stream which transcodes audio to opus for webrtc
-    rtsp_cam_sub:
+    test_cam_sub:
       - rtsp://192.168.1.5:554/substream # <- stream which supports video & aac audio.
-      - "ffmpeg:rtsp_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus
+      - "ffmpeg:test_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus for webrtc

 cameras:
   test_cam:
@@ -59,7 +59,7 @@ cameras:
         roles:
           - detect
     live:
-      stream_name: rtsp_cam_sub
+      stream_name: test_cam_sub
 ```

 ### WebRTC extra configuration:

View File

@ -172,6 +172,8 @@ Events from the database. Accepts the following query string parameters:
| `min_score` | float | Minimum score of the event | | `min_score` | float | Minimum score of the event |
| `max_score` | float | Maximum score of the event | | `max_score` | float | Maximum score of the event |
| `is_submitted` | int | Filter events that are submitted to Frigate+ (0 or 1) | | `is_submitted` | int | Filter events that are submitted to Frigate+ (0 or 1) |
| `min_length` | float | Minimum length of the event |
| `max_length` | float | Maximum length of the event |
### `GET /api/timeline` ### `GET /api/timeline`
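For context, a minimal sketch of querying the events API with the new length filters; the `min_length`/`max_length` names come from the hunk above, while the host/port are assumptions:

```python
import requests

# Fetch events lasting between 5 and 60 seconds (Frigate host/port assumed).
resp = requests.get(
    "http://127.0.0.1:5000/api/events",
    params={"min_length": 5.0, "max_length": 60.0},
)
for event in resp.json():
    # end_time may be missing for in-progress events, so guard before computing length
    if event.get("end_time"):
        print(event["id"], event["label"], round(event["end_time"] - event["start_time"], 1))
```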
@@ -320,6 +322,12 @@ Get PTZ info for the camera.
 Create a manual event with a given `label` (ex: doorbell press) to capture a specific event besides an object being detected.

+:::caution
+Recording retention config still applies to manual events. If Frigate is configured with `mode: motion`, then the manual event will only keep recording segments when motion occurred.
+:::

 **Optional Body:**

 ```json
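As a usage sketch of creating the manual event the caution above applies to; the endpoint path and host are assumptions not shown in this hunk, and the optional JSON body documented below can be added to override defaults:

```python
import requests

# Create a manual "doorbell_press" event on test_cam (endpoint path and host assumed).
resp = requests.post("http://127.0.0.1:5000/api/events/test_cam/doorbell_press/create")
print(resp.json())
```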

View File

@ -70,10 +70,10 @@ objects:
fedex: fedex:
min_score: .75 min_score: .75
person: person:
min_score: .8 min_score: .65
threshold: .85 threshold: .85
car: car:
min_score: .8 min_score: .65
threshold: .85 threshold: .85
``` ```

View File

@@ -163,6 +163,8 @@ class FrigateApp:
                 "frame_queue": mp.Queue(maxsize=2),
                 "capture_process": None,
                 "process": None,
+                "audio_rms": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                "audio_dBFS": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
             }
             self.ptz_metrics[camera_name] = {
                 "ptz_autotracker_enabled": mp.Value(  # type: ignore[typeddict-item]
@@ -500,6 +502,7 @@ class FrigateApp:
                 args=(
                     self.config,
                     self.audio_recordings_info_queue,
+                    self.camera_metrics,
                     self.feature_metrics,
                     self.inter_process_communicator,
                 ),

View File

@@ -14,7 +14,7 @@ import requests
 from setproctitle import setproctitle

 from frigate.comms.inter_process import InterProcessCommunicator
-from frigate.config import CameraConfig, FrigateConfig
+from frigate.config import CameraConfig, CameraInput, FfmpegConfig, FrigateConfig
 from frigate.const import (
     AUDIO_DURATION,
     AUDIO_FORMAT,
@@ -26,7 +26,7 @@ from frigate.const import (
 from frigate.ffmpeg_presets import parse_preset_input
 from frigate.log import LogPipe
 from frigate.object_detection import load_labels
-from frigate.types import FeatureMetricsTypes
+from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
 from frigate.util.builtin import get_ffmpeg_arg_list
 from frigate.util.services import listen
 from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg
@@ -39,19 +39,36 @@ except ModuleNotFoundError:
 logger = logging.getLogger(__name__)

-def get_ffmpeg_command(input_args: list[str], input_path: str) -> list[str]:
-    return get_ffmpeg_arg_list(
-        f"ffmpeg {{}} -i {{}} -f {AUDIO_FORMAT} -ar {AUDIO_SAMPLE_RATE} -ac 1 -y {{}}".format(
-            " ".join(input_args),
-            input_path,
+def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
+    ffmpeg_input: CameraInput = [i for i in ffmpeg.inputs if "audio" in i.roles][0]
+    input_args = get_ffmpeg_arg_list(ffmpeg.global_args) + (
+        parse_preset_input(ffmpeg_input.input_args, 1)
+        or ffmpeg_input.input_args
+        or parse_preset_input(ffmpeg.input_args, 1)
+        or ffmpeg.input_args
+    )
+    return (
+        ["ffmpeg", "-vn"]
+        + input_args
+        + ["-i"]
+        + [ffmpeg_input.path]
+        + [
+            "-f",
+            f"{AUDIO_FORMAT}",
+            "-ar",
+            f"{AUDIO_SAMPLE_RATE}",
+            "-ac",
+            "1",
+            "-y",
             "pipe:",
-        )
+        ]
     )

 def listen_to_audio(
     config: FrigateConfig,
     recordings_info_queue: mp.Queue,
+    camera_metrics: dict[str, CameraMetricsTypes],
     process_info: dict[str, FeatureMetricsTypes],
     inter_process_communicator: InterProcessCommunicator,
 ) -> None:
@@ -80,6 +97,7 @@ def listen_to_audio(
         audio = AudioEventMaintainer(
             camera,
             recordings_info_queue,
+            camera_metrics,
             process_info,
             stop_event,
             inter_process_communicator,
@@ -153,6 +171,7 @@ class AudioEventMaintainer(threading.Thread):
         self,
         camera: CameraConfig,
         recordings_info_queue: mp.Queue,
+        camera_metrics: dict[str, CameraMetricsTypes],
         feature_metrics: dict[str, FeatureMetricsTypes],
         stop_event: mp.Event,
         inter_process_communicator: InterProcessCommunicator,
@@ -161,6 +180,7 @@ class AudioEventMaintainer(threading.Thread):
         self.name = f"{camera.name}_audio_event_processor"
         self.config = camera
         self.recordings_info_queue = recordings_info_queue
+        self.camera_metrics = camera_metrics
         self.feature_metrics = feature_metrics
         self.inter_process_communicator = inter_process_communicator
         self.detections: dict[dict[str, any]] = {}
@@ -169,11 +189,7 @@ class AudioEventMaintainer(threading.Thread):
         self.shape = (int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE)),)
         self.chunk_size = int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE * 2))
         self.logger = logging.getLogger(f"audio.{self.config.name}")
-        self.ffmpeg_cmd = get_ffmpeg_command(
-            get_ffmpeg_arg_list(self.config.ffmpeg.global_args)
-            + parse_preset_input("preset-rtsp-audio-only", 1),
-            [i.path for i in self.config.ffmpeg.inputs if "audio" in i.roles][0],
-        )
+        self.ffmpeg_cmd = get_ffmpeg_command(self.config.ffmpeg)
         self.logpipe = LogPipe(f"ffmpeg.{self.config.name}.audio")
         self.audio_listener = None
@@ -184,6 +200,9 @@ class AudioEventMaintainer(threading.Thread):
         audio_as_float = audio.astype(np.float32)
         rms, dBFS = self.calculate_audio_levels(audio_as_float)

+        self.camera_metrics[self.config.name]["audio_rms"].value = rms
+        self.camera_metrics[self.config.name]["audio_dBFS"].value = dBFS

         # only run audio detection when volume is above min_volume
         if rms >= self.config.audio.min_volume:
             # add audio info to recordings queue

View File

@@ -256,13 +256,6 @@ PRESETS_INPUT = {
         "-use_wallclock_as_timestamps",
         "1",
     ],
-    "preset-rtsp-audio-only": [
-        "-rtsp_transport",
-        "tcp",
-        TIMEOUT_PARAM,
-        "5000000",
-        "-vn",
-    ],
     "preset-rtsp-restream": _user_agent_args
     + [
         "-rtsp_transport",

View File

@@ -805,6 +805,8 @@ def events():
     min_score = request.args.get("min_score", type=float)
     max_score = request.args.get("max_score", type=float)
     is_submitted = request.args.get("is_submitted", type=int)
+    min_length = request.args.get("min_length", type=float)
+    max_length = request.args.get("max_length", type=float)

     clauses = []
@@ -933,6 +935,12 @@ def events():
     if min_score is not None:
         clauses.append((Event.data["score"] >= min_score))

+    if min_length is not None:
+        clauses.append(((Event.end_time - Event.start_time) >= min_length))
+
+    if max_length is not None:
+        clauses.append(((Event.end_time - Event.start_time) <= max_length))

     if is_submitted is not None:
         if is_submitted == 0:
             clauses.append((Event.plus_id.is_null()))

View File

@@ -20,3 +20,7 @@ class MotionDetector(ABC):
     @abstractmethod
     def detect(self, frame):
         pass
+
+    @abstractmethod
+    def is_calibrating(self):
+        pass

View File

@@ -38,6 +38,9 @@ class FrigateMotionDetector(MotionDetector):
         self.threshold = threshold
         self.contour_area = contour_area

+    def is_calibrating(self):
+        return False
+
     def detect(self, frame):
         motion_boxes = []

View File

@@ -49,6 +49,9 @@ class ImprovedMotionDetector(MotionDetector):
         self.contrast_values[:, 1:2] = 255
         self.contrast_values_index = 0

+    def is_calibrating(self):
+        return self.calibrating
+
     def detect(self, frame):
         motion_boxes = []
@@ -141,7 +144,6 @@ class ImprovedMotionDetector(MotionDetector):
         # if calibrating or the motion contours are > 80% of the image area (lightning, ir, ptz) recalibrate
         if self.calibrating or pct_motion > self.config.lightning_threshold:
-            motion_boxes = []
             self.calibrating = True

         if self.save_images:

View File

@@ -355,6 +355,7 @@ class RecordingMaintainer(threading.Thread):
             "+faststart",
             file_path,
             stderr=asyncio.subprocess.PIPE,
+            stdout=asyncio.subprocess.DEVNULL,
         )
         await p.wait()

View File

@@ -176,6 +176,8 @@ async def set_gpu_stats(
             stats[nvidia_usage[i]["name"]] = {
                 "gpu": str(round(float(nvidia_usage[i]["gpu"]), 2)) + "%",
                 "mem": str(round(float(nvidia_usage[i]["mem"]), 2)) + "%",
+                "enc": str(round(float(nvidia_usage[i]["enc"]), 2)) + "%",
+                "dec": str(round(float(nvidia_usage[i]["dec"]), 2)) + "%",
             }
         else:
@@ -266,6 +268,8 @@ def stats_snapshot(
             "pid": pid,
             "capture_pid": cpid,
             "ffmpeg_pid": ffmpeg_pid,
+            "audio_rms": round(camera_stats["audio_rms"].value, 4),
+            "audio_dBFS": round(camera_stats["audio_dBFS"].value, 4),
         }

     stats["detectors"] = {}
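A hedged sketch of reading the new fields from the stats API: the `audio_rms`, `audio_dBFS`, `enc`, and `dec` names come from this diff, while the host, camera name, and exact response layout are assumptions:

```python
import requests

# Fetch current stats (Frigate host/port assumed).
stats = requests.get("http://127.0.0.1:5000/api/stats").json()

# Per-camera audio levels added in this change (camera name and placement assumed).
camera = stats.get("test_cam", {})
print(camera.get("audio_rms"), camera.get("audio_dBFS"))

# NVIDIA encoder/decoder utilization now reported alongside gpu/mem.
for name, usage in stats.get("gpu_usages", {}).items():
    print(name, usage.get("enc"), usage.get("dec"))
```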

View File

@@ -1027,7 +1027,12 @@ class TestConfig(unittest.TestCase):
                                 "roles": ["detect"],
                             },
                         ]
-                    }
+                    },
+                    "detect": {
+                        "height": 720,
+                        "width": 1280,
+                        "fps": 5,
+                    },
                 }
             },
         }
@@ -1082,6 +1087,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ]
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                     "snapshots": {
                         "height": 100,
                     },
@@ -1107,7 +1117,12 @@ class TestConfig(unittest.TestCase):
                                 "roles": ["detect"],
                             },
                         ]
-                    }
+                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                 }
             },
         }
@@ -1132,6 +1147,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ]
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                     "snapshots": {
                         "height": 150,
                         "enabled": True,
@@ -1160,6 +1180,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ]
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                 }
             },
         }
@@ -1181,7 +1206,12 @@ class TestConfig(unittest.TestCase):
                                 "roles": ["detect"],
                             },
                         ]
-                    }
+                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                 }
             },
         }
@@ -1205,6 +1235,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ]
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                     "rtmp": {
                         "enabled": True,
                     },
@@ -1234,6 +1269,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ]
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                 }
             },
         }
@@ -1257,6 +1297,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ]
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                 }
             },
         }
@@ -1278,7 +1323,12 @@ class TestConfig(unittest.TestCase):
                                 "roles": ["detect"],
                             },
                         ]
-                    }
+                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                 }
             },
         }
@@ -1302,6 +1352,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ]
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                     "live": {
                         "quality": 7,
                     },
@@ -1329,6 +1384,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ]
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                 }
             },
         }
@@ -1350,7 +1410,12 @@ class TestConfig(unittest.TestCase):
                                 "roles": ["detect"],
                             },
                         ]
-                    }
+                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                 }
             },
         }
@@ -1375,6 +1440,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ]
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                     "timestamp_style": {"position": "bl", "thickness": 4},
                 }
             },
@@ -1400,6 +1470,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ]
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                 }
             },
         }
@@ -1423,6 +1498,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ]
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                 }
             },
         }
@@ -1450,6 +1530,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ],
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                 }
             },
         }
@@ -1475,6 +1560,11 @@ class TestConfig(unittest.TestCase):
                             },
                         ]
                     },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                     "zones": {
                         "steps": {
                             "coordinates": "0,0,0,0",
@@ -1546,6 +1636,11 @@ class TestConfig(unittest.TestCase):
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                        ]
                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                    "onvif": {"autotracking": {"movement_weights": "1.23, 2.34, 0.50"}},
                }
            },
@@ -1569,6 +1664,11 @@ class TestConfig(unittest.TestCase):
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                        ]
                    },
+                    "detect": {
+                        "height": 1080,
+                        "width": 1920,
+                        "fps": 5,
+                    },
                    "onvif": {"autotracking": {"movement_weights": "1.234, 2.345a"}},
                }
            },

View File

@@ -77,7 +77,7 @@ class NorfairTracker(ObjectTracker):
         self.tracker = Tracker(
             distance_function=frigate_distance,
             distance_threshold=2.5,
-            initialization_delay=0,
+            initialization_delay=config.detect.fps / 2,
             hit_counter_max=self.max_disappeared,
         )
         if self.ptz_autotracker_enabled.value:
@@ -106,11 +106,6 @@
             "ymax": self.detect_config.height,
         }

-        # start object with a hit count of `fps` to avoid quick detection -> loss
-        next(
-            (o for o in self.tracker.tracked_objects if o.global_id == track_id)
-        ).hit_counter = self.camera_config.detect.fps
-
     def deregister(self, id, track_id):
         del self.tracked_objects[id]
         del self.disappeared[id]

View File

@@ -23,6 +23,8 @@ class CameraMetricsTypes(TypedDict):
    process_fps: Synchronized
    read_start: Synchronized
    skipped_fps: Synchronized
+    audio_rms: Synchronized
+    audio_dBFS: Synchronized

 class PTZMetricsTypes(TypedDict):

View File

@@ -293,6 +293,8 @@ def get_nvidia_gpu_stats() -> dict[int, dict]:
            handle = nvml.nvmlDeviceGetHandleByIndex(i)
            meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
            util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
+            enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle)
+            dec = try_get_info(nvml.nvmlDeviceGetDecoderUtilization, handle)

            if util != "N/A":
                gpu_util = util.gpu
            else:
@@ -303,10 +305,22 @@
            else:
                gpu_mem_util = -1

+            if enc != "N/A":
+                enc_util = enc[0]
+            else:
+                enc_util = -1
+
+            if dec != "N/A":
+                dec_util = dec[0]
+            else:
+                dec_util = -1

            results[i] = {
                "name": nvml.nvmlDeviceGetName(handle),
                "gpu": gpu_util,
                "mem": gpu_mem_util,
+                "enc": enc_util,
+                "dec": dec_util,
            }
    except Exception:
        pass

View File

@@ -21,7 +21,6 @@ from frigate.log import LogPipe
 from frigate.motion import MotionDetector
 from frigate.motion.improved_motion import ImprovedMotionDetector
 from frigate.object_detection import RemoteObjectDetector
-from frigate.ptz.autotrack import ptz_moving_at_frame_time
 from frigate.track import ObjectTracker
 from frigate.track.norfair_tracker import NorfairTracker
 from frigate.types import PTZMetricsTypes
@@ -777,24 +776,8 @@ def process_frames(
            logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
            continue

-        # always returns false if autotracking is disabled
-        ptz_moving = ptz_moving_at_frame_time(
-            frame_time,
-            ptz_metrics["ptz_start_time"].value,
-            ptz_metrics["ptz_stop_time"].value,
-        )
-
-        motion_boxes = (
-            motion_detector.detect(frame)
-            if motion_enabled.value and not ptz_moving
-            else []
-        )
-
-        # full frame motion if ptz is moving from autotracking - remove this later
-        # better to have motion detector expose property when it's calibrating
-        # but still return motion boxes for retention purposes
-        if ptz_moving:
-            motion_boxes = [(0, 0, frame_shape[1], frame_shape[0] * 3 // 2)]
+        # look for motion if enabled
+        motion_boxes = motion_detector.detect(frame) if motion_enabled.value else []

        regions = []
        consolidated_detections = []
@@ -819,8 +802,10 @@
                )
                # and it hasn't disappeared
                and object_tracker.disappeared[obj["id"]] == 0
-                # and it doesn't overlap with any current motion boxes
-                and not intersects_any(obj["box"], motion_boxes)
+                # and it doesn't overlap with any current motion boxes when not calibrating
+                and not intersects_any(
+                    obj["box"], [] if motion_detector.is_calibrating() else motion_boxes
+                )
            ]

            # get tracked object boxes that aren't stationary
@@ -830,7 +815,10 @@
                if obj["id"] not in stationary_object_ids
            ]

-            combined_boxes = motion_boxes + tracked_object_boxes
+            combined_boxes = tracked_object_boxes
+            # only add in the motion boxes when not calibrating
+            if not motion_detector.is_calibrating():
+                combined_boxes += motion_boxes

            cluster_candidates = get_cluster_candidates(
                frame_shape, region_min_size, combined_boxes

View File

@@ -157,12 +157,9 @@ class VideoRTC extends HTMLElement {
        if (this.ws) this.ws.send(JSON.stringify(value));
    }

-    codecs(type) {
-        const test =
-            type === 'mse'
-                ? (codec) => MediaSource.isTypeSupported(`video/mp4; codecs="${codec}"`)
-                : (codec) => this.video.canPlayType(`video/mp4; codecs="${codec}"`);
-        return this.CODECS.filter(test).join();
+    /** @param {Function} isSupported */
+    codecs(isSupported) {
+        return this.CODECS.filter(codec => isSupported(`video/mp4; codecs="${codec}"`)).join();
    }

    /**
@@ -311,7 +308,7 @@
        const modes = [];

-        if (this.mode.indexOf('mse') >= 0 && 'MediaSource' in window) {
+        if (this.mode.indexOf('mse') >= 0 && ('MediaSource' in window || 'ManagedMediaSource' in window)) {
            // iPhone
            modes.push('mse');
            this.onmse();
@@ -363,18 +360,29 @@
    }

    onmse() {
-        const ms = new MediaSource();
-        ms.addEventListener(
-            'sourceopen',
-            () => {
-                URL.revokeObjectURL(this.video.src);
-                this.send({ type: 'mse', value: this.codecs('mse') });
-            },
-            { once: true }
-        );
-
-        this.video.src = URL.createObjectURL(ms);
-        this.video.srcObject = null;
+        /** @type {MediaSource} */
+        let ms;
+
+        if ('ManagedMediaSource' in window) {
+            const MediaSource = window.ManagedMediaSource;
+            ms = new MediaSource();
+            ms.addEventListener('sourceopen', () => {
+                this.send({type: 'mse', value: this.codecs(MediaSource.isTypeSupported)});
+            }, {once: true});
+
+            this.video.disableRemotePlayback = true;
+            this.video.srcObject = ms;
+        } else {
+            ms = new MediaSource();
+            ms.addEventListener('sourceopen', () => {
+                URL.revokeObjectURL(this.video.src);
+                this.send({type: 'mse', value: this.codecs(MediaSource.isTypeSupported)});
+            }, {once: true});
+
+            this.video.src = URL.createObjectURL(ms);
+            this.video.srcObject = null;
+        }
+
        this.play();
        this.mseCodecs = '';
@@ -580,7 +588,7 @@
            video2.src = `data:video/mp4;base64,${VideoRTC.btoa(data)}`;
        };

-        this.send({ type: 'mp4', value: this.codecs('mp4') });
+        this.send({ type: 'mp4', value: this.codecs(this.video.canPlayType) });
    }

    static btoa(buffer) {

View File

@@ -4,9 +4,7 @@ import Menu from './Menu';
 import { ArrowDropdown } from '../icons/ArrowDropdown';
 import Heading from './Heading';
 import Button from './Button';
-import CameraIcon from '../icons/Camera';
-import SpeakerIcon from '../icons/Speaker';
-import useSWR from 'swr';
+import SelectOnlyIcon from '../icons/SelectOnly';

 export default function MultiSelect({ className, title, options, selection, onToggle, onShowAll, onSelectSingle }) {
   const popupRef = useRef(null);
@@ -20,7 +18,6 @@ export default function MultiSelect({ className, title, options, selection, onToggle, onShowAll, onSelectSingle }) {
   };

   const menuHeight = Math.round(window.innerHeight * 0.55);
-  const { data: config } = useSWR('config');

   return (
     <div className={`${className} p-2`} ref={popupRef}>
       <div className="flex justify-between min-w-[120px]" onClick={() => setState({ showMenu: true })}>
@@ -61,7 +58,7 @@
               className="max-h-[35px] mx-2"
               onClick={() => onSelectSingle(item)}
             >
-              {title === 'Labels' && config.audio.listen.includes(item) ? <SpeakerIcon /> : <CameraIcon />}
+              { ( <SelectOnlyIcon /> ) }
             </Button>
           </div>
         </div>

View File

@@ -1,7 +1,7 @@
 import { h } from 'preact';
 import { useCallback, useState } from 'preact/hooks';

-export default function Switch({ checked, id, onChange, label, labelPosition = 'before' }) {
+export default function Switch({ className, checked, id, onChange, label, labelPosition = 'before' }) {
   const [isFocused, setFocused] = useState(false);

   const handleChange = useCallback(() => {
@@ -21,7 +21,7 @@ export default function Switch({ checked, id, onChange, label, labelPosition = 'before' }) {
   return (
     <label
       htmlFor={id}
-      className={`flex items-center space-x-4 w-full ${onChange ? 'cursor-pointer' : 'cursor-not-allowed'}`}
+      className={`${className ? className : ''} flex items-center space-x-4 w-full ${onChange ? 'cursor-pointer' : 'cursor-not-allowed'}`}
     >
       {label && labelPosition === 'before' ? (
         <div data-testid={`${id}-label`} className="inline-flex flex-grow">

View File

@@ -0,0 +1,21 @@
+import { h } from 'preact';
+import { memo } from 'preact/compat';
+
+export function SelectOnly({ className = 'h-5 w-5', stroke = 'currentColor', fill = 'none', onClick = () => {} }) {
+  return (
+    <svg
+      xmlns="http://www.w3.org/2000/svg"
+      className={className}
+      fill={fill}
+      viewBox="0 0 24 24"
+      stroke={stroke}
+      onClick={onClick}
+    >
+      <path
+        d="M12 8c-2.21 0-4 1.79-4 4s1.79 4 4 4 4-1.79 4-4-1.79-4-4-4zm-7 7H3v4c0 1.1.9 2 2 2h4v-2H5v-4zM5 5h4V3H5c-1.1 0-2 .9-2 2v4h2V5zm14-2h-4v2h4v4h2V5c0-1.1-.9-2-2-2zm0 16h-4v2h4c1.1 0 2-.9 2-2v-4h-2v4z"
+      />
+    </svg>
+  );
+}
+
+export default memo(SelectOnly);

View File

@@ -35,7 +35,7 @@ export default function Birdseye() {
   let player;
   const playerClass = ptzCameras.length || isMaxWidth ? 'w-full' : 'max-w-5xl xl:w-1/2';
   if (viewSource == 'mse' && config.birdseye.restream) {
-    if ('MediaSource' in window) {
+    if ('MediaSource' in window || 'ManagedMediaSource' in window) {
       player = (
         <Fragment>
           <div className={playerClass}>
@@ -50,7 +50,7 @@
       player = (
         <Fragment>
           <div className="w-5xl text-center text-sm">
-            MSE is not supported on iOS devices. You'll need to use jsmpeg or webRTC. See the docs for more info.
+            MSE is only supported on iOS 17.1+. You'll need to update if available or use jsmpeg / webRTC streams. See the docs for more info.
           </div>
         </Fragment>
       );

View File

@@ -116,7 +116,7 @@ export default function Camera({ camera }) {
   let player;
   if (viewMode === 'live') {
     if (viewSource == 'mse' && restreamEnabled) {
-      if ('MediaSource' in window) {
+      if ('MediaSource' in window || 'ManagedMediaSource' in window) {
         player = (
           <Fragment>
             <div className="max-w-5xl">
@@ -133,7 +133,7 @@
         player = (
           <Fragment>
             <div className="w-5xl text-center text-sm">
-              MSE is not supported on iOS devices. You'll need to use jsmpeg or webRTC. See the docs for more info.
+              MSE is only supported on iOS 17.1+. You'll need to update if available or use jsmpeg / webRTC streams. See the docs for more info.
             </div>
           </Fragment>
         );

View File

@@ -135,7 +135,7 @@ export default function CameraMasks({ camera }) {
       const endpoint = `config/set?${queryParameters}`;
       const response = await axios.put(endpoint);
       if (response.status === 200) {
-        setSuccess(response.data);
+        setSuccess(response.data.message);
       }
     } catch (error) {
       if (error.response) {

View File

@@ -5,24 +5,41 @@ import CameraImage from '../components/CameraImage';
 import AudioIcon from '../icons/Audio';
 import ClipIcon from '../icons/Clip';
 import MotionIcon from '../icons/Motion';
+import SettingsIcon from '../icons/Settings';
 import SnapshotIcon from '../icons/Snapshot';
 import { useAudioState, useDetectState, useRecordingsState, useSnapshotsState } from '../api/ws';
 import { useMemo } from 'preact/hooks';
 import useSWR from 'swr';
+import { useRef, useState } from 'react';
+import { useResizeObserver } from '../hooks';
+import Dialog from '../components/Dialog';
+import Switch from '../components/Switch';
+import Heading from '../components/Heading';
+import Button from '../components/Button';

 export default function Cameras() {
   const { data: config } = useSWR('config');
+  const containerRef = useRef(null);
+  const [{ width: containerWidth }] = useResizeObserver(containerRef);
+
+  // Add scrollbar width (when visible) to the available observer width to eliminate screen juddering.
+  // https://github.com/blakeblackshear/frigate/issues/1657
+  let scrollBarWidth = 0;
+  if (window.innerWidth && document.body.offsetWidth) {
+    scrollBarWidth = window.innerWidth - document.body.offsetWidth;
+  }
+  const availableWidth = scrollBarWidth ? containerWidth + scrollBarWidth : containerWidth;

   return !config ? (
     <ActivityIndicator />
   ) : (
-    <div className="grid grid-cols-1 3xl:grid-cols-3 md:grid-cols-2 gap-4 p-2 px-4">
-      <SortedCameras config={config} unsortedCameras={config.cameras} />
+    <div className="grid grid-cols-1 3xl:grid-cols-3 md:grid-cols-2 gap-4 p-2 px-4" ref={containerRef}>
+      <SortedCameras config={config} unsortedCameras={config.cameras} availableWidth={availableWidth} />
     </div>
   );
 }

-function SortedCameras({ config, unsortedCameras }) {
+function SortedCameras({ config, unsortedCameras, availableWidth }) {
   const sortedCameras = useMemo(
     () =>
       Object.entries(unsortedCameras)
@@ -34,17 +51,20 @@ function SortedCameras({ config, unsortedCameras }) {
   return (
     <Fragment>
       {sortedCameras.map(([camera, conf]) => (
-        <Camera key={camera} name={camera} config={config.cameras[camera]} conf={conf} />
+        <Camera key={camera} name={camera} config={config.cameras[camera]} conf={conf} availableWidth={availableWidth} />
       ))}
     </Fragment>
   );
 }

-function Camera({ name, config }) {
+function Camera({ name, config, availableWidth }) {
   const { payload: detectValue, send: sendDetect } = useDetectState(name);
   const { payload: recordValue, send: sendRecordings } = useRecordingsState(name);
   const { payload: snapshotValue, send: sendSnapshots } = useSnapshotsState(name);
   const { payload: audioValue, send: sendAudio } = useAudioState(name);
+  const [cameraOptions, setCameraOptions] = useState('');
   const href = `/cameras/${name}`;
   const buttons = useMemo(() => {
     return [
@@ -56,7 +76,15 @@ function Camera({ name, config }) {
     return `${name.replaceAll('_', ' ')}`;
   }, [name]);
   const icons = useMemo(
-    () => [
+    () => (availableWidth < 448 ? [
+      {
+        icon: SettingsIcon,
+        color: 'gray',
+        onClick: () => {
+          setCameraOptions(config.name);
+        },
+      },
+    ] : [
       {
         name: `Toggle detect ${detectValue === 'ON' ? 'off' : 'on'}`,
         icon: MotionIcon,
@@ -95,17 +123,64 @@ function Camera({ name, config }) {
          },
        }
        : null,
-    ].filter((button) => button != null),
-    [config, audioValue, sendAudio, detectValue, sendDetect, recordValue, sendRecordings, snapshotValue, sendSnapshots]
+    ]).filter((button) => button != null),
+    [config, availableWidth, setCameraOptions, audioValue, sendAudio, detectValue, sendDetect, recordValue, sendRecordings, snapshotValue, sendSnapshots]
   );

   return (
-    <Card
-      buttons={buttons}
-      href={href}
-      header={cleanName}
-      icons={icons}
-      media={<CameraImage camera={name} stretch />}
-    />
+    <Fragment>
+      {cameraOptions && (
+        <Dialog>
+          <div className="p-4">
+            <Heading size="md">{`${name.replaceAll('_', ' ')} Settings`}</Heading>
+            <Switch
+              className="my-3"
+              checked={detectValue == 'ON'}
+              id="detect"
+              onChange={() => sendDetect(detectValue === 'ON' ? 'OFF' : 'ON', true)}
+              label="Detect"
+              labelPosition="before"
+            />
+            {config.record.enabled_in_config && <Switch
+              className="my-3"
+              checked={recordValue == 'ON'}
+              id="record"
+              onChange={() => sendRecordings(recordValue === 'ON' ? 'OFF' : 'ON', true)}
+              label="Recordings"
+              labelPosition="before"
+            />}
+            <Switch
+              className="my-3"
+              checked={snapshotValue == 'ON'}
+              id="snapshot"
+              onChange={() => sendSnapshots(snapshotValue === 'ON' ? 'OFF' : 'ON', true)}
+              label="Snapshots"
+              labelPosition="before"
+            />
+            {config.audio.enabled_in_config && <Switch
+              className="my-3"
+              checked={audioValue == 'ON'}
+              id="audio"
+              onChange={() => sendAudio(audioValue === 'ON' ? 'OFF' : 'ON', true)}
+              label="Audio Detection"
+              labelPosition="before"
+            />}
+          </div>
+          <div className="p-2 flex justify-start flex-row-reverse space-x-2">
+            <Button className="ml-2" onClick={() => setCameraOptions('')} type="text">
+              Close
+            </Button>
+          </div>
+        </Dialog>
+      )}
+      <Card
+        buttons={buttons}
+        href={href}
+        header={cleanName}
+        icons={icons}
+        media={<CameraImage camera={name} stretch />}
+      />
+    </Fragment>
   );
 }

View File

@@ -29,7 +29,7 @@ export default function Config() {
       .then((response) => {
         if (response.status === 200) {
           setError('');
-          setSuccess(response.data);
+          setSuccess(response.data.message);
         }
       })
       .catch((error) => {

View File

@@ -27,8 +27,9 @@ export default function Storage() {
   const getUnitSize = (MB) => {
     if (isNaN(MB) || MB < 0) return 'Invalid number';
     if (MB < 1024) return `${MB} MiB`;
+    if (MB < 1048576) return `${(MB / 1024).toFixed(2)} GiB`;

-    return `${(MB / 1024).toFixed(2)} GiB`;
+    return `${(MB / 1048576).toFixed(2)} TiB`;
   };

   let storage_usage;

View File

@@ -301,12 +301,16 @@ export default function System() {
                   <Tr>
                     <Th>GPU %</Th>
                     <Th>Memory %</Th>
+                    {'dec' in gpu_usages[gpu] && (<Th>Decoder %</Th>)}
+                    {'enc' in gpu_usages[gpu] && (<Th>Encoder %</Th>)}
                   </Tr>
                 </Thead>
                 <Tbody>
                   <Tr>
                     <Td>{gpu_usages[gpu]['gpu']}</Td>
                     <Td>{gpu_usages[gpu]['mem']}</Td>
+                    {'dec' in gpu_usages[gpu] && (<Td>{gpu_usages[gpu]['dec']}</Td>)}
+                    {'enc' in gpu_usages[gpu] && (<Td>{gpu_usages[gpu]['enc']}</Td>)}
                   </Tr>
                 </Tbody>
               </Table>

View File

@@ -1,5 +1,6 @@
 import { h } from 'preact';
 import * as CameraImage from '../../components/CameraImage';
+import * as Hooks from '../../hooks';
 import * as WS from '../../api/ws';
 import Cameras from '../Cameras';
 import { fireEvent, render, screen, waitForElementToBeRemoved } from 'testing-library';
@@ -8,6 +9,7 @@ describe('Cameras Route', () => {
   beforeEach(() => {
     vi.spyOn(CameraImage, 'default').mockImplementation(() => <div data-testid="camera-image" />);
     vi.spyOn(WS, 'useWs').mockImplementation(() => ({ value: { payload: 'OFF' }, send: vi.fn() }));
+    vi.spyOn(Hooks, 'useResizeObserver').mockImplementation(() => [{ width: 1000 }]);
   });

   test('shows an ActivityIndicator if not yet loaded', async () => {

View File

@@ -1,6 +1,7 @@
 import { h } from 'preact';
 import * as CameraImage from '../../components/CameraImage';
 import * as WS from '../../api/ws';
+import * as Hooks from '../../hooks';
 import Cameras from '../Cameras';
 import { render, screen, waitForElementToBeRemoved } from 'testing-library';
@@ -8,6 +9,7 @@ describe('Recording Route', () => {
   beforeEach(() => {
     vi.spyOn(CameraImage, 'default').mockImplementation(() => <div data-testid="camera-image" />);
     vi.spyOn(WS, 'useWs').mockImplementation(() => ({ value: { payload: 'OFF' }, send: jest.fn() }));
+    vi.spyOn(Hooks, 'useResizeObserver').mockImplementation(() => [{ width: 1000 }]);
   });

   test('shows an ActivityIndicator if not yet loaded', async () => {