Merge branch 'dev' of https://github.com/hawkeye217/frigate into lost-object-zoom
Commit 2722fe18b6
@@ -121,13 +121,15 @@ RUN apt-get -qq update \
         apt-transport-https \
         gnupg \
         wget \
-    && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 648ACFD622F3D138 \
-    && echo "deb http://deb.debian.org/debian bullseye main contrib non-free" | tee /etc/apt/sources.list.d/raspi.list \
+    # the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html
+    && wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \
+        gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \
+    && echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \
+        tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \
    && apt-get -qq update \
    && apt-get -qq install -y \
        python3.9 \
        python3.9-dev \
-        wget \
        # opencv dependencies
        build-essential cmake git pkg-config libgtk-3-dev \
        libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
@@ -13,8 +13,9 @@ Each role can only be assigned to one input per camera. The options for roles are:

 | Role     | Description |
 | -------- | ---------------------------------------------------------------------------------------- |
-| `detect` | Main feed for object detection |
+| `detect` | Main feed for object detection. [docs](object_detectors.md) |
 | `record` | Saves segments of the video feed based on configuration settings. [docs](record.md) |
+| `audio`  | Feed for audio based detection. [docs](audio_detectors.md) |
 | `rtmp`   | Deprecated: Broadcast as an RTMP feed for other services to consume. [docs](restream.md) |

 ```yaml
@@ -10,9 +10,9 @@ Frigate has different live view options, some of which require the bundled `go2rtc`
 Live view options can be selected while viewing the live stream. The options are:

 | Source | Latency | Frame Rate                             | Resolution     | Audio                        | Requires go2rtc | Other Limitations |
-| ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | --------------- | -------------------------------------------- |
+| ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | --------------- | ------------------------------------------------- |
 | jsmpeg | low | same as `detect -> fps`, capped at 10 | same as detect | no                           | no              | none |
-| mse    | low | native                                 | native         | yes (depends on audio codec) | yes             | not supported on iOS, Firefox is h.264 only |
+| mse    | low | native                                 | native         | yes (depends on audio codec) | yes             | iPhone requires iOS 17.1+, Firefox is h.264 only |
 | webrtc | lowest | native                                | native         | yes (depends on audio codec) | yes             | requires extra config, doesn't support h.265 |

 ### Audio Support
@@ -37,12 +37,12 @@ There may be some cameras that you would prefer to use the sub stream for live view
 ```yaml
 go2rtc:
   streams:
-    rtsp_cam:
+    test_cam:
       - rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio.
-      - "ffmpeg:rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to opus
-    rtsp_cam_sub:
+      - "ffmpeg:test_cam#audio=opus" # <- copy of the stream which transcodes audio to opus for webrtc
+    test_cam_sub:
       - rtsp://192.168.1.5:554/substream # <- stream which supports video & aac audio.
-      - "ffmpeg:rtsp_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus
+      - "ffmpeg:test_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus for webrtc

 cameras:
   test_cam:
@@ -59,7 +59,7 @@ cameras:
       roles:
         - detect
     live:
-      stream_name: rtsp_cam_sub
+      stream_name: test_cam_sub
 ```

 ### WebRTC extra configuration:
@@ -172,6 +172,8 @@ Events from the database. Accepts the following query string parameters:
 | `min_score`    | float | Minimum score of the event |
 | `max_score`    | float | Maximum score of the event |
 | `is_submitted` | int   | Filter events that are submitted to Frigate+ (0 or 1) |
+| `min_length`   | float | Minimum length of the event |
+| `max_length`   | float | Maximum length of the event |

### `GET /api/timeline`
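For reference, the new length filters combine with the existing ones as ordinary query string parameters. A minimal sketch of calling the endpoint from Python (the host `frigate.local:5000` is a placeholder for your own instance):

```python
import requests

# Fetch events between 2 and 60 seconds long with a score of at least 0.7.
resp = requests.get(
    "http://frigate.local:5000/api/events",
    params={"min_score": 0.7, "min_length": 2.0, "max_length": 60.0},
    timeout=10,
)
resp.raise_for_status()
for event in resp.json():
    print(event["id"], event["label"])
```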
@@ -320,6 +322,12 @@ Get PTZ info for the camera.

 Create a manual event with a given `label` (ex: doorbell press) to capture a specific event besides an object being detected.

+:::caution
+
+Recording retention config still applies to manual events: if Frigate is configured with `mode: motion`, the manual event will only keep recording segments where motion occurred.
+
+:::
+
 **Optional Body:**

 ```json
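As a quick illustration of the manual events API described above, a sketch in Python; the endpoint path (`/api/events/<camera>/<label>/create`) and the `duration` body field follow the surrounding API docs and should be treated as assumptions here, not verbatim from this diff:

```python
import requests

# Create a manual "doorbell_press" event on the front_door camera.
# Endpoint shape and body fields are assumptions based on the docs above.
resp = requests.post(
    "http://frigate.local:5000/api/events/front_door/doorbell_press/create",
    json={"duration": 30},  # optional body; assumed to be seconds
    timeout=10,
)
print(resp.status_code, resp.json())
```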
@@ -70,10 +70,10 @@ objects:
     fedex:
       min_score: .75
     person:
-      min_score: .8
+      min_score: .65
       threshold: .85
     car:
-      min_score: .8
+      min_score: .65
       threshold: .85
 ```
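The distinction behind these two settings: `min_score` filters individual frame-level detections, while `threshold` applies to the tracked object's overall (median) score. A toy illustration of that interaction, not Frigate's actual code:

```python
from statistics import median

# Per-frame detection scores for one tracked object (made-up numbers).
frame_scores = [0.62, 0.70, 0.88, 0.91, 0.86]
min_score, threshold = 0.65, 0.85

# Frames below min_score are ignored entirely...
kept = [s for s in frame_scores if s >= min_score]
# ...and the object is accepted once the median of kept scores reaches threshold.
print(median(kept) >= threshold)  # True (median of kept scores is 0.87)
```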
@@ -163,6 +163,8 @@ class FrigateApp:
                 "frame_queue": mp.Queue(maxsize=2),
                 "capture_process": None,
                 "process": None,
+                "audio_rms": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
+                "audio_dBFS": mp.Value("d", 0.0),  # type: ignore[typeddict-item]
             }
             self.ptz_metrics[camera_name] = {
                 "ptz_autotracker_enabled": mp.Value(  # type: ignore[typeddict-item]
@@ -500,6 +502,7 @@ class FrigateApp:
             args=(
                 self.config,
                 self.audio_recordings_info_queue,
+                self.camera_metrics,
                 self.feature_metrics,
                 self.inter_process_communicator,
             ),
@@ -14,7 +14,7 @@ import requests
 from setproctitle import setproctitle

 from frigate.comms.inter_process import InterProcessCommunicator
-from frigate.config import CameraConfig, FrigateConfig
+from frigate.config import CameraConfig, CameraInput, FfmpegConfig, FrigateConfig
 from frigate.const import (
     AUDIO_DURATION,
     AUDIO_FORMAT,
@@ -26,7 +26,7 @@ from frigate.const import (
 from frigate.ffmpeg_presets import parse_preset_input
 from frigate.log import LogPipe
 from frigate.object_detection import load_labels
-from frigate.types import FeatureMetricsTypes
+from frigate.types import CameraMetricsTypes, FeatureMetricsTypes
 from frigate.util.builtin import get_ffmpeg_arg_list
 from frigate.util.services import listen
 from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg
@@ -39,19 +39,36 @@ except ModuleNotFoundError:
 logger = logging.getLogger(__name__)


-def get_ffmpeg_command(input_args: list[str], input_path: str) -> list[str]:
-    return get_ffmpeg_arg_list(
-        f"ffmpeg {{}} -i {{}} -f {AUDIO_FORMAT} -ar {AUDIO_SAMPLE_RATE} -ac 1 -y {{}}".format(
-            " ".join(input_args),
-            input_path,
-            "pipe:",
-        )
-    )
+def get_ffmpeg_command(ffmpeg: FfmpegConfig) -> list[str]:
+    ffmpeg_input: CameraInput = [i for i in ffmpeg.inputs if "audio" in i.roles][0]
+    input_args = get_ffmpeg_arg_list(ffmpeg.global_args) + (
+        parse_preset_input(ffmpeg_input.input_args, 1)
+        or ffmpeg_input.input_args
+        or parse_preset_input(ffmpeg.input_args, 1)
+        or ffmpeg.input_args
+    )
+    return (
+        ["ffmpeg", "-vn"]
+        + input_args
+        + ["-i"]
+        + [ffmpeg_input.path]
+        + [
+            "-f",
+            f"{AUDIO_FORMAT}",
+            "-ar",
+            f"{AUDIO_SAMPLE_RATE}",
+            "-ac",
+            "1",
+            "-y",
+            "pipe:",
+        ]
+    )


 def listen_to_audio(
     config: FrigateConfig,
     recordings_info_queue: mp.Queue,
+    camera_metrics: dict[str, CameraMetricsTypes],
     process_info: dict[str, FeatureMetricsTypes],
     inter_process_communicator: InterProcessCommunicator,
 ) -> None:
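For intuition, here is roughly the argument list the refactored `get_ffmpeg_command()` produces for a camera whose audio-role input is an RTSP stream. The URL and input args are illustrative only, and the `s16le`/`16000` values are what `AUDIO_FORMAT`/`AUDIO_SAMPLE_RATE` are assumed to hold in `frigate.const`:

```python
# Illustrative output only; actual values depend on the camera config
# and on frigate.const.AUDIO_FORMAT / AUDIO_SAMPLE_RATE.
[
    "ffmpeg", "-vn",                       # video disabled up front
    "-rtsp_transport", "tcp",              # from the input (or preset) args
    "-i", "rtsp://192.168.1.5:554/live0",  # the input with the "audio" role
    "-f", "s16le",                         # AUDIO_FORMAT (assumed)
    "-ar", "16000",                        # AUDIO_SAMPLE_RATE (assumed)
    "-ac", "1",                            # mono
    "-y", "pipe:",                         # raw PCM streamed to stdout
]
```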
@@ -80,6 +97,7 @@ def listen_to_audio(
             audio = AudioEventMaintainer(
                 camera,
                 recordings_info_queue,
+                camera_metrics,
                 process_info,
                 stop_event,
                 inter_process_communicator,
@@ -153,6 +171,7 @@ class AudioEventMaintainer(threading.Thread):
         self,
         camera: CameraConfig,
         recordings_info_queue: mp.Queue,
+        camera_metrics: dict[str, CameraMetricsTypes],
         feature_metrics: dict[str, FeatureMetricsTypes],
         stop_event: mp.Event,
         inter_process_communicator: InterProcessCommunicator,
@@ -161,6 +180,7 @@ class AudioEventMaintainer(threading.Thread):
         self.name = f"{camera.name}_audio_event_processor"
         self.config = camera
         self.recordings_info_queue = recordings_info_queue
+        self.camera_metrics = camera_metrics
         self.feature_metrics = feature_metrics
         self.inter_process_communicator = inter_process_communicator
         self.detections: dict[dict[str, any]] = {}
@@ -169,11 +189,7 @@ class AudioEventMaintainer(threading.Thread):
         self.shape = (int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE)),)
         self.chunk_size = int(round(AUDIO_DURATION * AUDIO_SAMPLE_RATE * 2))
         self.logger = logging.getLogger(f"audio.{self.config.name}")
-        self.ffmpeg_cmd = get_ffmpeg_command(
-            get_ffmpeg_arg_list(self.config.ffmpeg.global_args)
-            + parse_preset_input("preset-rtsp-audio-only", 1),
-            [i.path for i in self.config.ffmpeg.inputs if "audio" in i.roles][0],
-        )
+        self.ffmpeg_cmd = get_ffmpeg_command(self.config.ffmpeg)
         self.logpipe = LogPipe(f"ffmpeg.{self.config.name}.audio")
         self.audio_listener = None
@@ -184,6 +200,9 @@ class AudioEventMaintainer(threading.Thread):
         audio_as_float = audio.astype(np.float32)
         rms, dBFS = self.calculate_audio_levels(audio_as_float)

+        self.camera_metrics[self.config.name]["audio_rms"].value = rms
+        self.camera_metrics[self.config.name]["audio_dBFS"].value = dBFS
+
         # only run audio detection when volume is above min_volume
         if rms >= self.config.audio.min_volume:
             # add audio info to recordings queue
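`calculate_audio_levels()` itself is not part of this diff; a plausible standalone sketch of the RMS and dBFS math being recorded here, assuming 16-bit PCM samples:

```python
import numpy as np

def audio_levels(audio: np.ndarray) -> tuple[float, float]:
    # Toy sketch: RMS of the chunk, and dBFS relative to int16 full scale.
    audio = audio.astype(np.float32)
    rms = float(np.sqrt(np.mean(np.square(audio))))
    dBFS = 20.0 * np.log10(max(rms, 1e-10) / 32768.0)  # guard against log10(0)
    return rms, dBFS
```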
@@ -256,13 +256,6 @@ PRESETS_INPUT = {
         "-use_wallclock_as_timestamps",
         "1",
     ],
-    "preset-rtsp-audio-only": [
-        "-rtsp_transport",
-        "tcp",
-        TIMEOUT_PARAM,
-        "5000000",
-        "-vn",
-    ],
     "preset-rtsp-restream": _user_agent_args
     + [
         "-rtsp_transport",
@@ -805,6 +805,8 @@ def events():
     min_score = request.args.get("min_score", type=float)
     max_score = request.args.get("max_score", type=float)
     is_submitted = request.args.get("is_submitted", type=int)
+    min_length = request.args.get("min_length", type=float)
+    max_length = request.args.get("max_length", type=float)

     clauses = []
@@ -933,6 +935,12 @@ def events():
     if min_score is not None:
         clauses.append((Event.data["score"] >= min_score))

+    if min_length is not None:
+        clauses.append(((Event.end_time - Event.start_time) >= min_length))
+
+    if max_length is not None:
+        clauses.append(((Event.end_time - Event.start_time) <= max_length))
+
     if is_submitted is not None:
         if is_submitted == 0:
             clauses.append((Event.plus_id.is_null()))
@@ -20,3 +20,7 @@ class MotionDetector(ABC):
     @abstractmethod
     def detect(self, frame):
         pass
+
+    @abstractmethod
+    def is_calibrating(self):
+        pass
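Any concrete detector must now satisfy the extended contract; a hypothetical minimal subclass (not part of Frigate) showing the shape:

```python
# Hypothetical no-op detector illustrating the new abstract contract.
class NullMotionDetector(MotionDetector):
    def detect(self, frame):
        return []  # never reports motion boxes

    def is_calibrating(self):
        return False  # a no-op detector is never recalibrating
```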
@@ -38,6 +38,9 @@ class FrigateMotionDetector(MotionDetector):
         self.threshold = threshold
         self.contour_area = contour_area

+    def is_calibrating(self):
+        return False
+
     def detect(self, frame):
         motion_boxes = []
@@ -49,6 +49,9 @@ class ImprovedMotionDetector(MotionDetector):
         self.contrast_values[:, 1:2] = 255
         self.contrast_values_index = 0

+    def is_calibrating(self):
+        return self.calibrating
+
     def detect(self, frame):
         motion_boxes = []

@@ -141,7 +144,6 @@ class ImprovedMotionDetector(MotionDetector):

         # if calibrating or the motion contours are > 80% of the image area (lightning, ir, ptz) recalibrate
         if self.calibrating or pct_motion > self.config.lightning_threshold:
             motion_boxes = []
             self.calibrating = True
-
         if self.save_images:
@@ -355,6 +355,7 @@ class RecordingMaintainer(threading.Thread):
                 "+faststart",
                 file_path,
                 stderr=asyncio.subprocess.PIPE,
+                stdout=asyncio.subprocess.DEVNULL,
             )
             await p.wait()
@@ -176,6 +176,8 @@ async def set_gpu_stats(
             stats[nvidia_usage[i]["name"]] = {
                 "gpu": str(round(float(nvidia_usage[i]["gpu"]), 2)) + "%",
                 "mem": str(round(float(nvidia_usage[i]["mem"]), 2)) + "%",
+                "enc": str(round(float(nvidia_usage[i]["enc"]), 2)) + "%",
+                "dec": str(round(float(nvidia_usage[i]["dec"]), 2)) + "%",
             }

         else:
@@ -266,6 +268,8 @@ def stats_snapshot(
             "pid": pid,
             "capture_pid": cpid,
             "ffmpeg_pid": ffmpeg_pid,
+            "audio_rms": round(camera_stats["audio_rms"].value, 4),
+            "audio_dBFS": round(camera_stats["audio_dBFS"].value, 4),
         }

     stats["detectors"] = {}
@@ -1027,7 +1027,12 @@ class TestConfig(unittest.TestCase):
                         "roles": ["detect"],
                     },
                 ]
-            }
+            },
+            "detect": {
+                "height": 720,
+                "width": 1280,
+                "fps": 5,
+            },
         }
     },
 }
@@ -1082,6 +1087,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
             "snapshots": {
                 "height": 100,
             },
@@ -1107,7 +1117,12 @@ class TestConfig(unittest.TestCase):
                         "roles": ["detect"],
                     },
                 ]
-            }
+            },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -1132,6 +1147,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
             "snapshots": {
                 "height": 150,
                 "enabled": True,
@@ -1160,6 +1180,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -1181,7 +1206,12 @@ class TestConfig(unittest.TestCase):
                         "roles": ["detect"],
                     },
                 ]
-            }
+            },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -1205,6 +1235,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
             "rtmp": {
                 "enabled": True,
             },
@@ -1234,6 +1269,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -1257,6 +1297,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -1278,7 +1323,12 @@ class TestConfig(unittest.TestCase):
                         "roles": ["detect"],
                     },
                 ]
-            }
+            },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -1302,6 +1352,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
             "live": {
                 "quality": 7,
             },
@@ -1329,6 +1384,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -1350,7 +1410,12 @@ class TestConfig(unittest.TestCase):
                         "roles": ["detect"],
                     },
                 ]
-            }
+            },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -1375,6 +1440,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
             "timestamp_style": {"position": "bl", "thickness": 4},
         }
     },
@@ -1400,6 +1470,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -1423,6 +1498,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -1450,6 +1530,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ],
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
         }
     },
 }
@@ -1475,6 +1560,11 @@ class TestConfig(unittest.TestCase):
                     },
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
             "zones": {
                 "steps": {
                     "coordinates": "0,0,0,0",
@@ -1546,6 +1636,11 @@ class TestConfig(unittest.TestCase):
                     {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
             "onvif": {"autotracking": {"movement_weights": "1.23, 2.34, 0.50"}},
         }
     },
@@ -1569,6 +1664,11 @@ class TestConfig(unittest.TestCase):
                     {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                 ]
             },
+            "detect": {
+                "height": 1080,
+                "width": 1920,
+                "fps": 5,
+            },
             "onvif": {"autotracking": {"movement_weights": "1.234, 2.345a"}},
         }
     },
@@ -77,7 +77,7 @@ class NorfairTracker(ObjectTracker):
         self.tracker = Tracker(
             distance_function=frigate_distance,
             distance_threshold=2.5,
-            initialization_delay=0,
+            initialization_delay=config.detect.fps / 2,
             hit_counter_max=self.max_disappeared,
         )
         if self.ptz_autotracker_enabled.value:
@@ -106,11 +106,6 @@ class NorfairTracker(ObjectTracker):
             "ymax": self.detect_config.height,
         }

-        # start object with a hit count of `fps` to avoid quick detection -> loss
-        next(
-            (o for o in self.tracker.tracked_objects if o.global_id == track_id)
-        ).hit_counter = self.camera_config.detect.fps
-
     def deregister(self, id, track_id):
         del self.tracked_objects[id]
         del self.disappeared[id]
@@ -23,6 +23,8 @@ class CameraMetricsTypes(TypedDict):
     process_fps: Synchronized
     read_start: Synchronized
     skipped_fps: Synchronized
+    audio_rms: Synchronized
+    audio_dBFS: Synchronized


 class PTZMetricsTypes(TypedDict):
@@ -293,6 +293,8 @@ def get_nvidia_gpu_stats() -> dict[int, dict]:
             handle = nvml.nvmlDeviceGetHandleByIndex(i)
             meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
             util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
+            enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle)
+            dec = try_get_info(nvml.nvmlDeviceGetDecoderUtilization, handle)
             if util != "N/A":
                 gpu_util = util.gpu
             else:
@@ -303,10 +305,22 @@ def get_nvidia_gpu_stats() -> dict[int, dict]:
             else:
                 gpu_mem_util = -1

+            if enc != "N/A":
+                enc_util = enc[0]
+            else:
+                enc_util = -1
+
+            if dec != "N/A":
+                dec_util = dec[0]
+            else:
+                dec_util = -1
+
             results[i] = {
                 "name": nvml.nvmlDeviceGetName(handle),
                 "gpu": gpu_util,
                 "mem": gpu_mem_util,
+                "enc": enc_util,
+                "dec": dec_util,
             }
         except Exception:
             pass
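For context, a minimal standalone sketch of reading encoder/decoder utilization with pynvml, mirroring what `get_nvidia_gpu_stats()` collects; `nvmlDeviceGetEncoderUtilization` returns a (utilization, sampling period) pair, which is why the code above indexes `enc[0]`/`dec[0]`:

```python
import pynvml as nvml

nvml.nvmlInit()
handle = nvml.nvmlDeviceGetHandleByIndex(0)
enc_util, _sampling_us = nvml.nvmlDeviceGetEncoderUtilization(handle)
dec_util, _sampling_us = nvml.nvmlDeviceGetDecoderUtilization(handle)
print(f"enc: {enc_util}% dec: {dec_util}%")
nvml.nvmlShutdown()
```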
@@ -21,7 +21,6 @@ from frigate.log import LogPipe
 from frigate.motion import MotionDetector
 from frigate.motion.improved_motion import ImprovedMotionDetector
 from frigate.object_detection import RemoteObjectDetector
-from frigate.ptz.autotrack import ptz_moving_at_frame_time
 from frigate.track import ObjectTracker
 from frigate.track.norfair_tracker import NorfairTracker
 from frigate.types import PTZMetricsTypes
@@ -777,24 +776,8 @@ def process_frames(
             logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
             continue

-        # always returns false if autotracking is disabled
-        ptz_moving = ptz_moving_at_frame_time(
-            frame_time,
-            ptz_metrics["ptz_start_time"].value,
-            ptz_metrics["ptz_stop_time"].value,
-        )
-
-        motion_boxes = (
-            motion_detector.detect(frame)
-            if motion_enabled.value and not ptz_moving
-            else []
-        )
-
-        # full frame motion if ptz is moving from autotracking - remove this later
-        # better to have motion detector expose property when it's calibrating
-        # but still return motion boxes for retention purposes
-        if ptz_moving:
-            motion_boxes = [(0, 0, frame_shape[1], frame_shape[0] * 3 // 2)]
+        # look for motion if enabled
+        motion_boxes = motion_detector.detect(frame) if motion_enabled.value else []

         regions = []
         consolidated_detections = []
@@ -819,8 +802,10 @@ def process_frames(
                 )
                 # and it hasn't disappeared
                 and object_tracker.disappeared[obj["id"]] == 0
-                # and it doesn't overlap with any current motion boxes
-                and not intersects_any(obj["box"], motion_boxes)
+                # and it doesn't overlap with any current motion boxes when not calibrating
+                and not intersects_any(
+                    obj["box"], [] if motion_detector.is_calibrating() else motion_boxes
+                )
             ]

             # get tracked object boxes that aren't stationary
@@ -830,7 +815,10 @@ def process_frames(
             if obj["id"] not in stationary_object_ids
         ]

-        combined_boxes = motion_boxes + tracked_object_boxes
+        combined_boxes = tracked_object_boxes
+        # only add in the motion boxes when not calibrating
+        if not motion_detector.is_calibrating():
+            combined_boxes += motion_boxes

         cluster_candidates = get_cluster_candidates(
             frame_shape, region_min_size, combined_boxes
@@ -157,12 +157,9 @@ class VideoRTC extends HTMLElement {
     if (this.ws) this.ws.send(JSON.stringify(value));
   }

-  codecs(type) {
-    const test =
-      type === 'mse'
-        ? (codec) => MediaSource.isTypeSupported(`video/mp4; codecs="${codec}"`)
-        : (codec) => this.video.canPlayType(`video/mp4; codecs="${codec}"`);
-    return this.CODECS.filter(test).join();
+  /** @param {Function} isSupported */
+  codecs(isSupported) {
+    return this.CODECS.filter(codec => isSupported(`video/mp4; codecs="${codec}"`)).join();
   }

   /**
@@ -311,7 +308,7 @@ class VideoRTC extends HTMLElement {

     const modes = [];

-    if (this.mode.indexOf('mse') >= 0 && 'MediaSource' in window) {
+    if (this.mode.indexOf('mse') >= 0 && ('MediaSource' in window || 'ManagedMediaSource' in window)) {
       // iPhone
       modes.push('mse');
       this.onmse();
@@ -363,18 +360,29 @@ class VideoRTC extends HTMLElement {
   }

   onmse() {
-    const ms = new MediaSource();
-    ms.addEventListener(
-      'sourceopen',
-      () => {
+    /** @type {MediaSource} */
+    let ms;
+
+    if ('ManagedMediaSource' in window) {
+      const MediaSource = window.ManagedMediaSource;
+
+      ms = new MediaSource();
+      ms.addEventListener('sourceopen', () => {
+        this.send({type: 'mse', value: this.codecs(MediaSource.isTypeSupported)});
+      }, {once: true});
+
+      this.video.disableRemotePlayback = true;
+      this.video.srcObject = ms;
+    } else {
+      ms = new MediaSource();
+      ms.addEventListener('sourceopen', () => {
         URL.revokeObjectURL(this.video.src);
-        this.send({ type: 'mse', value: this.codecs('mse') });
-      },
-      { once: true }
-    );
+        this.send({type: 'mse', value: this.codecs(MediaSource.isTypeSupported)});
+      }, {once: true});

       this.video.src = URL.createObjectURL(ms);
       this.video.srcObject = null;
+    }
     this.play();

     this.mseCodecs = '';
@@ -580,7 +588,7 @@ class VideoRTC extends HTMLElement {
       video2.src = `data:video/mp4;base64,${VideoRTC.btoa(data)}`;
     };

-    this.send({ type: 'mp4', value: this.codecs('mp4') });
+    this.send({ type: 'mp4', value: this.codecs(this.video.canPlayType) });
   }

   static btoa(buffer) {
@@ -4,9 +4,7 @@ import Menu from './Menu';
 import { ArrowDropdown } from '../icons/ArrowDropdown';
 import Heading from './Heading';
 import Button from './Button';
-import CameraIcon from '../icons/Camera';
-import SpeakerIcon from '../icons/Speaker';
-import useSWR from 'swr';
+import SelectOnlyIcon from '../icons/SelectOnly';

 export default function MultiSelect({ className, title, options, selection, onToggle, onShowAll, onSelectSingle }) {
   const popupRef = useRef(null);
@@ -20,7 +18,6 @@ export default function MultiSelect({ className, title, options, selection, onToggle, onShowAll, onSelectSingle }) {
   };

   const menuHeight = Math.round(window.innerHeight * 0.55);
-  const { data: config } = useSWR('config');
   return (
     <div className={`${className} p-2`} ref={popupRef}>
       <div className="flex justify-between min-w-[120px]" onClick={() => setState({ showMenu: true })}>
@@ -61,7 +58,7 @@ export default function MultiSelect({ className, title, options, selection, onToggle, onShowAll, onSelectSingle }) {
                   className="max-h-[35px] mx-2"
                   onClick={() => onSelectSingle(item)}
                 >
-                  {title === 'Labels' && config.audio.listen.includes(item) ? <SpeakerIcon /> : <CameraIcon />}
+                  {(<SelectOnlyIcon />)}
                 </Button>
               </div>
             </div>
@@ -1,7 +1,7 @@
 import { h } from 'preact';
 import { useCallback, useState } from 'preact/hooks';

-export default function Switch({ checked, id, onChange, label, labelPosition = 'before' }) {
+export default function Switch({ className, checked, id, onChange, label, labelPosition = 'before' }) {
   const [isFocused, setFocused] = useState(false);

   const handleChange = useCallback(() => {
@@ -21,7 +21,7 @@ export default function Switch({ checked, id, onChange, label, labelPosition = 'before' }) {
   return (
     <label
       htmlFor={id}
-      className={`flex items-center space-x-4 w-full ${onChange ? 'cursor-pointer' : 'cursor-not-allowed'}`}
+      className={`${className ? className : ''} flex items-center space-x-4 w-full ${onChange ? 'cursor-pointer' : 'cursor-not-allowed'}`}
     >
       {label && labelPosition === 'before' ? (
         <div data-testid={`${id}-label`} className="inline-flex flex-grow">
web/src/icons/SelectOnly.jsx (new file, 21 lines)
@@ -0,0 +1,21 @@
+import { h } from 'preact';
+import { memo } from 'preact/compat';
+
+export function SelectOnly({ className = 'h-5 w-5', stroke = 'currentColor', fill = 'none', onClick = () => {} }) {
+  return (
+    <svg
+      xmlns="http://www.w3.org/2000/svg"
+      className={className}
+      fill={fill}
+      viewBox="0 0 24 24"
+      stroke={stroke}
+      onClick={onClick}
+    >
+      <path
+        d="M12 8c-2.21 0-4 1.79-4 4s1.79 4 4 4 4-1.79 4-4-1.79-4-4-4zm-7 7H3v4c0 1.1.9 2 2 2h4v-2H5v-4zM5 5h4V3H5c-1.1 0-2 .9-2 2v4h2V5zm14-2h-4v2h4v4h2V5c0-1.1-.9-2-2-2zm0 16h-4v2h4c1.1 0 2-.9 2-2v-4h-2v4z"
+      />
+    </svg>
+  );
+}
+
+export default memo(SelectOnly);
@@ -35,7 +35,7 @@ export default function Birdseye() {
   let player;
   const playerClass = ptzCameras.length || isMaxWidth ? 'w-full' : 'max-w-5xl xl:w-1/2';
   if (viewSource == 'mse' && config.birdseye.restream) {
-    if ('MediaSource' in window) {
+    if ('MediaSource' in window || 'ManagedMediaSource' in window) {
       player = (
         <Fragment>
           <div className={playerClass}>
@@ -50,7 +50,7 @@ export default function Birdseye() {
       player = (
         <Fragment>
           <div className="w-5xl text-center text-sm">
-            MSE is not supported on iOS devices. You'll need to use jsmpeg or webRTC. See the docs for more info.
+            MSE is only supported on iOS 17.1+. You'll need to update if available or use jsmpeg / webRTC streams. See the docs for more info.
           </div>
         </Fragment>
       );
@@ -116,7 +116,7 @@ export default function Camera({ camera }) {
   let player;
   if (viewMode === 'live') {
     if (viewSource == 'mse' && restreamEnabled) {
-      if ('MediaSource' in window) {
+      if ('MediaSource' in window || 'ManagedMediaSource' in window) {
        player = (
          <Fragment>
            <div className="max-w-5xl">
@@ -133,7 +133,7 @@ export default function Camera({ camera }) {
        player = (
          <Fragment>
            <div className="w-5xl text-center text-sm">
-              MSE is not supported on iOS devices. You'll need to use jsmpeg or webRTC. See the docs for more info.
+              MSE is only supported on iOS 17.1+. You'll need to update if available or use jsmpeg / webRTC streams. See the docs for more info.
            </div>
          </Fragment>
        );
@@ -135,7 +135,7 @@ export default function CameraMasks({ camera }) {
       const endpoint = `config/set?${queryParameters}`;
       const response = await axios.put(endpoint);
       if (response.status === 200) {
-        setSuccess(response.data);
+        setSuccess(response.data.message);
       }
     } catch (error) {
       if (error.response) {
@@ -5,24 +5,41 @@ import CameraImage from '../components/CameraImage';
 import AudioIcon from '../icons/Audio';
 import ClipIcon from '../icons/Clip';
 import MotionIcon from '../icons/Motion';
+import SettingsIcon from '../icons/Settings';
 import SnapshotIcon from '../icons/Snapshot';
 import { useAudioState, useDetectState, useRecordingsState, useSnapshotsState } from '../api/ws';
 import { useMemo } from 'preact/hooks';
 import useSWR from 'swr';
+import { useRef, useState } from 'react';
+import { useResizeObserver } from '../hooks';
+import Dialog from '../components/Dialog';
+import Switch from '../components/Switch';
+import Heading from '../components/Heading';
+import Button from '../components/Button';

 export default function Cameras() {
   const { data: config } = useSWR('config');

+  const containerRef = useRef(null);
+  const [{ width: containerWidth }] = useResizeObserver(containerRef);
+  // Add scrollbar width (when visible) to the available observer width to eliminate screen juddering.
+  // https://github.com/blakeblackshear/frigate/issues/1657
+  let scrollBarWidth = 0;
+  if (window.innerWidth && document.body.offsetWidth) {
+    scrollBarWidth = window.innerWidth - document.body.offsetWidth;
+  }
+  const availableWidth = scrollBarWidth ? containerWidth + scrollBarWidth : containerWidth;
+
   return !config ? (
     <ActivityIndicator />
   ) : (
-    <div className="grid grid-cols-1 3xl:grid-cols-3 md:grid-cols-2 gap-4 p-2 px-4">
-      <SortedCameras config={config} unsortedCameras={config.cameras} />
+    <div className="grid grid-cols-1 3xl:grid-cols-3 md:grid-cols-2 gap-4 p-2 px-4" ref={containerRef}>
+      <SortedCameras config={config} unsortedCameras={config.cameras} availableWidth={availableWidth} />
     </div>
   );
 }

-function SortedCameras({ config, unsortedCameras }) {
+function SortedCameras({ config, unsortedCameras, availableWidth }) {
   const sortedCameras = useMemo(
     () =>
       Object.entries(unsortedCameras)
@@ -34,17 +51,20 @@ function SortedCameras({ config, unsortedCameras }) {
   return (
     <Fragment>
       {sortedCameras.map(([camera, conf]) => (
-        <Camera key={camera} name={camera} config={config.cameras[camera]} conf={conf} />
+        <Camera key={camera} name={camera} config={config.cameras[camera]} conf={conf} availableWidth={availableWidth} />
       ))}
     </Fragment>
   );
 }

-function Camera({ name, config }) {
+function Camera({ name, config, availableWidth }) {
   const { payload: detectValue, send: sendDetect } = useDetectState(name);
   const { payload: recordValue, send: sendRecordings } = useRecordingsState(name);
   const { payload: snapshotValue, send: sendSnapshots } = useSnapshotsState(name);
   const { payload: audioValue, send: sendAudio } = useAudioState(name);

+  const [cameraOptions, setCameraOptions] = useState('');
+
   const href = `/cameras/${name}`;
   const buttons = useMemo(() => {
     return [
@@ -56,7 +76,15 @@ function Camera({ name, config }) {
     return `${name.replaceAll('_', ' ')}`;
   }, [name]);
   const icons = useMemo(
-    () => [
+    () => (availableWidth < 448 ? [
+      {
+        icon: SettingsIcon,
+        color: 'gray',
+        onClick: () => {
+          setCameraOptions(config.name);
+        },
+      },
+    ] : [
       {
         name: `Toggle detect ${detectValue === 'ON' ? 'off' : 'on'}`,
         icon: MotionIcon,
@@ -95,11 +123,57 @@ function Camera({ name, config }) {
           },
         }
         : null,
-    ].filter((button) => button != null),
-    [config, audioValue, sendAudio, detectValue, sendDetect, recordValue, sendRecordings, snapshotValue, sendSnapshots]
+    ]).filter((button) => button != null),
+    [config, availableWidth, setCameraOptions, audioValue, sendAudio, detectValue, sendDetect, recordValue, sendRecordings, snapshotValue, sendSnapshots]
   );

   return (
     <Fragment>
+      {cameraOptions && (
+        <Dialog>
+          <div className="p-4">
+            <Heading size="md">{`${name.replaceAll('_', ' ')} Settings`}</Heading>
+            <Switch
+              className="my-3"
+              checked={detectValue == 'ON'}
+              id="detect"
+              onChange={() => sendDetect(detectValue === 'ON' ? 'OFF' : 'ON', true)}
+              label="Detect"
+              labelPosition="before"
+            />
+            {config.record.enabled_in_config && <Switch
+              className="my-3"
+              checked={recordValue == 'ON'}
+              id="record"
+              onChange={() => sendRecordings(recordValue === 'ON' ? 'OFF' : 'ON', true)}
+              label="Recordings"
+              labelPosition="before"
+            />}
+            <Switch
+              className="my-3"
+              checked={snapshotValue == 'ON'}
+              id="snapshot"
+              onChange={() => sendSnapshots(snapshotValue === 'ON' ? 'OFF' : 'ON', true)}
+              label="Snapshots"
+              labelPosition="before"
+            />
+            {config.audio.enabled_in_config && <Switch
+              className="my-3"
+              checked={audioValue == 'ON'}
+              id="audio"
+              onChange={() => sendAudio(audioValue === 'ON' ? 'OFF' : 'ON', true)}
+              label="Audio Detection"
+              labelPosition="before"
+            />}
+          </div>
+          <div className="p-2 flex justify-start flex-row-reverse space-x-2">
+            <Button className="ml-2" onClick={() => setCameraOptions('')} type="text">
+              Close
+            </Button>
+          </div>
+        </Dialog>
+      )}
+
       <Card
         buttons={buttons}
         href={href}
@@ -107,5 +181,6 @@ function Camera({ name, config }) {
       icons={icons}
       media={<CameraImage camera={name} stretch />}
     />
+    </Fragment>
   );
 }
@@ -29,7 +29,7 @@ export default function Config() {
       .then((response) => {
         if (response.status === 200) {
           setError('');
-          setSuccess(response.data);
+          setSuccess(response.data.message);
         }
       })
       .catch((error) => {
@@ -27,8 +27,9 @@ export default function Storage() {
   const getUnitSize = (MB) => {
     if (isNaN(MB) || MB < 0) return 'Invalid number';
     if (MB < 1024) return `${MB} MiB`;
+    if (MB < 1048576) return `${(MB / 1024).toFixed(2)} GiB`;

-    return `${(MB / 1024).toFixed(2)} GiB`;
+    return `${(MB / 1048576).toFixed(2)} TiB`;
   };

   let storage_usage;
@@ -301,12 +301,16 @@ export default function System() {
                     <Tr>
                       <Th>GPU %</Th>
                       <Th>Memory %</Th>
+                      {'dec' in gpu_usages[gpu] && (<Th>Decoder %</Th>)}
+                      {'enc' in gpu_usages[gpu] && (<Th>Encoder %</Th>)}
                     </Tr>
                   </Thead>
                   <Tbody>
                     <Tr>
                       <Td>{gpu_usages[gpu]['gpu']}</Td>
                       <Td>{gpu_usages[gpu]['mem']}</Td>
+                      {'dec' in gpu_usages[gpu] && (<Td>{gpu_usages[gpu]['dec']}</Td>)}
+                      {'enc' in gpu_usages[gpu] && (<Td>{gpu_usages[gpu]['enc']}</Td>)}
                     </Tr>
                   </Tbody>
                 </Table>
@@ -1,5 +1,6 @@
 import { h } from 'preact';
 import * as CameraImage from '../../components/CameraImage';
+import * as Hooks from '../../hooks';
 import * as WS from '../../api/ws';
 import Cameras from '../Cameras';
 import { fireEvent, render, screen, waitForElementToBeRemoved } from 'testing-library';
@@ -8,6 +9,7 @@ describe('Cameras Route', () => {
   beforeEach(() => {
     vi.spyOn(CameraImage, 'default').mockImplementation(() => <div data-testid="camera-image" />);
     vi.spyOn(WS, 'useWs').mockImplementation(() => ({ value: { payload: 'OFF' }, send: vi.fn() }));
+    vi.spyOn(Hooks, 'useResizeObserver').mockImplementation(() => [{ width: 1000 }]);
   });

   test('shows an ActivityIndicator if not yet loaded', async () => {
@@ -1,6 +1,7 @@
 import { h } from 'preact';
 import * as CameraImage from '../../components/CameraImage';
 import * as WS from '../../api/ws';
+import * as Hooks from '../../hooks';
 import Cameras from '../Cameras';
 import { render, screen, waitForElementToBeRemoved } from 'testing-library';

@@ -8,6 +9,7 @@ describe('Recording Route', () => {
   beforeEach(() => {
     vi.spyOn(CameraImage, 'default').mockImplementation(() => <div data-testid="camera-image" />);
     vi.spyOn(WS, 'useWs').mockImplementation(() => ({ value: { payload: 'OFF' }, send: jest.fn() }));
+    vi.spyOn(Hooks, 'useResizeObserver').mockImplementation(() => [{ width: 1000 }]);
   });

   test('shows an ActivityIndicator if not yet loaded', async () => {