Merge branch 'blakeblackshear:dev' into dev

Marc Altmann 2023-10-27 13:23:58 +02:00 committed by GitHub
commit 878a056324
13 changed files with 245 additions and 54 deletions

View File

@ -160,3 +160,7 @@ This is often caused by the same reason as above - the `MoveStatus` ONVIF parame
### I'm seeing this error in the logs: "Autotracker: motion estimator couldn't get transformations". What does this mean?
To maintain object tracking during PTZ moves, Frigate tracks the motion of your camera based on the details of the frame. If you are seeing this message, it could mean that your `zoom_factor` is set too high, the scene around your detected object does not have enough detail (like hard edges or color variations), or your camera's shutter speed is too slow and motion blur is occurring. Try reducing `zoom_factor`, finding a way to alter the scene around your object, or changing your camera's shutter speed.
### Calibration seems to have completed, but the camera is not actually moving to track my object. Why?
Some cameras have firmware that reports that FOV RelativeMove, the ONVIF command that Frigate uses for autotracking, is supported. However, if the camera does not pan or tilt when an object enters the required zone, your camera's firmware does not actually support FOV RelativeMove. One such camera is the Uniview IPC672LR-AX4DUPK, which moves its zoom motor instead of panning and tilting and does not follow the ONVIF standard.
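If you want to verify this outside of Frigate, the following is a minimal sketch using the third-party `onvif-zeep` package (not part of this change; the address and credentials are placeholders) that sends the same kind of RelativeMove request so you can watch whether the camera actually pans:

```python
# pip install onvif-zeep
from onvif import ONVIFCamera

# hypothetical address and credentials, replace with your camera's
cam = ONVIFCamera("192.168.1.10", 80, "admin", "password")
media = cam.create_media_service()
ptz = cam.create_ptz_service()
token = media.GetProfiles()[0].token

# request a small pan to the right; a compliant camera pans,
# while a camera with a false capability report may only move its zoom motor
request = ptz.create_type("RelativeMove")
request.ProfileToken = token
request.Translation = {"PanTilt": {"x": 0.1, "y": 0.0}, "Zoom": {"x": 0.0}}
ptz.RelativeMove(request)
```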

View File

@ -93,4 +93,5 @@ This list of working and non-working PTZ cameras is based on user feedback.
| Sunba 405-D20X | ✅ | ❌ | |
| Tapo C200 | ✅ | ❌ | Incomplete ONVIF support |
| Tapo C210 | ❌ | ❌ | Incomplete ONVIF support |
| Uniview IPC672LR-AX4DUPK | ✅ | ❌ | Firmware says FOV relative movement is supported, but camera doesn't actually move when sending ONVIF commands |
| Vikylin PTZ-2804X-I2 | ❌ | ❌ | Incomplete ONVIF support |

View File

@ -220,3 +220,29 @@ Topic to turn the PTZ autotracker for a camera on and off. Expected values are `
### `frigate/<camera_name>/ptz_autotracker/state`
Topic with current state of the PTZ autotracker for a camera. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/birdseye/set`
Topic to turn Birdseye for a camera on and off. Expected values are `ON` and `OFF`. Birdseye mode
must be enabled in the configuration.
### `frigate/<camera_name>/birdseye/state`
Topic with current state of Birdseye for a camera. Published values are `ON` and `OFF`.
### `frigate/<camera_name>/birdseye_mode/set`
Topic to set Birdseye mode for a camera. Birdseye offers different modes to customize the circumstances under which a camera is shown.
_Note: Changing the value from `CONTINUOUS` to `MOTION` or `OBJECTS` can take up to 30 seconds for
the camera to be removed from the view._
| Command | Description |
| ------------ | ----------------------------------------------------------------- |
| `CONTINUOUS` | Always included |
| `MOTION`     | Shown if motion was detected within the last 30 seconds            |
| `OBJECTS`    | Shown if an object was actively tracked within the last 30 seconds |
### `frigate/<camera_name>/birdseye_mode/state`
Topic with current state of the Birdseye mode for a camera. Published values are `CONTINUOUS`, `MOTION`, `OBJECTS`.
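As a usage illustration, here is a minimal sketch using `paho-mqtt` (the broker address and camera name are assumptions, not part of these docs) that changes a camera's Birdseye mode and watches the retained state topic:

```python
# pip install paho-mqtt  (sketch assumes the 1.x callback API)
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    # prints e.g. "frigate/front_door/birdseye_mode/state OBJECTS"
    print(msg.topic, msg.payload.decode())

client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)  # hypothetical broker
client.subscribe("frigate/front_door/birdseye_mode/state")
client.publish("frigate/front_door/birdseye_mode/set", "OBJECTS")
client.loop_forever()
```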

View File

@ -21,7 +21,7 @@ from frigate.comms.dispatcher import Communicator, Dispatcher
from frigate.comms.inter_process import InterProcessCommunicator
from frigate.comms.mqtt import MqttClient
from frigate.comms.ws import WebSocketClient
from frigate.config import FrigateConfig
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import (
CACHE_DIR,
CLIPS_DIR,
@ -169,6 +169,20 @@ class FrigateApp:
"process": None,
"audio_rms": mp.Value("d", 0.0), # type: ignore[typeddict-item]
"audio_dBFS": mp.Value("d", 0.0), # type: ignore[typeddict-item]
"birdseye_enabled": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"i",
self.config.cameras[camera_name].birdseye.enabled,
),
"birdseye_mode": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"i",
BirdseyeModeEnum.get_index(
self.config.cameras[camera_name].birdseye.mode.value
),
),
}
self.ptz_metrics[camera_name] = {
"ptz_autotracker_enabled": mp.Value( # type: ignore[typeddict-item]
@ -455,6 +469,7 @@ class FrigateApp:
args=(
self.config,
self.video_output_queue,
self.camera_metrics,
),
)
output_processor.daemon = True
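The `mp.Value` entries added above are what make these runtime toggles possible: a ctypes-backed value in shared memory can be written by the dispatcher process and read by the output process. A minimal standalone sketch of the pattern (names are illustrative, not Frigate's):

```python
import multiprocessing as mp

def worker(flag):
    # reads the same shared memory the parent process writes to
    print("enabled?", bool(flag.value))

if __name__ == "__main__":
    birdseye_enabled = mp.Value("i", 1)  # "i" is a C int, as in the code above
    birdseye_enabled.value = 0           # toggle before the child reads it
    p = mp.Process(target=worker, args=(birdseye_enabled,))
    p.start()
    p.join()  # prints: enabled? False
```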

View File

@ -4,7 +4,7 @@ import logging
from abc import ABC, abstractmethod
from typing import Any, Callable
from frigate.config import FrigateConfig
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import INSERT_MANY_RECORDINGS, REQUEST_REGION_GRID
from frigate.models import Recordings
from frigate.ptz.onvif import OnvifCommandEnum, OnvifController
@ -63,6 +63,8 @@ class Dispatcher:
"motion_threshold": self._on_motion_threshold_command,
"recordings": self._on_recordings_command,
"snapshots": self._on_snapshots_command,
"birdseye": self._on_birdseye_command,
"birdseye_mode": self._on_birdseye_mode_command,
}
for comm in self.comms:
@ -296,3 +298,43 @@ class Dispatcher:
logger.info(f"Setting ptz command to {command} for {camera_name}")
except KeyError as k:
logger.error(f"Invalid PTZ command {payload}: {k}")
def _on_birdseye_command(self, camera_name: str, payload: str) -> None:
"""Callback for birdseye topic."""
birdseye_settings = self.config.cameras[camera_name].birdseye
if payload == "ON":
if not self.camera_metrics[camera_name]["birdseye_enabled"].value:
logger.info(f"Turning on birdseye for {camera_name}")
self.camera_metrics[camera_name]["birdseye_enabled"].value = True
birdseye_settings.enabled = True
elif payload == "OFF":
if self.camera_metrics[camera_name]["birdseye_enabled"].value:
logger.info(f"Turning off birdseye for {camera_name}")
self.camera_metrics[camera_name]["birdseye_enabled"].value = False
birdseye_settings.enabled = False
self.publish(f"{camera_name}/birdseye/state", payload, retain=True)
def _on_birdseye_mode_command(self, camera_name: str, payload: str) -> None:
"""Callback for birdseye mode topic."""
if payload not in ["CONTINUOUS", "MOTION", "OBJECTS"]:
logger.error(f"Invalid birdseye_mode command: {payload}")
return
birdseye_config = self.config.cameras[camera_name].birdseye
if not birdseye_config.enabled:
logger.info(f"Birdseye mode not enabled for {camera_name}")
return
new_birdseye_mode = BirdseyeModeEnum(payload.lower())
logger.info(f"Setting birdseye mode for {camera_name} to {new_birdseye_mode}")
# update the metric (need the mode converted to an int)
self.camera_metrics[camera_name][
"birdseye_mode"
].value = BirdseyeModeEnum.get_index(new_birdseye_mode)
self.publish(f"{camera_name}/birdseye_mode/state", payload, retain=True)
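The two callbacks are reached through the handler dict extended above. A stripped-down sketch of that dispatch pattern (the handler map and topic parsing are simplified for illustration, not Frigate's exact code):

```python
def on_birdseye(camera: str, payload: str) -> None:
    print(f"birdseye for {camera} -> {payload}")

handlers = {"birdseye": on_birdseye}

def dispatch(topic: str, payload: str) -> None:
    # topics look like "<camera_name>/<setting>/set"
    camera, setting, _ = topic.split("/")
    if setting in handlers:
        handlers[setting](camera, payload)

dispatch("front_door/birdseye/set", "ON")  # birdseye for front_door -> ON
```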

View File

@ -89,6 +89,18 @@ class MqttClient(Communicator): # type: ignore[misc]
"OFF",
retain=False,
)
self.publish(
f"{camera_name}/birdseye/state",
"ON" if camera.birdseye.enabled else "OFF",
retain=True,
)
self.publish(
f"{camera_name}/birdseye_mode/state",
camera.birdseye.mode.value.upper()
if camera.birdseye.enabled
else "OFF",
retain=True,
)
self.publish("available", "online", retain=True)
@ -160,6 +172,8 @@ class MqttClient(Communicator): # type: ignore[misc]
"ptz_autotracker",
"motion_threshold",
"motion_contour_area",
"birdseye",
"birdseye_mode",
]
for name in self.config.cameras.keys():

View File

@ -501,6 +501,14 @@ class BirdseyeModeEnum(str, Enum):
motion = "motion"
continuous = "continuous"
@classmethod
def get_index(cls, type):
return list(cls).index(type)
@classmethod
def get(cls, index):
return list(cls)[index]
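These helpers exist because a `multiprocessing.Value` can only hold a primitive type, so the enum member is stored as its positional index. A standalone round-trip sketch of the pattern, mirroring the helpers above:

```python
from enum import Enum

class BirdseyeModeEnum(str, Enum):
    objects = "objects"
    motion = "motion"
    continuous = "continuous"

    @classmethod
    def get_index(cls, type):
        return list(cls).index(type)

    @classmethod
    def get(cls, index):
        return list(cls)[index]

idx = BirdseyeModeEnum.get_index(BirdseyeModeEnum.motion)  # 1
assert BirdseyeModeEnum.get(idx) is BirdseyeModeEnum.motion
```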
class BirdseyeConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Enable birdseye view.")

View File

@ -98,8 +98,8 @@ class EventCleanup(threading.Thread):
.iterator()
)
# delete the media from disk
for event in expired_events:
media_name = f"{event.camera}-{event.id}"
for expired in expired_events:
media_name = f"{expired.camera}-{expired.id}"
media_path = Path(
f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
)

View File

@ -24,6 +24,7 @@ from ws4py.websocket import WebSocket
from frigate.config import BirdseyeModeEnum, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE
from frigate.types import CameraMetricsTypes
from frigate.util.image import (
SharedMemoryFrameManager,
copy_yuv_to_position,
@ -35,10 +36,13 @@ logger = logging.getLogger(__name__)
def get_standard_aspect_ratio(width: int, height: int) -> tuple[int, int]:
"""Ensure that only standard aspect ratios are used."""
# it is important that all ratios have the same scale
known_aspects = [
(16, 9),
(9, 16),
(32, 9),
(20, 10),
(16, 6), # reolink duo 2
(32, 9), # panoramic cameras
(12, 9),
(9, 12),
] # aspects are scaled to have common relative size
@ -238,6 +242,7 @@ class BirdsEyeFrameManager:
config: FrigateConfig,
frame_manager: SharedMemoryFrameManager,
stop_event: mp.Event,
camera_metrics: dict[str, CameraMetricsTypes],
):
self.config = config
self.mode = config.birdseye.mode
@ -248,6 +253,7 @@ class BirdsEyeFrameManager:
self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8)
self.canvas = Canvas(width, height)
self.stop_event = stop_event
self.camera_metrics = camera_metrics
# initialize the frame as black and with the Frigate logo
self.blank_frame = np.zeros(self.yuv_shape, np.uint8)
@ -494,6 +500,9 @@ class BirdsEyeFrameManager:
y += row_height
candidate_layout.append(final_row)
if max_width == 0:
max_width = x
return max_width, y, candidate_layout
canvas_aspect_x, canvas_aspect_y = self.canvas.get_aspect(coefficient)
@ -557,15 +566,18 @@ class BirdsEyeFrameManager:
row_height = int(self.canvas.height / coefficient)
total_width, total_height, standard_candidate_layout = map_layout(row_height)
if not standard_candidate_layout:
return None
# layout can't be optimized more
if total_width / self.canvas.width >= 0.99:
return standard_candidate_layout
scale_up_percent = min(
1 - (total_width / self.canvas.width),
1 - (total_height / self.canvas.height),
1 / (total_width / self.canvas.width),
1 / (total_height / self.canvas.height),
)
row_height = int(row_height * (1 + round(scale_up_percent, 1)))
row_height = int(row_height * scale_up_percent)
_, _, scaled_layout = map_layout(row_height)
if scaled_layout:
@ -579,9 +591,25 @@ class BirdsEyeFrameManager:
if not camera_config.enabled:
return False
# get our metrics (sync'd across processes)
# which allows us to control it via mqtt (or any other dispatcher)
camera_metrics = self.camera_metrics[camera]
# disabling birdseye is a little tricky
if not camera_metrics["birdseye_enabled"].value:
# if we've rendered a frame (we have a value for last_active_frame)
# then we need to set it to zero
if self.cameras[camera]["last_active_frame"] > 0:
self.cameras[camera]["last_active_frame"] = 0
return False
# get the birdseye mode state from camera metrics
birdseye_mode = BirdseyeModeEnum.get(camera_metrics["birdseye_mode"].value)
# update the last active frame for the camera
self.cameras[camera]["current_frame"] = frame_time
if self.camera_active(camera_config.mode, object_count, motion_count):
if self.camera_active(birdseye_mode, object_count, motion_count):
self.cameras[camera]["last_active_frame"] = frame_time
now = datetime.datetime.now().timestamp()
@ -605,7 +633,11 @@ class BirdsEyeFrameManager:
return False
def output_frames(config: FrigateConfig, video_output_queue):
def output_frames(
config: FrigateConfig,
video_output_queue,
camera_metrics: dict[str, CameraMetricsTypes],
):
threading.current_thread().name = "output"
setproctitle("frigate.output")
@ -661,7 +693,10 @@ def output_frames(config: FrigateConfig, video_output_queue):
config.birdseye.restream,
)
broadcasters["birdseye"] = BroadcastThread(
"birdseye", converters["birdseye"], websocket_server, stop_event
"birdseye",
converters["birdseye"],
websocket_server,
stop_event,
)
websocket_thread.start()
@ -669,7 +704,9 @@ def output_frames(config: FrigateConfig, video_output_queue):
for t in broadcasters.values():
t.start()
birdseye_manager = BirdsEyeFrameManager(config, frame_manager, stop_event)
birdseye_manager = BirdsEyeFrameManager(
config, frame_manager, stop_event, camera_metrics
)
if config.birdseye.restream:
birdseye_buffer = frame_manager.create(
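The revised scale-up math above replaces the old additive estimate with a direct ratio: the layout grows by the smaller of the two canvas/layout ratios, so it gets as large as possible without overflowing either dimension. A worked sketch with assumed numbers:

```python
# assumed numbers for illustration
canvas_w, canvas_h = 1280, 720
total_w, total_h = 640, 576  # size of the layout from the first map_layout pass
row_height = 144

# equivalent to min(1 / (total_w / canvas_w), 1 / (total_h / canvas_h))
scale_up = min(canvas_w / total_w, canvas_h / total_h)  # min(2.0, 1.25) = 1.25
row_height = int(row_height * scale_up)  # 180; a second map_layout pass re-flows
print(scale_up, row_height)  # 1.25 180
```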

View File

@ -502,7 +502,7 @@ class PtzAutoTracker:
return np.dot(self.move_coefficients[camera], input_data)
def _calculate_tracked_object_metrics(self, camera):
def _calculate_tracked_object_metrics(self, camera, obj):
def remove_outliers(data):
Q1 = np.percentile(data, 25)
Q3 = np.percentile(data, 75)
@ -541,6 +541,27 @@ class PtzAutoTracker:
"original_target_box"
] = self.tracked_object_metrics[camera]["target_box"]
(
self.tracked_object_metrics[camera]["valid_velocity"],
self.tracked_object_metrics[camera]["velocity"],
) = self._get_valid_velocity(camera, obj)
self.tracked_object_metrics[camera]["distance"] = self._get_distance_threshold(
camera, obj
)
centroid_distance = np.linalg.norm(
[
obj.obj_data["centroid"][0] - camera_config.detect.width / 2,
obj.obj_data["centroid"][1] - camera_config.detect.height / 2,
]
)
logger.debug(f"{camera}: Centroid distance: {centroid_distance}")
self.tracked_object_metrics[camera]["below_distance_threshold"] = (
centroid_distance < self.tracked_object_metrics[camera]["distance"]
)
def _process_move_queue(self, camera):
camera_config = self.config.cameras[camera]
camera_config.frame_shape[1]
@ -668,12 +689,16 @@ class PtzAutoTracker:
velocities = obj.obj_data["estimate_velocity"]
logger.debug(f"{camera}: Velocities from norfair: {velocities}")
# if we are close enough to zero, return right away
if np.all(np.round(velocities) == 0):
return True, np.zeros((2, 2))
# Thresholds
x_mags_thresh = camera_width / camera_fps / 2
y_mags_thresh = camera_height / camera_fps / 2
dir_thresh = 0.96
delta_thresh = 10
var_thresh = 3
dir_thresh = 0.93
delta_thresh = 12
var_thresh = 5
# Check magnitude
x_mags = np.abs(velocities[:, 0])
@ -690,11 +715,15 @@ class PtzAutoTracker:
high_variances = np.any(stdevs > var_thresh)
# Check direction difference
cosine_sim = np.dot(velocities[0], velocities[1]) / (
np.linalg.norm(velocities[0]) * np.linalg.norm(velocities[1])
)
dir_thresh = 0.6 if np.all(delta < delta_thresh / 2) else dir_thresh
invalid_dirs = cosine_sim < dir_thresh
velocities = np.round(velocities)
invalid_dirs = False
if np.all(np.linalg.norm(velocities, axis=1)):  # both vectors must be nonzero before dividing
cosine_sim = np.dot(velocities[0], velocities[1]) / (
np.linalg.norm(velocities[0]) * np.linalg.norm(velocities[1])
)
dir_thresh = 0.6 if np.all(delta < delta_thresh / 2) else dir_thresh
logger.debug(f"{camera}: Cosine similarity: {cosine_sim}")
invalid_dirs = cosine_sim < dir_thresh
# Combine
invalid = (
@ -723,11 +752,12 @@ class PtzAutoTracker:
)
)
# invalid velocity
return np.zeros((2, 2))
return False, np.zeros((2, 2))
else:
return np.mean(velocities, axis=0)
logger.debug(f"{camera}: Valid velocity")
return True, np.mean(velocities, axis=0)
def _below_distance_threshold(self, camera, obj):
def _get_distance_threshold(self, camera, obj):
# Returns the distance threshold for an object: a percentage of the
# larger dimension (width or height) of the frame, multiplied by a
# scaling factor for object size. The caller compares the object's
# centroid distance against it.
@ -738,13 +768,6 @@ class PtzAutoTracker:
# TODO: there's probably a better way to approach this
camera_config = self.config.cameras[camera]
centroid_distance = np.linalg.norm(
[
obj.obj_data["centroid"][0] - camera_config.detect.width / 2,
obj.obj_data["centroid"][1] - camera_config.detect.height / 2,
]
)
obj_width = obj.obj_data["box"][2] - obj.obj_data["box"][0]
obj_height = obj.obj_data["box"][3] - obj.obj_data["box"][1]
@ -758,14 +781,17 @@ class PtzAutoTracker:
# larger objects should lower the threshold, smaller objects should raise it
scaling_factor = 1 - np.log(max_obj / max_frame)
percentage = 0.1 if camera_config.onvif.autotracking.movement_weights else 0.03
percentage = (
0.08
if camera_config.onvif.autotracking.movement_weights
and self.tracked_object_metrics[camera]["valid_velocity"]
else 0.03
)
distance_threshold = percentage * max_frame * scaling_factor
logger.debug(
f"{camera}: Distance: {centroid_distance}, threshold: {distance_threshold}"
)
logger.debug(f"{camera}: Distance threshold: {distance_threshold}")
return centroid_distance < distance_threshold
return distance_threshold
def _should_zoom_in(self, camera, obj, box, debug_zooming=False):
# returns True if we should zoom in, False if we should zoom out, None to do nothing
@ -774,7 +800,7 @@ class PtzAutoTracker:
camera_height = camera_config.frame_shape[0]
camera_fps = camera_config.detect.fps
average_velocity = self._get_valid_velocity(camera, obj)
average_velocity = self.tracked_object_metrics[camera]["velocity"]
bb_left, bb_top, bb_right, bb_bottom = box
@ -799,7 +825,9 @@ class PtzAutoTracker:
)
# make sure object is centered in the frame
below_distance_threshold = self._below_distance_threshold(camera, obj)
below_distance_threshold = self.tracked_object_metrics[camera][
"below_distance_threshold"
]
below_dimension_threshold = (bb_right - bb_left) <= camera_width * (
self.zoom_factor[camera] + 0.1
@ -925,7 +953,14 @@ class PtzAutoTracker:
): # use estimates if we have available coefficients
predicted_movement_time = self._predict_movement_time(camera, pan, tilt)
average_velocity = self._get_valid_velocity(camera, obj)
_, average_velocity = (
self._get_valid_velocity(camera, obj)
if "velocity" not in self.tracked_object_metrics[camera]
else (
self.tracked_object_metrics[camera]["valid_velocity"],
self.tracked_object_metrics[camera]["velocity"],
)
)
if np.any(average_velocity):
# this box could exceed the frame boundaries if velocity is high
@ -992,7 +1027,7 @@ class PtzAutoTracker:
level = (
self.ptz_metrics[camera]["ptz_max_zoom"].value
- self.ptz_metrics[camera]["ptz_min_zoom"].value
) / 10
) / 20
if result:
zoom = min(1.0, current_zoom_level + level)
else:
@ -1103,9 +1138,9 @@ class PtzAutoTracker:
)
):
self.tracked_object_history[camera].append(copy.deepcopy(obj.obj_data))
self._calculate_tracked_object_metrics(camera)
self._calculate_tracked_object_metrics(camera, obj)
if self._below_distance_threshold(camera, obj):
if self.tracked_object_metrics[camera]["below_distance_threshold"]:
logger.debug(
f"{camera}: Existing object (do NOT move ptz): {obj.obj_data['id']} {obj.obj_data['box']} {obj.obj_data['frame_time']}"
)
@ -1148,7 +1183,7 @@ class PtzAutoTracker:
self.tracked_object_history[camera].append(
copy.deepcopy(obj.obj_data)
)
self._calculate_tracked_object_metrics(camera)
self._calculate_tracked_object_metrics(camera, obj)
self._autotrack_move_ptz(camera, obj)
return
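The velocity validity check above combines per-axis magnitude thresholds, variance across Norfair's two velocity estimates, and the cosine similarity of their directions. A standalone sketch of the direction check with assumed velocities:

```python
import numpy as np

# two velocity estimates for one object, as norfair reports (assumed values)
velocities = np.array([[4.0, 1.0], [3.5, 1.5]])

norms = np.linalg.norm(velocities, axis=1)
if np.all(norms):  # guard against zero-length vectors before dividing
    cosine_sim = np.dot(velocities[0], velocities[1]) / (norms[0] * norms[1])
else:
    cosine_sim = 1.0  # stationary estimates trivially agree

print(round(float(cosine_sim), 3))  # 0.987, above the new dir_thresh of 0.93
```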

View File

@ -256,20 +256,29 @@ class RecordingMaintainer(threading.Thread):
# delete the segment if it ends more than the configured pre_capture before the most recently processed frame
else:
pre_capture = self.config.cameras[camera].record.events.pre_capture
most_recently_processed_frame_time = self.object_recordings_info[
camera
][-1][0]
camera_info = self.object_recordings_info[camera]
most_recently_processed_frame_time = (
camera_info[-1][0] if len(camera_info) > 0 else 0
)
retain_cutoff = most_recently_processed_frame_time - pre_capture
if end_time.timestamp() < retain_cutoff:
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
# else retain days includes this segment
else:
record_mode = self.config.cameras[camera].record.retain.mode
return await self.move_segment(
camera, start_time, end_time, duration, cache_path, record_mode
# assume that empty means the relevant recording info has not been received yet
camera_info = self.object_recordings_info[camera]
most_recently_processed_frame_time = (
camera_info[-1][0] if len(camera_info) > 0 else 0
)
# ensure delayed segment info does not lead to lost segments
if most_recently_processed_frame_time >= end_time.timestamp():
record_mode = self.config.cameras[camera].record.retain.mode
return await self.move_segment(
camera, start_time, end_time, duration, cache_path, record_mode
)
def segment_stats(
self, camera: str, start_time: datetime.datetime, end_time: datetime.datetime
) -> SegmentInfo:
@ -451,9 +460,7 @@ class RecordingMaintainer(threading.Thread):
break
if stale_frame_count > 0:
logger.warning(
f"Found {stale_frame_count} old frames, segments from recordings may be missing."
)
logger.debug(f"Found {stale_frame_count} old frames.")
# empty the audio recordings info queue if audio is enabled
if self.audio_recordings_info_queue:
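The retention decision in this file reduces to comparing a cached segment's end time against the most recently processed frame time. A simplified sketch of that decision (the function and values are illustrative only):

```python
# illustrative decision logic mirroring the branches above
def decide(segment_end: float, most_recent_frame: float, pre_capture: float) -> str:
    if segment_end < most_recent_frame - pre_capture:
        return "delete"  # segment ended before any possible event window
    if most_recent_frame >= segment_end:
        return "move"    # every frame covering the segment has been processed
    return "wait"        # segment info may still be in flight, keep it cached

print(decide(100.0, 110.0, 5.0))  # delete
print(decide(108.0, 110.0, 5.0))  # move
print(decide(112.0, 110.0, 5.0))  # wait
```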

View File

@ -25,6 +25,8 @@ class CameraMetricsTypes(TypedDict):
skipped_fps: Synchronized
audio_rms: Synchronized
audio_dBFS: Synchronized
birdseye_enabled: Synchronized
birdseye_mode: Synchronized
class PTZMetricsTypes(TypedDict):

View File

@ -776,11 +776,11 @@ function Event({
<div className="text-sm flex">
<Clock className="h-5 w-5 mr-2 inline" />
{formatUnixTimestampToDateTime(event.start_time, { ...config.ui })}
<div className="hidden md:inline">
<div className="hidden sm:inline">
<span className="m-1">-</span>
<TimeAgo time={event.start_time * 1000} dense />
</div>
<div className="hidden md:inline">
<div className="hidden sm:inline">
<span className="m-1" />( {getDurationFromTimestamps(event.start_time, event.end_time)} )
</div>
</div>