Merge branch 'dev' into 230615-feature-camera-autoconf

Sergey Krashevich, 2023-06-19 21:52:53 +03:00, committed by GitHub
commit 990b6ddad0
17 changed files with 457 additions and 288 deletions

View File

@@ -73,7 +73,11 @@
   "isort.args": ["--settings-path=./pyproject.toml"],
   "[python]": {
     "editor.defaultFormatter": "ms-python.black-formatter",
-    "editor.formatOnSave": true
+    "editor.formatOnSave": true,
+    "editor.codeActionsOnSave": {
+      "source.fixAll": true,
+      "source.organizeImports": true
+    }
   },
   "[json][jsonc]": {
     "editor.defaultFormatter": "esbenp.prettier-vscode"
@@ -86,7 +90,7 @@
     "editor.tabSize": 2
   },
   "cSpell.ignoreWords": ["rtmp"],
-  "cSpell.words": ["preact"]
+  "cSpell.words": ["preact", "astype", "hwaccel", "mqtt"]
 }
 }
 }

View File

@@ -96,6 +96,16 @@ model:

 Note that if you rename objects in the labelmap, you will also need to update your `objects -> track` list as well.

+:::caution
+
+Some labels have special handling and modifications can disable functionality.
+
+`person` objects are associated with `face` and `amazon`
+
+`car` objects are associated with `license_plate`, `ups`, `fedex`, `amazon`
+
+:::
+
 ## Custom ffmpeg build

 Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, some hardware setups are incompatible with the included build. In this case, a docker volume mapping can be used to overwrite the included ffmpeg build with one that works for your specific hardware setup.
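A minimal sketch of such a mapping with docker compose, assuming a custom build unpacked at `/opt/custom-ffmpeg` on the host; the container-side path is an assumption and can vary between Frigate releases, so check where your image ships its bundled ffmpeg:

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable
    volumes:
      # overwrite the bundled ffmpeg binary with a host-provided build (paths are illustrative)
      - /opt/custom-ffmpeg/bin/ffmpeg:/usr/lib/btbn-ffmpeg/bin/ffmpeg:ro
```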

View File

@@ -203,10 +203,10 @@ detect:
   max_disappeared: 25
   # Optional: Configuration for stationary object tracking
   stationary:
-    # Optional: Frequency for confirming stationary objects (default: shown below)
-    # When set to 0, object detection will not confirm stationary objects until movement is detected.
+    # Optional: Frequency for confirming stationary objects (default: same as threshold)
+    # When set to 1, object detection will run to confirm the object still exists on every frame.
     # If set to 10, object detection will run to confirm the object still exists on every 10th frame.
-    interval: 0
+    interval: 50
     # Optional: Number of frames without a position change for an object to be considered stationary (default: 10x the frame rate or 10s)
     threshold: 50
     # Optional: Define a maximum number of frames for tracking a stationary object (default: not set, track forever)
@@ -222,6 +222,20 @@ detect:
   # Optional: Object specific values
   objects:
     person: 1000
+  # Optional: Milliseconds to offset detect annotations by (default: shown below).
+  # There can often be latency between a recording and the detect process,
+  # especially when using separate streams for detect and record.
+  # Use this setting to make the timeline bounding boxes more closely align
+  # with the recording. The value can be positive or negative.
+  # TIP: Imagine there is an event clip with a person walking from left to right.
+  # If the event timeline bounding box is consistently to the left of the person
+  # then the value should be decreased. Similarly, if a person is walking from
+  # left to right and the bounding box is consistently ahead of the person
+  # then the value should be increased.
+  # TIP: This offset is dynamic so you can change the value and it will update existing
+  # events, this makes it easy to tune.
+  # WARNING: Fast moving objects will likely not have the bounding box align.
+  annotation_offset: 0

 # Optional: Object configuration
 # NOTE: Can be overridden at the camera level
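As a worked example of the tuning described above, a hypothetical camera whose timeline boxes consistently trail the subject would get a negative offset:

```yaml
cameras:
  front_door: # hypothetical camera name
    detect:
      # recording runs ~500 ms behind detect, so shift annotations earlier
      annotation_offset: -500
```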

View File

@@ -1,6 +1,6 @@
 # Stationary Objects

-An object is considered stationary when it is being tracked and has been in a very similar position for a certain number of frames. This number is defined in the configuration under `detect -> stationary -> threshold`, and is 10x the frame rate (or 10 seconds) by default. Once an object is considered stationary, it will remain stationary until motion occurs near the object, at which point object detection will start running again. If the object changes location, it will be considered active.
+An object is considered stationary when it is being tracked and has been in a very similar position for a certain number of frames. This number is defined in the configuration under `detect -> stationary -> threshold`, and is 10x the frame rate (or 10 seconds) by default. Once an object is considered stationary, it will remain stationary until motion occurs within the object, at which point object detection will start running again. If the object changes location, it will be considered active.

 ## Why does it matter if an object is stationary?
@@ -13,11 +13,11 @@ The default config is:

 ```yaml
 detect:
   stationary:
-    interval: 0
+    interval: 50
     threshold: 50
 ```

-`interval` is defined as the frequency for running detection on stationary objects. This means that by default, once an object is considered stationary, detection will not be run on it until motion is detected. With `interval > 0`, detection will be run every nth frame to make sure the object is still there.
+`interval` is defined as the frequency for running detection on stationary objects. This means that by default, once an object is considered stationary, detection will not be run on it until motion is detected or the interval elapses (every 50th frame by default). With `interval >= 1`, detection will be run every nth frame to make sure the object is still there.

 NOTE: There is no way to disable stationary object tracking with this value.
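A minimal sketch of the scheduling rule this describes (the function name is illustrative; the real check lives in the frame-processing loop):

```python
def needs_detection(motionless_count: int, threshold: int, interval: int) -> bool:
    # objects that are not yet stationary are always run through detection
    if motionless_count < threshold:
        return True
    # stationary objects are only re-confirmed every `interval` frames
    return interval > 0 and motionless_count % interval == 0
```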

View File

@@ -62,7 +62,9 @@ Message published for each changed event. The first message is published when the
     "has_clip": false,
     "stationary": false, // whether or not the object is considered stationary
     "motionless_count": 0, // number of frames the object has been motionless
-    "position_changes": 2 // number of times the object has moved from a stationary position
+    "position_changes": 2, // number of times the object has moved from a stationary position
+    "attributes": [], // set of unique attributes that have been identified on the object
+    "current_attributes": [] // detailed data about the current attributes in this frame
   },
   "after": {
     "id": "1607123955.475377-mxklsc",
@@ -87,7 +89,16 @@ Message published for each changed event. The first message is published when the
     "has_clip": false,
     "stationary": false, // whether or not the object is considered stationary
     "motionless_count": 0, // number of frames the object has been motionless
-    "position_changes": 2 // number of times the object has changed position
+    "position_changes": 2, // number of times the object has changed position
+    "attributes": ["face"], // set of unique attributes that have been identified on the object
+    "current_attributes": [
+      // detailed data about the current attributes in this frame
+      {
+        "label": "face",
+        "box": [442, 506, 534, 524],
+        "score": 0.64
+      }
+    ]
   }
 }
 ```
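A minimal paho-mqtt subscriber that reads the new attribute fields might look like the sketch below; the broker address and the `frigate/events` topic are assumptions based on Frigate's default topic layout:

```python
import json

import paho.mqtt.client as mqtt


def on_message(client, userdata, message):
    after = json.loads(message.payload)["after"]
    # "attributes" is the cumulative set; "current_attributes" carries per-frame boxes
    print(after["label"], after["attributes"], after["current_attributes"])


client = mqtt.Client()
client.on_message = on_message
client.connect("localhost")  # assumed broker address
client.subscribe("frigate/events")  # assumed default topic
client.loop_forever()
```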
@@ -163,9 +174,9 @@ Topic with current motion contour area for a camera. Published value is an integer.

 Topic to send PTZ commands to camera.

 | Command                | Description                                                                               |
 | ---------------------- | ----------------------------------------------------------------------------------------- |
 | `preset-<preset_name>` | send command to move to preset with name `<preset_name>`                                  |
 | `MOVE_<dir>`           | send command to continuously move in `<dir>`, possible values are [UP, DOWN, LEFT, RIGHT] |
 | `ZOOM_<dir>`           | send command to continuously zoom `<dir>`, possible values are [IN, OUT]                  |
 | `STOP`                 | send command to stop moving                                                               |
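For example, publishing a command with paho-mqtt; the camera name and the `frigate/<camera_name>/ptz` topic layout are assumptions based on the default topic prefix:

```python
import paho.mqtt.publish as publish

# pan a hypothetical "front_door" camera to a preset, then stop it
publish.single("frigate/front_door/ptz", "preset-driveway", hostname="localhost")
publish.single("frigate/front_door/ptz", "STOP", hostname="localhost")
```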

View File

@@ -13,8 +13,6 @@ from pydantic.fields import PrivateAttr

 from frigate.const import CACHE_DIR, DEFAULT_DB_PATH, REGEX_CAMERA_NAME, YAML_EXT
 from frigate.detectors import DetectorConfig, ModelConfig
-from frigate.detectors.detector_config import InputTensorEnum  # noqa: F401
-from frigate.detectors.detector_config import PixelFormatEnum  # noqa: F401
 from frigate.detectors.detector_config import BaseDetectorConfig
 from frigate.ffmpeg_presets import (
     parse_preset_hardware_acceleration_decode,
@@ -191,7 +189,7 @@ class RecordConfig(FrigateBaseModel):

 class MotionConfig(FrigateBaseModel):
     threshold: int = Field(
-        default=40,
+        default=30,
         title="Motion detection threshold (1-255).",
         ge=1,
         le=255,
@@ -200,10 +198,10 @@ class MotionConfig(FrigateBaseModel):
         default=0.8, title="Lightning detection threshold (0.3-1.0).", ge=0.3, le=1.0
     )
     improve_contrast: bool = Field(default=True, title="Improve Contrast")
-    contour_area: Optional[int] = Field(default=15, title="Contour Area")
+    contour_area: Optional[int] = Field(default=10, title="Contour Area")
     delta_alpha: float = Field(default=0.2, title="Delta Alpha")
     frame_alpha: float = Field(default=0.02, title="Frame Alpha")
-    frame_height: Optional[int] = Field(default=50, title="Frame Height")
+    frame_height: Optional[int] = Field(default=100, title="Frame Height")
     mask: Union[str, List[str]] = Field(
         default="", title="Coordinates polygon for the motion mask."
     )
@@ -253,9 +251,8 @@ class StationaryMaxFramesConfig(FrigateBaseModel):

 class StationaryConfig(FrigateBaseModel):
     interval: Optional[int] = Field(
-        default=0,
         title="Frame interval for checking stationary objects.",
-        ge=0,
+        gt=0,
     )
     threshold: Optional[int] = Field(
         title="Number of frames without a position change for an object to be considered stationary",
@@ -988,6 +985,9 @@ class FrigateConfig(FrigateBaseModel):
             stationary_threshold = camera_config.detect.fps * 10
             if camera_config.detect.stationary.threshold is None:
                 camera_config.detect.stationary.threshold = stationary_threshold
+            # default to the stationary_threshold if not defined
+            if camera_config.detect.stationary.interval is None:
+                camera_config.detect.stationary.interval = stationary_threshold

             # FFMPEG input substitution
             for input in camera_config.ffmpeg.inputs:

View File

@@ -147,6 +147,23 @@ class EventProcessor(threading.Thread):
                 )
             )

+        attributes = [
+            (
+                None
+                if event_data["snapshot"] is None
+                else {
+                    "box": to_relative_box(
+                        width,
+                        height,
+                        a["box"],
+                    ),
+                    "label": a["label"],
+                    "score": a["score"],
+                }
+            )
+            for a in event_data["snapshot"]["attributes"]
+        ]
+
         # keep these from being set back to false because the event
         # may have started while recordings and snapshots were enabled
         # this would be an issue for long running events
@@ -173,9 +190,14 @@
                 "region": region,
                 "score": score,
                 "top_score": event_data["top_score"],
+                "attributes": attributes,
             },
         }

+        # only overwrite the sub_label in the database if it's set
+        if event_data.get("sub_label") is not None:
+            event[Event.sub_label] = event_data["sub_label"]
+
         (
             Event.insert(event)
             .on_conflict(
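`to_relative_box` comes from `frigate.util` and is not shown in this diff; a plausible sketch of the conversion, assuming absolute `(x1, y1, x2, y2)` pixel boxes normalized to relative `(x, y, width, height)`:

```python
def to_relative_box(width: int, height: int, box: tuple) -> tuple:
    return (
        box[0] / width,  # x
        box[1] / height,  # y
        (box[2] - box[0]) / width,  # relative width
        (box[3] - box[1]) / height,  # relative height
    )
```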

View File

@@ -38,6 +38,7 @@ class ImprovedMotionDetector(MotionDetector):
         self.improve_contrast = improve_contrast
         self.threshold = threshold
         self.contour_area = contour_area
+        self.clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

     def detect(self, frame):
         motion_boxes = []
@@ -55,7 +56,7 @@
         # Improve contrast
         if self.improve_contrast.value:
-            resized_frame = cv2.equalizeHist(resized_frame)
+            resized_frame = self.clahe.apply(resized_frame)

         # mask frame
         resized_frame[self.mask] = [255]
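The switch from `cv2.equalizeHist` to CLAHE trades global histogram equalization for per-tile equalization with a clip limit, which boosts local contrast without amplifying noise across the whole frame. A standalone comparison (the image path is a placeholder):

```python
import cv2

gray = cv2.imread("frame.jpg", cv2.IMREAD_GRAYSCALE)

# global equalization: one histogram for the entire image
global_eq = cv2.equalizeHist(gray)

# CLAHE: equalize per 8x8 tile, clipping the histogram to limit noise amplification
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
local_eq = clahe.apply(gray)
```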

View File

@@ -10,8 +10,8 @@ from abc import ABC, abstractmethod
 import numpy as np
 from setproctitle import setproctitle

-from frigate.config import InputTensorEnum
 from frigate.detectors import create_detector
+from frigate.detectors.detector_config import InputTensorEnum
 from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen, load_labels

 logger = logging.getLogger(__name__)

View File

@@ -24,6 +24,7 @@ from frigate.const import CLIPS_DIR
 from frigate.events.maintainer import EventTypeEnum
 from frigate.util import (
     SharedMemoryFrameManager,
+    area,
     calculate_region,
     draw_box_with_label,
     draw_timestamp,
@@ -42,11 +43,45 @@ def on_edge(box, frame_shape):
         return True


-def is_better_thumbnail(current_thumb, new_obj, frame_shape) -> bool:
+def has_better_attr(current_thumb, new_obj, attr_label) -> bool:
+    max_new_attr = max(
+        [0]
+        + [area(a["box"]) for a in new_obj["attributes"] if a["label"] == attr_label]
+    )
+    max_current_attr = max(
+        [0]
+        + [
+            area(a["box"])
+            for a in current_thumb["attributes"]
+            if a["label"] == attr_label
+        ]
+    )
+
+    # if the thumb has a higher scoring attr
+    return max_new_attr > max_current_attr
+
+
+def is_better_thumbnail(label, current_thumb, new_obj, frame_shape) -> bool:
     # larger is better
     # cutoff images are less ideal, but they should also be smaller?
     # better scores are obviously better too

+    # check face on person
+    if label == "person":
+        if has_better_attr(current_thumb, new_obj, "face"):
+            return True
+
+        # if the current thumb has a face attr, dont update unless it gets better
+        if any([a["label"] == "face" for a in current_thumb["attributes"]]):
+            return False
+
+    # check license_plate on car
+    if label == "car":
+        if has_better_attr(current_thumb, new_obj, "license_plate"):
+            return True
+
+        # if the current thumb has a license_plate attr, dont update unless it gets better
+        if any([a["label"] == "license_plate" for a in current_thumb["attributes"]]):
+            return False
+
     # if the new_thumb is on an edge, and the current thumb is not
     if on_edge(new_obj["box"], frame_shape) and not on_edge(
         current_thumb["box"], frame_shape
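`has_better_attr` ranks attributes by box size using `area` from `frigate.util`, which for `(x1, y1, x2, y2)` boxes presumably reduces to:

```python
def area(box) -> int:
    # width * height of an (x1, y1, x2, y2) box
    return (box[2] - box[0]) * (box[3] - box[1])
```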
@@ -76,6 +111,7 @@ class TrackedObject:
         self.zone_presence = {}
         self.current_zones = []
         self.entered_zones = []
+        self.attributes = set()
         self.false_positive = True
         self.has_clip = False
         self.has_snapshot = False
@@ -125,7 +161,10 @@
         if not self.false_positive:
             # determine if this frame is a better thumbnail
             if self.thumbnail_data is None or is_better_thumbnail(
-                self.thumbnail_data, obj_data, self.camera_config.frame_shape
+                self.obj_data["label"],
+                self.thumbnail_data,
+                obj_data,
+                self.camera_config.frame_shape,
             ):
                 self.thumbnail_data = {
                     "frame_time": obj_data["frame_time"],
@@ -133,6 +172,7 @@
                     "area": obj_data["area"],
                     "region": obj_data["region"],
                     "score": obj_data["score"],
+                    "attributes": obj_data["attributes"],
                 }
                 thumb_update = True
@@ -164,6 +204,19 @@
             if 0 < zone_score < zone.inertia:
                 self.zone_presence[name] = zone_score - 1

+        # maintain attributes
+        for attr in obj_data["attributes"]:
+            self.attributes.add(attr["label"])
+
+        # populate the sub_label for car with first logo if it exists
+        if self.obj_data["label"] == "car" and "sub_label" not in self.obj_data:
+            recognized_logos = self.attributes.intersection(
+                set(["ups", "fedex", "amazon"])
+            )
+            if len(recognized_logos) > 0:
+                self.obj_data["sub_label"] = recognized_logos.pop()
+
+        # check for significant change
         if not self.false_positive:
             # if the zones changed, signal an update
             if set(self.current_zones) != set(current_zones):
@@ -214,6 +267,8 @@
             "entered_zones": self.entered_zones.copy(),
             "has_clip": self.has_clip,
             "has_snapshot": self.has_snapshot,
+            "attributes": list(self.attributes),
+            "current_attributes": self.obj_data["attributes"],
         }

         if include_thumbnail:
@@ -294,6 +349,21 @@
                 color=color,
             )

+            # draw any attributes
+            for attribute in self.thumbnail_data["attributes"]:
+                box = attribute["box"]
+                draw_box_with_label(
+                    best_frame,
+                    box[0],
+                    box[1],
+                    box[2],
+                    box[3],
+                    attribute["label"],
+                    f"{attribute['score']:.0%}",
+                    thickness=thickness,
+                    color=color,
+                )
+
         if crop:
             box = self.thumbnail_data["box"]
             box_size = 300
@@ -421,6 +491,21 @@ class CameraState:
                     color=color,
                 )

+                # draw any attributes
+                for attribute in obj["current_attributes"]:
+                    box = attribute["box"]
+                    draw_box_with_label(
+                        frame_copy,
+                        box[0],
+                        box[1],
+                        box[2],
+                        box[3],
+                        attribute["label"],
+                        f"{attribute['score']:.0%}",
+                        thickness=thickness,
+                        color=color,
+                    )
+
         if draw_options.get("regions"):
             for region in regions:
                 cv2.rectangle(
@@ -553,6 +638,7 @@ class CameraState:
             # or the current object is older than desired, use the new object
             if (
                 is_better_thumbnail(
+                    object_type,
                     current_best.thumbnail_data,
                     obj.thumbnail_data,
                     self.camera_config.frame_shape,

View File

@@ -1,6 +1,7 @@
 import datetime
 import glob
 import logging
+import math
 import multiprocessing as mp
 import os
 import queue
@@ -269,161 +270,118 @@ class BirdsEyeFrameManager:
     def update_frame(self):
         """Update to a new frame for birdseye."""

-        def calculate_two_cam_layout(canvas, cameras_to_add: list[str]) -> tuple[any]:
-            """Calculate the optimal layout for 2 cameras."""
-            first_camera = cameras_to_add[0]
-            first_camera_dims = self.cameras[first_camera]["dimensions"].copy()
-            second_camera = cameras_to_add[1]
-            second_camera_dims = self.cameras[second_camera]["dimensions"].copy()
-
-            # check for optimal layout
-            if first_camera_dims[0] + second_camera_dims[0] < canvas_width:
-                # place cameras horizontally
-                first_scaled_width = int(
-                    canvas_height * first_camera_dims[0] / first_camera_dims[1]
-                )
-                second_scaled_width = int(
-                    canvas_height * second_camera_dims[0] / second_camera_dims[1]
-                )
-                first_height = canvas_height
-                second_height = canvas_height
-
-                if first_scaled_width + second_scaled_width > canvas_width:
-                    if first_scaled_width > second_scaled_width:
-                        first_scaled_width = canvas_width - second_scaled_width
-                        first_height = int(
-                            first_scaled_width
-                            * first_camera_dims[1]
-                            / first_camera_dims[0]
-                        )
-                    else:
-                        second_scaled_width = canvas_width - first_scaled_width
-                        second_height = int(
-                            second_scaled_width
-                            * second_camera_dims[1]
-                            / second_camera_dims[0]
-                        )
-
-                return [
-                    [
-                        (
-                            first_camera,
-                            (0, 0, first_scaled_width, first_height),
-                        ),
-                        (
-                            second_camera,
-                            (
-                                first_scaled_width + 1,
-                                0,
-                                second_scaled_width,
-                                second_height,
-                            ),
-                        ),
-                    ],
-                ]
-            else:
-                # place cameras vertically
-                top_scaled_width = int(
-                    (canvas_height / 2) * first_camera_dims[0] / first_camera_dims[1]
-                )
-                bottom_scaled_width = int(
-                    (canvas_height / 2) * second_camera_dims[0] / second_camera_dims[1]
-                )
-                return [
-                    [
-                        (
-                            first_camera,
-                            (0, 0, top_scaled_width, int(canvas_height / 2)),
-                        )
-                    ],
-                    [
-                        (
-                            second_camera,
-                            (
-                                0,
-                                int(canvas_height / 2),
-                                bottom_scaled_width,
-                                int(canvas_height / 2),
-                            ),
-                        )
-                    ],
-                ]
-
         def calculate_layout(
             canvas, cameras_to_add: list[str], coefficient
         ) -> tuple[any]:
-            """Calculate the optimal layout for 3+ cameras."""
+            """Calculate the optimal layout for 2+ cameras."""
             camera_layout: list[list[any]] = []
             camera_layout.append([])
-            canvas_aspect = canvas[0] / canvas[1]
+            canvas_gcd = math.gcd(canvas[0], canvas[1])
+            canvas_aspect_x = (canvas[0] / canvas_gcd) * coefficient
+            canvas_aspect_y = (canvas[0] / canvas_gcd) * coefficient
             starting_x = 0
             x = starting_x
             y = 0
             y_i = 0
-            max_height = 0
+            max_y = 0
             for camera in cameras_to_add:
                 camera_dims = self.cameras[camera]["dimensions"].copy()
-                camera_aspect = camera_dims[0] / camera_dims[1]
+                camera_gcd = math.gcd(camera_dims[0], camera_dims[1])
+                camera_aspect_x = camera_dims[0] / camera_gcd
+                camera_aspect_y = camera_dims[1] / camera_gcd
+
+                if round(camera_aspect_x / camera_aspect_y, 1) == 1.8:
+                    # account for slightly off 16:9 cameras
+                    camera_aspect_x = 16
+                    camera_aspect_y = 9
+                elif round(camera_aspect_x / camera_aspect_y, 1) == 1.3:
+                    # make 4:3 cameras the same relative size as 16:9
+                    camera_aspect_x = 12
+                    camera_aspect_y = 9

                 if camera_dims[1] > camera_dims[0]:
                     portrait = True
-                elif camera_aspect < canvas_aspect:
-                    # if the camera aspect ratio is less than canvas aspect ratio, it needs to be scaled down to fit
-                    camera_dims[0] *= camera_aspect / canvas_aspect
-                    camera_dims[1] *= camera_aspect / canvas_aspect
-                    portrait = False
                 else:
                     portrait = False

-                if (x + camera_dims[0] * coefficient) <= canvas[0]:
+                if (x + camera_aspect_x) <= canvas_aspect_x:
                     # insert if camera can fit on current row
-                    scaled_width = int(camera_dims[0] * coefficient)
                     camera_layout[y_i].append(
                         (
                             camera,
                             (
-                                x,
-                                y,
-                                scaled_width,
-                                int(camera_dims[1] * coefficient),
+                                camera_aspect_x,
+                                camera_aspect_y,
                             ),
                         )
                     )
-                    x += scaled_width

                     if portrait:
-                        starting_x = scaled_width
+                        starting_x = camera_aspect_x
                     else:
-                        max_height = max(
-                            max_height,
-                            int(camera_dims[1] * coefficient),
+                        max_y = max(
+                            max_y,
+                            camera_aspect_y,
                         )
+
+                    x += camera_aspect_x
                 else:
                     # move on to the next row and insert
-                    y += max_height
+                    y += max_y
                     y_i += 1
                     camera_layout.append([])
                     x = starting_x

-                    if camera_dims[0] * coefficient > canvas_width:
-                        safe_coefficient = 1
-                    else:
-                        safe_coefficient = coefficient
+                    if x + camera_aspect_x > canvas_aspect_x:
+                        return None

                     camera_layout[y_i].append(
                         (
                             camera,
-                            (
-                                x,
-                                y,
-                                int(camera_dims[0] * safe_coefficient),
-                                int(camera_dims[1] * safe_coefficient),
-                            ),
+                            (camera_aspect_x, camera_aspect_y),
                         )
                     )
-                    x += int(camera_dims[0] * safe_coefficient)
+                    x += camera_aspect_x

-            return (camera_layout, y + max_height)
+            if y + max_y > canvas_aspect_y:
+                return None
+
+            row_height = int(canvas_height / coefficient)
+            final_camera_layout = []
+            starting_x = 0
+            y = 0
+
+            for row in camera_layout:
+                final_row = []
+                x = starting_x
+                for cameras in row:
+                    camera_dims = self.cameras[cameras[0]]["dimensions"].copy()
+
+                    if camera_dims[1] > camera_dims[0]:
+                        scaled_height = int(row_height * coefficient)
+                        scaled_width = int(
+                            scaled_height * camera_dims[0] / camera_dims[1]
+                        )
+                        starting_x = scaled_width
+                    else:
+                        scaled_height = row_height
+                        scaled_width = int(
+                            scaled_height * camera_dims[0] / camera_dims[1]
+                        )
+
+                    if (
+                        x + scaled_width > canvas_width
+                        or y + scaled_height > canvas_height
+                    ):
+                        return None
+
+                    final_row.append((cameras[0], (x, y, scaled_width, scaled_height)))
+                    x += scaled_width
+
+                y += row_height
+                final_camera_layout.append(final_row)
+
+            return final_camera_layout

         # determine how many cameras are tracking objects within the last 30 seconds
         active_cameras = set(
@@ -493,30 +451,28 @@
                 )
             ]
         ]
-        elif len(active_cameras) == 2:
-            self.camera_layout = calculate_two_cam_layout(
-                (canvas_width, canvas_height), active_cameras_to_add
-            )
         else:
             # calculate optimal layout
-            coefficient = 1.0
+            coefficient = 2
             calculating = True

             # decrease scaling coefficient until height of all cameras can fit into the birdseye canvas
             while calculating:
-                layout_candidate, total_height = calculate_layout(
+                layout_candidate = calculate_layout(
                     (canvas_width, canvas_height),
                     active_cameras_to_add,
                     coefficient,
                 )

-                if (canvas_height * 0.75) < total_height <= canvas_height:
-                    calculating = False
-                elif total_height < canvas_height * 0.75:
-                    coefficient += 0.1
-                else:
-                    coefficient -= 0.1
+                if not layout_candidate:
+                    if coefficient < 10:
+                        coefficient += 1
+                        continue
+                    else:
+                        logger.error("Error finding appropriate birdseye layout")
+                        return
+
+                calculating = False

             self.camera_layout = layout_candidate
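The new layout works in aspect-ratio units rather than pixels: each camera is reduced to its smallest integer aspect pair via `math.gcd`, then snapped to a common ratio. A standalone illustration of that normalization:

```python
import math

for width, height in [(1920, 1080), (2688, 1520), (640, 480)]:
    g = math.gcd(width, height)
    ax, ay = width // g, height // g
    if round(ax / ay, 1) == 1.8:  # slightly-off 16:9 cameras
        ax, ay = 16, 9
    elif round(ax / ay, 1) == 1.3:  # treat 4:3 like a 12:9 cell
        ax, ay = 12, 9
    print(f"{width}x{height} -> {ax}:{ay}")

# 1920x1080 -> 16:9, 2688x1520 -> 16:9 (168:95 rounds to 1.8), 640x480 -> 12:9
```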

View File

@ -1,5 +1,6 @@
"""Maintain recording segments in cache.""" """Maintain recording segments in cache."""
import asyncio
import datetime import datetime
import logging import logging
import multiprocessing as mp import multiprocessing as mp
@ -20,7 +21,7 @@ from frigate.config import FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR from frigate.const import CACHE_DIR, MAX_SEGMENT_DURATION, RECORD_DIR
from frigate.models import Event, Recordings from frigate.models import Event, Recordings
from frigate.types import RecordMetricsTypes from frigate.types import RecordMetricsTypes
from frigate.util import area from frigate.util import area, get_video_properties
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -42,7 +43,7 @@ class RecordingMaintainer(threading.Thread):
self.recordings_info: dict[str, Any] = defaultdict(list) self.recordings_info: dict[str, Any] = defaultdict(list)
self.end_time_cache: dict[str, Tuple[datetime.datetime, float]] = {} self.end_time_cache: dict[str, Tuple[datetime.datetime, float]] = {}
def move_files(self) -> None: async def move_files(self) -> None:
cache_files = sorted( cache_files = sorted(
[ [
d d
@@ -121,115 +122,100 @@
                 )
                 .order_by(Event.start_time)
             )

-            for r in recordings:
-                cache_path = r["cache_path"]
-                start_time = r["start_time"]
-
-                # Just delete files if recordings are turned off
-                if (
-                    camera not in self.config.cameras
-                    or not self.process_info[camera]["record_enabled"].value
-                ):
-                    Path(cache_path).unlink(missing_ok=True)
-                    self.end_time_cache.pop(cache_path, None)
-                    continue
-
-                if cache_path in self.end_time_cache:
-                    end_time, duration = self.end_time_cache[cache_path]
-                else:
-                    ffprobe_cmd = [
-                        "ffprobe",
-                        "-v",
-                        "error",
-                        "-show_entries",
-                        "format=duration",
-                        "-of",
-                        "default=noprint_wrappers=1:nokey=1",
-                        f"{cache_path}",
-                    ]
-                    p = sp.run(ffprobe_cmd, capture_output=True)
-                    if p.returncode == 0 and p.stdout.decode():
-                        duration = float(p.stdout.decode().strip())
-                    else:
-                        duration = -1
-
-                    # ensure duration is within expected length
-                    if 0 < duration < MAX_SEGMENT_DURATION:
-                        end_time = start_time + datetime.timedelta(seconds=duration)
-                        self.end_time_cache[cache_path] = (end_time, duration)
-                    else:
-                        if duration == -1:
-                            logger.warning(
-                                f"Failed to probe corrupt segment {cache_path} : {p.returncode} - {str(p.stderr)}"
-                            )
-
-                        logger.warning(
-                            f"Discarding a corrupt recording segment: {cache_path}"
-                        )
-                        Path(cache_path).unlink(missing_ok=True)
-                        continue
-
-                # if cached file's start_time is earlier than the retain days for the camera
-                if start_time <= (
-                    (
-                        datetime.datetime.now()
-                        - datetime.timedelta(
-                            days=self.config.cameras[camera].record.retain.days
-                        )
-                    )
-                ):
-                    # if the cached segment overlaps with the events:
-                    overlaps = False
-                    for event in events:
-                        # if the event starts in the future, stop checking events
-                        # and remove this segment
-                        if event.start_time > end_time.timestamp():
-                            overlaps = False
-                            Path(cache_path).unlink(missing_ok=True)
-                            self.end_time_cache.pop(cache_path, None)
-                            break
-
-                        # if the event is in progress or ends after the recording starts, keep it
-                        # and stop looking at events
-                        if (
-                            event.end_time is None
-                            or event.end_time >= start_time.timestamp()
-                        ):
-                            overlaps = True
-                            break
-
-                    if overlaps:
-                        record_mode = self.config.cameras[
-                            camera
-                        ].record.events.retain.mode
-                        # move from cache to recordings immediately
-                        self.store_segment(
-                            camera,
-                            start_time,
-                            end_time,
-                            duration,
-                            cache_path,
-                            record_mode,
-                        )
-                    # if it doesn't overlap with an event, go ahead and drop the segment
-                    # if it ends more than the configured pre_capture for the camera
-                    else:
-                        pre_capture = self.config.cameras[
-                            camera
-                        ].record.events.pre_capture
-                        most_recently_processed_frame_time = self.recordings_info[
-                            camera
-                        ][-1][0]
-                        retain_cutoff = most_recently_processed_frame_time - pre_capture
-                        if end_time.timestamp() < retain_cutoff:
-                            Path(cache_path).unlink(missing_ok=True)
-                            self.end_time_cache.pop(cache_path, None)
-                # else retain days includes this segment
-                else:
-                    record_mode = self.config.cameras[camera].record.retain.mode
-                    self.store_segment(
-                        camera, start_time, end_time, duration, cache_path, record_mode
-                    )
+            await asyncio.gather(
+                *(self.validate_and_move_segment(camera, events, r) for r in recordings)
+            )
+
+    async def validate_and_move_segment(
+        self, camera: str, events: Event, recording: dict[str, any]
+    ) -> None:
+        cache_path = recording["cache_path"]
+        start_time = recording["start_time"]
+
+        # Just delete files if recordings are turned off
+        if (
+            camera not in self.config.cameras
+            or not self.process_info[camera]["record_enabled"].value
+        ):
+            Path(cache_path).unlink(missing_ok=True)
+            self.end_time_cache.pop(cache_path, None)
+            return
+
+        if cache_path in self.end_time_cache:
+            end_time, duration = self.end_time_cache[cache_path]
+        else:
+            segment_info = get_video_properties(cache_path, get_duration=True)
+
+            if segment_info["duration"]:
+                duration = float(segment_info["duration"])
+            else:
+                duration = -1
+
+            # ensure duration is within expected length
+            if 0 < duration < MAX_SEGMENT_DURATION:
+                end_time = start_time + datetime.timedelta(seconds=duration)
+                self.end_time_cache[cache_path] = (end_time, duration)
+            else:
+                if duration == -1:
+                    logger.warning(f"Failed to probe corrupt segment {cache_path}")
+
+                logger.warning(f"Discarding a corrupt recording segment: {cache_path}")
+                Path(cache_path).unlink(missing_ok=True)
+                return
+
+        # if cached file's start_time is earlier than the retain days for the camera
+        if start_time <= (
+            (
+                datetime.datetime.now()
+                - datetime.timedelta(
+                    days=self.config.cameras[camera].record.retain.days
+                )
+            )
+        ):
+            # if the cached segment overlaps with the events:
+            overlaps = False
+            for event in events:
+                # if the event starts in the future, stop checking events
+                # and remove this segment
+                if event.start_time > end_time.timestamp():
+                    overlaps = False
+                    Path(cache_path).unlink(missing_ok=True)
+                    self.end_time_cache.pop(cache_path, None)
+                    break
+
+                # if the event is in progress or ends after the recording starts, keep it
+                # and stop looking at events
+                if event.end_time is None or event.end_time >= start_time.timestamp():
+                    overlaps = True
+                    break
+
+            if overlaps:
+                record_mode = self.config.cameras[camera].record.events.retain.mode
+                # move from cache to recordings immediately
+                self.store_segment(
+                    camera,
+                    start_time,
+                    end_time,
+                    duration,
+                    cache_path,
+                    record_mode,
+                )
+            # if it doesn't overlap with an event, go ahead and drop the segment
+            # if it ends more than the configured pre_capture for the camera
+            else:
+                pre_capture = self.config.cameras[camera].record.events.pre_capture
+                most_recently_processed_frame_time = self.recordings_info[camera][-1][0]
+                retain_cutoff = most_recently_processed_frame_time - pre_capture
+                if end_time.timestamp() < retain_cutoff:
+                    Path(cache_path).unlink(missing_ok=True)
+                    self.end_time_cache.pop(cache_path, None)
+        # else retain days includes this segment
+        else:
+            record_mode = self.config.cameras[camera].record.retain.mode
+            self.store_segment(
+                camera, start_time, end_time, duration, cache_path, record_mode
+            )

     def segment_stats(
         self, camera: str, start_time: datetime.datetime, end_time: datetime.datetime
@@ -386,7 +372,7 @@
                 break

             try:
-                self.move_files()
+                asyncio.run(self.move_files())
             except Exception as e:
                 logger.error(
                     "Error occurred when attempting to maintain recording cache"

View File

@@ -730,7 +730,7 @@ class TestConfig(unittest.TestCase):
         assert config == frigate_config.dict(exclude_unset=True)

         runtime_config = frigate_config.runtime_config()
-        assert runtime_config.cameras["back"].motion.frame_height == 50
+        assert runtime_config.cameras["back"].motion.frame_height == 100

     def test_motion_contour_area_dynamic(self):
         config = {
@@ -758,7 +758,7 @@
         assert config == frigate_config.dict(exclude_unset=True)

         runtime_config = frigate_config.runtime_config()
-        assert round(runtime_config.cameras["back"].motion.contour_area) == 15
+        assert round(runtime_config.cameras["back"].motion.contour_area) == 10

     def test_merge_labelmap(self):
         config = {

View File

@@ -6,8 +6,9 @@ from pydantic import parse_obj_as

 import frigate.detectors as detectors
 import frigate.object_detection
-from frigate.config import DetectorConfig, InputTensorEnum, ModelConfig
+from frigate.config import DetectorConfig, ModelConfig
 from frigate.detectors import DetectorTypeEnum
+from frigate.detectors.detector_config import InputTensorEnum


 class TestLocalObjectDetector(unittest.TestCase):

View File

@@ -91,9 +91,13 @@ class NorfairTracker(ObjectTracker):
             "ymax": self.detect_config.height,
         }

-    def deregister(self, id):
+    def deregister(self, id, track_id):
         del self.tracked_objects[id]
         del self.disappeared[id]
+        self.tracker.tracked_objects = [
+            o for o in self.tracker.tracked_objects if o.global_id != track_id
+        ]
+        del self.track_id_map[track_id]

     # tracks the current position of the object based on the last N bounding boxes
     # returns False if the object has moved outside its previous position
@@ -167,7 +171,7 @@
         if self.update_position(id, obj["box"]):
             self.tracked_objects[id]["motionless_count"] += 1
             if self.is_expired(id):
-                self.deregister(id)
+                self.deregister(id, track_id)
                 return
         else:
             # register the first position change and then only increment if
@@ -261,8 +265,7 @@
         # clear expired tracks
         expired_ids = [k for k in self.track_id_map.keys() if k not in active_ids]
         for e_id in expired_ids:
-            self.deregister(self.track_id_map[e_id])
-            del self.track_id_map[e_id]
+            self.deregister(self.track_id_map[e_id], e_id)

     def debug_draw(self, frame, frame_time):
         active_detections = [

View File

@@ -1147,31 +1147,66 @@ def to_relative_box(


 def get_video_properties(url, get_duration=False):
+    def calculate_duration(video: Optional[any]) -> float:
+        duration = None
+
+        if video is not None:
+            # Get the frames per second (fps) of the video stream
+            fps = video.get(cv2.CAP_PROP_FPS)
+            total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+
+            if fps and total_frames:
+                duration = total_frames / fps
+
+        # if cv2 failed need to use ffprobe
+        if duration is None:
+            ffprobe_cmd = [
+                "ffprobe",
+                "-v",
+                "error",
+                "-show_entries",
+                "format=duration",
+                "-of",
+                "default=noprint_wrappers=1:nokey=1",
+                f"{url}",
+            ]
+            p = sp.run(ffprobe_cmd, capture_output=True)
+
+            if p.returncode == 0 and p.stdout.decode():
+                duration = float(p.stdout.decode().strip())
+            else:
+                duration = -1
+
+        return duration
+
     width = height = 0

-    # Open the video stream
-    video = cv2.VideoCapture(url)
-
-    # Check if the video stream was opened successfully
-    if not video.isOpened():
-        logger.debug(f"Error opening video stream {url}.")
-        return None
-
-    # Get the width of frames in the video stream
-    width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
-
-    # Get the height of frames in the video stream
-    height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
-
-    # Release the video stream
-    video.release()
-
-    result = {"width": round(width), "height": round(height)}
+    try:
+        # Open the video stream
+        video = cv2.VideoCapture(url)
+
+        # Check if the video stream was opened successfully
+        if not video.isOpened():
+            video = None
+    except Exception:
+        video = None
+
+    result = {}

     if get_duration:
-        # Get the frames per second (fps) of the video stream
-        fps = video.get(cv2.CAP_PROP_FPS)
-        total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
-        duration = total_frames / fps
-        result["duration"] = duration
+        result["duration"] = calculate_duration(video)
+
+    if video is not None:
+        # Get the width of frames in the video stream
+        width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
+
+        # Get the height of frames in the video stream
+        height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
+
+        # Release the video stream
+        video.release()
+
+    result["width"] = round(width)
+    result["height"] = round(height)

     return result
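Hypothetical usage of the reworked helper (the cache path is illustrative):

```python
props = get_video_properties("/tmp/cache/front_door-00.00.mp4", get_duration=True)
# e.g. {"duration": 9.97, "width": 1920, "height": 1080}
# duration falls back to ffprobe when cv2 cannot compute it and is -1 if both fail;
# width/height are 0 when the stream could not be opened
print(props)
```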

View File

@@ -14,8 +14,9 @@ import cv2
 import numpy as np
 from setproctitle import setproctitle

-from frigate.config import CameraConfig, DetectConfig, PixelFormatEnum
+from frigate.config import CameraConfig, DetectConfig
 from frigate.const import CACHE_DIR
+from frigate.detectors.detector_config import PixelFormatEnum
 from frigate.log import LogPipe
 from frigate.motion import MotionDetector
 from frigate.motion.improved_motion import ImprovedMotionDetector
@@ -722,6 +723,14 @@ def process_frames(
     stop_event,
     exit_on_empty: bool = False,
 ):
+    # attribute labels are not tracked and are not assigned regions
+    attribute_label_map = {
+        "person": ["face", "amazon"],
+        "car": ["ups", "fedex", "amazon", "license_plate"],
+    }
+    all_attribute_labels = [
+        item for sublist in attribute_label_map.values() for item in sublist
+    ]
     fps = process_info["process_fps"]
     detection_fps = process_info["detection_fps"]
     current_frame_time = process_info["detection_frame"]
@@ -757,6 +766,7 @@
         motion_boxes = motion_detector.detect(frame) if motion_enabled.value else []

         regions = []
+        consolidated_detections = []

         # if detection is disabled
         if not detection_enabled.value:
@@ -769,8 +779,8 @@
             stationary_object_ids = [
                 obj["id"]
                 for obj in object_tracker.tracked_objects.values()
-                # if there hasn't been motion for 10 frames
-                if obj["motionless_count"] >= 10
+                # if it has exceeded the stationary threshold
+                if obj["motionless_count"] >= detect_config.stationary.threshold
                 # and it isn't due for a periodic check
                 and (
                     detect_config.stationary.interval == 0
@@ -893,12 +903,42 @@
                 consolidated_detections = get_consolidated_object_detections(
                     detected_object_groups
                 )
+                tracked_detections = [
+                    d
+                    for d in consolidated_detections
+                    if d[0] not in all_attribute_labels
+                ]
                 # now that we have refined our detections, we need to track objects
-                object_tracker.match_and_update(frame_time, consolidated_detections)
+                object_tracker.match_and_update(frame_time, tracked_detections)
             # else, just update the frame times for the stationary objects
             else:
                 object_tracker.update_frame_times(frame_time)

+            # group the attribute detections based on what label they apply to
+            attribute_detections = {}
+            for label, attribute_labels in attribute_label_map.items():
+                attribute_detections[label] = [
+                    d for d in consolidated_detections if d[0] in attribute_labels
+                ]
+
+            # build detections and add attributes
+            detections = {}
+            for obj in object_tracker.tracked_objects.values():
+                attributes = []
+
+                # if the objects label has associated attribute detections
+                if obj["label"] in attribute_detections.keys():
+                    # add them to attributes if they intersect
+                    for attribute_detection in attribute_detections[obj["label"]]:
+                        if box_inside(obj["box"], (attribute_detection[2])):
+                            attributes.append(
+                                {
+                                    "label": attribute_detection[0],
+                                    "score": attribute_detection[1],
+                                    "box": attribute_detection[2],
+                                }
+                            )
+                detections[obj["id"]] = {**obj, "attributes": attributes}
+
             # debug object tracking
             if False:
                 bgr_frame = cv2.cvtColor(
@@ -981,7 +1021,7 @@
             (
                 camera_name,
                 frame_time,
-                object_tracker.tracked_objects,
+                detections,
                 motion_boxes,
                 regions,
             )
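`box_inside` comes from `frigate.util` and is not part of this diff; a plausible sketch, assuming it tests whether the attribute box sits entirely within the object box (`(x1, y1, x2, y2)` coordinates):

```python
def box_inside(outer: tuple, inner: tuple) -> bool:
    return (
        inner[0] >= outer[0]
        and inner[1] >= outer[1]
        and inner[2] <= outer[2]
        and inner[3] <= outer[3]
    )
```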