Compare commits

...

3 Commits

Author SHA1 Message Date
Nicolas Mowen
04a2f42d11
Split apart video.py (#22631)
Some checks are pending
CI / AMD64 Build (push) Waiting to run
CI / ARM Build (push) Waiting to run
CI / Jetson Jetpack 6 (push) Waiting to run
CI / AMD64 Extra Build (push) Blocked by required conditions
CI / ARM Extra Build (push) Blocked by required conditions
CI / Synaptics Build (push) Blocked by required conditions
CI / Assemble and push default build (push) Blocked by required conditions
2026-03-25 08:44:12 -06:00
Josh Hawkins
3f6d5bcf22
ONVIF refactor (#22629)
* add profile support and decouple relative move from autotracking

* add drag to zoom

* docs

* add profile selection to UI

* dynamically update onvif config

* ui tweak

* docs

* docs tweak
2026-03-25 08:57:47 -05:00
Josh Hawkins
f5937d8370
Update PR template and add check workflow (#22628) 2026-03-25 08:10:16 -05:00
23 changed files with 1261 additions and 750 deletions

View File

@ -1,6 +1,7 @@
_Please read the [contributing guidelines](https://github.com/blakeblackshear/frigate/blob/dev/CONTRIBUTING.md) before submitting a PR._
## Proposed change
<!--
Thank you!
@ -25,6 +26,7 @@ _Please read the [contributing guidelines](https://github.com/blakeblackshear/fr
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to discussion with maintainers (**required** for large/pinned features):
## For new features

120
.github/workflows/pr_template_check.yml vendored Normal file
View File

@ -0,0 +1,120 @@
# Validates new and edited pull request descriptions against the repository's
# PR template. Non-maintainer PRs that are missing required sections (or have
# no boxes checked) receive an explanatory comment and are closed.
name: PR template check

on:
  pull_request_target:
    types: [opened, edited]

# Only needs to comment on and close PRs.
permissions:
  pull-requests: write

jobs:
  check_template:
    name: Validate PR description
    runs-on: ubuntu-latest
    steps:
      - name: Check PR description against template
        uses: actions/github-script@v7
        with:
          script: |
            const maintainers = ['blakeblackshear', 'NickM-27', 'hawkeye217'];
            const author = context.payload.pull_request.user.login;

            if (maintainers.includes(author)) {
              console.log(`Skipping template check for maintainer: ${author}`);
              return;
            }

            const body = context.payload.pull_request.body || '';
            const errors = [];

            // Check that key template sections exist
            const requiredSections = [
              '## Proposed change',
              '## Type of change',
              '## AI disclosure',
              '## Checklist',
            ];

            for (const section of requiredSections) {
              if (!body.includes(section)) {
                errors.push(`Missing section: **${section}**`);
              }
            }

            // Check that "Proposed change" has content beyond the default HTML comment
            const proposedChangeMatch = body.match(
              /## Proposed change\s*(?:<!--[\s\S]*?-->\s*)?([\s\S]*?)(?=\n## )/
            );
            const proposedContent = proposedChangeMatch
              ? proposedChangeMatch[1].trim()
              : '';

            if (!proposedContent) {
              errors.push(
                'The **Proposed change** section is empty. Please describe what this PR does.'
              );
            }

            // Check that at least one "Type of change" checkbox is checked
            const typeSection = body.match(
              /## Type of change\s*([\s\S]*?)(?=\n## )/
            );
            if (typeSection && !/- \[x\]/i.test(typeSection[1])) {
              errors.push(
                'No **Type of change** selected. Please check at least one option.'
              );
            }

            // Check that at least one AI disclosure checkbox is checked
            const aiSection = body.match(
              /## AI disclosure\s*([\s\S]*?)(?=\n## )/
            );
            if (aiSection && !/- \[x\]/i.test(aiSection[1])) {
              errors.push(
                'No **AI disclosure** option selected. Please indicate whether AI tools were used.'
              );
            }

            // Check that at least one checklist item is checked
            const checklistSection = body.match(
              /## Checklist\s*([\s\S]*?)$/
            );
            if (checklistSection && !/- \[x\]/i.test(checklistSection[1])) {
              errors.push(
                'No **Checklist** items checked. Please review and check the items that apply.'
              );
            }

            if (errors.length === 0) {
              console.log('PR description passes template validation.');
              return;
            }

            const prNumber = context.payload.pull_request.number;
            const message = [
              '## PR template validation failed',
              '',
              'This PR was automatically closed because the description does not follow the [pull request template](https://github.com/blakeblackshear/frigate/blob/dev/.github/pull_request_template.md).',
              '',
              '**Issues found:**',
              ...errors.map((e) => `- ${e}`),
              '',
              'Please update your PR description to include all required sections from the template, then reopen this PR.',
              '',
              '> If you used an AI tool to generate this PR, please see our [contributing guidelines](https://github.com/blakeblackshear/frigate/blob/dev/CONTRIBUTING.md) for details.',
            ].join('\n');

            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              body: message,
            });

            await github.rest.pulls.update({
              owner: context.repo.owner,
              repo: context.repo.repo,
              pull_number: prNumber,
              state: 'closed',
            });

            core.setFailed('PR description does not follow the template.');

View File

@ -52,6 +52,10 @@ cameras:
password: admin
# Optional: Skip TLS verification from the ONVIF server (default: shown below)
tls_insecure: False
# Optional: ONVIF media profile to use for PTZ control, matched by token or name. (default: shown below)
# If not set, the first profile with a valid PTZ configuration is selected automatically.
# Use this when your camera has multiple ONVIF profiles and you need to select a specific one.
profile: None
# Optional: PTZ camera object autotracking. Keeps a moving object in
# the center of the frame by automatically moving the PTZ camera.
autotracking:

View File

@ -91,6 +91,8 @@ If your ONVIF camera does not require authentication credentials, you may still
:::
If your camera has multiple ONVIF profiles, you can specify which one to use for PTZ control with the `profile` option, matched by token or name. When not set, Frigate selects the first profile with a valid PTZ configuration. Check the Frigate debug logs (`frigate.ptz.onvif: debug`) to see available profile names and tokens for your camera.
An ONVIF-capable camera that supports relative movement within the field of view (FOV) can also be configured to automatically track moving objects and keep them in the center of the frame. For autotracking setup, see the [autotracking](autotracking.md) docs.
## ONVIF PTZ camera recommendations

View File

@ -951,7 +951,7 @@ cameras:
onvif:
# Required: host of the camera being connected to.
# NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
# NOTE: ONVIF user, and password can be specified with environment variables or docker secrets
# NOTE: ONVIF host, user, and password can be specified with environment variables or docker secrets
# that must begin with 'FRIGATE_'. e.g. host: '{FRIGATE_ONVIF_USERNAME}'
host: 0.0.0.0
# Optional: ONVIF port for device (default: shown below).
@ -966,6 +966,10 @@ cameras:
# Optional: Ignores time synchronization mismatches between the camera and the server during authentication.
# Using NTP on both ends is recommended and this should only be set to True in a "safe" environment due to the security risk it represents.
ignore_time_mismatch: False
# Optional: ONVIF media profile to use for PTZ control, matched by token or name. (default: shown below)
# If not set, the first profile with a valid PTZ configuration is selected automatically.
# Use this when your camera has multiple ONVIF profiles and you need to select a specific one.
profile: None
# Optional: PTZ camera object autotracking. Keeps a moving object in
# the center of the frame by automatically moving the PTZ camera.
autotracking:

View File

@ -117,6 +117,11 @@ class OnvifConfig(FrigateBaseModel):
title="Disable TLS verify",
description="Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only).",
)
profile: Optional[str] = Field(
default=None,
title="ONVIF profile",
description="Specific ONVIF media profile to use for PTZ control, matched by token or name. If not set, the first profile with valid PTZ configuration is selected automatically.",
)
autotracking: PtzAutotrackConfig = Field(
default_factory=PtzAutotrackConfig,
title="Autotracking",

View File

@ -23,6 +23,7 @@ class CameraConfigUpdateEnum(str, Enum):
notifications = "notifications"
objects = "objects"
object_genai = "object_genai"
onvif = "onvif"
record = "record"
remove = "remove" # for removing a camera
review = "review"
@ -130,6 +131,8 @@ class CameraConfigUpdateSubscriber:
config.lpr = updated_config
elif update_type == CameraConfigUpdateEnum.snapshots:
config.snapshots = updated_config
elif update_type == CameraConfigUpdateEnum.onvif:
config.onvif = updated_config
elif update_type == CameraConfigUpdateEnum.zones:
config.zones = updated_config

View File

@ -37,8 +37,8 @@ from frigate.ffmpeg_presets import parse_preset_input
from frigate.log import LogPipe, suppress_stderr_during
from frigate.object_detection.base import load_labels
from frigate.util.builtin import get_ffmpeg_arg_list
from frigate.util.ffmpeg import start_or_restart_ffmpeg, stop_ffmpeg
from frigate.util.process import FrigateProcess
from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg
try:
from tflite_runtime.interpreter import Interpreter

View File

@ -15,6 +15,10 @@ from zeep.exceptions import Fault, TransportError
from frigate.camera import PTZMetrics
from frigate.config import FrigateConfig, ZoomingModeEnum
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdateSubscriber,
)
from frigate.util.builtin import find_by_key
logger = logging.getLogger(__name__)
@ -65,7 +69,14 @@ class OnvifController:
self.camera_configs[cam_name] = cam
self.status_locks[cam_name] = asyncio.Lock()
self.config_subscriber = CameraConfigUpdateSubscriber(
self.config,
self.config.cameras,
[CameraConfigUpdateEnum.onvif],
)
asyncio.run_coroutine_threadsafe(self._init_cameras(), self.loop)
asyncio.run_coroutine_threadsafe(self._poll_config_updates(), self.loop)
def _run_event_loop(self) -> None:
"""Run the event loop in a separate thread."""
@ -80,6 +91,52 @@ class OnvifController:
for cam_name in self.camera_configs:
await self._init_single_camera(cam_name)
async def _poll_config_updates(self) -> None:
"""Poll for ONVIF config updates and re-initialize cameras as needed."""
while True:
await asyncio.sleep(1)
try:
updates = self.config_subscriber.check_for_updates()
for update_type, cameras in updates.items():
if update_type == CameraConfigUpdateEnum.onvif.name:
for cam_name in cameras:
await self._reinit_camera(cam_name)
except Exception:
logger.error("Error checking for ONVIF config updates")
async def _close_camera(self, cam_name: str) -> None:
"""Close the ONVIF client session for a camera."""
cam_state = self.cams.get(cam_name)
if cam_state and "onvif" in cam_state:
try:
await cam_state["onvif"].close()
except Exception:
logger.debug(f"Error closing ONVIF session for {cam_name}")
async def _reinit_camera(self, cam_name: str) -> None:
"""Re-initialize a camera after config change."""
logger.info(f"Re-initializing ONVIF for {cam_name} due to config change")
# close existing session before re-init
await self._close_camera(cam_name)
cam = self.config.cameras.get(cam_name)
if not cam or not cam.onvif.host:
# ONVIF removed from config, clean up
self.cams.pop(cam_name, None)
self.camera_configs.pop(cam_name, None)
self.failed_cams.pop(cam_name, None)
return
# update stored config and reset state
self.camera_configs[cam_name] = cam
if cam_name not in self.status_locks:
self.status_locks[cam_name] = asyncio.Lock()
self.cams.pop(cam_name, None)
self.failed_cams.pop(cam_name, None)
await self._init_single_camera(cam_name)
async def _init_single_camera(self, cam_name: str) -> bool:
"""Initialize a single camera by name.
@ -118,6 +175,7 @@ class OnvifController:
"active": False,
"features": [],
"presets": {},
"profiles": [],
}
return True
except (Fault, ONVIFError, TransportError, Exception) as e:
@ -161,22 +219,60 @@ class OnvifController:
)
return False
# build list of valid PTZ profiles
valid_profiles = [
p
for p in profiles
if p.VideoEncoderConfiguration
and p.PTZConfiguration
and (
p.PTZConfiguration.DefaultContinuousPanTiltVelocitySpace is not None
or p.PTZConfiguration.DefaultContinuousZoomVelocitySpace is not None
)
]
# store available profiles for API response and log for debugging
self.cams[camera_name]["profiles"] = [
{"name": getattr(p, "Name", None) or p.token, "token": p.token}
for p in valid_profiles
]
for p in valid_profiles:
logger.debug(
"Onvif profile for %s: name='%s', token='%s'",
camera_name,
getattr(p, "Name", None),
p.token,
)
configured_profile = self.config.cameras[camera_name].onvif.profile
profile = None
for _, onvif_profile in enumerate(profiles):
if (
onvif_profile.VideoEncoderConfiguration
and onvif_profile.PTZConfiguration
and (
onvif_profile.PTZConfiguration.DefaultContinuousPanTiltVelocitySpace
is not None
or onvif_profile.PTZConfiguration.DefaultContinuousZoomVelocitySpace
is not None
if configured_profile is not None:
# match by exact token first, then by name
for p in valid_profiles:
if p.token == configured_profile:
profile = p
break
if profile is None:
for p in valid_profiles:
if getattr(p, "Name", None) == configured_profile:
profile = p
break
if profile is None:
available = [
f"name='{getattr(p, 'Name', None)}', token='{p.token}'"
for p in valid_profiles
]
logger.error(
"Onvif profile '%s' not found for camera %s. Available profiles: %s",
configured_profile,
camera_name,
available,
)
):
# use the first profile that has a valid ptz configuration
profile = onvif_profile
logger.debug(f"Selected Onvif profile for {camera_name}: {profile}")
break
return False
else:
# use the first profile that has a valid ptz configuration
profile = valid_profiles[0] if valid_profiles else None
if profile is None:
logger.error(
@ -184,6 +280,8 @@ class OnvifController:
)
return False
logger.debug(f"Selected Onvif profile for {camera_name}: {profile}")
# get the PTZ config for the profile
try:
configs = profile.PTZConfiguration
@ -218,48 +316,92 @@ class OnvifController:
move_request.ProfileToken = profile.token
self.cams[camera_name]["move_request"] = move_request
# extra setup for autotracking cameras
if (
self.config.cameras[camera_name].onvif.autotracking.enabled_in_config
and self.config.cameras[camera_name].onvif.autotracking.enabled
):
# get PTZ configuration options for feature detection and relative movement
ptz_config = None
fov_space_id = None
try:
request = ptz.create_type("GetConfigurationOptions")
request.ConfigurationToken = profile.PTZConfiguration.token
ptz_config = await ptz.GetConfigurationOptions(request)
logger.debug(f"Onvif config for {camera_name}: {ptz_config}")
logger.debug(
f"Onvif PTZ configuration options for {camera_name}: {ptz_config}"
)
except (Fault, ONVIFError, TransportError, Exception) as e:
logger.debug(
f"Unable to get PTZ configuration options for {camera_name}: {e}"
)
# detect FOV translation space for relative movement
if ptz_config is not None:
try:
fov_space_id = next(
(
i
for i, space in enumerate(
ptz_config.Spaces.RelativePanTiltTranslationSpace
)
if "TranslationSpaceFov" in space["URI"]
),
None,
)
except (AttributeError, TypeError):
fov_space_id = None
autotracking_config = self.config.cameras[camera_name].onvif.autotracking
autotracking_enabled = (
autotracking_config.enabled_in_config and autotracking_config.enabled
)
# autotracking-only: status request and service capabilities
if autotracking_enabled:
status_request = ptz.create_type("GetStatus")
status_request.ProfileToken = profile.token
self.cams[camera_name]["status_request"] = status_request
service_capabilities_request = ptz.create_type("GetServiceCapabilities")
self.cams[camera_name]["service_capabilities_request"] = (
service_capabilities_request
)
fov_space_id = next(
(
i
for i, space in enumerate(
ptz_config.Spaces.RelativePanTiltTranslationSpace
)
if "TranslationSpaceFov" in space["URI"]
),
None,
)
# status request for autotracking and filling ptz-parameters
status_request = ptz.create_type("GetStatus")
status_request.ProfileToken = profile.token
self.cams[camera_name]["status_request"] = status_request
# setup relative move request when FOV relative movement is supported
if (
fov_space_id is not None
and configs.DefaultRelativePanTiltTranslationSpace is not None
):
# one-off GetStatus to seed Translation field
status = None
try:
status = await ptz.GetStatus(status_request)
logger.debug(f"Onvif status config for {camera_name}: {status}")
one_off_status_request = ptz.create_type("GetStatus")
one_off_status_request.ProfileToken = profile.token
status = await ptz.GetStatus(one_off_status_request)
logger.debug(f"Onvif status for {camera_name}: {status}")
except Exception as e:
logger.warning(f"Unable to get status from camera: {camera_name}: {e}")
status = None
logger.warning(f"Unable to get status from camera {camera_name}: {e}")
# autotracking relative panning/tilting needs a relative zoom value set to 0
# if camera supports relative movement
rel_move_request = ptz.create_type("RelativeMove")
rel_move_request.ProfileToken = profile.token
logger.debug(f"{camera_name}: Relative move request: {rel_move_request}")
fov_uri = ptz_config["Spaces"]["RelativePanTiltTranslationSpace"][
fov_space_id
]["URI"]
if rel_move_request.Translation is None:
if status is not None:
# seed from current position
rel_move_request.Translation = status.Position
rel_move_request.Translation.PanTilt.space = fov_uri
else:
# fallback: construct Translation explicitly
rel_move_request.Translation = {
"PanTilt": {"x": 0, "y": 0, "space": fov_uri}
}
# configure zoom on relative move request
if (
self.config.cameras[camera_name].onvif.autotracking.zooming
!= ZoomingModeEnum.disabled
autotracking_enabled
and autotracking_config.zooming != ZoomingModeEnum.disabled
):
zoom_space_id = next(
(
@ -271,60 +413,43 @@ class OnvifController:
),
None,
)
# setup relative moving request for autotracking
move_request = ptz.create_type("RelativeMove")
move_request.ProfileToken = profile.token
logger.debug(f"{camera_name}: Relative move request: {move_request}")
if move_request.Translation is None and fov_space_id is not None:
move_request.Translation = status.Position
move_request.Translation.PanTilt.space = ptz_config["Spaces"][
"RelativePanTiltTranslationSpace"
][fov_space_id]["URI"]
# try setting relative zoom translation space
try:
if (
self.config.cameras[camera_name].onvif.autotracking.zooming
!= ZoomingModeEnum.disabled
):
try:
if zoom_space_id is not None:
move_request.Translation.Zoom.space = ptz_config["Spaces"][
rel_move_request.Translation.Zoom.space = ptz_config["Spaces"][
"RelativeZoomTranslationSpace"
][zoom_space_id]["URI"]
else:
if (
move_request["Translation"] is not None
and "Zoom" in move_request["Translation"]
):
del move_request["Translation"]["Zoom"]
if (
move_request["Speed"] is not None
and "Zoom" in move_request["Speed"]
):
del move_request["Speed"]["Zoom"]
logger.debug(
f"{camera_name}: Relative move request after deleting zoom: {move_request}"
except Exception as e:
autotracking_config.zooming = ZoomingModeEnum.disabled
logger.warning(
f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported. Exception: {e}"
)
except Exception as e:
self.config.cameras[
camera_name
].onvif.autotracking.zooming = ZoomingModeEnum.disabled
logger.warning(
f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported. Exception: {e}"
else:
# remove zoom fields from relative move request
if (
rel_move_request["Translation"] is not None
and "Zoom" in rel_move_request["Translation"]
):
del rel_move_request["Translation"]["Zoom"]
if (
rel_move_request["Speed"] is not None
and "Zoom" in rel_move_request["Speed"]
):
del rel_move_request["Speed"]["Zoom"]
logger.debug(
f"{camera_name}: Relative move request after deleting zoom: {rel_move_request}"
)
if move_request.Speed is None:
move_request.Speed = configs.DefaultPTZSpeed if configs else None
if rel_move_request.Speed is None:
rel_move_request.Speed = configs.DefaultPTZSpeed if configs else None
logger.debug(
f"{camera_name}: Relative move request after setup: {move_request}"
f"{camera_name}: Relative move request after setup: {rel_move_request}"
)
self.cams[camera_name]["relative_move_request"] = move_request
self.cams[camera_name]["relative_move_request"] = rel_move_request
# setup absolute moving request for autotracking zooming
move_request = ptz.create_type("AbsoluteMove")
move_request.ProfileToken = profile.token
self.cams[camera_name]["absolute_move_request"] = move_request
# setup absolute move request
abs_move_request = ptz.create_type("AbsoluteMove")
abs_move_request.ProfileToken = profile.token
self.cams[camera_name]["absolute_move_request"] = abs_move_request
# setup existing presets
try:
@ -358,48 +483,48 @@ class OnvifController:
if configs.DefaultRelativeZoomTranslationSpace:
supported_features.append("zoom-r")
if (
self.config.cameras[camera_name].onvif.autotracking.enabled_in_config
and self.config.cameras[camera_name].onvif.autotracking.enabled
):
if ptz_config is not None:
try:
# get camera's zoom limits from onvif config
self.cams[camera_name]["relative_zoom_range"] = (
ptz_config.Spaces.RelativeZoomTranslationSpace[0]
)
except Exception as e:
if (
self.config.cameras[camera_name].onvif.autotracking.zooming
== ZoomingModeEnum.relative
):
self.config.cameras[
camera_name
].onvif.autotracking.zooming = ZoomingModeEnum.disabled
if autotracking_config.zooming == ZoomingModeEnum.relative:
autotracking_config.zooming = ZoomingModeEnum.disabled
logger.warning(
f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported. Exception: {e}"
)
if configs.DefaultAbsoluteZoomPositionSpace:
supported_features.append("zoom-a")
if (
self.config.cameras[camera_name].onvif.autotracking.enabled_in_config
and self.config.cameras[camera_name].onvif.autotracking.enabled
):
if ptz_config is not None:
try:
# get camera's zoom limits from onvif config
self.cams[camera_name]["absolute_zoom_range"] = (
ptz_config.Spaces.AbsoluteZoomPositionSpace[0]
)
self.cams[camera_name]["zoom_limits"] = configs.ZoomLimits
except Exception as e:
if self.config.cameras[camera_name].onvif.autotracking.zooming:
self.config.cameras[
camera_name
].onvif.autotracking.zooming = ZoomingModeEnum.disabled
if autotracking_config.zooming != ZoomingModeEnum.disabled:
autotracking_config.zooming = ZoomingModeEnum.disabled
logger.warning(
f"Disabling autotracking zooming for {camera_name}: Absolute zoom not supported. Exception: {e}"
)
# disable autotracking zoom if required ranges are unavailable
if autotracking_config.zooming != ZoomingModeEnum.disabled:
if autotracking_config.zooming == ZoomingModeEnum.relative:
if "relative_zoom_range" not in self.cams[camera_name]:
autotracking_config.zooming = ZoomingModeEnum.disabled
logger.warning(
f"Disabling autotracking zooming for {camera_name}: Relative zoom range unavailable"
)
if autotracking_config.zooming == ZoomingModeEnum.absolute:
if "absolute_zoom_range" not in self.cams[camera_name]:
autotracking_config.zooming = ZoomingModeEnum.disabled
logger.warning(
f"Disabling autotracking zooming for {camera_name}: Absolute zoom range unavailable"
)
if (
self.cams[camera_name]["video_source_token"] is not None
and imaging is not None
@ -416,10 +541,9 @@ class OnvifController:
except (Fault, ONVIFError, TransportError, Exception) as e:
logger.debug(f"Focus not supported for {camera_name}: {e}")
# detect FOV relative movement support
if (
self.config.cameras[camera_name].onvif.autotracking.enabled_in_config
and self.config.cameras[camera_name].onvif.autotracking.enabled
and fov_space_id is not None
fov_space_id is not None
and configs.DefaultRelativePanTiltTranslationSpace is not None
):
supported_features.append("pt-r-fov")
@ -548,11 +672,8 @@ class OnvifController:
move_request.Translation.PanTilt.x = pan
move_request.Translation.PanTilt.y = tilt
if (
"zoom-r" in self.cams[camera_name]["features"]
and self.config.cameras[camera_name].onvif.autotracking.zooming
== ZoomingModeEnum.relative
):
# include zoom if requested and camera supports relative zoom
if zoom != 0 and "zoom-r" in self.cams[camera_name]["features"]:
move_request.Speed = {
"PanTilt": {
"x": speed,
@ -560,7 +681,7 @@ class OnvifController:
},
"Zoom": {"x": speed},
}
move_request.Translation.Zoom.x = zoom
move_request["Translation"]["Zoom"] = {"x": zoom}
await self.cams[camera_name]["ptz"].RelativeMove(move_request)
@ -568,12 +689,8 @@ class OnvifController:
move_request.Translation.PanTilt.x = 0
move_request.Translation.PanTilt.y = 0
if (
"zoom-r" in self.cams[camera_name]["features"]
and self.config.cameras[camera_name].onvif.autotracking.zooming
== ZoomingModeEnum.relative
):
move_request.Translation.Zoom.x = 0
if zoom != 0 and "zoom-r" in self.cams[camera_name]["features"]:
del move_request["Translation"]["Zoom"]
self.cams[camera_name]["active"] = False
@ -717,8 +834,18 @@ class OnvifController:
elif command == OnvifCommandEnum.preset:
await self._move_to_preset(camera_name, param)
elif command == OnvifCommandEnum.move_relative:
_, pan, tilt = param.split("_")
await self._move_relative(camera_name, float(pan), float(tilt), 0, 1)
parts = param.split("_")
if len(parts) == 3:
_, pan, tilt = parts
zoom = 0.0
elif len(parts) == 4:
_, pan, tilt, zoom = parts
else:
logger.error(f"Invalid move_relative params: {param}")
return
await self._move_relative(
camera_name, float(pan), float(tilt), float(zoom), 1
)
elif command in (OnvifCommandEnum.zoom_in, OnvifCommandEnum.zoom_out):
await self._zoom(camera_name, command)
elif command in (OnvifCommandEnum.focus_in, OnvifCommandEnum.focus_out):
@ -773,6 +900,7 @@ class OnvifController:
"name": camera_name,
"features": self.cams[camera_name]["features"],
"presets": list(self.cams[camera_name]["presets"].keys()),
"profiles": self.cams[camera_name].get("profiles", []),
}
if camera_name not in self.cams.keys() and camera_name in self.config.cameras:
@ -970,6 +1098,7 @@ class OnvifController:
return
logger.info("Exiting ONVIF controller...")
self.config_subscriber.stop()
def stop_and_cleanup():
try:

48
frigate/util/ffmpeg.py Normal file
View File

@ -0,0 +1,48 @@
"""FFmpeg utility functions for managing ffmpeg processes."""
import logging
import subprocess as sp
from typing import Any
from frigate.log import LogPipe
def stop_ffmpeg(ffmpeg_process: sp.Popen[Any], logger: logging.Logger):
    """Stop a running ffmpeg process, escalating to SIGKILL if needed.

    Sends a terminate signal first and waits up to 30 seconds for a graceful
    exit; if ffmpeg does not exit in time, it is force-killed.

    Args:
        ffmpeg_process: the running ffmpeg subprocess to stop.
        logger: logger used to report shutdown progress.
    """
    logger.info("Terminating the existing ffmpeg process...")
    ffmpeg_process.terminate()
    try:
        logger.info("Waiting for ffmpeg to exit gracefully...")
        # communicate() waits for exit and drains any remaining pipe output,
        # avoiding a zombie process.
        ffmpeg_process.communicate(timeout=30)
        logger.info("FFmpeg has exited")
    except sp.TimeoutExpired:
        logger.info("FFmpeg didn't exit. Force killing...")
        ffmpeg_process.kill()
        ffmpeg_process.communicate()
        logger.info("FFmpeg has been killed")
    # NOTE: the original rebound the local parameter to None here
    # (`ffmpeg_process = None`), which has no effect on the caller; removed.
def start_or_restart_ffmpeg(
    ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None
) -> sp.Popen[Any]:
    """Launch an ffmpeg subprocess, stopping a previous instance first.

    Args:
        ffmpeg_cmd: argument list for the ffmpeg invocation.
        logger: logger passed through to ``stop_ffmpeg`` on restart.
        logpipe: pipe that receives ffmpeg's stderr output.
        frame_size: bytes per raw output frame; when given, stdout is piped
            with a buffer sized for ~10 frames. When None, stdout is discarded.
        ffmpeg_process: previously running process to stop before relaunching.

    Returns:
        The newly started ffmpeg subprocess.
    """
    if ffmpeg_process is not None:
        stop_ffmpeg(ffmpeg_process, logger)

    popen_kwargs = {
        "stderr": logpipe,
        "stdin": sp.DEVNULL,
        "start_new_session": True,
    }
    if frame_size is None:
        # no raw frames are consumed from this process; discard stdout
        popen_kwargs["stdout"] = sp.DEVNULL
    else:
        # raw frames are read from stdout; buffer roughly 10 frames
        popen_kwargs["stdout"] = sp.PIPE
        popen_kwargs["bufsize"] = frame_size * 10

    return sp.Popen(ffmpeg_cmd, **popen_kwargs)

View File

@ -0,0 +1,2 @@
from .detect import * # noqa: F403
from .ffmpeg import * # noqa: F403

563
frigate/video/detect.py Normal file
View File

@ -0,0 +1,563 @@
"""Manages camera object detection processes."""
import logging
import queue
import time
from datetime import datetime, timezone
from multiprocessing import Queue
from multiprocessing.synchronize import Event as MpEvent
from typing import Any
import cv2
from frigate.camera import CameraMetrics, PTZMetrics
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import CameraConfig, DetectConfig, LoggerConfig, ModelConfig
from frigate.config.camera.camera import CameraTypeEnum
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdateSubscriber,
)
from frigate.const import (
PROCESS_PRIORITY_HIGH,
REQUEST_REGION_GRID,
)
from frigate.motion import MotionDetector
from frigate.motion.improved_motion import ImprovedMotionDetector
from frigate.object_detection.base import RemoteObjectDetector
from frigate.ptz.autotrack import ptz_moving_at_frame_time
from frigate.track import ObjectTracker
from frigate.track.norfair_tracker import NorfairTracker
from frigate.track.tracked_object import TrackedObjectAttribute
from frigate.util.builtin import EventsPerSecond
from frigate.util.image import (
FrameManager,
SharedMemoryFrameManager,
draw_box_with_label,
)
from frigate.util.object import (
create_tensor_input,
get_cluster_candidates,
get_cluster_region,
get_cluster_region_from_grid,
get_min_region_size,
get_startup_regions,
inside_any,
intersects_any,
is_object_filtered,
reduce_detections,
)
from frigate.util.process import FrigateProcess
from frigate.util.time import get_tomorrow_at_time
logger = logging.getLogger(__name__)
class CameraTracker(FrigateProcess):
    """Per-camera subprocess that runs motion detection, object detection,
    and object tracking over frames from the camera's frame queue."""

    def __init__(
        self,
        config: CameraConfig,
        model_config: ModelConfig,
        labelmap: dict[int, str],
        detection_queue: Queue,
        detected_objects_queue,
        camera_metrics: CameraMetrics,
        ptz_metrics: PTZMetrics,
        region_grid: list[list[dict[str, Any]]],
        stop_event: MpEvent,
        log_config: LoggerConfig | None = None,
    ) -> None:
        """Store per-camera configuration/queues and register as a
        high-priority Frigate process named after the camera.

        Args:
            config: this camera's configuration.
            model_config: detection model configuration and label metadata.
            labelmap: mapping from detection class index to label name.
            detection_queue: queue used to submit tensors to the detector.
            detected_objects_queue: queue receiving tracked-object results.
            camera_metrics: shared metrics (including the frame queue).
            ptz_metrics: shared PTZ state used by motion/tracking.
            region_grid: precomputed region grid for clustered detection.
            stop_event: multiprocessing event signaling shutdown.
            log_config: optional logger configuration applied at startup.
        """
        super().__init__(
            stop_event,
            PROCESS_PRIORITY_HIGH,
            name=f"frigate.process:{config.name}",
            daemon=True,
        )
        self.config = config
        self.model_config = model_config
        self.labelmap = labelmap
        self.detection_queue = detection_queue
        self.detected_objects_queue = detected_objects_queue
        self.camera_metrics = camera_metrics
        self.ptz_metrics = ptz_metrics
        self.region_grid = region_grid
        self.log_config = log_config

    def run(self) -> None:
        """Process entry point: build the detection pipeline and loop over
        frames until the stop event is set, then drain the frame queue."""
        self.pre_run_setup(self.log_config)
        frame_queue = self.camera_metrics.frame_queue

        frame_shape = self.config.frame_shape

        motion_detector = ImprovedMotionDetector(
            frame_shape,
            self.config.motion,
            self.config.detect.fps,
            name=self.config.name,
            ptz_metrics=self.ptz_metrics,
        )
        object_detector = RemoteObjectDetector(
            self.config.name,
            self.labelmap,
            self.detection_queue,
            self.model_config,
            self.stop_event,
        )
        object_tracker = NorfairTracker(self.config, self.ptz_metrics)

        frame_manager = SharedMemoryFrameManager()

        # create communication for region grid updates
        requestor = InterProcessRequestor()

        # blocks until the stop event is set (loop lives in process_frames)
        process_frames(
            requestor,
            frame_queue,
            frame_shape,
            self.model_config,
            self.config,
            frame_manager,
            motion_detector,
            object_detector,
            object_tracker,
            self.detected_objects_queue,
            self.camera_metrics,
            self.stop_event,
            self.ptz_metrics,
            self.region_grid,
        )

        # empty the frame queue and release the shared memory backing each
        # remaining frame before exiting
        logger.info(f"{self.config.name}: emptying frame queue")
        while not frame_queue.empty():
            (frame_name, _) = frame_queue.get(False)
            frame_manager.delete(frame_name)

        logger.info(f"{self.config.name}: exiting subprocess")
def detect(
    detect_config: DetectConfig,
    object_detector,
    frame,
    model_config: ModelConfig,
    region,
    objects_to_track,
    object_filters,
):
    """Run object detection on one region of a frame.

    Converts the region to a model tensor, runs the remote detector, scales
    each normalized box back into frame coordinates, drops boxes that fall
    outside the frame, applies the configured object filters, and returns the
    surviving detections.
    """
    tensor_input = create_tensor_input(frame, model_config, region)
    results = []

    for raw in object_detector.detect(tensor_input):
        box = raw[2]
        region_size = region[2] - region[0]

        # scale the normalized box back into frame coordinates, clamped to
        # the frame bounds
        left = int(max(0, (box[1] * region_size) + region[0]))
        top = int(max(0, (box[0] * region_size) + region[1]))
        right = int(min(detect_config.width - 1, (box[3] * region_size) + region[0]))
        bottom = int(min(detect_config.height - 1, (box[2] * region_size) + region[1]))

        # ignore objects that were detected outside the frame
        if left >= detect_config.width - 1 or top >= detect_config.height - 1:
            continue

        w = right - left
        h = bottom - top
        candidate = (
            raw[0],
            raw[1],
            (left, top, right, bottom),
            w * h,
            w / max(1, h),
            region,
        )

        # apply object filters
        if is_object_filtered(candidate, objects_to_track, object_filters):
            continue

        results.append(candidate)

    return results
def process_frames(
    requestor: InterProcessRequestor,
    frame_queue: Queue,
    frame_shape: tuple[int, int],
    model_config: ModelConfig,
    camera_config: CameraConfig,
    frame_manager: FrameManager,
    motion_detector: MotionDetector,
    object_detector: RemoteObjectDetector,
    object_tracker: ObjectTracker,
    detected_objects_queue: Queue,
    camera_metrics: CameraMetrics,
    stop_event: MpEvent,
    ptz_metrics: PTZMetrics,
    region_grid: list[list[dict[str, Any]]],
    exit_on_empty: bool = False,
):
    """Main per-camera detection loop.

    Pulls frames from ``frame_queue``, runs motion detection, runs the
    object detector on candidate regions, updates the object tracker, and
    publishes (camera, frame_name, frame_time, detections, motion_boxes,
    regions) tuples onto ``detected_objects_queue``. Loops until
    ``stop_event`` is set, or until the queue drains when
    ``exit_on_empty`` is True.
    """
    # the region grid is refreshed once a day (at 02:00)
    next_region_update = get_tomorrow_at_time(2)
    # subscribe to live config changes that affect this loop
    config_subscriber = CameraConfigUpdateSubscriber(
        None,
        {camera_config.name: camera_config},
        [
            CameraConfigUpdateEnum.detect,
            CameraConfigUpdateEnum.enabled,
            CameraConfigUpdateEnum.motion,
            CameraConfigUpdateEnum.objects,
        ],
    )

    fps_tracker = EventsPerSecond()
    fps_tracker.start()

    startup_scan = True
    stationary_frame_counter = 0
    camera_enabled = True
    region_min_size = get_min_region_size(model_config)

    attributes_map = model_config.attributes_map
    all_attributes = model_config.all_attributes

    # remove license_plate from attributes if this camera is a dedicated LPR cam
    if camera_config.type == CameraTypeEnum.lpr:
        modified_attributes_map = model_config.attributes_map.copy()

        if (
            "car" in modified_attributes_map
            and "license_plate" in modified_attributes_map["car"]
        ):
            modified_attributes_map["car"] = [
                attr
                for attr in modified_attributes_map["car"]
                if attr != "license_plate"
            ]

        attributes_map = modified_attributes_map
        all_attributes = [
            attr for attr in model_config.all_attributes if attr != "license_plate"
        ]

    while not stop_event.is_set():
        updated_configs = config_subscriber.check_for_updates()

        if "enabled" in updated_configs:
            # NOTE(review): prev_enabled is only ever bound here; the guard
            # below reads it, but short-circuits on camera_enabled (which
            # starts True) before prev_enabled can be unbound — confirm.
            prev_enabled = camera_enabled
            camera_enabled = camera_config.enabled

        if "motion" in updated_configs:
            motion_detector.config = camera_config.motion
            motion_detector.update_mask()

        # when the camera transitions to disabled and the capture queue has
        # drained, drop all tracker state so stale objects don't linger
        if (
            not camera_enabled
            and prev_enabled != camera_enabled
            and camera_metrics.frame_queue.empty()
        ):
            logger.debug(
                f"Camera {camera_config.name} disabled, clearing tracked objects"
            )
            prev_enabled = camera_enabled

            # Clear norfair's dictionaries
            object_tracker.tracked_objects.clear()
            object_tracker.disappeared.clear()
            object_tracker.stationary_box_history.clear()
            object_tracker.positions.clear()
            object_tracker.track_id_map.clear()

            # Clear internal norfair states
            for trackers_by_type in object_tracker.trackers.values():
                for tracker in trackers_by_type.values():
                    tracker.tracked_objects = []
            for tracker in object_tracker.default_tracker.values():
                tracker.tracked_objects = []

        if not camera_enabled:
            # idle while disabled, but keep servicing config updates
            time.sleep(0.1)
            continue

        # daily refresh of the region grid
        if datetime.now().astimezone(timezone.utc) > next_region_update:
            region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_config.name)
            next_region_update = get_tomorrow_at_time(2)

        try:
            if exit_on_empty:
                # non-blocking get so the loop exits as soon as the queue drains
                frame_name, frame_time = frame_queue.get(False)
            else:
                # 1s timeout keeps the stop_event check responsive
                frame_name, frame_time = frame_queue.get(True, 1)
        except queue.Empty:
            if exit_on_empty:
                logger.info("Exiting track_objects...")
                break
            continue

        camera_metrics.detection_frame.value = frame_time
        ptz_metrics.frame_time.value = frame_time

        # YUV420 layout: stored height is 3/2 the visible frame height
        frame = frame_manager.get(frame_name, (frame_shape[0] * 3 // 2, frame_shape[1]))

        if frame is None:
            logger.debug(
                f"{camera_config.name}: frame {frame_time} is not in memory store."
            )
            continue

        # look for motion if enabled
        motion_boxes = motion_detector.detect(frame)

        regions = []
        consolidated_detections = []

        # if detection is disabled
        if not camera_config.detect.enabled:
            object_tracker.match_and_update(frame_name, frame_time, [])
        else:
            # get stationary object ids
            # check every Nth frame for stationary objects
            # disappeared objects are not stationary
            # also check for overlapping motion boxes
            if stationary_frame_counter == camera_config.detect.stationary.interval:
                stationary_frame_counter = 0
                stationary_object_ids = []
            else:
                stationary_frame_counter += 1
                stationary_object_ids = [
                    obj["id"]
                    for obj in object_tracker.tracked_objects.values()
                    # if it has exceeded the stationary threshold
                    if obj["motionless_count"]
                    >= camera_config.detect.stationary.threshold
                    # and it hasn't disappeared
                    and object_tracker.disappeared[obj["id"]] == 0
                    # and it doesn't overlap with any current motion boxes when not calibrating
                    and not intersects_any(
                        obj["box"],
                        [] if motion_detector.is_calibrating() else motion_boxes,
                    )
                ]

            # get tracked object boxes that aren't stationary
            tracked_object_boxes = [
                (
                    # use existing object box for stationary objects
                    obj["estimate"]
                    if obj["motionless_count"]
                    < camera_config.detect.stationary.threshold
                    else obj["box"]
                )
                for obj in object_tracker.tracked_objects.values()
                if obj["id"] not in stationary_object_ids
            ]
            object_boxes = tracked_object_boxes + object_tracker.untracked_object_boxes

            # get consolidated regions for tracked objects
            regions = [
                get_cluster_region(
                    frame_shape, region_min_size, candidate, object_boxes
                )
                for candidate in get_cluster_candidates(
                    frame_shape, region_min_size, object_boxes
                )
            ]

            # only add in the motion boxes when not calibrating and a ptz is not moving via autotracking
            # ptz_moving_at_frame_time() always returns False for non-autotracking cameras
            if not motion_detector.is_calibrating() and not ptz_moving_at_frame_time(
                frame_time,
                ptz_metrics.start_time.value,
                ptz_metrics.stop_time.value,
            ):
                # find motion boxes that are not inside tracked object regions
                standalone_motion_boxes = [
                    b for b in motion_boxes if not inside_any(b, regions)
                ]

                if standalone_motion_boxes:
                    motion_clusters = get_cluster_candidates(
                        frame_shape,
                        region_min_size,
                        standalone_motion_boxes,
                    )
                    motion_regions = [
                        get_cluster_region_from_grid(
                            frame_shape,
                            region_min_size,
                            candidate,
                            standalone_motion_boxes,
                            region_grid,
                        )
                        for candidate in motion_clusters
                    ]
                    regions += motion_regions

            # if starting up, get the next startup scan region
            if startup_scan:
                for region in get_startup_regions(
                    frame_shape, region_min_size, region_grid
                ):
                    regions.append(region)
                startup_scan = False

            # resize regions and detect
            # seed with stationary objects
            detections = [
                (
                    obj["label"],
                    obj["score"],
                    obj["box"],
                    obj["area"],
                    obj["ratio"],
                    obj["region"],
                )
                for obj in object_tracker.tracked_objects.values()
                if obj["id"] in stationary_object_ids
            ]

            for region in regions:
                detections.extend(
                    detect(
                        camera_config.detect,
                        object_detector,
                        frame,
                        model_config,
                        region,
                        camera_config.objects.track,
                        camera_config.objects.filters,
                    )
                )

            consolidated_detections = reduce_detections(frame_shape, detections)

            # if detection was run on this frame, consolidate
            if len(regions) > 0:
                tracked_detections = [
                    d for d in consolidated_detections if d[0] not in all_attributes
                ]
                # now that we have refined our detections, we need to track objects
                object_tracker.match_and_update(
                    frame_name, frame_time, tracked_detections
                )
            # else, just update the frame times for the stationary objects
            else:
                object_tracker.update_frame_times(frame_name, frame_time)

        # group the attribute detections based on what label they apply to
        attribute_detections: dict[str, list[TrackedObjectAttribute]] = {}

        for label, attribute_labels in attributes_map.items():
            attribute_detections[label] = [
                TrackedObjectAttribute(d)
                for d in consolidated_detections
                if d[0] in attribute_labels
            ]

        # build detections
        detections = {}

        for obj in object_tracker.tracked_objects.values():
            detections[obj["id"]] = {**obj, "attributes": []}

        # find the best object for each attribute to be assigned to
        all_objects: list[dict[str, Any]] = object_tracker.tracked_objects.values()

        for attributes in attribute_detections.values():
            for attribute in attributes:
                # only consider objects whose label accepts this attribute
                filtered_objects = filter(
                    lambda o: attribute.label in attributes_map.get(o["label"], []),
                    all_objects,
                )
                selected_object_id = attribute.find_best_object(filtered_objects)

                if selected_object_id is not None:
                    detections[selected_object_id]["attributes"].append(
                        attribute.get_tracking_data()
                    )

        # debug object tracking
        # (disabled visualization path; flip to True locally when debugging)
        if False:
            bgr_frame = cv2.cvtColor(
                frame,
                cv2.COLOR_YUV2BGR_I420,
            )
            object_tracker.debug_draw(bgr_frame, frame_time)
            cv2.imwrite(
                f"debug/frames/track-{'{:.6f}'.format(frame_time)}.jpg", bgr_frame
            )

        # debug
        # (disabled visualization path; flip to True locally when debugging)
        if False:
            bgr_frame = cv2.cvtColor(
                frame,
                cv2.COLOR_YUV2BGR_I420,
            )

            for m_box in motion_boxes:
                cv2.rectangle(
                    bgr_frame,
                    (m_box[0], m_box[1]),
                    (m_box[2], m_box[3]),
                    (0, 0, 255),
                    2,
                )

            for b in tracked_object_boxes:
                cv2.rectangle(
                    bgr_frame,
                    (b[0], b[1]),
                    (b[2], b[3]),
                    (255, 0, 0),
                    2,
                )

            for obj in object_tracker.tracked_objects.values():
                if obj["frame_time"] == frame_time:
                    thickness = 2
                    color = model_config.colormap.get(obj["label"], (255, 255, 255))
                else:
                    thickness = 1
                    color = (255, 0, 0)

                # draw the bounding boxes on the frame
                box = obj["box"]

                draw_box_with_label(
                    bgr_frame,
                    box[0],
                    box[1],
                    box[2],
                    box[3],
                    obj["label"],
                    obj["id"],
                    thickness=thickness,
                    color=color,
                )

            for region in regions:
                cv2.rectangle(
                    bgr_frame,
                    (region[0], region[1]),
                    (region[2], region[3]),
                    (0, 255, 0),
                    2,
                )

            cv2.imwrite(
                f"debug/frames/{camera_config.name}-{'{:.6f}'.format(frame_time)}.jpg",
                bgr_frame,
            )

        # add to the queue if not full
        if detected_objects_queue.full():
            frame_manager.close(frame_name)
            continue
        else:
            fps_tracker.update()
            camera_metrics.process_fps.value = fps_tracker.eps()
            detected_objects_queue.put(
                (
                    camera_config.name,
                    frame_name,
                    frame_time,
                    detections,
                    motion_boxes,
                    regions,
                )
            )
            camera_metrics.detection_fps.value = object_detector.fps.eps()
            frame_manager.close(frame_name)

    # loop exited: tear down the helpers owned by this function
    motion_detector.stop()
    requestor.stop()
    config_subscriber.stop()

587
frigate/video.py → frigate/video/ffmpeg.py Executable file → Normal file
View File

@ -1,3 +1,5 @@
"""Manages ffmpeg processes for camera frame capture."""
import logging
import queue
import subprocess as sp
@ -9,97 +11,30 @@ from multiprocessing import Queue, Value
from multiprocessing.synchronize import Event as MpEvent
from typing import Any
import cv2
from frigate.camera import CameraMetrics, PTZMetrics
from frigate.camera import CameraMetrics
from frigate.comms.inter_process import InterProcessRequestor
from frigate.comms.recordings_updater import (
RecordingsDataSubscriber,
RecordingsDataTypeEnum,
)
from frigate.config import CameraConfig, DetectConfig, LoggerConfig, ModelConfig
from frigate.config.camera.camera import CameraTypeEnum
from frigate.config import CameraConfig, LoggerConfig
from frigate.config.camera.updater import (
CameraConfigUpdateEnum,
CameraConfigUpdateSubscriber,
)
from frigate.const import (
PROCESS_PRIORITY_HIGH,
REQUEST_REGION_GRID,
)
from frigate.const import PROCESS_PRIORITY_HIGH
from frigate.log import LogPipe
from frigate.motion import MotionDetector
from frigate.motion.improved_motion import ImprovedMotionDetector
from frigate.object_detection.base import RemoteObjectDetector
from frigate.ptz.autotrack import ptz_moving_at_frame_time
from frigate.track import ObjectTracker
from frigate.track.norfair_tracker import NorfairTracker
from frigate.track.tracked_object import TrackedObjectAttribute
from frigate.util.builtin import EventsPerSecond
from frigate.util.ffmpeg import start_or_restart_ffmpeg, stop_ffmpeg
from frigate.util.image import (
FrameManager,
SharedMemoryFrameManager,
draw_box_with_label,
)
from frigate.util.object import (
create_tensor_input,
get_cluster_candidates,
get_cluster_region,
get_cluster_region_from_grid,
get_min_region_size,
get_startup_regions,
inside_any,
intersects_any,
is_object_filtered,
reduce_detections,
)
from frigate.util.process import FrigateProcess
from frigate.util.time import get_tomorrow_at_time
logger = logging.getLogger(__name__)
def stop_ffmpeg(ffmpeg_process: sp.Popen[Any], logger: logging.Logger):
    """Gracefully stop a running ffmpeg process.

    Sends SIGTERM and waits up to 30 seconds for the process to exit;
    if it does not exit in time, the process is killed and then reaped.

    :param ffmpeg_process: the running ffmpeg Popen handle
    :param logger: logger used for progress messages
    """
    logger.info("Terminating the existing ffmpeg process...")
    ffmpeg_process.terminate()
    try:
        logger.info("Waiting for ffmpeg to exit gracefully...")
        # communicate() also reaps the process so no zombie is left behind
        ffmpeg_process.communicate(timeout=30)
        logger.info("FFmpeg has exited")
    except sp.TimeoutExpired:
        logger.info("FFmpeg didn't exit. Force killing...")
        ffmpeg_process.kill()
        ffmpeg_process.communicate()
        logger.info("FFmpeg has been killed")
    # NOTE: the original ended with `ffmpeg_process = None`, which only
    # rebound the local name and had no effect on the caller's reference;
    # that dead assignment has been removed.
def start_or_restart_ffmpeg(
    ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None
) -> sp.Popen[Any]:
    """Launch an ffmpeg process, stopping the previous one first if given.

    When ``frame_size`` is provided, stdout is a pipe buffered for raw
    frames (10 frames deep); otherwise stdout is discarded. stderr always
    goes to ``logpipe`` and stdin is closed. The process is started in a
    new session so signals to the parent don't reach it directly.
    """
    if ffmpeg_process is not None:
        stop_ffmpeg(ffmpeg_process, logger)

    popen_kwargs = {
        "stderr": logpipe,
        "stdin": sp.DEVNULL,
        "start_new_session": True,
    }

    if frame_size is None:
        # no frame output expected; discard stdout entirely
        popen_kwargs["stdout"] = sp.DEVNULL
    else:
        # raw frames are read from stdout; size the buffer for 10 frames
        popen_kwargs["stdout"] = sp.PIPE
        popen_kwargs["bufsize"] = frame_size * 10

    return sp.Popen(ffmpeg_cmd, **popen_kwargs)
def capture_frames(
ffmpeg_process: sp.Popen[Any],
config: CameraConfig,
@ -708,513 +643,3 @@ class CameraCapture(FrigateProcess):
)
camera_watchdog.start()
camera_watchdog.join()
class CameraTracker(FrigateProcess):
    """Per-camera process that runs motion detection, object detection,
    and object tracking over captured frames via ``process_frames``."""

    def __init__(
        self,
        config: CameraConfig,
        model_config: ModelConfig,
        labelmap: dict[int, str],
        detection_queue: Queue,
        detected_objects_queue,
        camera_metrics: CameraMetrics,
        ptz_metrics: PTZMetrics,
        region_grid: list[list[dict[str, Any]]],
        stop_event: MpEvent,
        log_config: LoggerConfig | None = None,
    ) -> None:
        # run at high priority since detection latency affects tracking
        super().__init__(
            stop_event,
            PROCESS_PRIORITY_HIGH,
            name=f"frigate.process:{config.name}",
            daemon=True,
        )
        self.config = config
        self.model_config = model_config
        self.labelmap = labelmap
        self.detection_queue = detection_queue
        self.detected_objects_queue = detected_objects_queue
        self.camera_metrics = camera_metrics
        self.ptz_metrics = ptz_metrics
        self.region_grid = region_grid
        self.log_config = log_config

    def run(self) -> None:
        """Process entry point: build the detector/tracker pipeline, loop
        in ``process_frames`` until stopped, then drain the frame queue."""
        self.pre_run_setup(self.log_config)
        frame_queue = self.camera_metrics.frame_queue

        frame_shape = self.config.frame_shape

        motion_detector = ImprovedMotionDetector(
            frame_shape,
            self.config.motion,
            self.config.detect.fps,
            name=self.config.name,
            ptz_metrics=self.ptz_metrics,
        )
        object_detector = RemoteObjectDetector(
            self.config.name,
            self.labelmap,
            self.detection_queue,
            self.model_config,
            self.stop_event,
        )

        object_tracker = NorfairTracker(self.config, self.ptz_metrics)

        frame_manager = SharedMemoryFrameManager()

        # create communication for region grid updates
        requestor = InterProcessRequestor()

        process_frames(
            requestor,
            frame_queue,
            frame_shape,
            self.model_config,
            self.config,
            frame_manager,
            motion_detector,
            object_detector,
            object_tracker,
            self.detected_objects_queue,
            self.camera_metrics,
            self.stop_event,
            self.ptz_metrics,
            self.region_grid,
        )

        # empty the frame queue
        logger.info(f"{self.config.name}: emptying frame queue")
        while not frame_queue.empty():
            (frame_name, _) = frame_queue.get(False)
            frame_manager.delete(frame_name)

        logger.info(f"{self.config.name}: exiting subprocess")
def detect(
    detect_config: DetectConfig,
    object_detector,
    frame,
    model_config: ModelConfig,
    region,
    objects_to_track,
    object_filters,
):
    """Run the object detector on one region of the frame.

    Detector boxes are normalized to the region; they are scaled into
    absolute frame coordinates, clamped to the frame bounds, and
    filtered, returning (label, score, box, area, ratio, region) tuples.
    """
    tensor_input = create_tensor_input(frame, model_config, region)

    detections = []
    region_detections = object_detector.detect(tensor_input)

    for d in region_detections:
        box = d[2]
        # a single edge length scales both axes (regions are square)
        size = region[2] - region[0]
        # scale normalized box coords into frame coords, clamped to bounds
        x_min = int(max(0, (box[1] * size) + region[0]))
        y_min = int(max(0, (box[0] * size) + region[1]))
        x_max = int(min(detect_config.width - 1, (box[3] * size) + region[0]))
        y_max = int(min(detect_config.height - 1, (box[2] * size) + region[1]))

        # ignore objects that were detected outside the frame
        if (x_min >= detect_config.width - 1) or (y_min >= detect_config.height - 1):
            continue

        width = x_max - x_min
        height = y_max - y_min
        area = width * height
        # max(1, height) guards against division by zero on degenerate boxes
        ratio = width / max(1, height)
        det = (d[0], d[1], (x_min, y_min, x_max, y_max), area, ratio, region)
        # apply object filters
        if is_object_filtered(det, objects_to_track, object_filters):
            continue
        detections.append(det)

    return detections
def process_frames(
    requestor: InterProcessRequestor,
    frame_queue: Queue,
    frame_shape: tuple[int, int],
    model_config: ModelConfig,
    camera_config: CameraConfig,
    frame_manager: FrameManager,
    motion_detector: MotionDetector,
    object_detector: RemoteObjectDetector,
    object_tracker: ObjectTracker,
    detected_objects_queue: Queue,
    camera_metrics: CameraMetrics,
    stop_event: MpEvent,
    ptz_metrics: PTZMetrics,
    region_grid: list[list[dict[str, Any]]],
    exit_on_empty: bool = False,
):
    """Main per-camera detection loop.

    Pulls frames from ``frame_queue``, runs motion detection, runs the
    object detector on candidate regions, updates the object tracker, and
    publishes (camera, frame_name, frame_time, detections, motion_boxes,
    regions) tuples onto ``detected_objects_queue``. Loops until
    ``stop_event`` is set, or until the queue drains when
    ``exit_on_empty`` is True.
    """
    # the region grid is refreshed once a day (at 02:00)
    next_region_update = get_tomorrow_at_time(2)
    # subscribe to live config changes that affect this loop
    config_subscriber = CameraConfigUpdateSubscriber(
        None,
        {camera_config.name: camera_config},
        [
            CameraConfigUpdateEnum.detect,
            CameraConfigUpdateEnum.enabled,
            CameraConfigUpdateEnum.motion,
            CameraConfigUpdateEnum.objects,
        ],
    )

    fps_tracker = EventsPerSecond()
    fps_tracker.start()

    startup_scan = True
    stationary_frame_counter = 0
    camera_enabled = True
    region_min_size = get_min_region_size(model_config)

    attributes_map = model_config.attributes_map
    all_attributes = model_config.all_attributes

    # remove license_plate from attributes if this camera is a dedicated LPR cam
    if camera_config.type == CameraTypeEnum.lpr:
        modified_attributes_map = model_config.attributes_map.copy()

        if (
            "car" in modified_attributes_map
            and "license_plate" in modified_attributes_map["car"]
        ):
            modified_attributes_map["car"] = [
                attr
                for attr in modified_attributes_map["car"]
                if attr != "license_plate"
            ]

        attributes_map = modified_attributes_map
        all_attributes = [
            attr for attr in model_config.all_attributes if attr != "license_plate"
        ]

    while not stop_event.is_set():
        updated_configs = config_subscriber.check_for_updates()

        if "enabled" in updated_configs:
            # NOTE(review): prev_enabled is only ever bound here; the guard
            # below reads it, but short-circuits on camera_enabled (which
            # starts True) before prev_enabled can be unbound — confirm.
            prev_enabled = camera_enabled
            camera_enabled = camera_config.enabled

        if "motion" in updated_configs:
            motion_detector.config = camera_config.motion
            motion_detector.update_mask()

        # when the camera transitions to disabled and the capture queue has
        # drained, drop all tracker state so stale objects don't linger
        if (
            not camera_enabled
            and prev_enabled != camera_enabled
            and camera_metrics.frame_queue.empty()
        ):
            logger.debug(
                f"Camera {camera_config.name} disabled, clearing tracked objects"
            )
            prev_enabled = camera_enabled

            # Clear norfair's dictionaries
            object_tracker.tracked_objects.clear()
            object_tracker.disappeared.clear()
            object_tracker.stationary_box_history.clear()
            object_tracker.positions.clear()
            object_tracker.track_id_map.clear()

            # Clear internal norfair states
            for trackers_by_type in object_tracker.trackers.values():
                for tracker in trackers_by_type.values():
                    tracker.tracked_objects = []
            for tracker in object_tracker.default_tracker.values():
                tracker.tracked_objects = []

        if not camera_enabled:
            # idle while disabled, but keep servicing config updates
            time.sleep(0.1)
            continue

        # daily refresh of the region grid
        if datetime.now().astimezone(timezone.utc) > next_region_update:
            region_grid = requestor.send_data(REQUEST_REGION_GRID, camera_config.name)
            next_region_update = get_tomorrow_at_time(2)

        try:
            if exit_on_empty:
                # non-blocking get so the loop exits as soon as the queue drains
                frame_name, frame_time = frame_queue.get(False)
            else:
                # 1s timeout keeps the stop_event check responsive
                frame_name, frame_time = frame_queue.get(True, 1)
        except queue.Empty:
            if exit_on_empty:
                logger.info("Exiting track_objects...")
                break
            continue

        camera_metrics.detection_frame.value = frame_time
        ptz_metrics.frame_time.value = frame_time

        # YUV420 layout: stored height is 3/2 the visible frame height
        frame = frame_manager.get(frame_name, (frame_shape[0] * 3 // 2, frame_shape[1]))

        if frame is None:
            logger.debug(
                f"{camera_config.name}: frame {frame_time} is not in memory store."
            )
            continue

        # look for motion if enabled
        motion_boxes = motion_detector.detect(frame)

        regions = []
        consolidated_detections = []

        # if detection is disabled
        if not camera_config.detect.enabled:
            object_tracker.match_and_update(frame_name, frame_time, [])
        else:
            # get stationary object ids
            # check every Nth frame for stationary objects
            # disappeared objects are not stationary
            # also check for overlapping motion boxes
            if stationary_frame_counter == camera_config.detect.stationary.interval:
                stationary_frame_counter = 0
                stationary_object_ids = []
            else:
                stationary_frame_counter += 1
                stationary_object_ids = [
                    obj["id"]
                    for obj in object_tracker.tracked_objects.values()
                    # if it has exceeded the stationary threshold
                    if obj["motionless_count"]
                    >= camera_config.detect.stationary.threshold
                    # and it hasn't disappeared
                    and object_tracker.disappeared[obj["id"]] == 0
                    # and it doesn't overlap with any current motion boxes when not calibrating
                    and not intersects_any(
                        obj["box"],
                        [] if motion_detector.is_calibrating() else motion_boxes,
                    )
                ]

            # get tracked object boxes that aren't stationary
            tracked_object_boxes = [
                (
                    # use existing object box for stationary objects
                    obj["estimate"]
                    if obj["motionless_count"]
                    < camera_config.detect.stationary.threshold
                    else obj["box"]
                )
                for obj in object_tracker.tracked_objects.values()
                if obj["id"] not in stationary_object_ids
            ]
            object_boxes = tracked_object_boxes + object_tracker.untracked_object_boxes

            # get consolidated regions for tracked objects
            regions = [
                get_cluster_region(
                    frame_shape, region_min_size, candidate, object_boxes
                )
                for candidate in get_cluster_candidates(
                    frame_shape, region_min_size, object_boxes
                )
            ]

            # only add in the motion boxes when not calibrating and a ptz is not moving via autotracking
            # ptz_moving_at_frame_time() always returns False for non-autotracking cameras
            if not motion_detector.is_calibrating() and not ptz_moving_at_frame_time(
                frame_time,
                ptz_metrics.start_time.value,
                ptz_metrics.stop_time.value,
            ):
                # find motion boxes that are not inside tracked object regions
                standalone_motion_boxes = [
                    b for b in motion_boxes if not inside_any(b, regions)
                ]

                if standalone_motion_boxes:
                    motion_clusters = get_cluster_candidates(
                        frame_shape,
                        region_min_size,
                        standalone_motion_boxes,
                    )
                    motion_regions = [
                        get_cluster_region_from_grid(
                            frame_shape,
                            region_min_size,
                            candidate,
                            standalone_motion_boxes,
                            region_grid,
                        )
                        for candidate in motion_clusters
                    ]
                    regions += motion_regions

            # if starting up, get the next startup scan region
            if startup_scan:
                for region in get_startup_regions(
                    frame_shape, region_min_size, region_grid
                ):
                    regions.append(region)
                startup_scan = False

            # resize regions and detect
            # seed with stationary objects
            detections = [
                (
                    obj["label"],
                    obj["score"],
                    obj["box"],
                    obj["area"],
                    obj["ratio"],
                    obj["region"],
                )
                for obj in object_tracker.tracked_objects.values()
                if obj["id"] in stationary_object_ids
            ]

            for region in regions:
                detections.extend(
                    detect(
                        camera_config.detect,
                        object_detector,
                        frame,
                        model_config,
                        region,
                        camera_config.objects.track,
                        camera_config.objects.filters,
                    )
                )

            consolidated_detections = reduce_detections(frame_shape, detections)

            # if detection was run on this frame, consolidate
            if len(regions) > 0:
                tracked_detections = [
                    d for d in consolidated_detections if d[0] not in all_attributes
                ]
                # now that we have refined our detections, we need to track objects
                object_tracker.match_and_update(
                    frame_name, frame_time, tracked_detections
                )
            # else, just update the frame times for the stationary objects
            else:
                object_tracker.update_frame_times(frame_name, frame_time)

        # group the attribute detections based on what label they apply to
        attribute_detections: dict[str, list[TrackedObjectAttribute]] = {}

        for label, attribute_labels in attributes_map.items():
            attribute_detections[label] = [
                TrackedObjectAttribute(d)
                for d in consolidated_detections
                if d[0] in attribute_labels
            ]

        # build detections
        detections = {}

        for obj in object_tracker.tracked_objects.values():
            detections[obj["id"]] = {**obj, "attributes": []}

        # find the best object for each attribute to be assigned to
        all_objects: list[dict[str, Any]] = object_tracker.tracked_objects.values()

        for attributes in attribute_detections.values():
            for attribute in attributes:
                # only consider objects whose label accepts this attribute
                filtered_objects = filter(
                    lambda o: attribute.label in attributes_map.get(o["label"], []),
                    all_objects,
                )
                selected_object_id = attribute.find_best_object(filtered_objects)

                if selected_object_id is not None:
                    detections[selected_object_id]["attributes"].append(
                        attribute.get_tracking_data()
                    )

        # debug object tracking
        # (disabled visualization path; flip to True locally when debugging)
        if False:
            bgr_frame = cv2.cvtColor(
                frame,
                cv2.COLOR_YUV2BGR_I420,
            )
            object_tracker.debug_draw(bgr_frame, frame_time)
            cv2.imwrite(
                f"debug/frames/track-{'{:.6f}'.format(frame_time)}.jpg", bgr_frame
            )

        # debug
        # (disabled visualization path; flip to True locally when debugging)
        if False:
            bgr_frame = cv2.cvtColor(
                frame,
                cv2.COLOR_YUV2BGR_I420,
            )

            for m_box in motion_boxes:
                cv2.rectangle(
                    bgr_frame,
                    (m_box[0], m_box[1]),
                    (m_box[2], m_box[3]),
                    (0, 0, 255),
                    2,
                )

            for b in tracked_object_boxes:
                cv2.rectangle(
                    bgr_frame,
                    (b[0], b[1]),
                    (b[2], b[3]),
                    (255, 0, 0),
                    2,
                )

            for obj in object_tracker.tracked_objects.values():
                if obj["frame_time"] == frame_time:
                    thickness = 2
                    color = model_config.colormap.get(obj["label"], (255, 255, 255))
                else:
                    thickness = 1
                    color = (255, 0, 0)

                # draw the bounding boxes on the frame
                box = obj["box"]

                draw_box_with_label(
                    bgr_frame,
                    box[0],
                    box[1],
                    box[2],
                    box[3],
                    obj["label"],
                    obj["id"],
                    thickness=thickness,
                    color=color,
                )

            for region in regions:
                cv2.rectangle(
                    bgr_frame,
                    (region[0], region[1]),
                    (region[2], region[3]),
                    (0, 255, 0),
                    2,
                )

            cv2.imwrite(
                f"debug/frames/{camera_config.name}-{'{:.6f}'.format(frame_time)}.jpg",
                bgr_frame,
            )

        # add to the queue if not full
        if detected_objects_queue.full():
            frame_manager.close(frame_name)
            continue
        else:
            fps_tracker.update()
            camera_metrics.process_fps.value = fps_tracker.eps()
            detected_objects_queue.put(
                (
                    camera_config.name,
                    frame_name,
                    frame_time,
                    detections,
                    motion_boxes,
                    regions,
                )
            )
            camera_metrics.detection_fps.value = object_detector.fps.eps()
            frame_manager.close(frame_name)

    # loop exited: tear down the helpers owned by this function
    motion_detector.stop()
    requestor.stop()
    config_subscriber.stop()

View File

@ -787,6 +787,10 @@
"label": "Disable TLS verify",
"description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)."
},
"profile": {
"label": "ONVIF profile",
"description": "Specific ONVIF media profile to use for PTZ control, matched by token or name. If not set, the first profile with valid PTZ configuration is selected automatically."
},
"autotracking": {
"label": "Autotracking",
"description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.",

View File

@ -1536,6 +1536,10 @@
"label": "Disable TLS verify",
"description": "Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only)."
},
"profile": {
"label": "ONVIF profile",
"description": "Specific ONVIF media profile to use for PTZ control, matched by token or name. If not set, the first profile with valid PTZ configuration is selected automatically."
},
"autotracking": {
"label": "Autotracking",
"description": "Automatically track moving objects and keep them centered in the frame using PTZ camera movements.",

View File

@ -17,6 +17,7 @@
"clickMove": {
"label": "Click in the frame to center the camera",
"enable": "Enable click to move",
"enableWithZoom": "Enable click to move / drag to zoom",
"disable": "Disable click to move"
},
"left": {

View File

@ -1573,5 +1573,9 @@
"hardwareNone": "No hardware acceleration",
"hardwareAuto": "Automatic hardware acceleration"
}
},
"onvif": {
"profileAuto": "Auto",
"profileLoading": "Loading profiles..."
}
}

View File

@ -3,20 +3,12 @@ import type { SectionConfigOverrides } from "./types";
const onvif: SectionConfigOverrides = {
base: {
sectionDocs: "/configuration/cameras#setting-up-camera-ptz-controls",
restartRequired: [
"host",
"port",
"user",
"password",
"tls_insecure",
"ignore_time_mismatch",
"autotracking.calibrate_on_startup",
],
fieldOrder: [
"host",
"port",
"user",
"password",
"profile",
"tls_insecure",
"ignore_time_mismatch",
"autotracking",
@ -27,10 +19,14 @@ const onvif: SectionConfigOverrides = {
],
advancedFields: ["tls_insecure", "ignore_time_mismatch"],
overrideFields: [],
restartRequired: ["autotracking.calibrate_on_startup"],
uiSchema: {
host: {
"ui:options": { size: "sm" },
},
profile: {
"ui:widget": "onvifProfile",
},
autotracking: {
required_zones: {
"ui:widget": "zoneNames",

View File

@ -29,6 +29,7 @@ import { TimezoneSelectWidget } from "./widgets/TimezoneSelectWidget";
import { CameraPathWidget } from "./widgets/CameraPathWidget";
import { OptionalFieldWidget } from "./widgets/OptionalFieldWidget";
import { SemanticSearchModelWidget } from "./widgets/SemanticSearchModelWidget";
import { OnvifProfileWidget } from "./widgets/OnvifProfileWidget";
import { FieldTemplate } from "./templates/FieldTemplate";
import { ObjectFieldTemplate } from "./templates/ObjectFieldTemplate";
@ -79,6 +80,7 @@ export const frigateTheme: FrigateTheme = {
timezoneSelect: TimezoneSelectWidget,
optionalField: OptionalFieldWidget,
semanticSearchModel: SemanticSearchModelWidget,
onvifProfile: OnvifProfileWidget,
},
templates: {
FieldTemplate: FieldTemplate as React.ComponentType<FieldTemplateProps>,

View File

@ -0,0 +1,84 @@
import type { WidgetProps } from "@rjsf/utils";
import useSWR from "swr";
import { useTranslation } from "react-i18next";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
import type { ConfigFormContext } from "@/types/configForm";
import type { CameraPtzInfo } from "@/types/ptz";
import { getSizedFieldClassName } from "../utils";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import { cn } from "@/lib/utils";
// Sentinel select value meaning "no explicit profile — let Frigate choose".
const AUTO_VALUE = "__auto__";

// rjsf widget that renders a select of the camera's ONVIF media profiles,
// fetched from the `<camera>/ptz/info` endpoint. Profiles are only fetched
// at the camera level when an ONVIF host is configured.
export function OnvifProfileWidget(props: WidgetProps) {
  const { id, value, disabled, readonly, onChange, schema, options } = props;
  const { t } = useTranslation(["views/settings"]);

  const formContext = props.registry?.formContext as
    | ConfigFormContext
    | undefined;
  const cameraName = formContext?.cameraName;
  const isCameraLevel = formContext?.level === "camera";
  const hasOnvifHost = !!formContext?.fullCameraConfig?.onvif?.host;

  // null key disables the fetch when profiles cannot exist for this context
  const { data: ptzInfo } = useSWR<CameraPtzInfo>(
    isCameraLevel && cameraName && hasOnvifHost
      ? `${cameraName}/ptz/info`
      : null,
    {
      // ONVIF may not be initialized yet when the settings page loads,
      // so retry until profiles become available
      refreshInterval: (data) =>
        data?.profiles && data.profiles.length > 0 ? 0 : 5000,
    },
  );

  const profiles = ptzInfo?.profiles ?? [];
  const fieldClassName = getSizedFieldClassName(options, "md");
  const hasProfiles = profiles.length > 0;
  // true while still polling for profiles on a camera that should have them
  const waiting = isCameraLevel && !!cameraName && hasOnvifHost && !hasProfiles;
  // a null/undefined config value maps to the "Auto" sentinel
  const selected = value ?? AUTO_VALUE;

  if (waiting) {
    return (
      <div className={cn("flex items-center gap-2", fieldClassName)}>
        <ActivityIndicator className="size-4" />
        <span className="text-sm text-muted-foreground">
          {t("onvif.profileLoading")}
        </span>
      </div>
    );
  }

  return (
    <Select
      value={String(selected)}
      onValueChange={(val) => {
        // persist null for "Auto" so no explicit profile is stored
        onChange(val === AUTO_VALUE ? null : val);
      }}
      disabled={disabled || readonly}
    >
      <SelectTrigger id={id} className={cn("text-left", fieldClassName)}>
        <SelectValue placeholder={schema.title || "Select..."} />
      </SelectTrigger>
      <SelectContent>
        <SelectItem value={AUTO_VALUE}>{t("onvif.profileAuto")}</SelectItem>
        {profiles.map((p) => (
          <SelectItem key={p.token} value={p.token}>
            {p.name !== p.token ? `${p.name} (${p.token})` : p.token}
          </SelectItem>
        ))}
        {/* keep a previously-saved value selectable even when no profile
            list is available (e.g. fetch was skipped for this context) */}
        {!hasProfiles && value && value !== AUTO_VALUE && (
          <SelectItem value={String(value)}>{String(value)}</SelectItem>
        )}
      </SelectContent>
    </Select>
  );
}

View File

@ -284,7 +284,9 @@ export default function PtzControlPanel({
<p>
{clickOverlay
? t("ptz.move.clickMove.disable")
: t("ptz.move.clickMove.enable")}
: ptz?.features?.includes("zoom-r")
? t("ptz.move.clickMove.enableWithZoom")
: t("ptz.move.clickMove.enable")}
</p>
</TooltipContent>
</Tooltip>

View File

@ -7,8 +7,14 @@ type PtzFeature =
| "pt-r-fov"
| "focus";
// A single ONVIF media profile advertised by the camera.
export type OnvifProfile = {
  name: string; // human-readable profile name (may equal the token)
  token: string; // profile token used to select the profile for PTZ
};
// PTZ capability info for a camera, as returned by the ptz/info endpoint.
export type CameraPtzInfo = {
  name: string;
  features: PtzFeature[]; // supported PTZ features (e.g. "zoom-r")
  presets: string[]; // named PTZ presets defined on the camera
  profiles: OnvifProfile[]; // ONVIF media profiles available for selection
};

View File

@ -122,6 +122,11 @@ import {
SnapshotResult,
} from "@/utils/snapshotUtil";
import ActivityIndicator from "@/components/indicators/activity-indicator";
import { Stage, Layer, Rect } from "react-konva";
import type { KonvaEventObject } from "konva/lib/Node";
/** Pixel threshold to distinguish drag from click. */
const DRAG_MIN_PX = 15;
type LiveCameraViewProps = {
config?: FrigateConfig;
@ -213,45 +218,112 @@ export default function LiveCameraView({
};
}, [audioTranscriptionState, sendTranscription]);
// click overlay for ptzs
// click-to-move / drag-to-zoom overlay for PTZ cameras
const [clickOverlay, setClickOverlay] = useState(false);
const clickOverlayRef = useRef<HTMLDivElement>(null);
const { send: sendPtz } = usePtzCommand(camera.name);
const handleOverlayClick = useCallback(
(
e: React.MouseEvent<HTMLDivElement> | React.TouchEvent<HTMLDivElement>,
) => {
if (!clickOverlay) {
return;
}
// drag rectangle state in stage-local coordinates
const [ptzRect, setPtzRect] = useState<{
x: number;
y: number;
width: number;
height: number;
} | null>(null);
const [isPtzDrawing, setIsPtzDrawing] = useState(false);
// raw origin to determine drag direction (not min/max corrected)
const ptzOriginRef = useRef<{ x: number; y: number } | null>(null);
let clientX;
let clientY;
if ("TouchEvent" in window && e.nativeEvent instanceof TouchEvent) {
clientX = e.nativeEvent.touches[0].clientX;
clientY = e.nativeEvent.touches[0].clientY;
} else if (e.nativeEvent instanceof MouseEvent) {
clientX = e.nativeEvent.clientX;
clientY = e.nativeEvent.clientY;
}
const [overlaySize] = useResizeObserver(clickOverlayRef);
if (clickOverlayRef.current && clientX && clientY) {
const rect = clickOverlayRef.current.getBoundingClientRect();
const normalizedX = (clientX - rect.left) / rect.width;
const normalizedY = (clientY - rect.top) / rect.height;
const pan = (normalizedX - 0.5) * 2;
const tilt = (0.5 - normalizedY) * 2;
sendPtz(`move_relative_${pan}_${tilt}`);
const onPtzStageDown = useCallback(
(e: KonvaEventObject<MouseEvent> | KonvaEventObject<TouchEvent>) => {
const pos = e.target.getStage()?.getPointerPosition();
if (pos) {
setIsPtzDrawing(true);
ptzOriginRef.current = { x: pos.x, y: pos.y };
setPtzRect({ x: pos.x, y: pos.y, width: 0, height: 0 });
}
},
[clickOverlayRef, clickOverlay, sendPtz],
[],
);
const onPtzStageMove = useCallback(
(e: KonvaEventObject<MouseEvent> | KonvaEventObject<TouchEvent>) => {
if (!isPtzDrawing || !ptzRect) return;
const pos = e.target.getStage()?.getPointerPosition();
if (pos) {
setPtzRect({
...ptzRect,
width: pos.x - ptzRect.x,
height: pos.y - ptzRect.y,
});
}
},
[isPtzDrawing, ptzRect],
);
const onPtzStageUp = useCallback(() => {
setIsPtzDrawing(false);
if (!ptzRect || !ptzOriginRef.current || overlaySize.width === 0) {
setPtzRect(null);
ptzOriginRef.current = null;
return;
}
const endX = ptzRect.x + ptzRect.width;
const endY = ptzRect.y + ptzRect.height;
const distX = Math.abs(ptzRect.width);
const distY = Math.abs(ptzRect.height);
if (distX < DRAG_MIN_PX && distY < DRAG_MIN_PX) {
// click — pan/tilt to point without zoom
const normX = endX / overlaySize.width;
const normY = endY / overlaySize.height;
const pan = (normX - 0.5) * 2;
const tilt = (0.5 - normY) * 2;
sendPtz(`move_relative_${pan}_${tilt}`);
} else {
// drag — pan/tilt to box center, zoom based on box size
const origin = ptzOriginRef.current;
const n0x = Math.min(origin.x, endX) / overlaySize.width;
const n0y = Math.min(origin.y, endY) / overlaySize.height;
const n1x = Math.max(origin.x, endX) / overlaySize.width;
const n1y = Math.max(origin.y, endY) / overlaySize.height;
let boxW = n1x - n0x;
let boxH = n1y - n0y;
// correct box to match camera aspect ratio so zoom is uniform
const frameAR = overlaySize.width / overlaySize.height;
const boxAR = boxW / boxH;
if (boxAR > frameAR) {
boxH = boxW / frameAR;
} else {
boxW = boxH * frameAR;
}
const centerX = (n0x + n1x) / 2;
const centerY = (n0y + n1y) / 2;
const pan = (centerX - 0.5) * 2;
const tilt = (0.5 - centerY) * 2;
// zoom magnitude from box size (small box = more zoom)
let zoom = Math.max(0.01, Math.min(1, Math.max(boxW, boxH)));
// drag direction: top-left → bottom-right = zoom in, reverse = zoom out
const zoomIn = endX > origin.x && endY > origin.y;
if (!zoomIn) zoom = -zoom;
sendPtz(`move_relative_${pan}_${tilt}_${zoom}`);
}
setPtzRect(null);
ptzOriginRef.current = null;
}, [ptzRect, overlaySize, sendPtz]);
// pip state
useEffect(() => {
@ -440,7 +512,8 @@ export default function LiveCameraView({
<TransformWrapper
minScale={1.0}
wheel={{ smoothStep: 0.005 }}
disabled={debug}
disabled={debug || clickOverlay}
panning={{ disabled: clickOverlay }}
>
<Toaster position="top-center" closeButton={true} />
<div
@ -634,13 +707,41 @@ export default function LiveCameraView({
}}
>
<div
className={`flex flex-col items-center justify-center ${growClassName}`}
className={`relative flex flex-col items-center justify-center ${growClassName}`}
ref={clickOverlayRef}
onClick={handleOverlayClick}
style={{
aspectRatio: constrainedAspectRatio,
}}
>
{clickOverlay && overlaySize.width > 0 && (
<div className="absolute inset-0 z-40 cursor-crosshair">
<Stage
width={overlaySize.width}
height={overlaySize.height}
onMouseDown={onPtzStageDown}
onMouseMove={onPtzStageMove}
onMouseUp={onPtzStageUp}
onTouchStart={onPtzStageDown}
onTouchMove={onPtzStageMove}
onTouchEnd={onPtzStageUp}
>
<Layer>
{ptzRect && (
<Rect
x={ptzRect.x}
y={ptzRect.y}
width={ptzRect.width}
height={ptzRect.height}
stroke="white"
strokeWidth={2}
dash={[6, 4]}
opacity={0.8}
/>
)}
</Layer>
</Stage>
</div>
)}
<LivePlayer
key={camera.name}
className={`${fullscreen ? "*:rounded-none" : ""}`}