Mirror of https://github.com/blakeblackshear/frigate.git, synced 2026-02-02 09:15:22 +03:00

Commit 86af2a5615 (parent 6ad981358e): return to Python 3.8
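Context for the change: Python 3.8 added shared memory support to the standard library as multiprocessing.shared_memory, so this commit drops the standalone `import shared_memory` module in favor of the stdlib one, which is presumably why the message reads "return to Python 3.8". (The file paths heading each hunk group below are inferred from the classes and functions named in the hunks.) A minimal sketch of the stdlib API the diff switches to; the block name and size here are illustrative, not from the commit:

    from multiprocessing import shared_memory

    # Create a named block of shared memory (Python 3.8+).
    shm = shared_memory.SharedMemory(name="demo-block", create=True, size=1024)
    shm.buf[:5] = b"hello"

    # Any other process can attach by name, without copying the data.
    other = shared_memory.SharedMemory(name="demo-block", create=False)
    print(bytes(other.buf[:5]))  # b'hello'

    other.close()
    shm.close()
    shm.unlink()  # release the block once all users have closed it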
frigate/app.py

@@ -1,7 +1,6 @@
 import json
 import logging
 import multiprocessing as mp
-import shared_memory
 import os
 import signal
 import sys
@@ -42,7 +41,7 @@ class FrigateApp:
         self.detection_queue = mp.Queue()
         self.detectors: Dict[str, EdgeTPUProcess] = {}
         self.detection_out_events: Dict[str, mp.Event] = {}
-        self.detection_shms: List[shared_memory.SharedMemory] = []
+        self.detection_shms: List[mp.shared_memory.SharedMemory] = []
         self.log_queue = mp.Queue()
         self.camera_metrics = {}
 
@@ -164,20 +163,20 @@ class FrigateApp:
             self.detection_out_events[name] = mp.Event()
 
             try:
-                shm_in = shared_memory.SharedMemory(
+                shm_in = mp.shared_memory.SharedMemory(
                     name=name,
                     create=True,
                     size=self.config.model.height * self.config.model.width * 3,
                 )
             except FileExistsError:
-                shm_in = shared_memory.SharedMemory(name=name)
+                shm_in = mp.shared_memory.SharedMemory(name=name)
 
             try:
-                shm_out = shared_memory.SharedMemory(
+                shm_out = mp.shared_memory.SharedMemory(
                     name=f"out-{name}", create=True, size=20 * 6 * 4
                 )
             except FileExistsError:
-                shm_out = shared_memory.SharedMemory(name=f"out-{name}")
+                shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")
 
             self.detection_shms.append(shm_in)
             self.detection_shms.append(shm_out)
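The app.py hunks above rely on a create-or-attach idiom: try to create the named block, and attach instead if a previous (possibly crashed) run left one behind. A condensed sketch of that pattern; the camera name is illustrative, and the sizes mirror the diff (height x width x 3 bytes for a uint8 BGR input frame, 20 detections x 6 float32 values for the output):

    from multiprocessing import shared_memory

    def create_or_attach(name: str, size: int) -> shared_memory.SharedMemory:
        try:
            # First run: create the block.
            return shared_memory.SharedMemory(name=name, create=True, size=size)
        except FileExistsError:
            # Block already exists (e.g. leftover from a crash): attach to it.
            return shared_memory.SharedMemory(name=name)

    shm_in = create_or_attach("cam1", 320 * 320 * 3)    # input frame buffer
    shm_out = create_or_attach("out-cam1", 20 * 6 * 4)  # detection results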
frigate/config.py

@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import json
 import logging
 import os
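A note on the added import: `from __future__ import annotations` (PEP 563) makes the interpreter store annotations as strings rather than evaluating them at definition time. The rest of the diff keeps `typing.List`-style annotations, so the import looks defensive here, but it is what allows newer annotation syntax to load on 3.8. An illustrative example, not from this commit:

    from __future__ import annotations

    # Without the future import, the `list[float]` annotation raises
    # TypeError on Python 3.8 at definition time; with it, the annotation
    # stays an unevaluated string and the function works fine.
    def scale(values: list[float]) -> list[float]:
        return [v * 2 for v in values]

    print(scale([1.0, 2.0]))  # [2.0, 4.0]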
@@ -20,8 +22,7 @@ DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
 # German Style:
 # DEFAULT_TIME_FORMAT = "%d.%m.%Y %H:%M:%S"
 
-FRIGATE_ENV_VARS = {k: v for k,
-                    v in os.environ.items() if k.startswith("FRIGATE_")}
+FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
 
 DEFAULT_TRACKED_OBJECTS = ["person"]
 DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
@@ -38,8 +39,7 @@ class DetectorTypeEnum(str, Enum):
 
 
 class DetectorConfig(FrigateBaseModel):
-    type: DetectorTypeEnum = Field(
-        default=DetectorTypeEnum.cpu, title="Detector Type")
+    type: DetectorTypeEnum = Field(default=DetectorTypeEnum.cpu, title="Detector Type")
     device: str = Field(default="usb", title="Device Type")
     num_threads: int = Field(default=3, title="Number of detection threads")
 
@@ -82,10 +82,8 @@ class RetainConfig(FrigateBaseModel):
 
 class EventsConfig(FrigateBaseModel):
     max_seconds: int = Field(default=300, title="Maximum event duration.")
-    pre_capture: int = Field(
-        default=5, title="Seconds to retain before event starts.")
-    post_capture: int = Field(
-        default=5, title="Seconds to retain after event ends.")
+    pre_capture: int = Field(default=5, title="Seconds to retain before event starts.")
+    post_capture: int = Field(default=5, title="Seconds to retain after event ends.")
     required_zones: List[str] = Field(
         default_factory=list,
         title="List of required zones to be entered in order to save the event.",
@@ -163,10 +161,8 @@ class RuntimeMotionConfig(MotionConfig):
 
 
 class DetectConfig(FrigateBaseModel):
-    height: int = Field(
-        default=720, title="Height of the stream for the detect role.")
-    width: int = Field(
-        default=1280, title="Width of the stream for the detect role.")
+    height: int = Field(default=720, title="Height of the stream for the detect role.")
+    width: int = Field(default=1280, title="Width of the stream for the detect role.")
     fps: int = Field(
         default=5, title="Number of frames per second to process through detection."
     )
@@ -208,8 +204,7 @@ class RuntimeFilterConfig(FilterConfig):
         config["raw_mask"] = mask
 
         if mask is not None:
-            config["mask"] = create_mask(
-                config.get("frame_shape", (1, 1)), mask)
+            config["mask"] = create_mask(config.get("frame_shape", (1, 1)), mask)
 
         super().__init__(**config)
 
@@ -256,22 +251,19 @@ class ZoneConfig(BaseModel):
 
         if isinstance(coordinates, list):
             self._contour = np.array(
-                [[int(p.split(",")[0]), int(p.split(",")[1])]
-                 for p in coordinates]
+                [[int(p.split(",")[0]), int(p.split(",")[1])] for p in coordinates]
             )
         elif isinstance(coordinates, str):
             points = coordinates.split(",")
             self._contour = np.array(
-                [[int(points[i]), int(points[i + 1])]
-                 for i in range(0, len(points), 2)]
+                [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
             )
         else:
             self._contour = np.array([])
 
 
 class ObjectConfig(FrigateBaseModel):
-    track: List[str] = Field(
-        default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
+    track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
     filters: Optional[Dict[str, FilterConfig]] = Field(title="Object filters.")
     mask: Union[str, List[str]] = Field(default="", title="Object mask.")
 
@@ -362,15 +354,25 @@ class FfmpegConfig(FrigateBaseModel):
 
 class GstreamerConfig(FrigateBaseModel):
     manual_pipeline: List[str] = Field(
-        default=[], title="GStreamer manual pipeline. Use `manual_pipeline` to fine tune gstreamer. Each item will be splited by the `!`.")
+        default=[],
+        title="GStreamer manual pipeline. Use `manual_pipeline` to fine tune gstreamer. Each item will be splited by the `!`.",
+    )
     input_pipeline: List[str] = Field(
-        default=[], title="Override the `rtspsrc location={{gstreamer_input.path}} latency=0` default pipeline item.")
+        default=[],
+        title="Override the `rtspsrc location={{gstreamer_input.path}} latency=0` default pipeline item.",
+    )
     decoder_pipeline: List[str] = Field(
-        default=[], title="Set the hardware specific decoder. Example: ['rtph265depay', 'h265parse', 'omxh265dec']")
+        default=[],
+        title="Set the hardware specific decoder. Example: ['rtph265depay', 'h265parse', 'omxh265dec']",
+    )
     source_format_pipeline: List[str] = Field(
-        default=[], title="Set the camera source format. Default is: ['video/x-raw,format=(string)NV12', 'videoconvert', 'videoscale']")
+        default=[],
+        title="Set the camera source format. Default is: ['video/x-raw,format=(string)NV12', 'videoconvert', 'videoscale']",
+    )
     destination_format_pipeline: List[str] = Field(
-        default=[], title="Set the Frigate format. Please keep `format=I420` if override. Default is: ['video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420', 'videoconvert']")
+        default=[],
+        title="Set the Frigate format. Please keep `format=I420` if override. Default is: ['video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420', 'videoconvert']",
+    )
 
 
 class CameraRoleEnum(str, Enum):
@@ -398,7 +400,8 @@ class CameraFFmpegInput(CameraInput):
 
 class CameraGStreamerInput(CameraInput):
     pipeline: List[str] = Field(
-        default=[], title="GStreamer pipeline. Each pipeline will be splited by ! sign")
+        default=[], title="GStreamer pipeline. Each pipeline will be splited by ! sign"
+    )
 
 
 class CameraInputValidator:
@@ -421,8 +424,7 @@ class CameraFfmpegConfig(FfmpegConfig, CameraInputValidator):
 
 
 class CameraGStreamerConfig(GstreamerConfig, CameraInputValidator):
-    inputs: List[CameraGStreamerInput] = Field(
-        title="Camera GStreamer inputs.")
+    inputs: List[CameraGStreamerInput] = Field(title="Camera GStreamer inputs.")
 
 
 class SnapshotsConfig(FrigateBaseModel):
@@ -436,8 +438,7 @@ class SnapshotsConfig(FrigateBaseModel):
     bounding_box: bool = Field(
         default=True, title="Add a bounding box overlay on the snapshot."
     )
-    crop: bool = Field(
-        default=False, title="Crop the snapshot to the detected object.")
+    crop: bool = Field(default=False, title="Crop the snapshot to the detected object.")
     required_zones: List[str] = Field(
         default_factory=list,
         title="List of required zones to be entered in order to save a snapshot.",
@@ -477,8 +478,7 @@ class TimestampStyleConfig(FrigateBaseModel):
         default=TimestampPositionEnum.tl, title="Timestamp position."
     )
     format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.")
-    color: ColorConfig = Field(
-        default_factory=ColorConfig, title="Timestamp color.")
+    color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.")
     thickness: int = Field(default=2, title="Timestamp thickness.")
     effect: Optional[TimestampEffectEnum] = Field(title="Timestamp effect.")
 
@@ -486,10 +486,8 @@ class TimestampStyleConfig(FrigateBaseModel):
 class CameraMqttConfig(FrigateBaseModel):
     enabled: bool = Field(default=True, title="Send image over MQTT.")
     timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.")
-    bounding_box: bool = Field(
-        default=True, title="Add bounding box to MQTT image.")
-    crop: bool = Field(
-        default=True, title="Crop MQTT image to detected object.")
+    bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.")
+    crop: bool = Field(default=True, title="Crop MQTT image to detected object.")
     height: int = Field(default=270, title="MQTT image height.")
     required_zones: List[str] = Field(
         default_factory=list,
@@ -509,16 +507,17 @@ class RtmpConfig(FrigateBaseModel):
 
 class CameraLiveConfig(FrigateBaseModel):
     height: int = Field(default=720, title="Live camera view height")
-    quality: int = Field(default=8, ge=1, le=31,
-                         title="Live camera view quality")
+    quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality")
 
 
 class CameraConfig(FrigateBaseModel):
     name: Optional[str] = Field(title="Camera name.")
     ffmpeg: Optional[CameraFfmpegConfig] = Field(
-        title="FFmpeg configuration for the camera.")
+        title="FFmpeg configuration for the camera."
+    )
     gstreamer: Optional[CameraGStreamerConfig] = Field(
-        title="GStreamer configuration for the camera.")
+        title="GStreamer configuration for the camera."
+    )
     best_image_timeout: int = Field(
         default=60,
         title="How long to wait for the image with the highest confidence score.",
@@ -544,8 +543,7 @@ class CameraConfig(FrigateBaseModel):
     objects: ObjectConfig = Field(
         default_factory=ObjectConfig, title="Object configuration."
    )
-    motion: Optional[MotionConfig] = Field(
-        title="Motion detection configuration.")
+    motion: Optional[MotionConfig] = Field(title="Motion detection configuration.")
     detect: DetectConfig = Field(
         default_factory=DetectConfig, title="Object detection configuration."
     )
@@ -559,16 +557,14 @@ class CameraConfig(FrigateBaseModel):
         if "zones" in config:
             colors = plt.cm.get_cmap("tab10", len(config["zones"]))
             config["zones"] = {
-                name: {**z, "color": tuple(round(255 * c)
-                                           for c in colors(idx)[:3])}
+                name: {**z, "color": tuple(round(255 * c) for c in colors(idx)[:3])}
                 for idx, (name, z) in enumerate(config["zones"].items())
             }
 
         # add roles to the input if there is only one
         if "ffmpeg" in config:
             if len(config["ffmpeg"]["inputs"]) == 1:
-                config["ffmpeg"]["inputs"][0]["roles"] = [
-                    "record", "rtmp", "detect"]
+                config["ffmpeg"]["inputs"][0]["roles"] = ["record", "rtmp", "detect"]
 
         super().__init__(**config)
 
@@ -593,7 +589,8 @@ class CameraConfig(FrigateBaseModel):
                 continue
 
             decoder_cmds.append(
-                {"roles": gstreamer_input.roles, "cmd": decoder_cmd})
+                {"roles": gstreamer_input.roles, "cmd": decoder_cmd}
+            )
 
         return decoder_cmds
 
@@ -611,63 +608,75 @@ class CameraConfig(FrigateBaseModel):
 
 
     def _get_gstreamer_cmd(self, gstreamer_input: CameraGStreamerInput):
-        assert list(
-            ["detect"]) == gstreamer_input.roles, "only detect role is supported"
+        assert (
+            list(["detect"]) == gstreamer_input.roles
+        ), "only detect role is supported"
         manual_pipeline = [
-            part for part in self.gstreamer.manual_pipeline if part != ""]
-        input_pipeline = [
-            part for part in self.gstreamer.input_pipeline if part != ""]
+            part for part in self.gstreamer.manual_pipeline if part != ""
+        ]
+        input_pipeline = [part for part in self.gstreamer.input_pipeline if part != ""]
         decoder_pipeline = [
-            part for part in self.gstreamer.decoder_pipeline if part != ""]
+            part for part in self.gstreamer.decoder_pipeline if part != ""
+        ]
         source_format_pipeline = [
-            part for part in self.gstreamer.source_format_pipeline if part != ""]
+            part for part in self.gstreamer.source_format_pipeline if part != ""
+        ]
         destination_format_pipeline = [
-            part for part in self.gstreamer.destination_format_pipeline if part != ""]
+            part for part in self.gstreamer.destination_format_pipeline if part != ""
+        ]
 
         video_format = f"video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420"
-        if not manual_pipeline and not input_pipeline and not decoder_pipeline and not source_format_pipeline and not destination_format_pipeline:
+        if (
+            not manual_pipeline
+            and not input_pipeline
+            and not decoder_pipeline
+            and not source_format_pipeline
+            and not destination_format_pipeline
+        ):
             logger.warn(
-                "gsreamer pipeline not configured. Using videotestsrc pattern=0")
+                "gsreamer pipeline not configured. Using videotestsrc pattern=0"
+            )
             pipeline = [
                 "videotestsrc pattern=0",
                 video_format,
             ]
         elif len(manual_pipeline) > 0:
             logger.warn(
-                "gsreamer manual pipeline is set. Please make sure your detect width and height does math the gstreamer parameters")
+                "gsreamer manual pipeline is set. Please make sure your detect width and height does math the gstreamer parameters"
+            )
             pipeline = manual_pipeline
         else:
-            input_pipeline = input_pipeline if input_pipeline else [
-                f"rtspsrc location=\"{gstreamer_input.path}\" latency=0"
-            ]
+            input_pipeline = (
+                input_pipeline
+                if input_pipeline
+                else [f'rtspsrc location="{gstreamer_input.path}" latency=0']
+            )
 
-            decoder_pipeline = decoder_pipeline if decoder_pipeline else [
-                "rtph265depay", "h265parse", "omxh265dec"
-            ]
-            source_format_pipeline = source_format_pipeline if source_format_pipeline else [
-                'video/x-raw,format=(string)NV12', 'videoconvert', 'videoscale'
-            ]
-            destination_format_pipeline = destination_format_pipeline if destination_format_pipeline else [
-                video_format, "videoconvert"
-            ]
+            decoder_pipeline = (
+                decoder_pipeline
+                if decoder_pipeline
+                else ["rtph265depay", "h265parse", "omxh265dec"]
+            )
+            source_format_pipeline = (
+                source_format_pipeline
+                if source_format_pipeline
+                else ["video/x-raw,format=(string)NV12", "videoconvert", "videoscale"]
+            )
+            destination_format_pipeline = (
+                destination_format_pipeline
+                if destination_format_pipeline
+                else [video_format, "videoconvert"]
+            )
             pipeline = [
                 *input_pipeline,
                 *decoder_pipeline,
                 *source_format_pipeline,
-                *destination_format_pipeline
+                *destination_format_pipeline,
            ]
-        pipeline_args = (
-            [f"{item} !".split(" ") for item in pipeline]
-        )
+        pipeline_args = [f"{item} !".split(" ") for item in pipeline]
         pipeline_args = [item for sublist in pipeline_args for item in sublist]
-        pipeline_args = [
-            "gst-launch-1.0",
-            "-q",
-            *pipeline_args,
-            "fdsink"
-        ]
-        logger.debug(
-            f"using gstreamer pipeline: {' '.join(pipeline_args)}")
+        pipeline_args = ["gst-launch-1.0", "-q", *pipeline_args, "fdsink"]
+        logger.debug(f"using gstreamer pipeline: {' '.join(pipeline_args)}")
 
         return pipeline_args
 
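For reference, the argument assembly at the end of _get_gstreamer_cmd works like this: every pipeline element gets a trailing "!", each element is split into words, the word lists are flattened, and "fdsink" closes the chain (absorbing the final "!"). A standalone sketch using the diff's default elements and an illustrative RTSP URL:

    pipeline = [
        'rtspsrc location="rtsp://example/stream" latency=0',
        "rtph265depay",
        "h265parse",
        "omxh265dec",
        "video/x-raw,format=(string)NV12",
        "videoconvert",
        "videoscale",
        "video/x-raw,width=(int)1280,height=(int)720,format=(string)I420",
        "videoconvert",
    ]
    # Same flattening as the diff: append "!" to each item, split, flatten.
    args = [word for item in pipeline for word in f"{item} !".split(" ")]
    args = ["gst-launch-1.0", "-q", *args, "fdsink"]
    print(" ".join(args))
    # gst-launch-1.0 -q rtspsrc location="rtsp://example/stream" latency=0 !
    # rtph265depay ! h265parse ! omxh265dec ! ... ! videoconvert ! fdsink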
@@ -697,8 +706,7 @@ class CameraConfig(FrigateBaseModel):
                 else self.ffmpeg.output_args.rtmp.split(" ")
             )
             ffmpeg_output_args = (
-                rtmp_args +
-                [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
+                rtmp_args + [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
             )
         if "record" in ffmpeg_input.roles and self.record.enabled:
             record_args = (
@@ -722,16 +730,13 @@ class CameraConfig(FrigateBaseModel):
         input_args = ffmpeg_input.input_args or self.ffmpeg.input_args
 
         global_args = (
-            global_args if isinstance(
-                global_args, list) else global_args.split(" ")
+            global_args if isinstance(global_args, list) else global_args.split(" ")
         )
         hwaccel_args = (
-            hwaccel_args if isinstance(
-                hwaccel_args, list) else hwaccel_args.split(" ")
+            hwaccel_args if isinstance(hwaccel_args, list) else hwaccel_args.split(" ")
         )
         input_args = (
-            input_args if isinstance(
-                input_args, list) else input_args.split(" ")
+            input_args if isinstance(input_args, list) else input_args.split(" ")
         )
 
         cmd = (
@@ -748,7 +753,7 @@ class CameraConfig(FrigateBaseModel):
     @root_validator
     def either_ffmpeg_or_gstreamer(cls, v):
         if ("ffmpeg" not in v) and ("gstreamer" not in v):
-            raise ValueError('either ffmpeg or gstreamer should be set')
+            raise ValueError("either ffmpeg or gstreamer should be set")
         return v
 
 
@@ -760,12 +765,9 @@ class DatabaseConfig(FrigateBaseModel):
 
 class ModelConfig(FrigateBaseModel):
     path: Optional[str] = Field(title="Custom Object detection model path.")
-    labelmap_path: Optional[str] = Field(
-        title="Label map for custom object detector.")
-    width: int = Field(
-        default=320, title="Object detection model input width.")
-    height: int = Field(
-        default=320, title="Object detection model input height.")
+    labelmap_path: Optional[str] = Field(title="Label map for custom object detector.")
+    width: int = Field(default=320, title="Object detection model input width.")
+    height: int = Field(default=320, title="Object detection model input height.")
     labelmap: Dict[int, str] = Field(
         default_factory=dict, title="Labelmap customization."
     )
@@ -792,8 +794,7 @@ class ModelConfig(FrigateBaseModel):
 
         self._colormap = {}
         for key, val in self._merged_labelmap.items():
-            self._colormap[val] = tuple(int(round(255 * c))
-                                        for c in cmap(key)[:3])
+            self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
 
 
 class LogLevelEnum(str, Enum):
@@ -825,8 +826,7 @@ class FrigateConfig(FrigateBaseModel):
         default_factory=ModelConfig, title="Detection model configuration."
     )
     detectors: Dict[str, DetectorConfig] = Field(
-        default={name: DetectorConfig(**d)
-                 for name, d in DEFAULT_DETECTORS.items()},
+        default={name: DetectorConfig(**d) for name, d in DEFAULT_DETECTORS.items()},
         title="Detector hardware configuration.",
     )
     logger: LoggerConfig = Field(
@@ -872,8 +872,7 @@ class FrigateConfig(FrigateBaseModel):
 
         # MQTT password substitution
         if config.mqtt.password:
-            config.mqtt.password = config.mqtt.password.format(
-                **FRIGATE_ENV_VARS)
+            config.mqtt.password = config.mqtt.password.format(**FRIGATE_ENV_VARS)
 
         # Global config to propegate down to camera level
         global_config = config.dict(
@@ -892,8 +891,7 @@ class FrigateConfig(FrigateBaseModel):
         )
 
         for name, camera in config.cameras.items():
-            merged_config = deep_merge(camera.dict(
-                exclude_unset=True), global_config)
+            merged_config = deep_merge(camera.dict(exclude_unset=True), global_config)
             camera_config: CameraConfig = CameraConfig.parse_obj(
                 {"name": name, **merged_config}
             )
@@ -957,7 +955,11 @@ class FrigateConfig(FrigateBaseModel):
             )
 
             # check runtime config
-            decoder_config = camera_config.ffmpeg if "ffmpeg" in camera_config else camera_config.gstreamer
+            decoder_config = (
+                camera_config.ffmpeg
+                if "ffmpeg" in camera_config
+                else camera_config.gstreamer
+            )
             assigned_roles = list(
                 set([r for i in decoder_config.inputs for r in i.roles])
             )
frigate/edgetpu.py

@@ -7,10 +7,8 @@ import signal
 import threading
 from abc import ABC, abstractmethod
 from typing import Dict
-import shared_memory
 
 import numpy as np
-# TensorRT https://github.com/NobuoTsukamoto/tensorrt-examples/blob/main/python/detection/README.md
 import tflite_runtime.interpreter as tflite
 from setproctitle import setproctitle
 from tflite_runtime.interpreter import load_delegate
@@ -141,7 +139,7 @@ def run_detector(
 
     outputs = {}
     for name in out_events.keys():
-        out_shm = shared_memory.SharedMemory(name=f"out-{name}", create=False)
+        out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False)
         out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
         outputs[name] = {"shm": out_shm, "np": out_np}
 
@@ -230,11 +228,11 @@ class RemoteObjectDetector:
         self.fps = EventsPerSecond()
         self.detection_queue = detection_queue
         self.event = event
-        self.shm = shared_memory.SharedMemory(name=self.name, create=False)
+        self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
         self.np_shm = np.ndarray(
             (1, model_shape[0], model_shape[1], 3), dtype=np.uint8, buffer=self.shm.buf
         )
-        self.out_shm = shared_memory.SharedMemory(
+        self.out_shm = mp.shared_memory.SharedMemory(
             name=f"out-{self.name}", create=False
         )
         self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)
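The np.ndarray(..., buffer=shm.buf) calls above are the zero-copy half of the shared-memory scheme: the array is a view over the shared block, so a write on the producer side is immediately visible to any process attached to the same name. A minimal sketch; the block name is illustrative, and the shape follows the diff's 20x6 float32 output layout:

    import numpy as np
    from multiprocessing import shared_memory

    shm = shared_memory.SharedMemory(name="out-cam1", create=True, size=20 * 6 * 4)

    # No copy is made: the array's storage *is* the shared block.
    detections = np.ndarray((20, 6), dtype=np.float32, buffer=shm.buf)
    detections[0] = [0, 0.9, 10, 20, 110, 220]  # e.g. label, score, box

    # A reader that attaches by name and wraps the same buffer sees this row.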
frigate/http.py

@@ -610,8 +610,7 @@ def recording_clip(camera, start_ts, end_ts):
         ffmpeg_cmd,
         input="\n".join(playlist_lines),
         encoding="ascii",
-        stdout=sp.PIPE,
-        stderr=sp.PIPE,
+        capture_output=True,
     )
     if p.returncode != 0:
         logger.error(p.stderr)
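The capture_output swap above (and the text=True one in frigate/record.py further down) is shorthand rather than a behavior change: since Python 3.7, subprocess.run(capture_output=True) is equivalent to passing stdout=PIPE and stderr=PIPE, and text=True is the newer alias for universal_newlines=True. For example:

    import subprocess as sp

    # Both calls collect output the same way; the diff adopts the second form.
    old = sp.run(["echo", "hi"], stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True)
    new = sp.run(["echo", "hi"], capture_output=True, text=True)
    assert old.stdout == new.stdout == "hi\n"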
frigate/output.py

@@ -7,6 +7,7 @@ import queue
 import signal
 import subprocess as sp
 import threading
+from multiprocessing import shared_memory
 from wsgiref.simple_server import make_server
 from frigate.log import LogPipe
 
@@ -32,27 +33,20 @@ class FFMpegConverter:
         ffmpeg_cmd = f"ffmpeg -f rawvideo -pix_fmt yuv420p -video_size {in_width}x{in_height} -i pipe: -f mpegts -s {out_width}x{out_height} -codec:v mpeg1video -q {quality} -bf 0 pipe:".split(
             " "
         )
-        # ffmpeg_cmd = f"gst-launch-1.0 rtspsrc location=\"rtsp://admin:123456@192.168.5.95:554/stream0\" ! rtph265depay ! h265parse ! omxh265dec ! 'video/x-raw,format=(string)NV12' ! videoconvert ! 'video/x-raw, width={in_width}, height={in_height}, format=I420, framerate=(fraction)10/1' ! omxh264enc bitrate=500000 temporal-tradeoff=2 iframeinterval=10 ! h264parse ! mpegtsmux ! fdsink"
-        # # .split(
-        # #     " "
-        # # )
 
         self.logpipe = LogPipe(
             "ffmpeg.converter", logging.ERROR)
         self.process = sp.Popen(
             ffmpeg_cmd,
             stdout=sp.PIPE,
             stderr=self.logpipe,
             stdin=sp.PIPE,
             start_new_session=True,
-            # shell=True
         )
 
     def write(self, b):
         try:
             self.process.stdin.write(b)
         except Exception:
-            logger.error("Failure while writing to the stream:")
             self.logpipe.dump()
             return False
 
@@ -60,7 +54,6 @@ class FFMpegConverter:
         try:
             return self.process.stdout.read1(length)
         except ValueError:
-            logger.error("Failure while readig from the stream:")
             self.logpipe.dump()
             return False
 
@@ -423,13 +416,8 @@ def output_frames(config: FrigateConfig, video_output_queue):
         if any(
             ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
         ):
-            try:
-                # write to the converter for the camera if clients are listening to the specific camera
-                converters[camera].write(frame.tobytes())
-            except Exception:
-                # in case of videoconverter failure continure processing video_output_queue
-                # FFMpegConverter should dump an error response
-                pass
+            # write to the converter for the camera if clients are listening to the specific camera
+            converters[camera].write(frame.tobytes())
 
         # update birdseye if websockets are connected
         if config.birdseye.enabled and any(
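For context, the FFMpegConverter touched above is a pipe-through transcoder: raw yuv420p frames go into ffmpeg's stdin and MPEG-TS for the websocket clients comes back on stdout. A self-contained sketch with assumed dimensions and quality (640x480, -q 8):

    import subprocess as sp

    ffmpeg_cmd = (
        "ffmpeg -f rawvideo -pix_fmt yuv420p -video_size 640x480 -i pipe: "
        "-f mpegts -s 640x480 -codec:v mpeg1video -q 8 -bf 0 pipe:"
    ).split(" ")

    proc = sp.Popen(ffmpeg_cmd, stdin=sp.PIPE, stdout=sp.PIPE, start_new_session=True)

    frame = bytes(640 * 480 * 3 // 2)  # one blank yuv420p frame (1.5 bytes/pixel)
    proc.stdin.write(frame)
    chunk = proc.stdout.read1(65536)   # may block until ffmpeg emits data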
frigate/record.py

@@ -502,9 +502,8 @@ class RecordingCleanup(threading.Thread):
         logger.debug(f"Oldest recording in the db: {oldest_timestamp}")
         process = sp.run(
             ["find", RECORD_DIR, "-type", "f", "!", "-newermt", f"@{oldest_timestamp}"],
-            stdout=sp.PIPE,
-            stderr=sp.PIPE,
-            universal_newlines=True,
+            capture_output=True,
+            text=True,
         )
         files_to_check = process.stdout.splitlines()
 
frigate/video.py

@@ -11,8 +11,7 @@ import threading
 import time
 import traceback
 from abc import ABC, abstractmethod
-#from multiprocessing import shared_memory
-import shared_memory
+from multiprocessing import shared_memory
 from typing import AnyStr
 
 import cv2
@@ -187,8 +187,7 @@ class CameraWatchdog(threading.Thread):
         self.config = config
         self.capture_thread = None
         self.ffmpeg_detect_process = None
-        self.logpipe = LogPipe(
-            f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
+        self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
         self.ffmpeg_other_processes = []
         self.camera_fps = camera_fps
         self.ffmpeg_pid = ffmpeg_pid
@@ -226,8 +225,7 @@ class CameraWatchdog(threading.Thread):
                 self.logger.error(
                     "The following ffmpeg logs include the last 100 lines prior to exit."
                 )
-                self.logger.error(
-                    "You may have invalid args defined for this camera.")
+                self.logger.error("You may have invalid args defined for this camera.")
                 self.logpipe.dump()
                 self.start_ffmpeg_detect()
             elif now - self.capture_thread.current_frame.value > 20:
@@ -236,8 +234,7 @@ class CameraWatchdog(threading.Thread):
                 )
                 self.ffmpeg_detect_process.terminate()
                 try:
-                    self.logger.info(
-                        "Waiting for ffmpeg to exit gracefully...")
+                    self.logger.info("Waiting for ffmpeg to exit gracefully...")
                     self.ffmpeg_detect_process.communicate(timeout=30)
                 except sp.TimeoutExpired:
                     self.logger.info("FFmpeg didnt exit. Force killing...")
@@ -485,13 +482,11 @@ def process_frames(
         current_frame_time.value = frame_time
 
         frame = frame_manager.get(
-            f"{camera_name}{frame_time}", (
-                frame_shape[0] * 3 // 2, frame_shape[1])
+            f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
         )
 
         if frame is None:
-            logger.info(
-                f"{camera_name}: frame {frame_time} is not in memory store.")
+            logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
             continue
 
         if not detection_enabled.value:
frigate/watchdog.py

@@ -27,7 +27,7 @@ class FrigateWatchdog(threading.Thread):
             # check the detection processes
             for detector in self.detectors.values():
                 detection_start = detector.detection_start.value
-                if detection_start > 0.0 and now - detection_start > 10:
+                if detection_start > 0.0 and now - detection_start > 30:
                     logger.info(
                         "Detection appears to be stuck. Restarting detection process..."
                     )
frigate/util.py

@@ -44,7 +44,7 @@ def get_frame_shape(source):
         "json",
         source,
     ]
-    p = sp.run(ffprobe_cmd, stdout=sp.PIPE, stderr=sp.PIPE)
+    p = sp.run(ffprobe_cmd, capture_output=True)
     info = json.loads(p.stdout)
 
     video_info = [s for s in info["streams"] if s["codec_type"] == "video"][0]