return to Python 3.8

YS 2021-12-26 19:27:43 +03:00
parent 0f0b217a56
commit 4bd1884108
10 changed files with 135 additions and 159 deletions

View File

@@ -1,7 +1,6 @@
 import json
 import logging
 import multiprocessing as mp
-import shared_memory
 import os
 import signal
 import sys
@@ -42,7 +41,7 @@ class FrigateApp:
         self.detection_queue = mp.Queue()
         self.detectors: Dict[str, EdgeTPUProcess] = {}
         self.detection_out_events: Dict[str, mp.Event] = {}
-        self.detection_shms: List[shared_memory.SharedMemory] = []
+        self.detection_shms: List[mp.shared_memory.SharedMemory] = []
         self.log_queue = mp.Queue()
         self.camera_metrics = {}
@@ -155,20 +154,20 @@ class FrigateApp:
             self.detection_out_events[name] = mp.Event()

             try:
-                shm_in = shared_memory.SharedMemory(
+                shm_in = mp.shared_memory.SharedMemory(
                     name=name,
                     create=True,
                     size=self.config.model.height * self.config.model.width * 3,
                 )
             except FileExistsError:
-                shm_in = shared_memory.SharedMemory(name=name)
+                shm_in = mp.shared_memory.SharedMemory(name=name)

             try:
-                shm_out = shared_memory.SharedMemory(
+                shm_out = mp.shared_memory.SharedMemory(
                     name=f"out-{name}", create=True, size=20 * 6 * 4
                 )
             except FileExistsError:
-                shm_out = shared_memory.SharedMemory(name=f"out-{name}")
+                shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")

             self.detection_shms.append(shm_in)
             self.detection_shms.append(shm_out)
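
Note on the shared memory changes above: Python 3.8 ships this API in the standard library as multiprocessing.shared_memory, which is why the standalone `shared_memory` backport import is dropped here. One caveat: `import multiprocessing as mp` does not load the submodule by itself, so `mp.shared_memory.SharedMemory` only resolves once some module has imported `multiprocessing.shared_memory`. A minimal sketch of the create-or-attach pattern used above (the segment name and size are illustrative, not from the commit):

    from multiprocessing import shared_memory

    try:
        # first process creates the segment (size in bytes, e.g. height * width * 3)
        shm = shared_memory.SharedMemory(name="cam1", create=True, size=720 * 1280 * 3)
    except FileExistsError:
        # the segment already exists (e.g. after a restart), so attach instead
        shm = shared_memory.SharedMemory(name="cam1")

    shm.close()   # each process closes its own handle
    shm.unlink()  # exactly one process should remove the segment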

View File

@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import json
 import logging
 import os
@@ -21,8 +23,7 @@ DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
 # German Style:
 # DEFAULT_TIME_FORMAT = "%d.%m.%Y %H:%M:%S"

-FRIGATE_ENV_VARS = {k: v for k,
-                    v in os.environ.items() if k.startswith("FRIGATE_")}
+FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}

 DEFAULT_TRACKED_OBJECTS = ["person"]
 DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
@@ -39,8 +40,7 @@ class DetectorTypeEnum(str, Enum):
 class DetectorConfig(FrigateBaseModel):
-    type: DetectorTypeEnum = Field(
-        default=DetectorTypeEnum.cpu, title="Detector Type")
+    type: DetectorTypeEnum = Field(default=DetectorTypeEnum.cpu, title="Detector Type")
     device: str = Field(default="usb", title="Device Type")
     num_threads: int = Field(default=3, title="Number of detection threads")
@@ -74,10 +74,8 @@ class RetainConfig(FrigateBaseModel):
 class EventsConfig(FrigateBaseModel):
     max_seconds: int = Field(default=300, title="Maximum event duration.")
-    pre_capture: int = Field(
-        default=5, title="Seconds to retain before event starts.")
-    post_capture: int = Field(
-        default=5, title="Seconds to retain after event ends.")
+    pre_capture: int = Field(default=5, title="Seconds to retain before event starts.")
+    post_capture: int = Field(default=5, title="Seconds to retain after event ends.")
     required_zones: List[str] = Field(
         default_factory=list,
         title="List of required zones to be entered in order to save the event.",
@@ -92,8 +90,7 @@ class EventsConfig(FrigateBaseModel):
 class RecordConfig(FrigateBaseModel):
     enabled: bool = Field(default=False, title="Enable record on all cameras.")
-    retain_days: float = Field(
-        default=0, title="Recording retention period in days.")
+    retain_days: float = Field(default=0, title="Recording retention period in days.")
     events: EventsConfig = Field(
         default_factory=EventsConfig, title="Event specific settings."
     )
@@ -126,8 +123,7 @@ class RuntimeMotionConfig(MotionConfig):
             config["frame_height"] = max(frame_shape[0] // 6, 180)

         if "contour_area" not in config:
-            frame_width = frame_shape[1] * \
-                config["frame_height"] / frame_shape[0]
+            frame_width = frame_shape[1] * config["frame_height"] / frame_shape[0]
             config["contour_area"] = (
                 config["frame_height"] * frame_width * 0.00173611111
             )
@@ -157,10 +153,8 @@ class RuntimeMotionConfig(MotionConfig):
 class DetectConfig(FrigateBaseModel):
-    height: int = Field(
-        default=720, title="Height of the stream for the detect role.")
-    width: int = Field(
-        default=1280, title="Width of the stream for the detect role.")
+    height: int = Field(default=720, title="Height of the stream for the detect role.")
+    width: int = Field(default=1280, title="Width of the stream for the detect role.")
     fps: int = Field(
         default=5, title="Number of frames per second to process through detection."
     )
@@ -198,8 +192,7 @@ class RuntimeFilterConfig(FilterConfig):
         config["raw_mask"] = mask

         if mask is not None:
-            config["mask"] = create_mask(
-                config.get("frame_shape", (1, 1)), mask)
+            config["mask"] = create_mask(config.get("frame_shape", (1, 1)), mask)

         super().__init__(**config)
@@ -246,22 +239,19 @@ class ZoneConfig(BaseModel):
         if isinstance(coordinates, list):
             self._contour = np.array(
-                [[int(p.split(",")[0]), int(p.split(",")[1])]
-                 for p in coordinates]
+                [[int(p.split(",")[0]), int(p.split(",")[1])] for p in coordinates]
             )
         elif isinstance(coordinates, str):
             points = coordinates.split(",")
             self._contour = np.array(
-                [[int(points[i]), int(points[i + 1])]
-                 for i in range(0, len(points), 2)]
+                [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
             )
         else:
             self._contour = np.array([])


 class ObjectConfig(FrigateBaseModel):
-    track: List[str] = Field(
-        default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
+    track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
     filters: Optional[Dict[str, FilterConfig]] = Field(title="Object filters.")
     mask: Union[str, List[str]] = Field(default="", title="Object mask.")
@@ -352,15 +342,25 @@ class FfmpegConfig(FrigateBaseModel):
 class GstreamerConfig(FrigateBaseModel):
     manual_pipeline: List[str] = Field(
-        default=[], title="GStreamer manual pipeline. Use `manual_pipeline` to fine tune gstreamer. Each item will be splited by the `!`.")
+        default=[],
+        title="GStreamer manual pipeline. Use `manual_pipeline` to fine tune gstreamer. Each item will be split by the `!`.",
+    )
     input_pipeline: List[str] = Field(
-        default=[], title="Override the `rtspsrc location={{gstreamer_input.path}} latency=0` default pipeline item.")
+        default=[],
+        title="Override the `rtspsrc location={{gstreamer_input.path}} latency=0` default pipeline item.",
+    )
     decoder_pipeline: List[str] = Field(
-        default=[], title="Set the hardware specific decoder. Example: ['rtph265depay', 'h265parse', 'omxh265dec']")
+        default=[],
+        title="Set the hardware specific decoder. Example: ['rtph265depay', 'h265parse', 'omxh265dec']",
+    )
     source_format_pipeline: List[str] = Field(
-        default=[], title="Set the camera source format. Default is: ['video/x-raw,format=(string)NV12', 'videoconvert', 'videoscale']")
+        default=[],
+        title="Set the camera source format. Default is: ['video/x-raw,format=(string)NV12', 'videoconvert', 'videoscale']",
+    )
     destination_format_pipeline: List[str] = Field(
-        default=[], title="Set the Frigate format. Please keep `format=I420` if override. Default is: ['video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420', 'videoconvert']")
+        default=[],
+        title="Set the Frigate format. Please keep `format=I420` when overriding. Default is: ['video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420', 'videoconvert']",
+    )


 class CameraRoleEnum(str, Enum):
@@ -388,7 +388,8 @@ class CameraFFmpegInput(CameraInput):
 class CameraGStreamerInput(CameraInput):
     pipeline: List[str] = Field(
-        default=[], title="GStreamer pipeline. Each pipeline will be splited by ! sign")
+        default=[], title="GStreamer pipeline. Each pipeline will be split by the ! sign"
+    )


 class CameraInputValidator:
@@ -411,8 +412,7 @@ class CameraFfmpegConfig(FfmpegConfig, CameraInputValidator):
 class CameraGStreamerConfig(GstreamerConfig, CameraInputValidator):
-    inputs: List[CameraGStreamerInput] = Field(
-        title="Camera GStreamer inputs.")
+    inputs: List[CameraGStreamerInput] = Field(title="Camera GStreamer inputs.")


 class SnapshotsConfig(FrigateBaseModel):
@@ -426,8 +426,7 @@ class SnapshotsConfig(FrigateBaseModel):
     bounding_box: bool = Field(
         default=True, title="Add a bounding box overlay on the snapshot."
     )
-    crop: bool = Field(
-        default=False, title="Crop the snapshot to the detected object.")
+    crop: bool = Field(default=False, title="Crop the snapshot to the detected object.")
     required_zones: List[str] = Field(
         default_factory=list,
         title="List of required zones to be entered in order to save a snapshot.",
@@ -467,8 +466,7 @@ class TimestampStyleConfig(FrigateBaseModel):
         default=TimestampPositionEnum.tl, title="Timestamp position."
     )
     format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.")
-    color: ColorConfig = Field(
-        default_factory=ColorConfig, title="Timestamp color.")
+    color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.")
     thickness: int = Field(default=2, title="Timestamp thickness.")
     effect: Optional[TimestampEffectEnum] = Field(title="Timestamp effect.")
@@ -476,10 +474,8 @@ class TimestampStyleConfig(FrigateBaseModel):
 class CameraMqttConfig(FrigateBaseModel):
     enabled: bool = Field(default=True, title="Send image over MQTT.")
     timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.")
-    bounding_box: bool = Field(
-        default=True, title="Add bounding box to MQTT image.")
-    crop: bool = Field(
-        default=True, title="Crop MQTT image to detected object.")
+    bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.")
+    crop: bool = Field(default=True, title="Crop MQTT image to detected object.")
     height: int = Field(default=270, title="MQTT image height.")
     required_zones: List[str] = Field(
         default_factory=list,
@@ -499,16 +495,17 @@ class RtmpConfig(FrigateBaseModel):
 class CameraLiveConfig(FrigateBaseModel):
     height: int = Field(default=720, title="Live camera view height")
-    quality: int = Field(default=8, ge=1, le=31,
-                         title="Live camera view quality")
+    quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality")


 class CameraConfig(FrigateBaseModel):
     name: Optional[str] = Field(title="Camera name.")
-    ffmpeg: Optional[CameraFfmpegConfig] = Field(
-        title="FFmpeg configuration for the camera.")
-    gstreamer: Optional[CameraGStreamerConfig] = Field(
-        title="GStreamer configuration for the camera.")
+    ffmpeg: Optional[CameraFfmpegConfig] = Field(
+        title="FFmpeg configuration for the camera."
+    )
+    gstreamer: Optional[CameraGStreamerConfig] = Field(
+        title="GStreamer configuration for the camera."
+    )
     best_image_timeout: int = Field(
         default=60,
         title="How long to wait for the image with the highest confidence score.",
@@ -534,8 +531,7 @@ class CameraConfig(FrigateBaseModel):
     objects: ObjectConfig = Field(
         default_factory=ObjectConfig, title="Object configuration."
     )
-    motion: Optional[MotionConfig] = Field(
-        title="Motion detection configuration.")
+    motion: Optional[MotionConfig] = Field(title="Motion detection configuration.")
     detect: DetectConfig = Field(
         default_factory=DetectConfig, title="Object detection configuration."
     )
@@ -548,16 +544,14 @@ class CameraConfig(FrigateBaseModel):
         if "zones" in config:
             colors = plt.cm.get_cmap("tab10", len(config["zones"]))
             config["zones"] = {
-                name: {**z, "color": tuple(round(255 * c)
-                                           for c in colors(idx)[:3])}
+                name: {**z, "color": tuple(round(255 * c) for c in colors(idx)[:3])}
                 for idx, (name, z) in enumerate(config["zones"].items())
             }

         # add roles to the input if there is only one
         if "ffmpeg" in config:
             if len(config["ffmpeg"]["inputs"]) == 1:
-                config["ffmpeg"]["inputs"][0]["roles"] = [
-                    "record", "rtmp", "detect"]
+                config["ffmpeg"]["inputs"][0]["roles"] = ["record", "rtmp", "detect"]

         super().__init__(**config)
@@ -578,8 +572,7 @@ class CameraConfig(FrigateBaseModel):
                 if decoder_cmd is None:
                     continue
-                decoder_cmds.append(
-                    {"roles": ffmpeg_input.roles, "cmd": decoder_cmd})
+                decoder_cmds.append({"roles": ffmpeg_input.roles, "cmd": decoder_cmd})
         else:
             assert self.gstreamer
             for gstreamer_input in self.gstreamer.inputs:
@@ -588,68 +581,81 @@ class CameraConfig(FrigateBaseModel):
                     continue
                 decoder_cmds.append(
-                    {"roles": gstreamer_input.roles, "cmd": decoder_cmd})
+                    {"roles": gstreamer_input.roles, "cmd": decoder_cmd}
+                )
         return decoder_cmds

     def _get_gstreamer_cmd(self, gstreamer_input: CameraGStreamerInput):
-        assert list(
-            ["detect"]) == gstreamer_input.roles, "only detect role is supported"
+        assert (
+            list(["detect"]) == gstreamer_input.roles
+        ), "only detect role is supported"
-        manual_pipeline = [
-            part for part in self.gstreamer.manual_pipeline if part != ""]
+        manual_pipeline = [
+            part for part in self.gstreamer.manual_pipeline if part != ""
+        ]
-        input_pipeline = [
-            part for part in self.gstreamer.input_pipeline if part != ""]
+        input_pipeline = [part for part in self.gstreamer.input_pipeline if part != ""]
-        decoder_pipeline = [
-            part for part in self.gstreamer.decoder_pipeline if part != ""]
+        decoder_pipeline = [
+            part for part in self.gstreamer.decoder_pipeline if part != ""
+        ]
-        source_format_pipeline = [
-            part for part in self.gstreamer.source_format_pipeline if part != ""]
+        source_format_pipeline = [
+            part for part in self.gstreamer.source_format_pipeline if part != ""
+        ]
-        destination_format_pipeline = [
-            part for part in self.gstreamer.destination_format_pipeline if part != ""]
+        destination_format_pipeline = [
+            part for part in self.gstreamer.destination_format_pipeline if part != ""
+        ]
         video_format = f"video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420"
-        if not manual_pipeline and not input_pipeline and not decoder_pipeline and not source_format_pipeline and not destination_format_pipeline:
-            logger.warn(
-                "gsreamer pipeline not configured. Using videotestsrc pattern=0")
+        if (
+            not manual_pipeline
+            and not input_pipeline
+            and not decoder_pipeline
+            and not source_format_pipeline
+            and not destination_format_pipeline
+        ):
+            logger.warn(
+                "gstreamer pipeline not configured. Using videotestsrc pattern=0"
+            )
             pipeline = [
                 "videotestsrc pattern=0",
                 video_format,
             ]
         elif len(manual_pipeline) > 0:
-            logger.warn(
-                "gsreamer manual pipeline is set. Please make sure your detect width and height does math the gstreamer parameters")
+            logger.warn(
+                "gstreamer manual pipeline is set. Please make sure your detect width and height match the gstreamer parameters"
+            )
             pipeline = manual_pipeline
         else:
-            input_pipeline = input_pipeline if input_pipeline else [
-                f"rtspsrc location=\"{gstreamer_input.path}\" latency=0"
-            ]
+            input_pipeline = (
+                input_pipeline
+                if input_pipeline
+                else [f'rtspsrc location="{gstreamer_input.path}" latency=0']
+            )
-            decoder_pipeline = decoder_pipeline if decoder_pipeline else [
-                "rtph265depay", "h265parse", "omxh265dec"
-            ]
+            decoder_pipeline = (
+                decoder_pipeline
+                if decoder_pipeline
+                else ["rtph265depay", "h265parse", "omxh265dec"]
+            )
-            source_format_pipeline = source_format_pipeline if source_format_pipeline else [
-                'video/x-raw,format=(string)NV12', 'videoconvert', 'videoscale'
-            ]
+            source_format_pipeline = (
+                source_format_pipeline
+                if source_format_pipeline
+                else ["video/x-raw,format=(string)NV12", "videoconvert", "videoscale"]
+            )
-            destination_format_pipeline = destination_format_pipeline if destination_format_pipeline else [
-                video_format, "videoconvert"
-            ]
+            destination_format_pipeline = (
+                destination_format_pipeline
+                if destination_format_pipeline
+                else [video_format, "videoconvert"]
+            )
             pipeline = [
                 *input_pipeline,
                 *decoder_pipeline,
                 *source_format_pipeline,
-                *destination_format_pipeline
+                *destination_format_pipeline,
             ]
-        pipeline_args = (
-            [f"{item} !".split(" ") for item in pipeline]
-        )
+        pipeline_args = [f"{item} !".split(" ") for item in pipeline]
         pipeline_args = [item for sublist in pipeline_args for item in sublist]
-        pipeline_args = [
-            "gst-launch-1.0",
-            "-q",
-            *pipeline_args,
-            "fdsink"
-        ]
-        logger.debug(
-            f"using gstreamer pipeline: {' '.join(pipeline_args)}")
+        pipeline_args = ["gst-launch-1.0", "-q", *pipeline_args, "fdsink"]
+        logger.debug(f"using gstreamer pipeline: {' '.join(pipeline_args)}")
         return pipeline_args
@@ -679,8 +685,7 @@ class CameraConfig(FrigateBaseModel):
                 else self.ffmpeg.output_args.rtmp.split(" ")
             )
             ffmpeg_output_args = (
-                rtmp_args +
-                [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
+                rtmp_args + [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
             )
         if "record" in ffmpeg_input.roles and self.record.enabled:
             record_args = (
@@ -704,16 +709,13 @@ class CameraConfig(FrigateBaseModel):
         input_args = ffmpeg_input.input_args or self.ffmpeg.input_args

         global_args = (
-            global_args if isinstance(
-                global_args, list) else global_args.split(" ")
+            global_args if isinstance(global_args, list) else global_args.split(" ")
         )
         hwaccel_args = (
-            hwaccel_args if isinstance(
-                hwaccel_args, list) else hwaccel_args.split(" ")
+            hwaccel_args if isinstance(hwaccel_args, list) else hwaccel_args.split(" ")
         )
         input_args = (
-            input_args if isinstance(
-                input_args, list) else input_args.split(" ")
+            input_args if isinstance(input_args, list) else input_args.split(" ")
         )

         cmd = (
@@ -730,7 +732,7 @@ class CameraConfig(FrigateBaseModel):
     @root_validator
     def either_ffmpeg_or_gstreamer(cls, v):
         if ("ffmpeg" not in v) and ("gstreamer" not in v):
-            raise ValueError('either ffmpeg or gstreamer should be set')
+            raise ValueError("either ffmpeg or gstreamer should be set")
         return v
@@ -742,12 +744,9 @@ class DatabaseConfig(FrigateBaseModel):
 class ModelConfig(FrigateBaseModel):
     path: Optional[str] = Field(title="Custom Object detection model path.")
-    labelmap_path: Optional[str] = Field(
-        title="Label map for custom object detector.")
-    width: int = Field(
-        default=320, title="Object detection model input width.")
-    height: int = Field(
-        default=320, title="Object detection model input height.")
+    labelmap_path: Optional[str] = Field(title="Label map for custom object detector.")
+    width: int = Field(default=320, title="Object detection model input width.")
+    height: int = Field(default=320, title="Object detection model input height.")
     labelmap: Dict[int, str] = Field(
         default_factory=dict, title="Labelmap customization."
     )
@@ -774,8 +773,7 @@ class ModelConfig(FrigateBaseModel):
         self._colormap = {}
         for key, val in self._merged_labelmap.items():
-            self._colormap[val] = tuple(int(round(255 * c))
-                                        for c in cmap(key)[:3])
+            self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])


 class LogLevelEnum(str, Enum):
@@ -807,8 +805,7 @@ class FrigateConfig(FrigateBaseModel):
         default_factory=ModelConfig, title="Detection model configuration."
     )
     detectors: Dict[str, DetectorConfig] = Field(
-        default={name: DetectorConfig(**d)
-                 for name, d in DEFAULT_DETECTORS.items()},
+        default={name: DetectorConfig(**d) for name, d in DEFAULT_DETECTORS.items()},
         title="Detector hardware configuration.",
     )
     logger: LoggerConfig = Field(
@@ -854,8 +851,7 @@ class FrigateConfig(FrigateBaseModel):
         # MQTT password substitution
         if config.mqtt.password:
-            config.mqtt.password = config.mqtt.password.format(
-                **FRIGATE_ENV_VARS)
+            config.mqtt.password = config.mqtt.password.format(**FRIGATE_ENV_VARS)

         # Global config to propagate down to camera level
         global_config = config.dict(
@@ -874,8 +870,7 @@ class FrigateConfig(FrigateBaseModel):
         )

         for name, camera in config.cameras.items():
-            merged_config = deep_merge(camera.dict(
-                exclude_unset=True), global_config)
+            merged_config = deep_merge(camera.dict(exclude_unset=True), global_config)
             camera_config: CameraConfig = CameraConfig.parse_obj(
                 {"name": name, **merged_config}
             )
@@ -934,7 +929,11 @@ class FrigateConfig(FrigateBaseModel):
             )

             # check runtime config
-            decoder_config = camera_config.ffmpeg if "ffmpeg" in camera_config else camera_config.gstreamer
+            decoder_config = (
+                camera_config.ffmpeg
+                if "ffmpeg" in camera_config
+                else camera_config.gstreamer
+            )
             assigned_roles = list(
                 set([r for i in decoder_config.inputs for r in i.roles])
             )
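
Note on `_get_gstreamer_cmd` above: each pipeline element gets a trailing `!`, is split on spaces, and the per-element lists are flattened into a single `gst-launch-1.0` argv. A worked sketch of that transformation (the pipeline content is illustrative, not from the commit):

    # two illustrative pipeline elements
    pipeline = ["videotestsrc pattern=0", "video/x-raw,format=(string)I420"]
    pipeline_args = [f"{item} !".split(" ") for item in pipeline]
    # [["videotestsrc", "pattern=0", "!"], ["video/x-raw,format=(string)I420", "!"]]
    pipeline_args = [item for sublist in pipeline_args for item in sublist]
    pipeline_args = ["gst-launch-1.0", "-q", *pipeline_args, "fdsink"]
    # gst-launch-1.0 -q videotestsrc pattern=0 ! video/x-raw,format=(string)I420 ! fdsink

Because every element is followed by `!`, the trailing `fdsink` cleanly terminates the pipeline, and the argv can be handed to subprocess without any shell quoting.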

View File

@@ -7,10 +7,8 @@ import signal
 import threading
 from abc import ABC, abstractmethod
 from typing import Dict
-import shared_memory

 import numpy as np
-# TensorRT https://github.com/NobuoTsukamoto/tensorrt-examples/blob/main/python/detection/README.md
 import tflite_runtime.interpreter as tflite
 from setproctitle import setproctitle
 from tflite_runtime.interpreter import load_delegate
@@ -161,7 +159,7 @@ def run_detector(
     outputs = {}
     for name in out_events.keys():
-        out_shm = shared_memory.SharedMemory(name=f"out-{name}", create=False)
+        out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False)
         out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
         outputs[name] = {"shm": out_shm, "np": out_np}
@@ -250,11 +248,11 @@ class RemoteObjectDetector:
         self.fps = EventsPerSecond()
         self.detection_queue = detection_queue
         self.event = event
-        self.shm = shared_memory.SharedMemory(name=self.name, create=False)
+        self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
         self.np_shm = np.ndarray(
             (1, model_shape[0], model_shape[1], 3), dtype=np.uint8, buffer=self.shm.buf
         )
-        self.out_shm = shared_memory.SharedMemory(
+        self.out_shm = mp.shared_memory.SharedMemory(
             name=f"out-{self.name}", create=False
         )
         self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)
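
Note on the output buffers above: app.py creates each `out-{name}` segment with size=20 * 6 * 4, exactly the 480 bytes needed for the (20, 6) float32 view built here (20 detections x 6 values, 4 bytes each). A minimal sketch of the zero-copy attach pattern (the segment name is an illustrative assumption):

    import numpy as np
    from multiprocessing import shared_memory

    # attach to a segment created by another process
    out_shm = shared_memory.SharedMemory(name="out-cam1", create=False)
    # view the raw buffer as an array without copying
    out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
    out_np[:] = 0.0  # writes are visible to every process attached to the segment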

View File

@@ -608,8 +608,7 @@ def recording_clip(camera, start_ts, end_ts):
         ffmpeg_cmd,
         input="\n".join(playlist_lines),
         encoding="ascii",
-        stdout=sp.PIPE,
-        stderr=sp.PIPE,
+        capture_output=True,
     )
     if p.returncode != 0:
         logger.error(p.stderr)
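
Note on the subprocess change above: capture_output=True (Python 3.7+) is shorthand for passing both stdout=sp.PIPE and stderr=sp.PIPE, so p.returncode, p.stdout, and p.stderr behave exactly as before. A minimal sketch:

    import subprocess as sp

    # illustrative command; output is captured as bytes unless text=True is given
    p = sp.run(["ffprobe", "-version"], capture_output=True)
    if p.returncode != 0:
        print(p.stderr)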

View File

@@ -7,6 +7,7 @@ import queue
 import signal
 import subprocess as sp
 import threading
+from multiprocessing import shared_memory
 from wsgiref.simple_server import make_server

 from frigate.log import LogPipe
@@ -32,27 +33,20 @@ class FFMpegConverter:
         ffmpeg_cmd = f"ffmpeg -f rawvideo -pix_fmt yuv420p -video_size {in_width}x{in_height} -i pipe: -f mpegts -s {out_width}x{out_height} -codec:v mpeg1video -q {quality} -bf 0 pipe:".split(
             " "
         )
-        # ffmpeg_cmd = f"gst-launch-1.0 rtspsrc location=\"rtsp://admin:123456@192.168.5.95:554/stream0\" ! rtph265depay ! h265parse ! omxh265dec ! 'video/x-raw,format=(string)NV12' ! videoconvert ! 'video/x-raw, width={in_width}, height={in_height}, format=I420, framerate=(fraction)10/1' ! omxh264enc bitrate=500000 temporal-tradeoff=2 iframeinterval=10 ! h264parse ! mpegtsmux ! fdsink"
-        # # .split(
-        # # " "
-        # # )

         self.logpipe = LogPipe(
             "ffmpeg.converter", logging.ERROR)
         self.process = sp.Popen(
             ffmpeg_cmd,
             stdout=sp.PIPE,
             stderr=self.logpipe,
             stdin=sp.PIPE,
             start_new_session=True,
-            # shell=True
         )

     def write(self, b):
         try:
             self.process.stdin.write(b)
         except Exception:
-            logger.error("Failure while writing to the stream:")
             self.logpipe.dump()
             return False
@@ -60,7 +54,6 @@ class FFMpegConverter:
         try:
             return self.process.stdout.read1(length)
         except ValueError:
-            logger.error("Failure while readig from the stream:")
             self.logpipe.dump()
             return False
@@ -423,13 +416,8 @@ def output_frames(config: FrigateConfig, video_output_queue):
         if any(
             ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
         ):
-            try:
-                # write to the converter for the camera if clients are listening to the specific camera
-                converters[camera].write(frame.tobytes())
-            except Exception:
-                # in case of videoconverter failure continure processing video_output_queue
-                # FFMpegConverter should dump an error response
-                pass
+            # write to the converter for the camera if clients are listening to the specific camera
+            converters[camera].write(frame.tobytes())

         # update birdseye if websockets are connected
         if config.birdseye.enabled and any(

View File

@@ -41,7 +41,7 @@ def get_frame_shape(source):
         "json",
         source,
     ]
-    p = sp.run(ffprobe_cmd, stdout=sp.PIPE, stderr=sp.PIPE)
+    p = sp.run(ffprobe_cmd, capture_output=True)
     info = json.loads(p.stdout)

     video_info = [s for s in info["streams"] if s["codec_type"] == "video"][0]

View File

@@ -94,7 +94,7 @@ class RecordingMaintainer(threading.Thread):
             "default=noprint_wrappers=1:nokey=1",
             f"{cache_path}",
         ]
-        p = sp.run(ffprobe_cmd, stdout=sp.PIPE, stderr=sp.PIPE)
+        p = sp.run(ffprobe_cmd, capture_output=True)
         if p.returncode == 0:
             duration = float(p.stdout.decode().strip())
             end_time = start_time + datetime.timedelta(seconds=duration)
@@ -284,9 +284,8 @@ class RecordingCleanup(threading.Thread):
         logger.debug(f"Oldest recording in the db: {oldest_timestamp}")
         process = sp.run(
             ["find", RECORD_DIR, "-type", "f", "!", "-newermt", f"@{oldest_timestamp}"],
-            stdout=sp.PIPE,
-            stderr=sp.PIPE,
-            universal_newlines=True,
+            capture_output=True,
+            text=True,
         )
         files_to_check = process.stdout.splitlines()
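
Note on the cleanup change above: text=True is the newer spelling of universal_newlines=True (both accepted since Python 3.7); combined with capture_output=True it decodes stdout to str, so process.stdout.splitlines() keeps working unchanged. A sketch with an assumed directory:

    import subprocess as sp

    # "/media/frigate/recordings" is an illustrative path, not from the commit
    process = sp.run(
        ["find", "/media/frigate/recordings", "-type", "f"],
        capture_output=True,
        text=True,  # decode stdout/stderr to str
    )
    files_to_check = process.stdout.splitlines()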

View File

@@ -11,8 +11,7 @@ import threading
 import time
 import traceback
 from abc import ABC, abstractmethod
-#from multiprocessing import shared_memory
-import shared_memory
+from multiprocessing import shared_memory
 from typing import AnyStr

 import cv2

View File

@@ -184,8 +184,7 @@ class CameraWatchdog(threading.Thread):
         self.config = config
         self.capture_thread = None
         self.ffmpeg_detect_process = None
-        self.logpipe = LogPipe(
-            f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
+        self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
         self.ffmpeg_other_processes = []
         self.camera_fps = camera_fps
         self.ffmpeg_pid = ffmpeg_pid
@@ -223,8 +222,7 @@ class CameraWatchdog(threading.Thread):
                 self.logger.error(
                     "The following ffmpeg logs include the last 100 lines prior to exit."
                 )
-                self.logger.error(
-                    "You may have invalid args defined for this camera.")
+                self.logger.error("You may have invalid args defined for this camera.")
                 self.logpipe.dump()
                 self.start_ffmpeg_detect()
             elif now - self.capture_thread.current_frame.value > 20:
@@ -233,8 +231,7 @@ class CameraWatchdog(threading.Thread):
                 )
                 self.ffmpeg_detect_process.terminate()
                 try:
-                    self.logger.info(
-                        "Waiting for ffmpeg to exit gracefully...")
+                    self.logger.info("Waiting for ffmpeg to exit gracefully...")
                     self.ffmpeg_detect_process.communicate(timeout=30)
                 except sp.TimeoutExpired:
                     self.logger.info("FFmpeg didn't exit. Force killing...")
@@ -470,13 +467,11 @@ def process_frames(
         current_frame_time.value = frame_time

         frame = frame_manager.get(
-            f"{camera_name}{frame_time}", (
-                frame_shape[0] * 3 // 2, frame_shape[1])
+            f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
         )

         if frame is None:
-            logger.info(
-                f"{camera_name}: frame {frame_time} is not in memory store.")
+            logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
             continue

         if not detection_enabled.value:

View File

@@ -27,7 +27,7 @@ class FrigateWatchdog(threading.Thread):
             # check the detection processes
             for detector in self.detectors.values():
                 detection_start = detector.detection_start.value
-                if detection_start > 0.0 and now - detection_start > 10:
+                if detection_start > 0.0 and now - detection_start > 30:
                     logger.info(
                         "Detection appears to be stuck. Restarting detection process..."
                     )