Mirror of https://github.com/blakeblackshear/frigate.git
gstreamer work in progress / cofr

Commit: 4a497204cb
Parent: bd8e23833c
@@ -7,7 +7,7 @@ import sys
 import threading
 from logging.handlers import QueueHandler
 from typing import Dict, List
-
+import traceback
 import yaml
 from peewee_migrate import Router
 from playhouse.sqlite_ext import SqliteExtDatabase
@@ -320,6 +320,7 @@ class FrigateApp:
             print("*** Config Validation Errors ***")
             print("*************************************************************")
             print(e)
+            print(traceback.format_exc())
             print("*************************************************************")
             print("*** End Config Validation Errors ***")
             print("*************************************************************")
@@ -9,7 +9,7 @@ from typing import Dict, List, Optional, Tuple, Union
 import matplotlib.pyplot as plt
 import numpy as np
 import yaml
-from pydantic import BaseModel, Extra, Field, validator
+from pydantic import BaseModel, Extra, Field, validator, root_validator
 from pydantic.fields import PrivateAttr

 from frigate.const import BASE_DIR, CACHE_DIR, YAML_EXT
@@ -22,7 +22,8 @@ DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
 # German Style:
 # DEFAULT_TIME_FORMAT = "%d.%m.%Y %H:%M:%S"

-FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
+FRIGATE_ENV_VARS = {k: v for k,
+                    v in os.environ.items() if k.startswith("FRIGATE_")}

 DEFAULT_TRACKED_OBJECTS = ["person"]
 DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
@@ -39,7 +40,8 @@ class DetectorTypeEnum(str, Enum):


 class DetectorConfig(FrigateBaseModel):
-    type: DetectorTypeEnum = Field(default=DetectorTypeEnum.cpu, title="Detector Type")
+    type: DetectorTypeEnum = Field(
+        default=DetectorTypeEnum.cpu, title="Detector Type")
     device: str = Field(default="usb", title="Device Type")
     num_threads: int = Field(default=3, title="Number of detection threads")
@@ -82,8 +84,10 @@ class RetainConfig(FrigateBaseModel):

 class EventsConfig(FrigateBaseModel):
     max_seconds: int = Field(default=300, title="Maximum event duration.")
-    pre_capture: int = Field(default=5, title="Seconds to retain before event starts.")
-    post_capture: int = Field(default=5, title="Seconds to retain after event ends.")
+    pre_capture: int = Field(
+        default=5, title="Seconds to retain before event starts.")
+    post_capture: int = Field(
+        default=5, title="Seconds to retain after event ends.")
     required_zones: List[str] = Field(
         default_factory=list,
         title="List of required zones to be entered in order to save the event.",
@@ -161,8 +165,10 @@ class RuntimeMotionConfig(MotionConfig):


 class DetectConfig(FrigateBaseModel):
-    height: int = Field(default=720, title="Height of the stream for the detect role.")
-    width: int = Field(default=1280, title="Width of the stream for the detect role.")
+    height: int = Field(
+        default=720, title="Height of the stream for the detect role.")
+    width: int = Field(
+        default=1280, title="Width of the stream for the detect role.")
     fps: int = Field(
         default=5, title="Number of frames per second to process through detection."
     )
@@ -204,7 +210,8 @@ class RuntimeFilterConfig(FilterConfig):
         config["raw_mask"] = mask

         if mask is not None:
-            config["mask"] = create_mask(config.get("frame_shape", (1, 1)), mask)
+            config["mask"] = create_mask(
+                config.get("frame_shape", (1, 1)), mask)

         super().__init__(**config)
@@ -251,19 +258,22 @@ class ZoneConfig(BaseModel):

         if isinstance(coordinates, list):
             self._contour = np.array(
-                [[int(p.split(",")[0]), int(p.split(",")[1])] for p in coordinates]
+                [[int(p.split(",")[0]), int(p.split(",")[1])]
+                 for p in coordinates]
             )
         elif isinstance(coordinates, str):
             points = coordinates.split(",")
             self._contour = np.array(
-                [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
+                [[int(points[i]), int(points[i + 1])]
+                 for i in range(0, len(points), 2)]
             )
         else:
             self._contour = np.array([])


 class ObjectConfig(FrigateBaseModel):
-    track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
+    track: List[str] = Field(
+        default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
     filters: Optional[Dict[str, FilterConfig]] = Field(title="Object filters.")
     mask: Union[str, List[str]] = Field(default="", title="Object mask.")
@@ -352,6 +362,11 @@ class FfmpegConfig(FrigateBaseModel):
     )


+class GstreamerConfig(FrigateBaseModel):
+    pipeline: List[str] = Field(
+        default=[], title="GStreamer pipeline. Each pipeline will be split on the ! sign.")
+
+
 class CameraRoleEnum(str, Enum):
     record = "record"
     rtmp = "rtmp"
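For context: GstreamerConfig mirrors FfmpegConfig as a per-camera decoder section. A rough sketch of how a configured pipeline list is meant to expand into a gst-launch-1.0 argv, matching _get_gstreamer_cmd further down in this diff (the element names here are illustrative, not from the commit):

    pipeline = [
        'rtspsrc location="rtsp://user:pass@host:554/stream0"',
        "rtph264depay",
        "h264parse",
        "avdec_h264",
        "videoconvert",
    ]
    # Each list entry is one pipeline stage; stages are joined with "!".
    args = [tok for stage in pipeline for tok in f"{stage} !".split(" ")]
    cmd = ["gst-launch-1.0", "-q", *args, "fdsink"]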
@@ -361,6 +376,9 @@ class CameraRoleEnum(str, Enum):
 class CameraInput(FrigateBaseModel):
     path: str = Field(title="Camera input path.")
     roles: List[CameraRoleEnum] = Field(title="Roles assigned to this input.")
+
+
+class CameraFFmpegInput(CameraInput):
     global_args: Union[str, List[str]] = Field(
         default_factory=list, title="FFmpeg global arguments."
     )
@@ -372,9 +390,12 @@ class CameraInput(FrigateBaseModel):
     )


-class CameraFfmpegConfig(FfmpegConfig):
-    inputs: List[CameraInput] = Field(title="Camera inputs.")
+class CameraGStreamerInput(CameraInput):
+    pipeline: List[str] = Field(
+        default=[], title="GStreamer pipeline. Each pipeline will be split on the ! sign.")
+
+
+class CameraInputValidator:
     @validator("inputs")
     def validate_roles(cls, v):
         roles = [role for i in v for role in i.roles]
@@ -389,6 +410,15 @@ class CameraFfmpegConfig(FfmpegConfig):
         return v


+class CameraFfmpegConfig(FfmpegConfig, CameraInputValidator):
+    inputs: List[CameraFFmpegInput] = Field(title="Camera FFMpeg inputs.")
+
+
+class CameraGStreamerConfig(GstreamerConfig, CameraInputValidator):
+    inputs: List[CameraGStreamerInput] = Field(
+        title="Camera GStreamer inputs.")
+
+
 class SnapshotsConfig(FrigateBaseModel):
     enabled: bool = Field(default=False, title="Snapshots enabled.")
     clean_copy: bool = Field(
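For context: splitting the inputs into CameraFFmpegInput and CameraGStreamerInput means each camera declares exactly one decoder section, with the shared role check factored into the CameraInputValidator mixin. A hypothetical config fragment, written as the equivalent Python dict (values illustrative; per the assert in _get_gstreamer_cmd below, gstreamer inputs currently support only the detect role):

    cameras = {
        "back": {
            # ffmpeg-decoded camera (existing behaviour)
            "ffmpeg": {
                "inputs": [{"path": "rtsp://host/stream", "roles": ["detect"]}],
            },
        },
        "front": {
            # gstreamer-decoded camera (new here); path is unused but still
            # required by the shared CameraInput base model
            "gstreamer": {
                "inputs": [{"path": "", "roles": ["detect"], "pipeline": []}],
            },
        },
    }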
@@ -400,7 +430,8 @@ class SnapshotsConfig(FrigateBaseModel):
     bounding_box: bool = Field(
         default=True, title="Add a bounding box overlay on the snapshot."
     )
-    crop: bool = Field(default=False, title="Crop the snapshot to the detected object.")
+    crop: bool = Field(
+        default=False, title="Crop the snapshot to the detected object.")
     required_zones: List[str] = Field(
         default_factory=list,
         title="List of required zones to be entered in order to save a snapshot.",
@@ -440,7 +471,8 @@ class TimestampStyleConfig(FrigateBaseModel):
         default=TimestampPositionEnum.tl, title="Timestamp position."
     )
     format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.")
-    color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.")
+    color: ColorConfig = Field(
+        default_factory=ColorConfig, title="Timestamp color.")
     thickness: int = Field(default=2, title="Timestamp thickness.")
     effect: Optional[TimestampEffectEnum] = Field(title="Timestamp effect.")
@@ -448,8 +480,10 @@ class TimestampStyleConfig(FrigateBaseModel):
 class CameraMqttConfig(FrigateBaseModel):
     enabled: bool = Field(default=True, title="Send image over MQTT.")
     timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.")
-    bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.")
-    crop: bool = Field(default=True, title="Crop MQTT image to detected object.")
+    bounding_box: bool = Field(
+        default=True, title="Add bounding box to MQTT image.")
+    crop: bool = Field(
+        default=True, title="Crop MQTT image to detected object.")
     height: int = Field(default=270, title="MQTT image height.")
     required_zones: List[str] = Field(
         default_factory=list,
@@ -469,12 +503,16 @@ class RtmpConfig(FrigateBaseModel):

 class CameraLiveConfig(FrigateBaseModel):
     height: int = Field(default=720, title="Live camera view height")
-    quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality")
+    quality: int = Field(default=8, ge=1, le=31,
+                         title="Live camera view quality")


 class CameraConfig(FrigateBaseModel):
     name: Optional[str] = Field(title="Camera name.")
-    ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
+    ffmpeg: Optional[CameraFfmpegConfig] = Field(
+        title="FFmpeg configuration for the camera.")
+    gstreamer: Optional[CameraGStreamerConfig] = Field(
+        title="GStreamer configuration for the camera.")
     best_image_timeout: int = Field(
         default=60,
         title="How long to wait for the image with the highest confidence score.",
@@ -500,7 +538,8 @@ class CameraConfig(FrigateBaseModel):
     objects: ObjectConfig = Field(
         default_factory=ObjectConfig, title="Object configuration."
     )
-    motion: Optional[MotionConfig] = Field(title="Motion detection configuration.")
+    motion: Optional[MotionConfig] = Field(
+        title="Motion detection configuration.")
     detect: DetectConfig = Field(
         default_factory=DetectConfig, title="Object detection configuration."
     )
@@ -514,13 +553,16 @@ class CameraConfig(FrigateBaseModel):
         if "zones" in config:
             colors = plt.cm.get_cmap("tab10", len(config["zones"]))
             config["zones"] = {
-                name: {**z, "color": tuple(round(255 * c) for c in colors(idx)[:3])}
+                name: {**z, "color": tuple(round(255 * c)
+                                           for c in colors(idx)[:3])}
                 for idx, (name, z) in enumerate(config["zones"].items())
             }

         # add roles to the input if there is only one
-        if len(config["ffmpeg"]["inputs"]) == 1:
-            config["ffmpeg"]["inputs"][0]["roles"] = ["record", "rtmp", "detect"]
+        if "ffmpeg" in config:
+            if len(config["ffmpeg"]["inputs"]) == 1:
+                config["ffmpeg"]["inputs"][0]["roles"] = [
+                    "record", "rtmp", "detect"]

         super().__init__(**config)
@@ -533,8 +575,21 @@ class CameraConfig(FrigateBaseModel):
         return self.detect.height * 3 // 2, self.detect.width

     @property
-    def ffmpeg_cmds(self) -> List[Dict[str, List[str]]]:
-        return self._ffmpeg_cmds
+    def decoder_cmds(self) -> List[Dict[str, List[str]]]:
+        decoder_cmds = []
+        if self.ffmpeg:
+            return self._ffmpeg_cmds
+        else:
+            assert self.gstreamer
+            for gstreamer_input in self.gstreamer.inputs:
+                decoder_cmd = self._get_gstreamer_cmd(gstreamer_input)
+                if decoder_cmd is None:
+                    continue
+
+                decoder_cmds.append(
+                    {"roles": gstreamer_input.roles, "cmd": decoder_cmd})
+
+        return decoder_cmds

     def create_ffmpeg_cmds(self):
         if "_ffmpeg_cmds" in self:
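For context: decoder_cmds supersedes the ffmpeg_cmds property as the single entry point for per-camera decode processes, whichever backend is configured. Consumers iterate it unchanged; a minimal usage sketch, assuming camera_config is a parsed CameraConfig:

    for item in camera_config.decoder_cmds:
        # each entry has the shape {"roles": [...], "cmd": [argv, ...]}
        if "detect" in item["roles"]:
            print(" ".join(item["cmd"]))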
@@ -548,7 +603,54 @@ class CameraConfig(FrigateBaseModel):
             ffmpeg_cmds.append({"roles": ffmpeg_input.roles, "cmd": ffmpeg_cmd})
         self._ffmpeg_cmds = ffmpeg_cmds

-    def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
+    def _get_gstreamer_cmd(self, gstreamer_input: CameraGStreamerInput):
+        assert list(
+            ["detect"]) == gstreamer_input.roles, "only detect role is supported"
+        pipeline = [part for part in self.gstreamer.pipeline if part != ""]
+
+        video_format = f"video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420"
+        if len(pipeline) == 0:
+            pipeline = [
+                # "videotestsrc pattern=0",
+                "rtspsrc location=\"rtsp://admin:123456@192.168.5.95:554/stream0\"",
+                "rtph265depay", "h265parse", "omxh265dec",
+                "video/x-raw,format=(string)NV12",
+                "videoconvert", "videoscale",
+                video_format,
+                "videoconvert"
+                # "videoscale",
+                # video_format,
+                # "videoconvert"
+            ]
+
+        # pipeline = [
+        #     # "videotestsrc pattern=0",
+        #     "rtspsrc location=\"rtsp://admin:123456@192.168.5.180:554/cam/realmonitor?channel=0&subtype=0\"",
+        #     "rtph264depay",
+        #     "h264parse",
+        #     "omxh264dec",
+        #     "video/x-raw,format=(string)NV12",
+        #     "videoconvert",
+        #     "videoscale",
+        #     video_format,
+        #     "videoconvert"
+        #     # "videoscale",
+        #     # video_format,
+        #     # "videoconvert"
+        # ]
+        pipeline_args = (
+            [f"{item} !".split(" ") for item in pipeline]
+        )
+        pipeline_args = [item for sublist in pipeline_args for item in sublist]
+        return [
+            "gst-launch-1.0",
+            "-q",
+            *pipeline_args,
+            "fdsink"
+        ]
+
+    def _get_ffmpeg_cmd(self, ffmpeg_input: CameraFFmpegInput):
         ffmpeg_output_args = []
         if "detect" in ffmpeg_input.roles:
             detect_args = (
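For context: with an empty configured pipeline, the fallback above should yield roughly this command for the default 1280x720 detect stream (single line; the RTSP URL is the hard-coded WIP placeholder from the code):

    gst-launch-1.0 -q rtspsrc location="rtsp://admin:123456@192.168.5.95:554/stream0" ! rtph265depay ! h265parse ! omxh265dec ! video/x-raw,format=(string)NV12 ! videoconvert ! videoscale ! video/x-raw,width=(int)1280,height=(int)720,format=(string)I420 ! videoconvert ! fdsink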
@@ -574,7 +676,8 @@ class CameraConfig(FrigateBaseModel):
             else self.ffmpeg.output_args.rtmp.split(" ")
         )
         ffmpeg_output_args = (
-            rtmp_args + [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
+            rtmp_args +
+            [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
         )
         if "record" in ffmpeg_input.roles and self.record.enabled:
             record_args = (
@@ -598,13 +701,16 @@ class CameraConfig(FrigateBaseModel):
         input_args = ffmpeg_input.input_args or self.ffmpeg.input_args

         global_args = (
-            global_args if isinstance(global_args, list) else global_args.split(" ")
+            global_args if isinstance(
+                global_args, list) else global_args.split(" ")
         )
         hwaccel_args = (
-            hwaccel_args if isinstance(hwaccel_args, list) else hwaccel_args.split(" ")
+            hwaccel_args if isinstance(
+                hwaccel_args, list) else hwaccel_args.split(" ")
         )
         input_args = (
-            input_args if isinstance(input_args, list) else input_args.split(" ")
+            input_args if isinstance(
+                input_args, list) else input_args.split(" ")
         )

         cmd = (
@@ -618,6 +724,12 @@ class CameraConfig(FrigateBaseModel):

         return [part for part in cmd if part != ""]

+    @root_validator
+    def either_ffmpeg_or_gstreamer(cls, v):
+        if ("ffmpeg" not in v) and ("gstreamer" not in v):
+            raise ValueError('either ffmpeg or gstreamer should be set')
+        return v
+

 class DatabaseConfig(FrigateBaseModel):
     path: str = Field(
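For context: the root validator makes ffmpeg/gstreamer a required one-of per camera. Roughly what a caller would see (sketch; the error text comes from the diff, and the ValidationError wrapping is standard pydantic v1 behaviour):

    from pydantic import ValidationError

    try:
        CameraConfig.parse_obj({"name": "back"})  # neither section set
    except ValidationError as e:
        print(e)  # ... either ffmpeg or gstreamer should be set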
@@ -627,9 +739,12 @@ class DatabaseConfig(FrigateBaseModel):

 class ModelConfig(FrigateBaseModel):
     path: Optional[str] = Field(title="Custom Object detection model path.")
-    labelmap_path: Optional[str] = Field(title="Label map for custom object detector.")
-    width: int = Field(default=320, title="Object detection model input width.")
-    height: int = Field(default=320, title="Object detection model input height.")
+    labelmap_path: Optional[str] = Field(
+        title="Label map for custom object detector.")
+    width: int = Field(
+        default=320, title="Object detection model input width.")
+    height: int = Field(
+        default=320, title="Object detection model input height.")
     labelmap: Dict[int, str] = Field(
         default_factory=dict, title="Labelmap customization."
     )
@@ -656,7 +771,8 @@ class ModelConfig(FrigateBaseModel):

         self._colormap = {}
         for key, val in self._merged_labelmap.items():
-            self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
+            self._colormap[val] = tuple(int(round(255 * c))
+                                        for c in cmap(key)[:3])


 class LogLevelEnum(str, Enum):
@@ -688,7 +804,8 @@ class FrigateConfig(FrigateBaseModel):
         default_factory=ModelConfig, title="Detection model configuration."
     )
     detectors: Dict[str, DetectorConfig] = Field(
-        default={name: DetectorConfig(**d) for name, d in DEFAULT_DETECTORS.items()},
+        default={name: DetectorConfig(**d)
+                 for name, d in DEFAULT_DETECTORS.items()},
         title="Detector hardware configuration.",
     )
     logger: LoggerConfig = Field(
@@ -734,7 +851,8 @@ class FrigateConfig(FrigateBaseModel):

         # MQTT password substitution
         if config.mqtt.password:
-            config.mqtt.password = config.mqtt.password.format(**FRIGATE_ENV_VARS)
+            config.mqtt.password = config.mqtt.password.format(
+                **FRIGATE_ENV_VARS)

         # Global config to propagate down to camera level
         global_config = config.dict(
@@ -753,7 +871,8 @@ class FrigateConfig(FrigateBaseModel):
         )

         for name, camera in config.cameras.items():
-            merged_config = deep_merge(camera.dict(exclude_unset=True), global_config)
+            merged_config = deep_merge(camera.dict(
+                exclude_unset=True), global_config)
             camera_config: CameraConfig = CameraConfig.parse_obj(
                 {"name": name, **merged_config}
             )
@@ -769,8 +888,9 @@ class FrigateConfig(FrigateBaseModel):
             camera_config.detect.stationary_interval = stationary_interval

         # FFMPEG input substitution
-        for input in camera_config.ffmpeg.inputs:
-            input.path = input.path.format(**FRIGATE_ENV_VARS)
+        if "ffmpeg" in camera_config:
+            for input in camera_config.ffmpeg.inputs:
+                input.path = input.path.format(**FRIGATE_ENV_VARS)

         # Add default filters
         object_keys = camera_config.objects.track
@@ -816,8 +936,9 @@ class FrigateConfig(FrigateBaseModel):
             )

             # check runtime config
+            decoder_config = camera_config.ffmpeg if "ffmpeg" in camera_config else camera_config.gstreamer
             assigned_roles = list(
-                set([r for i in camera_config.ffmpeg.inputs for r in i.roles])
+                set([r for i in decoder_config.inputs for r in i.roles])
             )
             if camera_config.record.enabled and not "record" in assigned_roles:
                 raise ValueError(
@@ -319,11 +319,11 @@ def events():
 def config():
     config = current_app.frigate_config.dict()

-    # add in the ffmpeg_cmds
+    # add in the decoder_cmds
     for camera_name, camera in current_app.frigate_config.cameras.items():
         camera_dict = config["cameras"][camera_name]
-        camera_dict["ffmpeg_cmds"] = copy.deepcopy(camera.ffmpeg_cmds)
-        for cmd in camera_dict["ffmpeg_cmds"]:
+        camera_dict["decoder_cmds"] = copy.deepcopy(camera.decoder_cmds)
+        for cmd in camera_dict["decoder_cmds"]:
             cmd["cmd"] = " ".join(cmd["cmd"])

     return jsonify(config)
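For context: the /config API response now carries decoder_cmds per camera in place of ffmpeg_cmds, with each argv flattened to a display string. Illustrative shape (values hypothetical):

    {
        "cameras": {
            "back": {
                "decoder_cmds": [
                    {"roles": ["detect"], "cmd": "gst-launch-1.0 -q ... fdsink"}
                ]
            }
        }
    }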
@@ -9,6 +9,7 @@ import subprocess as sp
 import threading
 from multiprocessing import shared_memory
 from wsgiref.simple_server import make_server
+from frigate.log import LogPipe

 import cv2
 import numpy as np
@@ -32,21 +33,35 @@ class FFMpegConverter:
         ffmpeg_cmd = f"ffmpeg -f rawvideo -pix_fmt yuv420p -video_size {in_width}x{in_height} -i pipe: -f mpegts -s {out_width}x{out_height} -codec:v mpeg1video -q {quality} -bf 0 pipe:".split(
             " "
         )

+        # ffmpeg_cmd = f"gst-launch-1.0 fdsrc ! video/x-raw, width={in_width}, height={in_height}, format=I420 ! nvvideoconvert ! omxh264enc ! h264parse ! mpegtsmux ! fdsink".split(
+        #     " "
+        # )
+
+        logger.error(f" ffmpeg_cmd >>>> {ffmpeg_cmd}")
+        self.logpipe = LogPipe(
+            "ffmpeg.output", logging.ERROR)
         self.process = sp.Popen(
             ffmpeg_cmd,
             stdout=sp.PIPE,
-            stderr=sp.DEVNULL,
+            stderr=self.logpipe,
             stdin=sp.PIPE,
             start_new_session=True,
         )

     def write(self, b):
-        self.process.stdin.write(b)
+        try:
+            self.process.stdin.write(b)
+        except Exception:
+            self.logpipe.dump()
+            return False

     def read(self, length):
         try:
             return self.process.stdout.read1(length)
         except ValueError:
+            self.logpipe.dump()
             return False

     def exit(self):
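For context: routing the converter's stderr through LogPipe instead of DEVNULL lets a failed write surface ffmpeg's last log lines. The pattern in isolation (sketch; LogPipe and its dump() are Frigate's own log sink, exactly as used in the diff):

    import logging
    import subprocess as sp
    from frigate.log import LogPipe

    logpipe = LogPipe("ffmpeg.output", logging.ERROR)
    proc = sp.Popen(["ffmpeg", "-hide_banner"], stdin=sp.PIPE, stderr=logpipe)
    try:
        proc.stdin.write(b"\x00")
    except BrokenPipeError:
        logpipe.dump()  # replay the buffered stderr lines into the log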
@@ -408,8 +423,11 @@ def output_frames(config: FrigateConfig, video_output_queue):
             if any(
                 ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
             ):
-                # write to the converter for the camera if clients are listening to the specific camera
-                converters[camera].write(frame.tobytes())
+                try:
+                    # write to the converter for the camera if clients are listening to the specific camera
+                    converters[camera].write(frame.tobytes())
+                except Exception:
+                    pass

             # update birdseye if websockets are connected
             if config.birdseye.enabled and any(
@@ -247,7 +247,7 @@ class TestConfig(unittest.TestCase):
         assert config == frigate_config.dict(exclude_unset=True)

         runtime_config = frigate_config.runtime_config
-        assert "-rtsp_transport" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+        assert "-rtsp_transport" in runtime_config.cameras["back"].decoder_cmds[0]["cmd"]

     def test_ffmpeg_params_global(self):
         config = {
@@ -276,7 +276,7 @@ class TestConfig(unittest.TestCase):
         assert config == frigate_config.dict(exclude_unset=True)

         runtime_config = frigate_config.runtime_config
-        assert "-re" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+        assert "-re" in runtime_config.cameras["back"].decoder_cmds[0]["cmd"]

     def test_ffmpeg_params_camera(self):
         config = {
@@ -306,8 +306,8 @@ class TestConfig(unittest.TestCase):
         assert config == frigate_config.dict(exclude_unset=True)

         runtime_config = frigate_config.runtime_config
-        assert "-re" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
-        assert "test" not in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+        assert "-re" in runtime_config.cameras["back"].decoder_cmds[0]["cmd"]
+        assert "test" not in runtime_config.cameras["back"].decoder_cmds[0]["cmd"]

     def test_ffmpeg_params_input(self):
         config = {
@@ -341,10 +341,10 @@ class TestConfig(unittest.TestCase):
         assert config == frigate_config.dict(exclude_unset=True)

         runtime_config = frigate_config.runtime_config
-        assert "-re" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
-        assert "test" in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
-        assert "test2" not in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
-        assert "test3" not in runtime_config.cameras["back"].ffmpeg_cmds[0]["cmd"]
+        assert "-re" in runtime_config.cameras["back"].decoder_cmds[0]["cmd"]
+        assert "test" in runtime_config.cameras["back"].decoder_cmds[0]["cmd"]
+        assert "test2" not in runtime_config.cameras["back"].decoder_cmds[0]["cmd"]
+        assert "test3" not in runtime_config.cameras["back"].decoder_cmds[0]["cmd"]

     def test_inherit_clips_retention(self):
         config = {
@@ -512,9 +512,9 @@ class TestConfig(unittest.TestCase):
         assert config == frigate_config.dict(exclude_unset=True)

         runtime_config = frigate_config.runtime_config
-        ffmpeg_cmds = runtime_config.cameras["back"].ffmpeg_cmds
-        assert len(ffmpeg_cmds) == 1
-        assert not "clips" in ffmpeg_cmds[0]["roles"]
+        decoder_cmds = runtime_config.cameras["back"].decoder_cmds
+        assert len(decoder_cmds) == 1
+        assert not "clips" in decoder_cmds[0]["roles"]

     def test_max_disappeared_default(self):
         config = {
@@ -101,14 +101,14 @@ def stop_ffmpeg(ffmpeg_process, logger):


 def start_or_restart_ffmpeg(
-    ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None
+    decoder_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None
 ):
     if ffmpeg_process is not None:
         stop_ffmpeg(ffmpeg_process, logger)

     if frame_size is None:
         process = sp.Popen(
-            ffmpeg_cmd,
+            decoder_cmd,
             stdout=sp.DEVNULL,
             stderr=logpipe,
             stdin=sp.DEVNULL,
@@ -116,7 +116,7 @@ def start_or_restart_ffmpeg(
         )
     else:
         process = sp.Popen(
-            ffmpeg_cmd,
+            decoder_cmd,
             stdout=sp.PIPE,
             stderr=logpipe,
             stdin=sp.DEVNULL,
@@ -187,7 +187,8 @@ class CameraWatchdog(threading.Thread):
         self.config = config
         self.capture_thread = None
         self.ffmpeg_detect_process = None
-        self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
+        self.logpipe = LogPipe(
+            f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
         self.ffmpeg_other_processes = []
         self.camera_fps = camera_fps
         self.ffmpeg_pid = ffmpeg_pid
@@ -199,7 +200,7 @@ class CameraWatchdog(threading.Thread):
     def run(self):
         self.start_ffmpeg_detect()

-        for c in self.config.ffmpeg_cmds:
+        for c in self.config.decoder_cmds:
             if "detect" in c["roles"]:
                 continue
             logpipe = LogPipe(
@@ -225,7 +226,8 @@ class CameraWatchdog(threading.Thread):
                 self.logger.error(
                     "The following ffmpeg logs include the last 100 lines prior to exit."
                 )
-                self.logger.error("You may have invalid args defined for this camera.")
+                self.logger.error(
+                    "You may have invalid args defined for this camera.")
                 self.logpipe.dump()
                 self.start_ffmpeg_detect()
             elif now - self.capture_thread.current_frame.value > 20:
@@ -234,7 +236,8 @@ class CameraWatchdog(threading.Thread):
                 )
                 self.ffmpeg_detect_process.terminate()
                 try:
-                    self.logger.info("Waiting for ffmpeg to exit gracefully...")
+                    self.logger.info(
+                        "Waiting for ffmpeg to exit gracefully...")
                     self.ffmpeg_detect_process.communicate(timeout=30)
                 except sp.TimeoutExpired:
                     self.logger.info("FFmpeg didn't exit. Force killing...")
@@ -257,11 +260,11 @@ class CameraWatchdog(threading.Thread):
         self.logpipe.close()

     def start_ffmpeg_detect(self):
-        ffmpeg_cmd = [
-            c["cmd"] for c in self.config.ffmpeg_cmds if "detect" in c["roles"]
+        decoder_cmd = [
+            c["cmd"] for c in self.config.decoder_cmds if "detect" in c["roles"]
         ][0]
         self.ffmpeg_detect_process = start_or_restart_ffmpeg(
-            ffmpeg_cmd, self.logger, self.logpipe, self.frame_size
+            decoder_cmd, self.logger, self.logpipe, self.frame_size
         )
         self.ffmpeg_pid.value = self.ffmpeg_detect_process.pid
         self.capture_thread = CameraCapture(
@@ -482,11 +485,13 @@ def process_frames(
         current_frame_time.value = frame_time

         frame = frame_manager.get(
-            f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
+            f"{camera_name}{frame_time}", (
+                frame_shape[0] * 3 // 2, frame_shape[1])
         )

         if frame is None:
-            logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
+            logger.info(
+                f"{camera_name}: frame {frame_time} is not in memory store.")
             continue

         if not detection_enabled.value:
@@ -592,7 +597,8 @@ def process_frames(
             idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

             for index in idxs:
-                obj = group[index[0]]
+                index = index if isinstance(index, np.int32) else index[0]
+                obj = group[index]
                 if clipped(obj, frame_shape):
                     box = obj[2]
                     # calculate a new region that will hopefully get the entire object
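For context: this normalization guards against cv2.dnn.NMSBoxes returning differently shaped index arrays across OpenCV versions: older builds return an Nx1 array whose rows need index[0], newer builds a flat int32 vector. A standalone illustration:

    import numpy as np

    idxs_nested = np.array([[0], [2]])            # older OpenCV: shape (N, 1)
    idxs_flat = np.array([0, 2], dtype=np.int32)  # newer OpenCV: shape (N,)

    for idxs in (idxs_nested, idxs_flat):
        for index in idxs:
            index = index if isinstance(index, np.int32) else index[0]
            print(int(index))  # 0, 2 either way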
@@ -67,8 +67,8 @@ class ProcessClip:
         self.config = config
         self.camera_config = self.config.cameras["camera"]
         self.frame_shape = self.camera_config.frame_shape
-        self.ffmpeg_cmd = [
-            c["cmd"] for c in self.camera_config.ffmpeg_cmds if "detect" in c["roles"]
+        self.decoder_cmd = [
+            c["cmd"] for c in self.camera_config.decoder_cmds if "detect" in c["roles"]
         ][0]
         self.frame_manager = SharedMemoryFrameManager()
         self.frame_queue = mp.Queue()
@@ -84,7 +84,7 @@ class ProcessClip:
             * self.camera_config.frame_shape_yuv[1]
         )
         ffmpeg_process = start_or_restart_ffmpeg(
-            self.ffmpeg_cmd, logger, sp.DEVNULL, frame_size
+            self.decoder_cmd, logger, sp.DEVNULL, frame_size
         )
         capture_frames(
             ffmpeg_process,