diff --git a/frigate/app.py b/frigate/app.py
index f8c62758e..7b991d9db 100644
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -1,7 +1,6 @@
 import json
 import logging
 import multiprocessing as mp
-import shared_memory
 import os
 import signal
 import sys
@@ -42,7 +41,7 @@ class FrigateApp:
         self.detection_queue = mp.Queue()
         self.detectors: Dict[str, EdgeTPUProcess] = {}
         self.detection_out_events: Dict[str, mp.Event] = {}
-        self.detection_shms: List[shared_memory.SharedMemory] = []
+        self.detection_shms: List[mp.shared_memory.SharedMemory] = []
         self.log_queue = mp.Queue()
         self.camera_metrics = {}

@@ -155,20 +154,20 @@ class FrigateApp:
             self.detection_out_events[name] = mp.Event()

             try:
-                shm_in = shared_memory.SharedMemory(
+                shm_in = mp.shared_memory.SharedMemory(
                     name=name,
                     create=True,
                     size=self.config.model.height * self.config.model.width * 3,
                 )
             except FileExistsError:
-                shm_in = shared_memory.SharedMemory(name=name)
+                shm_in = mp.shared_memory.SharedMemory(name=name)

             try:
-                shm_out = shared_memory.SharedMemory(
+                shm_out = mp.shared_memory.SharedMemory(
                     name=f"out-{name}", create=True, size=20 * 6 * 4
                 )
             except FileExistsError:
-                shm_out = shared_memory.SharedMemory(name=f"out-{name}")
+                shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")

             self.detection_shms.append(shm_in)
             self.detection_shms.append(shm_out)
diff --git a/frigate/config.py b/frigate/config.py
index 50fe9a778..91d66afb4 100644
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import json
 import logging
 import os
@@ -21,8 +23,7 @@ DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
 # German Style:
 # DEFAULT_TIME_FORMAT = "%d.%m.%Y %H:%M:%S"

-FRIGATE_ENV_VARS = {k: v for k,
-                    v in os.environ.items() if k.startswith("FRIGATE_")}
+FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}

 DEFAULT_TRACKED_OBJECTS = ["person"]
 DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
@@ -39,8 +40,7 @@ class DetectorTypeEnum(str, Enum):


 class DetectorConfig(FrigateBaseModel):
-    type: DetectorTypeEnum = Field(
-        default=DetectorTypeEnum.cpu, title="Detector Type")
+    type: DetectorTypeEnum = Field(default=DetectorTypeEnum.cpu, title="Detector Type")
     device: str = Field(default="usb", title="Device Type")
     num_threads: int = Field(default=3, title="Number of detection threads")
@@ -74,10 +74,8 @@ class RetainConfig(FrigateBaseModel):

 class EventsConfig(FrigateBaseModel):
     max_seconds: int = Field(default=300, title="Maximum event duration.")
-    pre_capture: int = Field(
-        default=5, title="Seconds to retain before event starts.")
-    post_capture: int = Field(
-        default=5, title="Seconds to retain after event ends.")
+    pre_capture: int = Field(default=5, title="Seconds to retain before event starts.")
+    post_capture: int = Field(default=5, title="Seconds to retain after event ends.")
     required_zones: List[str] = Field(
         default_factory=list,
         title="List of required zones to be entered in order to save the event.",
@@ -92,8 +90,7 @@ class EventsConfig(FrigateBaseModel):

 class RecordConfig(FrigateBaseModel):
     enabled: bool = Field(default=False, title="Enable record on all cameras.")
-    retain_days: float = Field(
-        default=0, title="Recording retention period in days.")
+    retain_days: float = Field(default=0, title="Recording retention period in days.")
     events: EventsConfig = Field(
         default_factory=EventsConfig, title="Event specific settings."
     )
@@ -126,8 +123,7 @@ class RuntimeMotionConfig(MotionConfig):
             config["frame_height"] = max(frame_shape[0] // 6, 180)

         if "contour_area" not in config:
-            frame_width = frame_shape[1] * \
-                config["frame_height"] / frame_shape[0]
+            frame_width = frame_shape[1] * config["frame_height"] / frame_shape[0]
             config["contour_area"] = (
                 config["frame_height"] * frame_width * 0.00173611111
             )
@@ -157,10 +153,8 @@ class RuntimeMotionConfig(MotionConfig):


 class DetectConfig(FrigateBaseModel):
-    height: int = Field(
-        default=720, title="Height of the stream for the detect role.")
-    width: int = Field(
-        default=1280, title="Width of the stream for the detect role.")
+    height: int = Field(default=720, title="Height of the stream for the detect role.")
+    width: int = Field(default=1280, title="Width of the stream for the detect role.")
     fps: int = Field(
         default=5, title="Number of frames per second to process through detection."
     )
@@ -198,8 +192,7 @@ class RuntimeFilterConfig(FilterConfig):
         config["raw_mask"] = mask

         if mask is not None:
-            config["mask"] = create_mask(
-                config.get("frame_shape", (1, 1)), mask)
+            config["mask"] = create_mask(config.get("frame_shape", (1, 1)), mask)

         super().__init__(**config)
@@ -246,22 +239,19 @@ class ZoneConfig(BaseModel):

         if isinstance(coordinates, list):
             self._contour = np.array(
-                [[int(p.split(",")[0]), int(p.split(",")[1])]
-                 for p in coordinates]
+                [[int(p.split(",")[0]), int(p.split(",")[1])] for p in coordinates]
             )
         elif isinstance(coordinates, str):
             points = coordinates.split(",")
             self._contour = np.array(
-                [[int(points[i]), int(points[i + 1])]
-                 for i in range(0, len(points), 2)]
+                [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
             )
         else:
             self._contour = np.array([])


 class ObjectConfig(FrigateBaseModel):
-    track: List[str] = Field(
-        default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
+    track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
     filters: Optional[Dict[str, FilterConfig]] = Field(title="Object filters.")
     mask: Union[str, List[str]] = Field(default="", title="Object mask.")
@@ -352,15 +342,25 @@ class FfmpegConfig(FrigateBaseModel):


 class GstreamerConfig(FrigateBaseModel):
     manual_pipeline: List[str] = Field(
-        default=[], title="GStreamer manual pipeline. Use `manual_pipeline` to fine tune gstreamer. Each item will be splited by the `!`.")
+        default=[],
+        title="GStreamer manual pipeline. Use `manual_pipeline` to fine-tune gstreamer. Each item will be split by the `!`.",
+    )
     input_pipeline: List[str] = Field(
-        default=[], title="Override the `rtspsrc location={{gstreamer_input.path}} latency=0` default pipeline item.")
+        default=[],
+        title="Override the `rtspsrc location={{gstreamer_input.path}} latency=0` default pipeline item.",
+    )
     decoder_pipeline: List[str] = Field(
-        default=[], title="Set the hardware specific decoder. Example: ['rtph265depay', 'h265parse', 'omxh265dec']")
+        default=[],
+        title="Set the hardware specific decoder. Example: ['rtph265depay', 'h265parse', 'omxh265dec']",
+    )
     source_format_pipeline: List[str] = Field(
-        default=[], title="Set the camera source format. Default is: ['video/x-raw,format=(string)NV12', 'videoconvert', 'videoscale']")
+        default=[],
+        title="Set the camera source format. Default is: ['video/x-raw,format=(string)NV12', 'videoconvert', 'videoscale']",
+    )
     destination_format_pipeline: List[str] = Field(
-        default=[], title="Set the Frigate format. Please keep `format=I420` if override. Default is: ['video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420', 'videoconvert']")
+        default=[],
+        title="Set the Frigate format. Please keep `format=I420` if overriding. Default is: ['video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420', 'videoconvert']",
+    )


 class CameraRoleEnum(str, Enum):
@@ -388,7 +388,8 @@ class CameraFFmpegInput(CameraInput):

 class CameraGStreamerInput(CameraInput):
     pipeline: List[str] = Field(
-        default=[], title="GStreamer pipeline. Each pipeline will be splited by ! sign")
+        default=[], title="GStreamer pipeline. Each item will be split by the `!` sign"
+    )


 class CameraInputValidator:
@@ -411,8 +412,7 @@ class CameraFfmpegConfig(FfmpegConfig, CameraInputValidator):

 class CameraGStreamerConfig(GstreamerConfig, CameraInputValidator):
-    inputs: List[CameraGStreamerInput] = Field(
-        title="Camera GStreamer inputs.")
+    inputs: List[CameraGStreamerInput] = Field(title="Camera GStreamer inputs.")


 class SnapshotsConfig(FrigateBaseModel):
@@ -426,8 +426,7 @@ class SnapshotsConfig(FrigateBaseModel):
     bounding_box: bool = Field(
         default=True, title="Add a bounding box overlay on the snapshot."
     )
-    crop: bool = Field(
-        default=False, title="Crop the snapshot to the detected object.")
+    crop: bool = Field(default=False, title="Crop the snapshot to the detected object.")
     required_zones: List[str] = Field(
         default_factory=list,
         title="List of required zones to be entered in order to save a snapshot.",
@@ -467,8 +466,7 @@ class TimestampStyleConfig(FrigateBaseModel):
         default=TimestampPositionEnum.tl, title="Timestamp position."
     )
     format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.")
-    color: ColorConfig = Field(
-        default_factory=ColorConfig, title="Timestamp color.")
+    color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.")
     thickness: int = Field(default=2, title="Timestamp thickness.")
     effect: Optional[TimestampEffectEnum] = Field(title="Timestamp effect.")
@@ -476,10 +474,8 @@ class TimestampStyleConfig(FrigateBaseModel):

 class CameraMqttConfig(FrigateBaseModel):
     enabled: bool = Field(default=True, title="Send image over MQTT.")
     timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.")
-    bounding_box: bool = Field(
-        default=True, title="Add bounding box to MQTT image.")
-    crop: bool = Field(
-        default=True, title="Crop MQTT image to detected object.")
+    bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.")
+    crop: bool = Field(default=True, title="Crop MQTT image to detected object.")
     height: int = Field(default=270, title="MQTT image height.")
     required_zones: List[str] = Field(
         default_factory=list,
@@ -499,16 +495,17 @@ class RtmpConfig(FrigateBaseModel):

 class CameraLiveConfig(FrigateBaseModel):
     height: int = Field(default=720, title="Live camera view height")
-    quality: int = Field(default=8, ge=1, le=31,
-                         title="Live camera view quality")
+    quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality")


 class CameraConfig(FrigateBaseModel):
     name: Optional[str] = Field(title="Camera name.")
     ffmpeg: Optional[CameraFfmpegConfig] = Field(
-        title="FFmpeg configuration for the camera.")
+        title="FFmpeg configuration for the camera."
+    )
     gstreamer: Optional[CameraGStreamerConfig] = Field(
-        title="GStreamer configuration for the camera.")
+        title="GStreamer configuration for the camera."
+    )
     best_image_timeout: int = Field(
         default=60,
         title="How long to wait for the image with the highest confidence score.",
@@ -534,8 +531,7 @@ class CameraConfig(FrigateBaseModel):
     objects: ObjectConfig = Field(
         default_factory=ObjectConfig, title="Object configuration."
     )
-    motion: Optional[MotionConfig] = Field(
-        title="Motion detection configuration.")
+    motion: Optional[MotionConfig] = Field(title="Motion detection configuration.")
     detect: DetectConfig = Field(
         default_factory=DetectConfig, title="Object detection configuration."
     )
@@ -548,16 +544,14 @@ class CameraConfig(FrigateBaseModel):
         if "zones" in config:
             colors = plt.cm.get_cmap("tab10", len(config["zones"]))
             config["zones"] = {
-                name: {**z, "color": tuple(round(255 * c)
-                                           for c in colors(idx)[:3])}
+                name: {**z, "color": tuple(round(255 * c) for c in colors(idx)[:3])}
                 for idx, (name, z) in enumerate(config["zones"].items())
             }

         # add roles to the input if there is only one
         if "ffmpeg" in config:
             if len(config["ffmpeg"]["inputs"]) == 1:
-                config["ffmpeg"]["inputs"][0]["roles"] = [
-                    "record", "rtmp", "detect"]
+                config["ffmpeg"]["inputs"][0]["roles"] = ["record", "rtmp", "detect"]

         super().__init__(**config)
@@ -578,8 +572,7 @@ class CameraConfig(FrigateBaseModel):
                 if decoder_cmd is None:
                     continue

-                decoder_cmds.append(
-                    {"roles": ffmpeg_input.roles, "cmd": decoder_cmd})
+                decoder_cmds.append({"roles": ffmpeg_input.roles, "cmd": decoder_cmd})
         else:
             assert self.gstreamer
             for gstreamer_input in self.gstreamer.inputs:
@@ -588,68 +581,81 @@ class CameraConfig(FrigateBaseModel):
                     continue

                 decoder_cmds.append(
-                    {"roles": gstreamer_input.roles, "cmd": decoder_cmd})
+                    {"roles": gstreamer_input.roles, "cmd": decoder_cmd}
+                )

         return decoder_cmds

     def _get_gstreamer_cmd(self, gstreamer_input: CameraGStreamerInput):
-        assert list(
-            ["detect"]) == gstreamer_input.roles, "only detect role is supported"
+        assert (
+            list(["detect"]) == gstreamer_input.roles
+        ), "only detect role is supported"
         manual_pipeline = [
-            part for part in self.gstreamer.manual_pipeline if part != ""]
-        input_pipeline = [
-            part for part in self.gstreamer.input_pipeline if part != ""]
+            part for part in self.gstreamer.manual_pipeline if part != ""
+        ]
+        input_pipeline = [part for part in self.gstreamer.input_pipeline if part != ""]
         decoder_pipeline = [
-            part for part in self.gstreamer.decoder_pipeline if part != ""]
+            part for part in self.gstreamer.decoder_pipeline if part != ""
+        ]
         source_format_pipeline = [
-            part for part in self.gstreamer.source_format_pipeline if part != ""]
+            part for part in self.gstreamer.source_format_pipeline if part != ""
+        ]
         destination_format_pipeline = [
-            part for part in self.gstreamer.destination_format_pipeline if part != ""]
+            part for part in self.gstreamer.destination_format_pipeline if part != ""
+        ]
         video_format = f"video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420"
-        if not manual_pipeline and not input_pipeline and not decoder_pipeline and not source_format_pipeline and not destination_format_pipeline:
+        if (
+            not manual_pipeline
+            and not input_pipeline
+            and not decoder_pipeline
+            and not source_format_pipeline
+            and not destination_format_pipeline
+        ):
             logger.warn(
-                "gsreamer pipeline not configured. Using videotestsrc pattern=0")
+                "gstreamer pipeline not configured. Using videotestsrc pattern=0"
+            )
             pipeline = [
                 "videotestsrc pattern=0",
                 video_format,
             ]
         elif len(manual_pipeline) > 0:
             logger.warn(
-                "gsreamer manual pipeline is set. Please make sure your detect width and height does math the gstreamer parameters")
+                "gstreamer manual pipeline is set. Please make sure your detect width and height match the gstreamer parameters"
+            )
             pipeline = manual_pipeline
         else:
-            input_pipeline = input_pipeline if input_pipeline else [
-                f"rtspsrc location=\"{gstreamer_input.path}\" latency=0"
-            ]
+            input_pipeline = (
+                input_pipeline
+                if input_pipeline
+                else [f'rtspsrc location="{gstreamer_input.path}" latency=0']
+            )

-            decoder_pipeline = decoder_pipeline if decoder_pipeline else [
-                "rtph265depay", "h265parse", "omxh265dec"
-            ]
-            source_format_pipeline = source_format_pipeline if source_format_pipeline else [
-                'video/x-raw,format=(string)NV12', 'videoconvert', 'videoscale'
-            ]
-            destination_format_pipeline = destination_format_pipeline if destination_format_pipeline else [
-                video_format, "videoconvert"
-            ]
+            decoder_pipeline = (
+                decoder_pipeline
+                if decoder_pipeline
+                else ["rtph265depay", "h265parse", "omxh265dec"]
+            )
+            source_format_pipeline = (
+                source_format_pipeline
+                if source_format_pipeline
+                else ["video/x-raw,format=(string)NV12", "videoconvert", "videoscale"]
+            )
+            destination_format_pipeline = (
+                destination_format_pipeline
+                if destination_format_pipeline
+                else [video_format, "videoconvert"]
+            )
             pipeline = [
                 *input_pipeline,
                 *decoder_pipeline,
                 *source_format_pipeline,
-                *destination_format_pipeline
+                *destination_format_pipeline,
             ]

-        pipeline_args = (
-            [f"{item} !".split(" ") for item in pipeline]
-        )
+        pipeline_args = [f"{item} !".split(" ") for item in pipeline]
         pipeline_args = [item for sublist in pipeline_args for item in sublist]
-        pipeline_args = [
-            "gst-launch-1.0",
-            "-q",
-            *pipeline_args,
-            "fdsink"
-        ]
-        logger.debug(
-            f"using gstreamer pipeline: {' '.join(pipeline_args)}")
+        pipeline_args = ["gst-launch-1.0", "-q", *pipeline_args, "fdsink"]
+        logger.debug(f"using gstreamer pipeline: {' '.join(pipeline_args)}")

         return pipeline_args
@@ -679,8 +685,7 @@ class CameraConfig(FrigateBaseModel):
                 else self.ffmpeg.output_args.rtmp.split(" ")
             )
             ffmpeg_output_args = (
-                rtmp_args +
-                [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
+                rtmp_args + [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
             )
         if "record" in ffmpeg_input.roles and self.record.enabled:
             record_args = (
@@ -704,16 +709,13 @@ class CameraConfig(FrigateBaseModel):
         input_args = ffmpeg_input.input_args or self.ffmpeg.input_args

         global_args = (
-            global_args if isinstance(
-                global_args, list) else global_args.split(" ")
+            global_args if isinstance(global_args, list) else global_args.split(" ")
         )
         hwaccel_args = (
-            hwaccel_args if isinstance(
-                hwaccel_args, list) else hwaccel_args.split(" ")
+            hwaccel_args if isinstance(hwaccel_args, list) else hwaccel_args.split(" ")
         )
         input_args = (
-            input_args if isinstance(
-                input_args, list) else input_args.split(" ")
+            input_args if isinstance(input_args, list) else input_args.split(" ")
        )

         cmd = (
@@ -730,7 +732,7 @@ class CameraConfig(FrigateBaseModel):

     @root_validator
     def either_ffmpeg_or_gstreamer(cls, v):
         if ("ffmpeg" not in v) and ("gstreamer" not in v):
-            raise ValueError('either ffmpeg or gstreamer should be set')
+            raise ValueError("either ffmpeg or gstreamer should be set")
         return v
@@ -742,12 +744,9 @@ class DatabaseConfig(FrigateBaseModel):

 class ModelConfig(FrigateBaseModel):
     path: Optional[str] = Field(title="Custom Object detection model path.")
-    labelmap_path: Optional[str] = Field(
-        title="Label map for custom object detector.")
-    width: int = Field(
-        default=320, title="Object detection model input width.")
-    height: int = Field(
-        default=320, title="Object detection model input height.")
+    labelmap_path: Optional[str] = Field(title="Label map for custom object detector.")
+    width: int = Field(default=320, title="Object detection model input width.")
+    height: int = Field(default=320, title="Object detection model input height.")
     labelmap: Dict[int, str] = Field(
         default_factory=dict, title="Labelmap customization."
     )
@@ -774,8 +773,7 @@ class ModelConfig(FrigateBaseModel):
         self._colormap = {}

         for key, val in self._merged_labelmap.items():
-            self._colormap[val] = tuple(int(round(255 * c))
-                                        for c in cmap(key)[:3])
+            self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
@@ -807,8 +805,7 @@ class LogLevelEnum(str, Enum):
         default_factory=ModelConfig, title="Detection model configuration."
     )
     detectors: Dict[str, DetectorConfig] = Field(
-        default={name: DetectorConfig(**d)
-                 for name, d in DEFAULT_DETECTORS.items()},
+        default={name: DetectorConfig(**d) for name, d in DEFAULT_DETECTORS.items()},
         title="Detector hardware configuration.",
     )
     logger: LoggerConfig = Field(
@@ -854,8 +851,7 @@ class FrigateConfig(FrigateBaseModel):

         # MQTT password substitution
         if config.mqtt.password:
-            config.mqtt.password = config.mqtt.password.format(
-                **FRIGATE_ENV_VARS)
+            config.mqtt.password = config.mqtt.password.format(**FRIGATE_ENV_VARS)

         # Global config to propegate down to camera level
         global_config = config.dict(
@@ -874,8 +870,7 @@ class FrigateConfig(FrigateBaseModel):
         )

         for name, camera in config.cameras.items():
-            merged_config = deep_merge(camera.dict(
-                exclude_unset=True), global_config)
+            merged_config = deep_merge(camera.dict(exclude_unset=True), global_config)
             camera_config: CameraConfig = CameraConfig.parse_obj(
                 {"name": name, **merged_config}
             )
@@ -934,7 +929,11 @@ class FrigateConfig(FrigateBaseModel):
             )

             # check runtime config
-            decoder_config = camera_config.ffmpeg if "ffmpeg" in camera_config else camera_config.gstreamer
+            decoder_config = (
+                camera_config.ffmpeg
+                if "ffmpeg" in camera_config
+                else camera_config.gstreamer
+            )
             assigned_roles = list(
                 set([r for i in decoder_config.inputs for r in i.roles])
             )
diff --git a/frigate/edgetpu.py b/frigate/edgetpu.py
index 3e52a202c..1992c6b35 100644
--- a/frigate/edgetpu.py
+++ b/frigate/edgetpu.py
@@ -7,10 +7,8 @@
 import signal
 import threading
 from abc import ABC, abstractmethod
 from typing import Dict
-import shared_memory
 import numpy as np
-# TensorRT https://github.com/NobuoTsukamoto/tensorrt-examples/blob/main/python/detection/README.md
 import tflite_runtime.interpreter as tflite
 from setproctitle import setproctitle
 from tflite_runtime.interpreter import load_delegate
@@ -161,7 +159,7 @@ def run_detector(

     outputs = {}
     for name in out_events.keys():
-        out_shm = shared_memory.SharedMemory(name=f"out-{name}", create=False)
+        out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False)
         out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
         outputs[name] = {"shm": out_shm, "np": out_np}
@@ -250,11 +248,11 @@ class RemoteObjectDetector:
         self.fps = EventsPerSecond()
         self.detection_queue = detection_queue
         self.event = event
-        self.shm = shared_memory.SharedMemory(name=self.name, create=False)
+        self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
         self.np_shm = np.ndarray(
             (1, model_shape[0], model_shape[1], 3), dtype=np.uint8, buffer=self.shm.buf
         )
-        self.out_shm = shared_memory.SharedMemory(
+        self.out_shm = mp.shared_memory.SharedMemory(
             name=f"out-{self.name}", create=False
         )
         self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)
diff --git a/frigate/http.py b/frigate/http.py
index 622f1678f..1627920d6 100644
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -608,8 +608,7 @@ def recording_clip(camera, start_ts, end_ts):
         ffmpeg_cmd,
         input="\n".join(playlist_lines),
         encoding="ascii",
-        stdout=sp.PIPE,
-        stderr=sp.PIPE,
+        capture_output=True,
     )
     if p.returncode != 0:
         logger.error(p.stderr)
diff --git a/frigate/output.py b/frigate/output.py
index 7e1829759..a0fcd6aa4 100644
--- a/frigate/output.py
+++ b/frigate/output.py
@@ -7,6 +7,7 @@ import queue
 import signal
 import subprocess as sp
 import threading
+from multiprocessing import shared_memory
 from wsgiref.simple_server import make_server

 from frigate.log import LogPipe
@@ -32,27 +33,20 @@ class FFMpegConverter:
         ffmpeg_cmd = f"ffmpeg -f rawvideo -pix_fmt yuv420p -video_size {in_width}x{in_height} -i pipe: -f mpegts -s {out_width}x{out_height} -codec:v mpeg1video -q {quality} -bf 0 pipe:".split(
             " "
         )
-        # ffmpeg_cmd = f"gst-launch-1.0 rtspsrc location=\"rtsp://admin:123456@192.168.5.95:554/stream0\" ! rtph265depay ! h265parse ! omxh265dec ! 'video/x-raw,format=(string)NV12' ! videoconvert ! 'video/x-raw, width={in_width}, height={in_height}, format=I420, framerate=(fraction)10/1' ! omxh264enc bitrate=500000 temporal-tradeoff=2 iframeinterval=10 ! h264parse ! mpegtsmux ! fdsink"
-        # # .split(
-        # #     " "
-        # # )

-        self.logpipe = LogPipe(
-            "ffmpeg.converter", logging.ERROR)
+        self.logpipe = LogPipe("ffmpeg.converter", logging.ERROR)
         self.process = sp.Popen(
             ffmpeg_cmd,
             stdout=sp.PIPE,
             stderr=self.logpipe,
             stdin=sp.PIPE,
             start_new_session=True,
-            # shell=True
         )

     def write(self, b):
         try:
             self.process.stdin.write(b)
         except Exception:
-            logger.error("Failure while writing to the stream:")
             self.logpipe.dump()
             return False
@@ -60,7 +54,6 @@ class FFMpegConverter:
         try:
             return self.process.stdout.read1(length)
         except ValueError:
-            logger.error("Failure while readig from the stream:")
             self.logpipe.dump()
             return False
@@ -423,13 +416,8 @@ def output_frames(config: FrigateConfig, video_output_queue):
             if any(
                 ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
             ):
-                try:
-                    # write to the converter for the camera if clients are listening to the specific camera
-                    converters[camera].write(frame.tobytes())
-                except Exception:
-                    # in case of videoconverter failure continure processing video_output_queue
-                    # FFMpegConverter should dump an error response
-                    pass
+                # write to the converter for the camera if clients are listening to the specific camera
+                converters[camera].write(frame.tobytes())

             # update birdseye if websockets are connected
             if config.birdseye.enabled and any(
diff --git a/frigate/process_clip.py b/frigate/process_clip.py
index 3459c4cae..50ef4d860 100644
--- a/frigate/process_clip.py
+++ b/frigate/process_clip.py
@@ -41,7 +41,7 @@ def get_frame_shape(source):
         "json",
         source,
     ]
-    p = sp.run(ffprobe_cmd, stdout=sp.PIPE, stderr=sp.PIPE)
+    p = sp.run(ffprobe_cmd, capture_output=True)
     info = json.loads(p.stdout)

     video_info = [s for s in info["streams"] if s["codec_type"] == "video"][0]
diff --git a/frigate/record.py b/frigate/record.py
index 9fb024092..b0fd64798 100644
--- a/frigate/record.py
+++ b/frigate/record.py
@@ -94,7 +94,7 @@ class RecordingMaintainer(threading.Thread):
             "default=noprint_wrappers=1:nokey=1",
             f"{cache_path}",
         ]
-        p = sp.run(ffprobe_cmd, stdout=sp.PIPE, stderr=sp.PIPE)
+        p = sp.run(ffprobe_cmd, capture_output=True)
         if p.returncode == 0:
             duration = float(p.stdout.decode().strip())
             end_time = start_time + datetime.timedelta(seconds=duration)
@@ -284,9 +284,8 @@ class RecordingCleanup(threading.Thread):
         logger.debug(f"Oldest recording in the db: {oldest_timestamp}")
         process = sp.run(
             ["find", RECORD_DIR, "-type", "f", "!", "-newermt", f"@{oldest_timestamp}"],
-            stdout=sp.PIPE,
-            stderr=sp.PIPE,
-            universal_newlines=True,
+            capture_output=True,
+            text=True,
         )
         files_to_check = process.stdout.splitlines()

diff --git a/frigate/util.py b/frigate/util.py
index 72d52afa9..24a75b775 100755
--- a/frigate/util.py
+++ b/frigate/util.py
@@ -11,8 +11,7 @@ import threading
 import time
 import traceback
 from abc import ABC, abstractmethod
-#from multiprocessing import shared_memory
-import shared_memory
+from multiprocessing import shared_memory
 from typing import AnyStr

 import cv2
diff --git a/frigate/video.py b/frigate/video.py
index 316ff7173..30bbe1c73 100755
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -184,8 +184,7 @@ class CameraWatchdog(threading.Thread):
         self.config = config
         self.capture_thread = None
         self.ffmpeg_detect_process = None
-        self.logpipe = LogPipe(
-            f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
+        self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
         self.ffmpeg_other_processes = []
         self.camera_fps = camera_fps
         self.ffmpeg_pid = ffmpeg_pid
@@ -223,8 +222,7 @@ class CameraWatchdog(threading.Thread):
                 self.logger.error(
                     "The following ffmpeg logs include the last 100 lines prior to exit."
                 )
-                self.logger.error(
-                    "You may have invalid args defined for this camera.")
+                self.logger.error("You may have invalid args defined for this camera.")
                 self.logpipe.dump()
                 self.start_ffmpeg_detect()
             elif now - self.capture_thread.current_frame.value > 20:
@@ -233,8 +232,7 @@ class CameraWatchdog(threading.Thread):
                 )
                 self.ffmpeg_detect_process.terminate()
                 try:
-                    self.logger.info(
-                        "Waiting for ffmpeg to exit gracefully...")
+                    self.logger.info("Waiting for ffmpeg to exit gracefully...")
                     self.ffmpeg_detect_process.communicate(timeout=30)
                 except sp.TimeoutExpired:
                     self.logger.info("FFmpeg didnt exit. Force killing...")
@@ -470,13 +467,11 @@ def process_frames(
             current_frame_time.value = frame_time

         frame = frame_manager.get(
-            f"{camera_name}{frame_time}", (
-                frame_shape[0] * 3 // 2, frame_shape[1])
+            f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
         )

         if frame is None:
-            logger.info(
-                f"{camera_name}: frame {frame_time} is not in memory store.")
+            logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
             continue

         if not detection_enabled.value:
diff --git a/frigate/watchdog.py b/frigate/watchdog.py
index 73cf86240..9c8c7c34d 100644
--- a/frigate/watchdog.py
+++ b/frigate/watchdog.py
@@ -27,7 +27,7 @@ class FrigateWatchdog(threading.Thread):
         # check the detection processes
         for detector in self.detectors.values():
             detection_start = detector.detection_start.value
-            if detection_start > 0.0 and now - detection_start > 10:
+            if detection_start > 0.0 and now - detection_start > 30:
                 logger.info(
                     "Detection appears to be stuck. Restarting detection process..."
                 )
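
A few notes on the patch follow, with illustrative sketches that are not part of the diff.

On the shared-memory migration: the patch drops the third-party `shared_memory` backport in favor of the stdlib `multiprocessing.shared_memory` module (Python 3.8+). One subtlety: `import multiprocessing as mp` alone does not make `mp.shared_memory` resolvable; that attribute only exists once some module has executed `from multiprocessing import shared_memory`, which `frigate/util.py` now does and which every process imports early. Below is a minimal standalone sketch of the producer/consumer buffer pattern the diff standardizes on. The camera name "cam1" and the 320x320 model size are assumptions for the example; the 20x6 float32 output shape mirrors the diff.

    from multiprocessing import shared_memory

    import numpy as np

    MODEL_H, MODEL_W = 320, 320  # assumed model input size

    # input buffer: one uint8 frame at model resolution, created by the app process
    shm_in = shared_memory.SharedMemory(
        name="cam1", create=True, size=MODEL_H * MODEL_W * 3
    )
    frame = np.ndarray((1, MODEL_H, MODEL_W, 3), dtype=np.uint8, buffer=shm_in.buf)

    # output buffer: up to 20 detections of 6 float32 values each
    shm_out = shared_memory.SharedMemory(name="out-cam1", create=True, size=20 * 6 * 4)
    detections = np.ndarray((20, 6), dtype=np.float32, buffer=shm_out.buf)

    # another process attaches to the same buffers by name, with no copying:
    #   shm = shared_memory.SharedMemory(name="cam1", create=False)
    frame[:] = 0  # writes here are immediately visible to any attached process

    # exactly one side should unlink; every side should close
    shm_in.close()
    shm_in.unlink()
    shm_out.close()
    shm_out.unlink()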
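On the GStreamer changes: the edits to `_get_gstreamer_cmd` are formatting-only, but the assembly logic is easy to miss in diff form. Each pipeline segment gets a trailing `!`, is split on spaces, and the flattened tokens are wrapped in `gst-launch-1.0 -q ... fdsink` so raw I420 frames land on stdout for the frame reader. A rough standalone rendering of that logic follows; the RTSP URL and 1280x720 detect size are placeholders, and the decoder elements are the diff's Jetson-style defaults.

    # Rough rendering of the argv assembly in _get_gstreamer_cmd (sketch only).
    width, height = 1280, 720
    video_format = (
        f"video/x-raw,width=(int){width},height=(int){height},format=(string)I420"
    )
    pipeline = [
        'rtspsrc location="rtsp://example.local/stream" latency=0',
        "rtph265depay",
        "h265parse",
        "omxh265dec",
        "video/x-raw,format=(string)NV12",
        "videoconvert",
        "videoscale",
        video_format,
        "videoconvert",
    ]
    # suffix every segment with "!" and split on spaces, then flatten to one argv
    tokens = [tok for item in pipeline for tok in f"{item} !".split(" ")]
    args = ["gst-launch-1.0", "-q", *tokens, "fdsink"]
    print(" ".join(args))
    # -> gst-launch-1.0 -q rtspsrc location="..." latency=0 ! rtph265depay ! ... ! fdsink

Because the last segment also receives a `!`, `fdsink` attaches cleanly as the final element. Note that splitting on spaces means no individual property value can itself contain a space.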
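On the subprocess changes in http.py, process_clip.py, and record.py: these are behavior-preserving modernizations. `capture_output=True` (Python 3.7+) is exact shorthand for passing `stdout=sp.PIPE, stderr=sp.PIPE`, and `text=True` is the newer alias for `universal_newlines=True`. A quick sanity check of the equivalence:

    import subprocess as sp

    old = sp.run(
        ["echo", "hi"], stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True
    )
    new = sp.run(["echo", "hi"], capture_output=True, text=True)
    assert (old.stdout, old.stderr) == (new.stdout, new.stderr) == ("hi\n", "")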