return to Python 3.8

YS 2021-12-26 19:27:43 +03:00
parent 6ad981358e
commit 86af2a5615
10 changed files with 131 additions and 152 deletions
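In short, this commit drops the standalone shared_memory backport module and returns to multiprocessing.shared_memory, which has been in the standard library since Python 3.8. A minimal sketch of that stdlib API (the block name and sizes here are illustrative, not taken from the diff):

    # Note: the diff accesses `mp.shared_memory`, which only resolves once the
    # submodule has been imported somewhere in the process; other hunks below
    # do so via `from multiprocessing import shared_memory`.
    from multiprocessing import shared_memory

    import numpy as np

    # create a named block; a second process attaches with SharedMemory(name="demo")
    shm = shared_memory.SharedMemory(name="demo", create=True, size=4 * 4)
    arr = np.ndarray((4,), dtype=np.float32, buffer=shm.buf)  # zero-copy view
    arr[:] = [1.0, 2.0, 3.0, 4.0]

    shm.close()   # detach this process's mapping
    shm.unlink()  # free the block (only the creator should unlink)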

View File

@@ -1,7 +1,6 @@
import json
import logging
import multiprocessing as mp
import shared_memory
import os
import signal
import sys
@@ -42,7 +41,7 @@ class FrigateApp:
self.detection_queue = mp.Queue()
self.detectors: Dict[str, EdgeTPUProcess] = {}
self.detection_out_events: Dict[str, mp.Event] = {}
self.detection_shms: List[shared_memory.SharedMemory] = []
self.detection_shms: List[mp.shared_memory.SharedMemory] = []
self.log_queue = mp.Queue()
self.camera_metrics = {}
@@ -164,20 +163,20 @@ class FrigateApp:
self.detection_out_events[name] = mp.Event()
try:
shm_in = shared_memory.SharedMemory(
shm_in = mp.shared_memory.SharedMemory(
name=name,
create=True,
size=self.config.model.height * self.config.model.width * 3,
)
except FileExistsError:
shm_in = shared_memory.SharedMemory(name=name)
shm_in = mp.shared_memory.SharedMemory(name=name)
try:
shm_out = shared_memory.SharedMemory(
shm_out = mp.shared_memory.SharedMemory(
name=f"out-{name}", create=True, size=20 * 6 * 4
)
except FileExistsError:
shm_out = shared_memory.SharedMemory(name=f"out-{name}")
shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")
self.detection_shms.append(shm_in)
self.detection_shms.append(shm_out)
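The try/except above implements a create-or-attach pattern: whichever process arrives first creates the named block, and later arrivals attach to the existing one. A hedged sketch of that pattern as a helper, reusing the 20 x 6 float32 output layout from the hunk above (the shm name is hypothetical):

    from multiprocessing import shared_memory

    import numpy as np

    def attach_or_create(name: str, size: int) -> shared_memory.SharedMemory:
        # first caller creates the block; concurrent callers attach instead
        try:
            return shared_memory.SharedMemory(name=name, create=True, size=size)
        except FileExistsError:
            return shared_memory.SharedMemory(name=name)

    shm_out = attach_or_create("out-camera1", 20 * 6 * 4)  # 20 detections x 6 floats
    detections = np.ndarray((20, 6), dtype=np.float32, buffer=shm_out.buf)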

View File

@@ -1,3 +1,5 @@
from __future__ import annotations
import json
import logging
import os
@@ -20,8 +22,7 @@ DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
# German Style:
# DEFAULT_TIME_FORMAT = "%d.%m.%Y %H:%M:%S"
FRIGATE_ENV_VARS = {k: v for k,
v in os.environ.items() if k.startswith("FRIGATE_")}
FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
DEFAULT_TRACKED_OBJECTS = ["person"]
DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
@@ -38,8 +39,7 @@ class DetectorTypeEnum(str, Enum):
class DetectorConfig(FrigateBaseModel):
type: DetectorTypeEnum = Field(
default=DetectorTypeEnum.cpu, title="Detector Type")
type: DetectorTypeEnum = Field(default=DetectorTypeEnum.cpu, title="Detector Type")
device: str = Field(default="usb", title="Device Type")
num_threads: int = Field(default=3, title="Number of detection threads")
@@ -82,10 +82,8 @@ class RetainConfig(FrigateBaseModel):
class EventsConfig(FrigateBaseModel):
max_seconds: int = Field(default=300, title="Maximum event duration.")
pre_capture: int = Field(
default=5, title="Seconds to retain before event starts.")
post_capture: int = Field(
default=5, title="Seconds to retain after event ends.")
pre_capture: int = Field(default=5, title="Seconds to retain before event starts.")
post_capture: int = Field(default=5, title="Seconds to retain after event ends.")
required_zones: List[str] = Field(
default_factory=list,
title="List of required zones to be entered in order to save the event.",
@@ -163,10 +161,8 @@ class RuntimeMotionConfig(MotionConfig):
class DetectConfig(FrigateBaseModel):
height: int = Field(
default=720, title="Height of the stream for the detect role.")
width: int = Field(
default=1280, title="Width of the stream for the detect role.")
height: int = Field(default=720, title="Height of the stream for the detect role.")
width: int = Field(default=1280, title="Width of the stream for the detect role.")
fps: int = Field(
default=5, title="Number of frames per second to process through detection."
)
@@ -208,8 +204,7 @@ class RuntimeFilterConfig(FilterConfig):
config["raw_mask"] = mask
if mask is not None:
config["mask"] = create_mask(
config.get("frame_shape", (1, 1)), mask)
config["mask"] = create_mask(config.get("frame_shape", (1, 1)), mask)
super().__init__(**config)
@@ -256,22 +251,19 @@ class ZoneConfig(BaseModel):
if isinstance(coordinates, list):
self._contour = np.array(
[[int(p.split(",")[0]), int(p.split(",")[1])]
for p in coordinates]
[[int(p.split(",")[0]), int(p.split(",")[1])] for p in coordinates]
)
elif isinstance(coordinates, str):
points = coordinates.split(",")
self._contour = np.array(
[[int(points[i]), int(points[i + 1])]
for i in range(0, len(points), 2)]
[[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
)
else:
self._contour = np.array([])
class ObjectConfig(FrigateBaseModel):
track: List[str] = Field(
default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
filters: Optional[Dict[str, FilterConfig]] = Field(title="Object filters.")
mask: Union[str, List[str]] = Field(default="", title="Object mask.")
@@ -362,15 +354,25 @@ class FfmpegConfig(FrigateBaseModel):
class GstreamerConfig(FrigateBaseModel):
manual_pipeline: List[str] = Field(
default=[], title="GStreamer manual pipeline. Use `manual_pipeline` to fine tune gstreamer. Each item will be splited by the `!`.")
default=[],
title="GStreamer manual pipeline. Use `manual_pipeline` to fine tune gstreamer. Each item will be splited by the `!`.",
)
input_pipeline: List[str] = Field(
default=[], title="Override the `rtspsrc location={{gstreamer_input.path}} latency=0` default pipeline item.")
default=[],
title="Override the `rtspsrc location={{gstreamer_input.path}} latency=0` default pipeline item.",
)
decoder_pipeline: List[str] = Field(
default=[], title="Set the hardware specific decoder. Example: ['rtph265depay', 'h265parse', 'omxh265dec']")
default=[],
title="Set the hardware specific decoder. Example: ['rtph265depay', 'h265parse', 'omxh265dec']",
)
source_format_pipeline: List[str] = Field(
default=[], title="Set the camera source format. Default is: ['video/x-raw,format=(string)NV12', 'videoconvert', 'videoscale']")
default=[],
title="Set the camera source format. Default is: ['video/x-raw,format=(string)NV12', 'videoconvert', 'videoscale']",
)
destination_format_pipeline: List[str] = Field(
default=[], title="Set the Frigate format. Please keep `format=I420` if override. Default is: ['video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420', 'videoconvert']")
default=[],
title="Set the Frigate format. Please keep `format=I420` if override. Default is: ['video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420', 'videoconvert']",
)
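Conceptually, each of these fields contributes a list of pipeline segments that are later joined with `!` separators into one gst-launch command (see `_get_gstreamer_cmd` further down). A rough illustration with hypothetical values, mirroring the defaults named in the titles:

    # hypothetical segments, one list entry per pipeline element
    pipeline = [
        'rtspsrc location="rtsp://cam/stream" latency=0',   # input_pipeline
        "rtph265depay", "h265parse", "omxh265dec",          # decoder_pipeline
        "video/x-raw,format=(string)NV12", "videoconvert", "videoscale",
        "video/x-raw,width=(int)1280,height=(int)720,format=(string)I420",
        "videoconvert",
    ]
    print("gst-launch-1.0 -q " + " ! ".join(pipeline) + " ! fdsink")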
class CameraRoleEnum(str, Enum):
@@ -398,7 +400,8 @@ class CameraFFmpegInput(CameraInput):
class CameraGStreamerInput(CameraInput):
pipeline: List[str] = Field(
default=[], title="GStreamer pipeline. Each pipeline will be splited by ! sign")
default=[], title="GStreamer pipeline. Each pipeline will be splited by ! sign"
)
class CameraInputValidator:
@@ -421,8 +424,7 @@ class CameraFfmpegConfig(FfmpegConfig, CameraInputValidator):
class CameraGStreamerConfig(GstreamerConfig, CameraInputValidator):
inputs: List[CameraGStreamerInput] = Field(
title="Camera GStreamer inputs.")
inputs: List[CameraGStreamerInput] = Field(title="Camera GStreamer inputs.")
class SnapshotsConfig(FrigateBaseModel):
@@ -436,8 +438,7 @@ class SnapshotsConfig(FrigateBaseModel):
bounding_box: bool = Field(
default=True, title="Add a bounding box overlay on the snapshot."
)
crop: bool = Field(
default=False, title="Crop the snapshot to the detected object.")
crop: bool = Field(default=False, title="Crop the snapshot to the detected object.")
required_zones: List[str] = Field(
default_factory=list,
title="List of required zones to be entered in order to save a snapshot.",
@@ -477,8 +478,7 @@ class TimestampStyleConfig(FrigateBaseModel):
default=TimestampPositionEnum.tl, title="Timestamp position."
)
format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.")
color: ColorConfig = Field(
default_factory=ColorConfig, title="Timestamp color.")
color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.")
thickness: int = Field(default=2, title="Timestamp thickness.")
effect: Optional[TimestampEffectEnum] = Field(title="Timestamp effect.")
@@ -486,10 +486,8 @@ class TimestampStyleConfig(FrigateBaseModel):
class CameraMqttConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Send image over MQTT.")
timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.")
bounding_box: bool = Field(
default=True, title="Add bounding box to MQTT image.")
crop: bool = Field(
default=True, title="Crop MQTT image to detected object.")
bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.")
crop: bool = Field(default=True, title="Crop MQTT image to detected object.")
height: int = Field(default=270, title="MQTT image height.")
required_zones: List[str] = Field(
default_factory=list,
@@ -509,16 +507,17 @@ class RtmpConfig(FrigateBaseModel):
class CameraLiveConfig(FrigateBaseModel):
height: int = Field(default=720, title="Live camera view height")
quality: int = Field(default=8, ge=1, le=31,
title="Live camera view quality")
quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality")
class CameraConfig(FrigateBaseModel):
name: Optional[str] = Field(title="Camera name.")
ffmpeg: Optional[CameraFfmpegConfig] = Field(
title="FFmpeg configuration for the camera.")
title="FFmpeg configuration for the camera."
)
gstreamer: Optional[CameraGStreamerConfig] = Field(
title="GStreamer configuration for the camera.")
title="GStreamer configuration for the camera."
)
best_image_timeout: int = Field(
default=60,
title="How long to wait for the image with the highest confidence score.",
@@ -544,8 +543,7 @@ class CameraConfig(FrigateBaseModel):
objects: ObjectConfig = Field(
default_factory=ObjectConfig, title="Object configuration."
)
motion: Optional[MotionConfig] = Field(
title="Motion detection configuration.")
motion: Optional[MotionConfig] = Field(title="Motion detection configuration.")
detect: DetectConfig = Field(
default_factory=DetectConfig, title="Object detection configuration."
)
@@ -559,16 +557,14 @@ class CameraConfig(FrigateBaseModel):
if "zones" in config:
colors = plt.cm.get_cmap("tab10", len(config["zones"]))
config["zones"] = {
name: {**z, "color": tuple(round(255 * c)
for c in colors(idx)[:3])}
name: {**z, "color": tuple(round(255 * c) for c in colors(idx)[:3])}
for idx, (name, z) in enumerate(config["zones"].items())
}
# add roles to the input if there is only one
if "ffmpeg" in config:
if len(config["ffmpeg"]["inputs"]) == 1:
config["ffmpeg"]["inputs"][0]["roles"] = [
"record", "rtmp", "detect"]
config["ffmpeg"]["inputs"][0]["roles"] = ["record", "rtmp", "detect"]
super().__init__(**config)
@@ -593,7 +589,8 @@ class CameraConfig(FrigateBaseModel):
continue
decoder_cmds.append(
{"roles": gstreamer_input.roles, "cmd": decoder_cmd})
{"roles": gstreamer_input.roles, "cmd": decoder_cmd}
)
return decoder_cmds
@@ -611,63 +608,75 @@ class CameraConfig(FrigateBaseModel):
def _get_gstreamer_cmd(self, gstreamer_input: CameraGStreamerInput):
assert list(
["detect"]) == gstreamer_input.roles, "only detect role is supported"
assert (
list(["detect"]) == gstreamer_input.roles
), "only detect role is supported"
manual_pipeline = [
part for part in self.gstreamer.manual_pipeline if part != ""]
input_pipeline = [
part for part in self.gstreamer.input_pipeline if part != ""]
part for part in self.gstreamer.manual_pipeline if part != ""
]
input_pipeline = [part for part in self.gstreamer.input_pipeline if part != ""]
decoder_pipeline = [
part for part in self.gstreamer.decoder_pipeline if part != ""]
part for part in self.gstreamer.decoder_pipeline if part != ""
]
source_format_pipeline = [
part for part in self.gstreamer.source_format_pipeline if part != ""]
part for part in self.gstreamer.source_format_pipeline if part != ""
]
destination_format_pipeline = [
part for part in self.gstreamer.destination_format_pipeline if part != ""]
part for part in self.gstreamer.destination_format_pipeline if part != ""
]
video_format = f"video/x-raw,width=(int){self.detect.width},height=(int){self.detect.height},format=(string)I420"
if not manual_pipeline and not input_pipeline and not decoder_pipeline and not source_format_pipeline and not destination_format_pipeline:
if (
not manual_pipeline
and not input_pipeline
and not decoder_pipeline
and not source_format_pipeline
and not destination_format_pipeline
):
logger.warn(
"gsreamer pipeline not configured. Using videotestsrc pattern=0")
"gsreamer pipeline not configured. Using videotestsrc pattern=0"
)
pipeline = [
"videotestsrc pattern=0",
video_format,
]
elif len(manual_pipeline) > 0:
logger.warn(
"gsreamer manual pipeline is set. Please make sure your detect width and height does math the gstreamer parameters")
"gsreamer manual pipeline is set. Please make sure your detect width and height does math the gstreamer parameters"
)
pipeline = manual_pipeline
else:
input_pipeline = input_pipeline if input_pipeline else [
f"rtspsrc location=\"{gstreamer_input.path}\" latency=0"
]
input_pipeline = (
input_pipeline
if input_pipeline
else [f'rtspsrc location="{gstreamer_input.path}" latency=0']
)
decoder_pipeline = decoder_pipeline if decoder_pipeline else [
"rtph265depay", "h265parse", "omxh265dec"
]
source_format_pipeline = source_format_pipeline if source_format_pipeline else [
'video/x-raw,format=(string)NV12', 'videoconvert', 'videoscale'
]
destination_format_pipeline = destination_format_pipeline if destination_format_pipeline else [
video_format, "videoconvert"
]
decoder_pipeline = (
decoder_pipeline
if decoder_pipeline
else ["rtph265depay", "h265parse", "omxh265dec"]
)
source_format_pipeline = (
source_format_pipeline
if source_format_pipeline
else ["video/x-raw,format=(string)NV12", "videoconvert", "videoscale"]
)
destination_format_pipeline = (
destination_format_pipeline
if destination_format_pipeline
else [video_format, "videoconvert"]
)
pipeline = [
*input_pipeline,
*decoder_pipeline,
*source_format_pipeline,
*destination_format_pipeline
*destination_format_pipeline,
]
pipeline_args = (
[f"{item} !".split(" ") for item in pipeline]
)
pipeline_args = [f"{item} !".split(" ") for item in pipeline]
pipeline_args = [item for sublist in pipeline_args for item in sublist]
pipeline_args = [
"gst-launch-1.0",
"-q",
*pipeline_args,
"fdsink"
]
logger.debug(
f"using gstreamer pipeline: {' '.join(pipeline_args)}")
pipeline_args = ["gst-launch-1.0", "-q", *pipeline_args, "fdsink"]
logger.debug(f"using gstreamer pipeline: {' '.join(pipeline_args)}")
return pipeline_args
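For reference, running the unconfigured branch above with the default 1280x720 detect stream would produce an argument list along these lines (reconstructed from the list-building logic, not taken from the diff):

    # each pipeline item gains a trailing "!" and is split on spaces
    pipeline_args = [
        "gst-launch-1.0", "-q",
        "videotestsrc", "pattern=0", "!",
        "video/x-raw,width=(int)1280,height=(int)720,format=(string)I420", "!",
        "fdsink",
    ]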
@@ -697,8 +706,7 @@ class CameraConfig(FrigateBaseModel):
else self.ffmpeg.output_args.rtmp.split(" ")
)
ffmpeg_output_args = (
rtmp_args +
[f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
rtmp_args + [f"rtmp://127.0.0.1/live/{self.name}"] + ffmpeg_output_args
)
if "record" in ffmpeg_input.roles and self.record.enabled:
record_args = (
@@ -722,16 +730,13 @@ class CameraConfig(FrigateBaseModel):
input_args = ffmpeg_input.input_args or self.ffmpeg.input_args
global_args = (
global_args if isinstance(
global_args, list) else global_args.split(" ")
global_args if isinstance(global_args, list) else global_args.split(" ")
)
hwaccel_args = (
hwaccel_args if isinstance(
hwaccel_args, list) else hwaccel_args.split(" ")
hwaccel_args if isinstance(hwaccel_args, list) else hwaccel_args.split(" ")
)
input_args = (
input_args if isinstance(
input_args, list) else input_args.split(" ")
input_args if isinstance(input_args, list) else input_args.split(" ")
)
cmd = (
@@ -748,7 +753,7 @@ class CameraConfig(FrigateBaseModel):
@root_validator
def either_ffmpeg_or_gstreamer(cls, v):
if ("ffmpeg" not in v) and ("gstreamer" not in v):
raise ValueError('either ffmpeg or gstreamer should be set')
raise ValueError("either ffmpeg or gstreamer should be set")
return v
@@ -760,12 +765,9 @@ class DatabaseConfig(FrigateBaseModel):
class ModelConfig(FrigateBaseModel):
path: Optional[str] = Field(title="Custom Object detection model path.")
labelmap_path: Optional[str] = Field(
title="Label map for custom object detector.")
width: int = Field(
default=320, title="Object detection model input width.")
height: int = Field(
default=320, title="Object detection model input height.")
labelmap_path: Optional[str] = Field(title="Label map for custom object detector.")
width: int = Field(default=320, title="Object detection model input width.")
height: int = Field(default=320, title="Object detection model input height.")
labelmap: Dict[int, str] = Field(
default_factory=dict, title="Labelmap customization."
)
@@ -792,8 +794,7 @@ class ModelConfig(FrigateBaseModel):
self._colormap = {}
for key, val in self._merged_labelmap.items():
self._colormap[val] = tuple(int(round(255 * c))
for c in cmap(key)[:3])
self._colormap[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
class LogLevelEnum(str, Enum):
@@ -825,8 +826,7 @@ class FrigateConfig(FrigateBaseModel):
default_factory=ModelConfig, title="Detection model configuration."
)
detectors: Dict[str, DetectorConfig] = Field(
default={name: DetectorConfig(**d)
for name, d in DEFAULT_DETECTORS.items()},
default={name: DetectorConfig(**d) for name, d in DEFAULT_DETECTORS.items()},
title="Detector hardware configuration.",
)
logger: LoggerConfig = Field(
@@ -872,8 +872,7 @@ class FrigateConfig(FrigateBaseModel):
# MQTT password substitution
if config.mqtt.password:
config.mqtt.password = config.mqtt.password.format(
**FRIGATE_ENV_VARS)
config.mqtt.password = config.mqtt.password.format(**FRIGATE_ENV_VARS)
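This substitution keeps secrets out of the config file: any environment variable prefixed FRIGATE_ is collected into FRIGATE_ENV_VARS (defined near the top of this file) and can be referenced with a str.format placeholder. A small illustration with a hypothetical variable:

    import os

    os.environ["FRIGATE_MQTT_PASSWORD"] = "s3cret"  # hypothetical secret
    FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}

    password = "{FRIGATE_MQTT_PASSWORD}"  # as it would appear in the YAML config
    print(password.format(**FRIGATE_ENV_VARS))  # -> s3cret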
# Global config to propagate down to camera level
global_config = config.dict(
@@ -892,8 +891,7 @@ class FrigateConfig(FrigateBaseModel):
)
for name, camera in config.cameras.items():
merged_config = deep_merge(camera.dict(
exclude_unset=True), global_config)
merged_config = deep_merge(camera.dict(exclude_unset=True), global_config)
camera_config: CameraConfig = CameraConfig.parse_obj(
{"name": name, **merged_config}
)
@@ -957,7 +955,11 @@ class FrigateConfig(FrigateBaseModel):
)
# check runtime config
decoder_config = camera_config.ffmpeg if "ffmpeg" in camera_config else camera_config.gstreamer
decoder_config = (
camera_config.ffmpeg
if "ffmpeg" in camera_config
else camera_config.gstreamer
)
assigned_roles = list(
set([r for i in decoder_config.inputs for r in i.roles])
)

View File

@@ -7,10 +7,8 @@ import signal
import threading
from abc import ABC, abstractmethod
from typing import Dict
import shared_memory
import numpy as np
# TensorRT https://github.com/NobuoTsukamoto/tensorrt-examples/blob/main/python/detection/README.md
import tflite_runtime.interpreter as tflite
from setproctitle import setproctitle
from tflite_runtime.interpreter import load_delegate
@@ -141,7 +139,7 @@ def run_detector(
outputs = {}
for name in out_events.keys():
out_shm = shared_memory.SharedMemory(name=f"out-{name}", create=False)
out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False)
out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
outputs[name] = {"shm": out_shm, "np": out_np}
@@ -230,11 +228,11 @@ class RemoteObjectDetector:
self.fps = EventsPerSecond()
self.detection_queue = detection_queue
self.event = event
self.shm = shared_memory.SharedMemory(name=self.name, create=False)
self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
self.np_shm = np.ndarray(
(1, model_shape[0], model_shape[1], 3), dtype=np.uint8, buffer=self.shm.buf
)
self.out_shm = shared_memory.SharedMemory(
self.out_shm = mp.shared_memory.SharedMemory(
name=f"out-{self.name}", create=False
)
self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)

View File

@@ -610,8 +610,7 @@ def recording_clip(camera, start_ts, end_ts):
ffmpeg_cmd,
input="\n".join(playlist_lines),
encoding="ascii",
stdout=sp.PIPE,
stderr=sp.PIPE,
capture_output=True,
)
if p.returncode != 0:
logger.error(p.stderr)
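capture_output=True, available on subprocess.run since Python 3.7, is shorthand for passing stdout=sp.PIPE and stderr=sp.PIPE, so this change is behavior-preserving. For example:

    import subprocess as sp

    old = sp.run(["echo", "hi"], stdout=sp.PIPE, stderr=sp.PIPE)
    new = sp.run(["echo", "hi"], capture_output=True)
    assert old.stdout == new.stdout == b"hi\n"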

View File

@@ -7,6 +7,7 @@ import queue
import signal
import subprocess as sp
import threading
from multiprocessing import shared_memory
from wsgiref.simple_server import make_server
from frigate.log import LogPipe
@@ -32,27 +33,20 @@ class FFMpegConverter:
ffmpeg_cmd = f"ffmpeg -f rawvideo -pix_fmt yuv420p -video_size {in_width}x{in_height} -i pipe: -f mpegts -s {out_width}x{out_height} -codec:v mpeg1video -q {quality} -bf 0 pipe:".split(
" "
)
# ffmpeg_cmd = f"gst-launch-1.0 rtspsrc location=\"rtsp://admin:123456@192.168.5.95:554/stream0\" ! rtph265depay ! h265parse ! omxh265dec ! 'video/x-raw,format=(string)NV12' ! videoconvert ! 'video/x-raw, width={in_width}, height={in_height}, format=I420, framerate=(fraction)10/1' ! omxh264enc bitrate=500000 temporal-tradeoff=2 iframeinterval=10 ! h264parse ! mpegtsmux ! fdsink"
# # .split(
# # " "
# # )
self.logpipe = LogPipe(
"ffmpeg.converter", logging.ERROR)
"ffmpeg.converter", logging.ERROR)
self.process = sp.Popen(
ffmpeg_cmd,
stdout=sp.PIPE,
stderr=self.logpipe,
stdin=sp.PIPE,
start_new_session=True,
# shell=True
)
def write(self, b):
try:
self.process.stdin.write(b)
except Exception:
logger.error("Failure while writing to the stream:")
self.logpipe.dump()
return False
@@ -60,7 +54,6 @@ class FFMpegConverter:
try:
return self.process.stdout.read1(length)
except ValueError:
logger.error("Failure while readig from the stream:")
self.logpipe.dump()
return False
@@ -423,13 +416,8 @@ def output_frames(config: FrigateConfig, video_output_queue):
if any(
ws.environ["PATH_INFO"].endswith(camera) for ws in websocket_server.manager
):
try:
# write to the converter for the camera if clients are listening to the specific camera
converters[camera].write(frame.tobytes())
except Exception:
# in case of videoconverter failure continue processing video_output_queue
# FFMpegConverter should dump an error response
pass
# write to the converter for the camera if clients are listening to the specific camera
converters[camera].write(frame.tobytes())
# update birdseye if websockets are connected
if config.birdseye.enabled and any(

View File

@@ -502,9 +502,8 @@ class RecordingCleanup(threading.Thread):
logger.debug(f"Oldest recording in the db: {oldest_timestamp}")
process = sp.run(
["find", RECORD_DIR, "-type", "f", "!", "-newermt", f"@{oldest_timestamp}"],
stdout=sp.PIPE,
stderr=sp.PIPE,
universal_newlines=True,
capture_output=True,
text=True,
)
files_to_check = process.stdout.splitlines()

View File

@@ -11,8 +11,7 @@ import threading
import time
import traceback
from abc import ABC, abstractmethod
#from multiprocessing import shared_memory
import shared_memory
from multiprocessing import shared_memory
from typing import AnyStr
import cv2

View File

@@ -187,8 +187,7 @@ class CameraWatchdog(threading.Thread):
self.config = config
self.capture_thread = None
self.ffmpeg_detect_process = None
self.logpipe = LogPipe(
f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
self.ffmpeg_other_processes = []
self.camera_fps = camera_fps
self.ffmpeg_pid = ffmpeg_pid
@@ -226,8 +225,7 @@ class CameraWatchdog(threading.Thread):
self.logger.error(
"The following ffmpeg logs include the last 100 lines prior to exit."
)
self.logger.error(
"You may have invalid args defined for this camera.")
self.logger.error("You may have invalid args defined for this camera.")
self.logpipe.dump()
self.start_ffmpeg_detect()
elif now - self.capture_thread.current_frame.value > 20:
@@ -236,8 +234,7 @@ class CameraWatchdog(threading.Thread):
)
self.ffmpeg_detect_process.terminate()
try:
self.logger.info(
"Waiting for ffmpeg to exit gracefully...")
self.logger.info("Waiting for ffmpeg to exit gracefully...")
self.ffmpeg_detect_process.communicate(timeout=30)
except sp.TimeoutExpired:
self.logger.info("FFmpeg didnt exit. Force killing...")
@@ -485,13 +482,11 @@ def process_frames(
current_frame_time.value = frame_time
frame = frame_manager.get(
f"{camera_name}{frame_time}", (
frame_shape[0] * 3 // 2, frame_shape[1])
f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
)
if frame is None:
logger.info(
f"{camera_name}: frame {frame_time} is not in memory store.")
logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
continue
if not detection_enabled.value:

View File

@@ -27,7 +27,7 @@ class FrigateWatchdog(threading.Thread):
# check the detection processes
for detector in self.detectors.values():
detection_start = detector.detection_start.value
if detection_start > 0.0 and now - detection_start > 10:
if detection_start > 0.0 and now - detection_start > 30:
logger.info(
"Detection appears to be stuck. Restarting detection process..."
)

View File

@@ -44,7 +44,7 @@ def get_frame_shape(source):
"json",
source,
]
p = sp.run(ffprobe_cmd, stdout=sp.PIPE, stderr=sp.PIPE)
p = sp.run(ffprobe_cmd, capture_output=True)
info = json.loads(p.stdout)
video_info = [s for s in info["streams"] if s["codec_type"] == "video"][0]
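For context, ffprobe's JSON mode prints a document shaped roughly like the sketch below (trimmed and illustrative, assuming the ffprobe_cmd above requests stream info), from which this code picks the first video stream:

    # illustrative ffprobe JSON, as parsed into `info` above
    info = {
        "streams": [
            {"codec_type": "video", "width": 1280, "height": 720},
            {"codec_type": "audio", "channels": 2},
        ]
    }
    video_info = [s for s in info["streams"] if s["codec_type"] == "video"][0]
    print(video_info["width"], video_info["height"])  # -> 1280 720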