Merge branch 'dev' of github.com:blakeblackshear/frigate into audio-events
commit 46e1355586
@@ -129,7 +129,9 @@ RUN apt-get -qq update \
     libtbb2 libtbb-dev libdc1394-22-dev libopenexr-dev \
     libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
     # scipy dependencies
-    gcc gfortran libopenblas-dev liblapack-dev && \
+    gcc gfortran libopenblas-dev liblapack-dev \
+    # faster-fifo dependencies
+    g++ cython3 && \
     rm -rf /var/lib/apt/lists/*

RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
@@ -1,14 +1,13 @@
 import datetime
 import multiprocessing as mp
 import os
-from statistics import mean
 
 import cv2
 import numpy as np
 
 from frigate.config import MotionConfig
-from frigate.motion.frigate_motion import FrigateMotionDetector
 from frigate.motion.improved_motion import ImprovedMotionDetector
+from frigate.util import create_mask
 
 # get info on the video
 # cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4")
@@ -20,84 +19,85 @@ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 fps = cap.get(cv2.CAP_PROP_FPS)
 frame_shape = (height, width, 3)
 
+mask = create_mask(
+    (height, width),
+    [],
+)
+
 # create the motion config
-motion_config = MotionConfig()
-motion_config.mask = np.zeros((height, width), np.uint8)
-motion_config.mask[:] = 255
-motion_config.improve_contrast = 1
-motion_config.frame_alpha = 0.02
-motion_config.threshold = 40
-motion_config.contour_area = 15
+motion_config_1 = MotionConfig()
+motion_config_1.mask = np.zeros((height, width), np.uint8)
+motion_config_1.mask[:] = mask
+# motion_config_1.improve_contrast = 1
+# motion_config_1.frame_height = 150
+# motion_config_1.frame_alpha = 0.02
+# motion_config_1.threshold = 30
+# motion_config_1.contour_area = 10
 
+motion_config_2 = MotionConfig()
+motion_config_2.mask = np.zeros((height, width), np.uint8)
+motion_config_2.mask[:] = mask
+# motion_config_2.improve_contrast = 1
+# motion_config_2.frame_height = 150
+# motion_config_2.frame_alpha = 0.01
+# motion_config_2.threshold = 20
+# motion_config.contour_area = 10
 save_images = True
 
-# create motion detectors
-frigate_motion_detector = FrigateMotionDetector(
+improved_motion_detector_1 = ImprovedMotionDetector(
     frame_shape=frame_shape,
-    config=motion_config,
+    config=motion_config_1,
     fps=fps,
-    improve_contrast=mp.Value("i", motion_config.improve_contrast),
-    threshold=mp.Value("i", motion_config.threshold),
-    contour_area=mp.Value("i", motion_config.contour_area),
+    improve_contrast=mp.Value("i", motion_config_1.improve_contrast),
+    threshold=mp.Value("i", motion_config_1.threshold),
+    contour_area=mp.Value("i", motion_config_1.contour_area),
+    name="default",
+    clipLimit=2.0,
+    tileGridSize=(8, 8),
 )
-frigate_motion_detector.save_images = save_images
+improved_motion_detector_1.save_images = save_images
 
-improved_motion_detector = ImprovedMotionDetector(
+improved_motion_detector_2 = ImprovedMotionDetector(
     frame_shape=frame_shape,
-    config=motion_config,
+    config=motion_config_2,
     fps=fps,
-    improve_contrast=mp.Value("i", motion_config.improve_contrast),
-    threshold=mp.Value("i", motion_config.threshold),
-    contour_area=mp.Value("i", motion_config.contour_area),
+    improve_contrast=mp.Value("i", motion_config_2.improve_contrast),
+    threshold=mp.Value("i", motion_config_2.threshold),
+    contour_area=mp.Value("i", motion_config_2.contour_area),
+    name="compare",
 )
-improved_motion_detector.save_images = save_images
 
+improved_motion_detector_2.save_images = save_images
 
 # read and process frames
-frame_times = {"frigate": [], "improved": []}
 ret, frame = cap.read()
 frame_counter = 1
 while ret:
     yuv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV_I420)
 
     start_frame = datetime.datetime.now().timestamp()
-    frigate_motion_detector.detect(yuv_frame)
-    frame_times["frigate"].append(datetime.datetime.now().timestamp() - start_frame)
+    improved_motion_detector_1.detect(yuv_frame)
 
     start_frame = datetime.datetime.now().timestamp()
-    improved_motion_detector.detect(yuv_frame)
-    frame_times["improved"].append(datetime.datetime.now().timestamp() - start_frame)
+    improved_motion_detector_2.detect(yuv_frame)
 
-    frigate_frame = f"debug/frames/frigate-{frame_counter}.jpg"
-    improved_frame = f"debug/frames/improved-{frame_counter}.jpg"
-    if os.path.exists(frigate_frame) and os.path.exists(improved_frame):
-        image_row_1 = cv2.hconcat(
-            [
-                cv2.imread(frigate_frame),
-                cv2.imread(improved_frame),
-            ]
-        )
+    default_frame = f"debug/frames/default-{frame_counter}.jpg"
+    compare_frame = f"debug/frames/compare-{frame_counter}.jpg"
+    if os.path.exists(default_frame) and os.path.exists(compare_frame):
+        images = [
+            cv2.imread(default_frame),
+            cv2.imread(compare_frame),
+        ]
 
-        image_row_2 = cv2.resize(
-            frame,
-            dsize=(
-                frigate_motion_detector.motion_frame_size[1] * 2,
-                frigate_motion_detector.motion_frame_size[0] * 2,
-            ),
-            interpolation=cv2.INTER_LINEAR,
-        )
-
         cv2.imwrite(
             f"debug/frames/all-{frame_counter}.jpg",
-            cv2.vconcat([image_row_1, image_row_2]),
+            cv2.vconcat(images)
+            if frame_shape[0] > frame_shape[1]
+            else cv2.hconcat(images),
         )
-        os.unlink(frigate_frame)
-        os.unlink(improved_frame)
+        os.unlink(default_frame)
+        os.unlink(compare_frame)
     frame_counter += 1
 
     ret, frame = cap.read()
 
 cap.release()
-
-print("Frigate Motion Detector")
-print(f"Average frame processing time: {mean(frame_times['frigate'])*1000:.2f}ms")
-print("Improved Motion Detector")
-print(f"Average frame processing time: {mean(frame_times['improved'])*1000:.2f}ms")
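
Note on the benchmark change above: the script now builds its motion mask with frigate.util.create_mask instead of filling the array with 255 by hand. A rough sketch of what that call produces, assuming create_mask keeps its (frame_shape, mask) signature from frigate.util; the polygon string is borrowed from the docs example further down, and the zero-fill of masked regions is a reading of how motion_config.mask[:] = mask is used here, not something this diff states:

from frigate.util import create_mask

# an empty polygon list masks nothing: the whole frame stays 255 (motion allowed everywhere)
mask = create_mask((1080, 1920), [])

# a coordinate string like the docs example below zeroes out that polygon,
# so motion inside it is ignored (coordinates are x,y pairs)
portrait_mask = create_mask((1920, 1080), "0,900,1080,900,1080,1920,0,1920")
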
@@ -230,7 +230,7 @@ detect:
   # especially when using separate streams for detect and record.
   # Use this setting to make the timeline bounding boxes more closely align
   # with the recording. The value can be positive or negative.
   # TIP: Imagine there is an event clip with a person walking from left to right.
   # If the event timeline bounding box is consistently to the left of the person
   # then the value should be decreased. Similarly, if a person is walking from
   # left to right and the bounding box is consistently ahead of the person
@@ -275,7 +275,7 @@ motion:
   # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
   # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
   # The value should be between 1 and 255.
-  threshold: 40
+  threshold: 20
   # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
   # needs to recalibrate. (default: shown below)
   # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
@@ -286,19 +286,19 @@ motion:
   # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
   # make motion detection more sensitive to smaller moving objects.
   # As a rule of thumb:
-  # - 15 - high sensitivity
+  # - 10 - high sensitivity
   # - 30 - medium sensitivity
   # - 50 - low sensitivity
-  contour_area: 15
+  contour_area: 10
   # Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below)
   # Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster.
   # Low values will cause things like moving shadows to be detected as motion for longer.
   # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/
-  frame_alpha: 0.02
+  frame_alpha: 0.01
   # Optional: Height of the resized motion frame (default: 50)
   # Higher values will result in more granular motion detection at the expense of higher CPU usage.
   # Lower values result in less CPU, but small changes may not register as motion.
-  frame_height: 50
+  frame_height: 100
   # Optional: motion mask
   # NOTE: see docs for more detailed info on creating masks
   mask: 0,900,1080,900,1080,1920,0,1920
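
A quick sense of what the new frame_height default costs: doubling it roughly quadruples the pixels the motion detector scans. A back-of-the-envelope sketch for a 1920x1080 detect stream, mirroring the detector's resize_factor = frame_shape[0] / config.frame_height (exact rounding of the width may differ):

height, width = 1080, 1920

for frame_height in (50, 100):
    resize_factor = height / frame_height
    motion_frame = (frame_height, int(width / resize_factor))
    print(frame_height, motion_frame)
# 50  -> (50, 88)    old default
# 100 -> (100, 177)  new default, ~4x the area to scan
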
@@ -6,12 +6,12 @@ import shutil
 import signal
 import sys
 import traceback
-from multiprocessing.queues import Queue
 from multiprocessing.synchronize import Event as MpEvent
 from types import FrameType
 from typing import Optional
 
 import psutil
+from faster_fifo import Queue
 from peewee_migrate import Router
 from playhouse.sqlite_ext import SqliteExtDatabase
 from playhouse.sqliteq import SqliteQueueDatabase
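
The from multiprocessing.queues import Queue imports in this and the following hunks are all swapped for faster_fifo.Queue. A small sketch of why the swap is close to drop-in: faster-fifo keeps the stdlib queue semantics (put/get with timeouts, queue.Empty/queue.Full) while using a shared-memory circular buffer. The max_size_bytes value below is only illustrative; this diff does not show how the queues are constructed:

import queue

from faster_fifo import Queue

q = Queue(max_size_bytes=1000 * 1000)
q.put({"camera": "back", "frame_time": 1234.5})

try:
    item = q.get(timeout=1.0)  # raises queue.Empty on timeout, like the stdlib Queue
except queue.Empty:
    item = None
print(item)
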
@@ -188,7 +188,7 @@ class RecordConfig(FrigateBaseModel):
 
 class MotionConfig(FrigateBaseModel):
     threshold: int = Field(
-        default=40,
+        default=20,
         title="Motion detection threshold (1-255).",
         ge=1,
         le=255,
@@ -197,10 +197,10 @@ class MotionConfig(FrigateBaseModel):
         default=0.8, title="Lightning detection threshold (0.3-1.0).", ge=0.3, le=1.0
     )
     improve_contrast: bool = Field(default=True, title="Improve Contrast")
-    contour_area: Optional[int] = Field(default=15, title="Contour Area")
+    contour_area: Optional[int] = Field(default=10, title="Contour Area")
     delta_alpha: float = Field(default=0.2, title="Delta Alpha")
-    frame_alpha: float = Field(default=0.02, title="Frame Alpha")
-    frame_height: Optional[int] = Field(default=50, title="Frame Height")
+    frame_alpha: float = Field(default=0.01, title="Frame Alpha")
+    frame_height: Optional[int] = Field(default=100, title="Frame Height")
     mask: Union[str, List[str]] = Field(
         default="", title="Coordinates polygon for the motion mask."
     )
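
A quick check of the retuned defaults after this change (a sketch; the values are the ones set in the Field definitions above):

from frigate.config import MotionConfig

mc = MotionConfig()
assert mc.threshold == 20       # was 40
assert mc.contour_area == 10    # was 15
assert mc.frame_alpha == 0.01   # was 0.02
assert mc.frame_height == 100   # was 50
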
@@ -13,6 +13,16 @@ PLUS_ENV_VAR = "PLUS_API_KEY"
 PLUS_API_HOST = "https://api.frigate.video"
 BTBN_PATH = "/usr/lib/btbn-ffmpeg"
 
+# Attributes
+
+ATTRIBUTE_LABEL_MAP = {
+    "person": ["face", "amazon"],
+    "car": ["ups", "fedex", "amazon", "license_plate"],
+}
+ALL_ATTRIBUTE_LABELS = [
+    item for sublist in ATTRIBUTE_LABEL_MAP.values() for item in sublist
+]
+
 # Audio Consts
 
 AUDIO_DURATION = 0.975
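
For reference, the flattening above evaluates to the list below; duplicates such as "amazon" are kept, which is harmless because the list is only used for membership checks later in the video pipeline:

ATTRIBUTE_LABEL_MAP = {
    "person": ["face", "amazon"],
    "car": ["ups", "fedex", "amazon", "license_plate"],
}
ALL_ATTRIBUTE_LABELS = [
    item for sublist in ATTRIBUTE_LABEL_MAP.values() for item in sublist
]
print(ALL_ATTRIBUTE_LABELS)
# ['face', 'amazon', 'ups', 'fedex', 'amazon', 'license_plate']
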
@@ -6,10 +6,10 @@ import logging
 import os
 import random
 import string
-from multiprocessing.queues import Queue
 from typing import Optional
 
 import cv2
+from faster_fifo import Queue
 
 from frigate.config import CameraConfig, FrigateConfig
 from frigate.const import CLIPS_DIR
@@ -3,10 +3,11 @@ import logging
 import queue
 import threading
 from enum import Enum
-from multiprocessing.queues import Queue
 from multiprocessing.synchronize import Event as MpEvent
 from typing import Dict
 
+from faster_fifo import Queue
+
 from frigate.config import EventsConfig, FrigateConfig
 from frigate.models import Event
 from frigate.types import CameraMetricsTypes
@@ -410,6 +410,24 @@ def set_sub_label(id):
     )
 
 
+@bp.route("/labels")
+def get_labels():
+    camera = request.args.get("camera", type=str, default="")
+
+    try:
+        if camera:
+            events = Event.select(Event.label).where(Event.camera == camera).distinct()
+        else:
+            events = Event.select(Event.label).distinct()
+    except Exception as e:
+        return jsonify(
+            {"success": False, "message": f"Failed to get labels: {e}"}, "404"
+        )
+
+    labels = sorted([e.label for e in events])
+    return jsonify(labels)
+
+
 @bp.route("/sub_labels")
 def get_sub_labels():
     split_joined = request.args.get("split_joined", type=int)
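
Rough usage of the new endpoint (the host, port, and /api prefix are assumptions based on how the rest of the HTTP API is normally exposed; the returned labels are only illustrative):

import requests

# all distinct labels across stored events
all_labels = requests.get("http://frigate.local:5000/api/labels").json()

# distinct labels for one camera, matching the optional ?camera= argument above
back_labels = requests.get(
    "http://frigate.local:5000/api/labels", params={"camera": "back"}
).json()

print(all_labels, back_labels)  # e.g. ['car', 'dog', 'person'] ['person']
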
@@ -7,10 +7,10 @@ import signal
 import threading
 from collections import deque
 from logging import handlers
-from multiprocessing.queues import Queue
 from types import FrameType
 from typing import Deque, Optional
 
+from faster_fifo import Queue
 from setproctitle import setproctitle
 
 from frigate.util import clean_camera_user_pass
@@ -15,7 +15,11 @@ class ImprovedMotionDetector(MotionDetector):
         improve_contrast,
         threshold,
         contour_area,
+        clipLimit=2.0,
+        tileGridSize=(2, 2),
+        name="improved",
     ):
+        self.name = name
         self.config = config
         self.frame_shape = frame_shape
         self.resize_factor = frame_shape[0] / config.frame_height
@@ -38,6 +42,7 @@ class ImprovedMotionDetector(MotionDetector):
         self.improve_contrast = improve_contrast
         self.threshold = threshold
         self.contour_area = contour_area
+        self.clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
 
     def detect(self, frame):
         motion_boxes = []
@@ -51,11 +56,20 @@ class ImprovedMotionDetector(MotionDetector):
             interpolation=cv2.INTER_LINEAR,
         )
 
+        if self.save_images:
+            resized_saved = resized_frame.copy()
+
         resized_frame = cv2.GaussianBlur(resized_frame, (3, 3), cv2.BORDER_DEFAULT)
 
+        if self.save_images:
+            blurred_saved = resized_frame.copy()
+
         # Improve contrast
         if self.improve_contrast.value:
-            resized_frame = cv2.equalizeHist(resized_frame)
+            resized_frame = self.clahe.apply(resized_frame)
+
+        if self.save_images:
+            contrasted_saved = resized_frame.copy()
 
         # mask frame
         resized_frame[self.mask] = [255]
@@ -118,8 +132,19 @@ class ImprovedMotionDetector(MotionDetector):
                     (0, 0, 255),
                     2,
                 )
+            frames = [
+                cv2.cvtColor(resized_saved, cv2.COLOR_GRAY2BGR),
+                cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
+                cv2.cvtColor(contrasted_saved, cv2.COLOR_GRAY2BGR),
+                cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
+                cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR),
+                thresh_dilated,
+            ]
             cv2.imwrite(
-                f"debug/frames/improved-{self.frame_counter}.jpg", thresh_dilated
+                f"debug/frames/{self.name}-{self.frame_counter}.jpg",
+                cv2.hconcat(frames)
+                if self.frame_shape[0] > self.frame_shape[1]
+                else cv2.vconcat(frames),
             )
 
         if len(motion_boxes) > 0:
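
The contrast step switches from global histogram equalization to CLAHE, which equalizes each tile separately under a clip limit, so a single bright region (headlights, an IR hotspot) no longer flattens contrast across the whole resized motion frame. A minimal sketch of the two calls side by side; the random array is just a stand-in for the grayscale motion frame:

import cv2
import numpy as np

gray = np.random.randint(0, 255, (100, 178), dtype=np.uint8)

clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(2, 2))
adaptive = clahe.apply(gray)        # new behavior: local, tile-based equalization
global_eq = cv2.equalizeHist(gray)  # old behavior: one histogram for the whole frame
print(adaptive.shape, global_eq.shape)
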
@@ -730,7 +730,7 @@ class TestConfig(unittest.TestCase):
         assert config == frigate_config.dict(exclude_unset=True)
 
         runtime_config = frigate_config.runtime_config()
-        assert runtime_config.cameras["back"].motion.frame_height == 50
+        assert runtime_config.cameras["back"].motion.frame_height == 100
 
     def test_motion_contour_area_dynamic(self):
         config = {
@@ -758,7 +758,7 @@ class TestConfig(unittest.TestCase):
         assert config == frigate_config.dict(exclude_unset=True)
 
         runtime_config = frigate_config.runtime_config()
-        assert round(runtime_config.cameras["back"].motion.contour_area) == 15
+        assert round(runtime_config.cameras["back"].motion.contour_area) == 10
 
     def test_merge_labelmap(self):
         config = {
@@ -3,9 +3,10 @@
 import logging
 import queue
 import threading
-from multiprocessing.queues import Queue
 from multiprocessing.synchronize import Event as MpEvent
 
+from faster_fifo import Queue
+
 from frigate.config import FrigateConfig
 from frigate.events.maintainer import EventTypeEnum
 from frigate.models import Timeline
@@ -1,8 +1,9 @@
 from multiprocessing.context import Process
-from multiprocessing.queues import Queue
 from multiprocessing.sharedctypes import Synchronized
 from typing import Optional, TypedDict
 
+from faster_fifo import Queue
+
 from frigate.object_detection import ObjectDetectProcess
 
 
@@ -650,34 +650,42 @@ def restart_frigate():
 
 
 class EventsPerSecond:
-    def __init__(self, max_events=1000):
+    def __init__(self, max_events=1000, last_n_seconds=10):
         self._start = None
         self._max_events = max_events
+        self._last_n_seconds = last_n_seconds
         self._timestamps = []
 
     def start(self):
         self._start = datetime.datetime.now().timestamp()
 
     def update(self):
+        now = datetime.datetime.now().timestamp()
         if self._start is None:
-            self.start()
-        self._timestamps.append(datetime.datetime.now().timestamp())
+            self._start = now
+        self._timestamps.append(now)
         # truncate the list when it goes 100 over the max_size
         if len(self._timestamps) > self._max_events + 100:
             self._timestamps = self._timestamps[(1 - self._max_events) :]
+        self.expire_timestamps(now)
 
-    def eps(self, last_n_seconds=10):
-        if self._start is None:
-            self.start()
-        # compute the (approximate) events in the last n seconds
+    def eps(self):
         now = datetime.datetime.now().timestamp()
-        seconds = min(now - self._start, last_n_seconds)
+        if self._start is None:
+            self._start = now
+        # compute the (approximate) events in the last n seconds
+        self.expire_timestamps(now)
+        seconds = min(now - self._start, self._last_n_seconds)
         # avoid divide by zero
         if seconds == 0:
             seconds = 1
-        return (
-            len([t for t in self._timestamps if t > (now - last_n_seconds)]) / seconds
-        )
+        return len(self._timestamps) / seconds
+
+    # remove aged out timestamps
+    def expire_timestamps(self, now):
+        threshold = now - self._last_n_seconds
+        while self._timestamps and self._timestamps[0] < threshold:
+            del self._timestamps[0]
 
 
 def print_stack(sig, frame):
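
With expire_timestamps, the counter drops aged-out samples as it goes, so eps() becomes a plain division instead of filtering the whole list on every call. A short usage sketch (timings illustrative):

eps = EventsPerSecond(last_n_seconds=10)
eps.start()

for _ in range(50):
    eps.update()   # records a timestamp and expires anything older than last_n_seconds

print(eps.eps())   # len(self._timestamps) / elapsed seconds, capped at last_n_seconds
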
@@ -15,7 +15,7 @@ import numpy as np
 from setproctitle import setproctitle
 
 from frigate.config import CameraConfig, DetectConfig
-from frigate.const import CACHE_DIR
+from frigate.const import ALL_ATTRIBUTE_LABELS, ATTRIBUTE_LABEL_MAP, CACHE_DIR
 from frigate.detectors.detector_config import PixelFormatEnum
 from frigate.log import LogPipe
 from frigate.motion import MotionDetector
@@ -172,7 +172,7 @@ def capture_frames(
     skipped_eps.start()
     while True:
         fps.value = frame_rate.eps()
-        skipped_eps.eps()
+        skipped_fps.value = skipped_eps.eps()
 
         current_frame.value = datetime.datetime.now().timestamp()
         frame_name = f"{camera_name}{current_frame.value}"
@@ -215,6 +215,7 @@ class CameraWatchdog(threading.Thread):
         config: CameraConfig,
         frame_queue,
         camera_fps,
+        skipped_fps,
         ffmpeg_pid,
         stop_event,
     ):
@@ -227,6 +228,7 @@ class CameraWatchdog(threading.Thread):
         self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect")
         self.ffmpeg_other_processes: list[dict[str, any]] = []
         self.camera_fps = camera_fps
+        self.skipped_fps = skipped_fps
         self.ffmpeg_pid = ffmpeg_pid
         self.frame_queue = frame_queue
         self.frame_shape = self.config.frame_shape_yuv
@@ -346,6 +348,7 @@ class CameraWatchdog(threading.Thread):
             self.frame_shape,
             self.frame_queue,
             self.camera_fps,
+            self.skipped_fps,
             self.stop_event,
         )
         self.capture_thread.start()
@@ -376,7 +379,14 @@ class CameraWatchdog(threading.Thread):
 
 class CameraCapture(threading.Thread):
     def __init__(
-        self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps, stop_event
+        self,
+        camera_name,
+        ffmpeg_process,
+        frame_shape,
+        frame_queue,
+        fps,
+        skipped_fps,
+        stop_event,
     ):
         threading.Thread.__init__(self)
         self.name = f"capture:{camera_name}"
@@ -385,14 +395,13 @@ class CameraCapture(threading.Thread):
         self.frame_queue = frame_queue
         self.fps = fps
         self.stop_event = stop_event
-        self.skipped_fps = EventsPerSecond()
+        self.skipped_fps = skipped_fps
         self.frame_manager = SharedMemoryFrameManager()
         self.ffmpeg_process = ffmpeg_process
         self.current_frame = mp.Value("d", 0.0)
         self.last_frame = 0
 
     def run(self):
-        self.skipped_fps.start()
         capture_frames(
             self.ffmpeg_process,
             self.camera_name,
@@ -424,6 +433,7 @@ def capture_camera(name, config: CameraConfig, process_info):
         config,
         frame_queue,
         process_info["camera_fps"],
+        process_info["skipped_fps"],
         process_info["ffmpeg_pid"],
         stop_event,
     )
@@ -723,14 +733,6 @@ def process_frames(
     stop_event,
     exit_on_empty: bool = False,
 ):
-    # attribute labels are not tracked and are not assigned regions
-    attribute_label_map = {
-        "person": ["face", "amazon"],
-        "car": ["ups", "fedex", "amazon", "license_plate"],
-    }
-    all_attribute_labels = [
-        item for sublist in attribute_label_map.values() for item in sublist
-    ]
     fps = process_info["process_fps"]
     detection_fps = process_info["detection_fps"]
     current_frame_time = process_info["detection_frame"]
@@ -906,7 +908,7 @@ def process_frames(
             tracked_detections = [
                 d
                 for d in consolidated_detections
-                if d[0] not in all_attribute_labels
+                if d[0] not in ALL_ATTRIBUTE_LABELS
             ]
             # now that we have refined our detections, we need to track objects
             object_tracker.match_and_update(frame_time, tracked_detections)
@@ -916,7 +918,7 @@ def process_frames(
 
             # group the attribute detections based on what label they apply to
             attribute_detections = {}
-            for label, attribute_labels in attribute_label_map.items():
+            for label, attribute_labels in ATTRIBUTE_LABEL_MAP.items():
                 attribute_detections[label] = [
                     d for d in consolidated_detections if d[0] in attribute_labels
                 ]
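
With the attribute map now living in frigate.const, the split in process_frames works as sketched below. This is simplified: real detections are tuples of label, score, and box, but only the label at d[0] matters for the split; the removed comment above ("attribute labels are not tracked and are not assigned regions") still describes the intent:

from frigate.const import ALL_ATTRIBUTE_LABELS, ATTRIBUTE_LABEL_MAP

consolidated_detections = [("person",), ("face",), ("car",), ("license_plate",)]

# only non-attribute labels are handed to the object tracker
tracked_detections = [
    d for d in consolidated_detections if d[0] not in ALL_ATTRIBUTE_LABELS
]

# attribute labels are grouped under the object label they can attach to
attribute_detections = {
    label: [d for d in consolidated_detections if d[0] in attrs]
    for label, attrs in ATTRIBUTE_LABEL_MAP.items()
}

print(tracked_detections)    # [('person',), ('car',)]
print(attribute_detections)  # {'person': [('face',)], 'car': [('license_plate',)]}
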
@@ -1,5 +1,6 @@
 click == 8.1.*
 Flask == 2.3.*
+faster-fifo == 1.4.*
 imutils == 0.5.*
 matplotlib == 3.7.*
 mypy == 0.942
@@ -29,7 +29,7 @@ export default function Tooltip({ relativeTo, text }) {
     let newLeft = left - Math.round(tipWidth / 2);
     // too far right
     if (newLeft + tipWidth + TIP_SPACE > windowWidth - window.scrollX) {
-      newLeft = left - tipWidth - TIP_SPACE;
+      newLeft = Math.max(0, left - tipWidth - TIP_SPACE);
       newTop = top - Math.round(tipHeight / 2);
     }
     // too far left
@@ -22,6 +22,7 @@ const emptyObject = Object.freeze({});
 
 export default function Camera({ camera }) {
   const { data: config } = useSWR('config');
+  const { data: trackedLabels } = useSWR(['labels', { camera }]);
   const apiHost = useApiHost();
   const [showSettings, setShowSettings] = useState(false);
   const [viewMode, setViewMode] = useState('live');
@@ -121,7 +122,9 @@ export default function Camera({ camera }) {
         <div className="max-w-5xl">
           <video-stream
             mode="mse"
-            src={new URL(`${baseUrl.replace(/^http/, 'ws')}live/webrtc/api/ws?src=${cameraConfig.live.stream_name}`)}
+            src={
+              new URL(`${baseUrl.replace(/^http/, 'ws')}live/webrtc/api/ws?src=${cameraConfig.live.stream_name}`)
+            }
           />
         </div>
       </Fragment>
@@ -203,7 +206,7 @@ export default function Camera({ camera }) {
       <div className="space-y-4">
        <Heading size="sm">Tracked objects</Heading>
        <div className="flex flex-wrap justify-start">
-          {cameraConfig.objects.track.map((objectType) => (
+          {(trackedLabels || []).map((objectType) => (
            <Card
              className="mb-4 mr-4"
              key={objectType}
@@ -106,6 +106,7 @@ export default function Events({ path, ...props }) {
 
   const { data: config } = useSWR('config');
 
+  const { data: allLabels } = useSWR(['labels']);
   const { data: allSubLabels } = useSWR(['sub_labels', { split_joined: 1 }]);
 
   const filterValues = useMemo(
@@ -120,15 +121,10 @@ export default function Events({ path, ...props }) {
           .filter((value, i, self) => self.indexOf(value) === i),
         'None',
       ],
-      labels: Object.values(config?.cameras || {})
-        .reduce((memo, camera) => {
-          memo = memo.concat(camera?.objects?.track || []);
-          return memo;
-        }, config?.objects?.track || [])
-        .filter((value, i, self) => self.indexOf(value) === i),
+      labels: Object.values(allLabels || {}),
       sub_labels: (allSubLabels || []).length > 0 ? [...Object.values(allSubLabels), 'None'] : [],
     }),
-    [config, allSubLabels]
+    [config, allLabels, allSubLabels]
   );
 
   const onSave = async (e, eventId, save) => {