Formatting and remove yolov5

Nicolas Mowen 2024-03-28 07:57:32 -06:00
parent f7f172d6a1
commit 5ff95c223d
10 changed files with 71 additions and 88 deletions

View File

@@ -131,7 +131,7 @@ model:
     labelmap_path: /openvino-model/coco_91cl_bkgr.txt
 ```
-This detector also supports some YOLO variants: YOLOX and YOLOv5 specifically. Other YOLO variants are not officially supported/tested. Frigate does not come with any yolo models preloaded, so you will need to supply your own models. This detector has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. You can follow [these instructions](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny#download-a-model-and-convert-it-into-openvino-ir-format) to retrieve the OpenVINO-compatible `yolox_tiny` model. Make sure that the model input dimensions match the `width` and `height` parameters, and `model_type` is set accordingly. See [Full Configuration Reference](/configuration/reference.md) for a list of possible `model_type` options. Below is an example of how `yolox_tiny` can be used in Frigate:
+This detector also supports YOLOX. Other YOLO variants are not officially supported/tested. Frigate does not come with any yolo models preloaded, so you will need to supply your own models. This detector has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. You can follow [these instructions](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny#download-a-model-and-convert-it-into-openvino-ir-format) to retrieve the OpenVINO-compatible `yolox_tiny` model. Make sure that the model input dimensions match the `width` and `height` parameters, and `model_type` is set accordingly. See [Full Configuration Reference](/configuration/reference.md) for a list of possible `model_type` options. Below is an example of how `yolox_tiny` can be used in Frigate:
 ```yaml
 detectors:
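
As the docs text above notes, the model's input dimensions have to match the configured `width` and `height`, and `model_type` must be one of the supported values. Below is a minimal Python sketch of checking that up front; it is not Frigate code, and the concrete values (the 416x416 input size, the file paths) are illustrative assumptions only.

    # Sanity-check a yolox-style model config before handing it to Frigate.
    # Field names follow the docs above; paths and the 416x416 size are hypothetical.
    from textwrap import dedent
    import yaml  # PyYAML

    example = """
    model:
      model_type: yolox
      width: 416
      height: 416
      path: /path/to/yolox_tiny.xml
      labelmap_path: /path/to/coco_80cl.txt
    """

    model = yaml.safe_load(dedent(example))["model"]
    assert model["model_type"] in ("ssd", "yolox"), "yolov5 is no longer supported"
    assert model["width"] == model["height"] == 416, "dims must match the model input"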

View File

@@ -80,7 +80,7 @@ model:
   # Valid values are nhwc or nchw (default: shown below)
   input_tensor: nhwc
   # Optional: Object detection model type, currently only used with the OpenVINO detector
-  # Valid values are ssd, yolox or yolov5 (default: shown below)
+  # Valid values are ssd, yolox (default: shown below)
   model_type: ssd
   # Optional: Label name modifications. These are merged into the standard labelmap.
   labelmap:
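
The `labelmap` comment above describes override semantics: entries from the config are merged into the standard labelmap rather than replacing it. A small illustration of that documented behavior follows (plain Python, not Frigate's implementation; the indices are the usual COCO ones):

    # Entries supplied under `labelmap:` override individual indices; everything
    # else keeps its default name. Illustration only.
    default_labelmap = {0: "person", 1: "bicycle", 2: "car", 7: "truck"}  # abbreviated
    user_overrides = {7: "delivery_truck"}  # what a config `labelmap:` block would carry

    merged = {**default_labelmap, **user_overrides}
    assert merged[7] == "delivery_truck"   # overridden
    assert merged[0] == "person"           # untouched defaults remain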

View File

@@ -30,7 +30,6 @@ class InputTensorEnum(str, Enum):
 class ModelTypeEnum(str, Enum):
     ssd = "ssd"
     yolox = "yolox"
-    yolov5 = "yolov5"
 
 
 class ModelConfig(BaseModel):
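
With `yolov5` dropped from `ModelTypeEnum`, a config that still sets `model_type: yolov5` will no longer validate. A quick standalone sketch of the effect (plain `Enum` here; in Frigate the same lookup happens through the pydantic `ModelConfig`):

    # Mirrors ModelTypeEnum after this commit; "yolov5" no longer maps to a member.
    from enum import Enum

    class ModelTypeEnum(str, Enum):
        ssd = "ssd"
        yolox = "yolox"

    assert ModelTypeEnum("yolox") is ModelTypeEnum.yolox
    try:
        ModelTypeEnum("yolov5")
    except ValueError as err:
        print("rejected:", err)  # 'yolov5' is not a valid ModelTypeEnum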

View File

@@ -131,21 +131,3 @@ class OvDetector(DetectionApi):
                     object_detected[6], object_detected[5], object_detected[:4]
                 )
             return detections
-        elif self.ov_model_type == ModelTypeEnum.yolov5:
-            out_tensor = infer_request.get_output_tensor()
-            output_data = out_tensor.data[0]
-            # filter out lines with scores below threshold
-            conf_mask = (output_data[:, 4] >= 0.5).squeeze()
-            output_data = output_data[conf_mask]
-            # limit to top 20 scores, descending order
-            ordered = output_data[output_data[:, 4].argsort()[::-1]][:20]
-            detections = np.zeros((20, 6), np.float32)
-
-            for i, object_detected in enumerate(ordered):
-                detections[i] = self.process_yolo(
-                    np.argmax(object_detected[5:]),
-                    object_detected[4],
-                    object_detected[:4],
-                )
-            return detections

View File

@@ -103,7 +103,9 @@ class Rknn(DetectionApi):
                 "Error initializing rknn runtime. Do you run docker in privileged mode?"
             )
 
-        raise Exception("RKNN does not currently support any models. Please see the docs for more info.")
+        raise Exception(
+            "RKNN does not currently support any models. Please see the docs for more info."
+        )
 
     def __del__(self):
         self.rknn.release()

View File

@@ -256,9 +256,9 @@ class AudioEventMaintainer(threading.Thread):
     def handle_detection(self, label: str, score: float) -> None:
         if self.detections.get(label):
-            self.detections[label][
-                "last_detection"
-            ] = datetime.datetime.now().timestamp()
+            self.detections[label]["last_detection"] = (
+                datetime.datetime.now().timestamp()
+            )
         else:
             self.inter_process_communicator.queue.put(
                 (f"{self.config.name}/audio/{label}", "ON")

View File

@@ -700,9 +700,9 @@ def event_snapshot(id):
     else:
         response.headers["Cache-Control"] = "no-store"
     if download:
-        response.headers[
-            "Content-Disposition"
-        ] = f"attachment; filename=snapshot-{id}.jpg"
+        response.headers["Content-Disposition"] = (
+            f"attachment; filename=snapshot-{id}.jpg"
+        )
     return response
@@ -889,9 +889,9 @@ def event_clip(id):
     if download:
         response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name
         response.headers["Content-Length"] = os.path.getsize(clip_path)
-        response.headers[
-            "X-Accel-Redirect"
-        ] = f"/clips/{file_name}"  # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
+        response.headers["X-Accel-Redirect"] = (
+            f"/clips/{file_name}"  # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
+        )
     return response
@@ -1187,9 +1187,9 @@ def config():
     config["plus"] = {"enabled": current_app.plus_api.is_active()}
 
     for detector, detector_config in config["detectors"].items():
-        detector_config["model"][
-            "labelmap"
-        ] = current_app.frigate_config.model.merged_labelmap
+        detector_config["model"]["labelmap"] = (
+            current_app.frigate_config.model.merged_labelmap
+        )
 
     return jsonify(config)
@@ -1596,9 +1596,9 @@ def get_recordings_storage_usage():
     total_mb = recording_stats["total"]
 
-    camera_usages: dict[
-        str, dict
-    ] = current_app.storage_maintainer.calculate_camera_usages()
+    camera_usages: dict[str, dict] = (
+        current_app.storage_maintainer.calculate_camera_usages()
+    )
 
     for camera_name in camera_usages.keys():
         if camera_usages.get(camera_name, {}).get("usage"):
@@ -1785,9 +1785,9 @@ def recording_clip(camera_name, start_ts, end_ts):
     if download:
         response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name
         response.headers["Content-Length"] = os.path.getsize(path)
-        response.headers[
-            "X-Accel-Redirect"
-        ] = f"/cache/{file_name}"  # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
+        response.headers["X-Accel-Redirect"] = (
+            f"/cache/{file_name}"  # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
+        )
     return response
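
Several of the hunks above only reformat assignments to `Content-Disposition` and `X-Accel-Redirect`, but the latter header is worth a note: the Flask handler returns headers only, and nginx intercepts `X-Accel-Redirect` to stream the file from an internal location itself. A minimal sketch of that pattern (not Frigate's actual route; the route name is hypothetical, and the nginx side needs a matching `location /clips/ { internal; ... }` block):

    # Sketch of the X-Accel-Redirect pattern used above.
    from flask import Flask, make_response

    app = Flask(__name__)

    @app.route("/export/<file_name>")
    def export_clip(file_name: str):
        response = make_response("")
        response.headers["Content-Disposition"] = f"attachment; filename={file_name}"
        # nginx replaces the empty body with the file served from its internal location
        response.headers["X-Accel-Redirect"] = f"/clips/{file_name}"
        return response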

View File

@@ -297,12 +297,12 @@ class PtzAutoTracker:
                 self.ptz_metrics[camera][
                     "ptz_max_zoom"
                 ].value = camera_config.onvif.autotracking.movement_weights[1]
-                self.intercept[
-                    camera
-                ] = camera_config.onvif.autotracking.movement_weights[2]
-                self.move_coefficients[
-                    camera
-                ] = camera_config.onvif.autotracking.movement_weights[3:]
+                self.intercept[camera] = (
+                    camera_config.onvif.autotracking.movement_weights[2]
+                )
+                self.move_coefficients[camera] = (
+                    camera_config.onvif.autotracking.movement_weights[3:]
+                )
             else:
                 camera_config.onvif.autotracking.enabled = False
                 self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
@@ -566,9 +566,9 @@ class PtzAutoTracker:
             ) ** self.zoom_factor[camera]
 
             if "original_target_box" not in self.tracked_object_metrics[camera]:
-                self.tracked_object_metrics[camera][
-                    "original_target_box"
-                ] = self.tracked_object_metrics[camera]["target_box"]
+                self.tracked_object_metrics[camera]["original_target_box"] = (
+                    self.tracked_object_metrics[camera]["target_box"]
+                )
 
             (
                 self.tracked_object_metrics[camera]["valid_velocity"],

View File

@@ -123,9 +123,9 @@ class OnvifController:
         logger.debug(f"Onvif config for {camera_name}: {ptz_config}")
 
         service_capabilities_request = ptz.create_type("GetServiceCapabilities")
-        self.cams[camera_name][
-            "service_capabilities_request"
-        ] = service_capabilities_request
+        self.cams[camera_name]["service_capabilities_request"] = (
+            service_capabilities_request
+        )
 
         fov_space_id = next(
             (
@ -241,9 +241,9 @@ class OnvifController:
supported_features.append("zoom-r") supported_features.append("zoom-r")
try: try:
# get camera's zoom limits from onvif config # get camera's zoom limits from onvif config
self.cams[camera_name][ self.cams[camera_name]["relative_zoom_range"] = (
"relative_zoom_range" ptz_config.Spaces.RelativeZoomTranslationSpace[0]
] = ptz_config.Spaces.RelativeZoomTranslationSpace[0] )
except Exception: except Exception:
if ( if (
self.config.cameras[camera_name].onvif.autotracking.zooming self.config.cameras[camera_name].onvif.autotracking.zooming
@ -260,9 +260,9 @@ class OnvifController:
supported_features.append("zoom-a") supported_features.append("zoom-a")
try: try:
# get camera's zoom limits from onvif config # get camera's zoom limits from onvif config
self.cams[camera_name][ self.cams[camera_name]["absolute_zoom_range"] = (
"absolute_zoom_range" ptz_config.Spaces.AbsoluteZoomPositionSpace[0]
] = ptz_config.Spaces.AbsoluteZoomPositionSpace[0] )
self.cams[camera_name]["zoom_limits"] = configs.ZoomLimits self.cams[camera_name]["zoom_limits"] = configs.ZoomLimits
except Exception: except Exception:
if self.config.cameras[camera_name].onvif.autotracking.zooming: if self.config.cameras[camera_name].onvif.autotracking.zooming:
@ -279,9 +279,9 @@ class OnvifController:
and configs.DefaultRelativePanTiltTranslationSpace is not None and configs.DefaultRelativePanTiltTranslationSpace is not None
): ):
supported_features.append("pt-r-fov") supported_features.append("pt-r-fov")
self.cams[camera_name][ self.cams[camera_name]["relative_fov_range"] = (
"relative_fov_range" ptz_config.Spaces.RelativePanTiltTranslationSpace[fov_space_id]
] = ptz_config.Spaces.RelativePanTiltTranslationSpace[fov_space_id] )
self.cams[camera_name]["features"] = supported_features self.cams[camera_name]["features"] = supported_features

View File

@@ -45,9 +45,9 @@ class TestFfmpegPresets(unittest.TestCase):
         assert self.default_ffmpeg == frigate_config.dict(exclude_unset=True)
 
     def test_ffmpeg_hwaccel_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "hwaccel_args"
-        ] = "preset-rpi-64-h264"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["hwaccel_args"] = (
+            "preset-rpi-64-h264"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "preset-rpi-64-h264" not in (
@@ -58,9 +58,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )
 
     def test_ffmpeg_hwaccel_not_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "hwaccel_args"
-        ] = "-other-hwaccel args"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["hwaccel_args"] = (
+            "-other-hwaccel args"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "-other-hwaccel args" in (
@@ -68,9 +68,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )
 
     def test_ffmpeg_hwaccel_scale_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "hwaccel_args"
-        ] = "preset-nvidia-h264"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["hwaccel_args"] = (
+            "preset-nvidia-h264"
+        )
         self.default_ffmpeg["cameras"]["back"]["detect"] = {
             "height": 1920,
             "width": 2560,
@@ -89,9 +89,9 @@ class TestFfmpegPresets(unittest.TestCase):
     def test_default_ffmpeg_input_arg_preset(self):
         frigate_config = FrigateConfig(**self.default_ffmpeg)
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "input_args"
-        ] = "preset-rtsp-generic"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = (
+            "preset-rtsp-generic"
+        )
         frigate_preset_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         frigate_preset_config.cameras["back"].create_ffmpeg_cmds()
@@ -102,9 +102,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )
 
     def test_ffmpeg_input_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "input_args"
-        ] = "preset-rtmp-generic"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = (
+            "preset-rtmp-generic"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "preset-rtmp-generic" not in (
@@ -135,9 +135,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )
 
     def test_ffmpeg_output_record_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
-            "record"
-        ] = "preset-record-generic-audio-aac"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"]["record"] = (
+            "preset-record-generic-audio-aac"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "preset-record-generic-audio-aac" not in (
@@ -148,9 +148,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )
 
     def test_ffmpeg_output_record_not_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
-            "record"
-        ] = "-some output"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"]["record"] = (
+            "-some output"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "-some output" in (
@@ -158,9 +158,9 @@ class TestFfmpegPresets(unittest.TestCase):
        )
 
    def test_ffmpeg_output_rtmp_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
-            "rtmp"
-        ] = "preset-rtmp-jpeg"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"]["rtmp"] = (
+            "preset-rtmp-jpeg"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "preset-rtmp-jpeg" not in (
@@ -171,9 +171,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )
 
     def test_ffmpeg_output_rtmp_not_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
-            "rtmp"
-        ] = "-some output"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"]["rtmp"] = (
+            "-some output"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "-some output" in (