Mirror of https://github.com/blakeblackshear/frigate.git
Formatting and remove yolov5
commit 5ff95c223d
parent f7f172d6a1
@@ -131,7 +131,7 @@ model:
   labelmap_path: /openvino-model/coco_91cl_bkgr.txt
 ```

-This detector also supports some YOLO variants: YOLOX and YOLOv5 specifically. Other YOLO variants are not officially supported/tested. Frigate does not come with any yolo models preloaded, so you will need to supply your own models. This detector has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. You can follow [these instructions](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny#download-a-model-and-convert-it-into-openvino-ir-format) to retrieve the OpenVINO-compatible `yolox_tiny` model. Make sure that the model input dimensions match the `width` and `height` parameters, and `model_type` is set accordingly. See [Full Configuration Reference](/configuration/reference.md) for a list of possible `model_type` options. Below is an example of how `yolox_tiny` can be used in Frigate:
+This detector also supports YOLOX. Other YOLO variants are not officially supported/tested. Frigate does not come with any yolo models preloaded, so you will need to supply your own models. This detector has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. You can follow [these instructions](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny#download-a-model-and-convert-it-into-openvino-ir-format) to retrieve the OpenVINO-compatible `yolox_tiny` model. Make sure that the model input dimensions match the `width` and `height` parameters, and `model_type` is set accordingly. See [Full Configuration Reference](/configuration/reference.md) for a list of possible `model_type` options. Below is an example of how `yolox_tiny` can be used in Frigate:

 ```yaml
 detectors:
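The yaml example referenced in the new paragraph is truncated by the diff context above. For orientation, here is a minimal sketch of what a `yolox_tiny` setup along those lines could look like; the paths, the `AUTO` device, and the 416x416 input size are assumptions for illustration, not values taken from this commit:

```yaml
detectors:
  ov:
    type: openvino
    device: AUTO
    model:
      # Assumed location; point this at your converted OpenVINO IR files.
      path: /path/to/yolox_tiny.xml

model:
  # yolox_tiny is commonly exported with a 416x416 input; match your
  # model's actual input dimensions here.
  width: 416
  height: 416
  input_tensor: nchw
  model_type: yolox
  # Assumed labelmap; YOLOX COCO models typically use an 80-class labelmap.
  labelmap_path: /path/to/coco_80cl.txt
```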
@@ -80,7 +80,7 @@ model:
   # Valid values are nhwc or nchw (default: shown below)
   input_tensor: nhwc
   # Optional: Object detection model type, currently only used with the OpenVINO detector
-  # Valid values are ssd, yolox or yolov5 (default: shown below)
+  # Valid values are ssd, yolox (default: shown below)
   model_type: ssd
   # Optional: Label name modifications. These are merged into the standard labelmap.
   labelmap:
@@ -30,7 +30,6 @@ class InputTensorEnum(str, Enum):
 class ModelTypeEnum(str, Enum):
     ssd = "ssd"
     yolox = "yolox"
-    yolov5 = "yolov5"


 class ModelConfig(BaseModel):
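With `yolov5` removed from `ModelTypeEnum`, a configuration that still sets `model_type: yolov5` will now be rejected when the config is parsed, since the value is no longer a valid enum member. A standalone sketch of that effect; the config class below is a simplified stand-in for illustration, not Frigate's actual `ModelConfig`:

```python
from enum import Enum

from pydantic import BaseModel, ValidationError


class ModelTypeEnum(str, Enum):
    ssd = "ssd"
    yolox = "yolox"


# Simplified stand-in for a pydantic config model that uses the enum.
class ExampleModelConfig(BaseModel):
    model_type: ModelTypeEnum = ModelTypeEnum.ssd


ExampleModelConfig(model_type="yolox")  # still accepted

try:
    ExampleModelConfig(model_type="yolov5")  # no longer a valid member
except ValidationError as err:
    print(err)
```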
@@ -131,21 +131,3 @@ class OvDetector(DetectionApi):
                     object_detected[6], object_detected[5], object_detected[:4]
                 )
             return detections
-        elif self.ov_model_type == ModelTypeEnum.yolov5:
-            out_tensor = infer_request.get_output_tensor()
-            output_data = out_tensor.data[0]
-            # filter out lines with scores below threshold
-            conf_mask = (output_data[:, 4] >= 0.5).squeeze()
-            output_data = output_data[conf_mask]
-            # limit to top 20 scores, descending order
-            ordered = output_data[output_data[:, 4].argsort()[::-1]][:20]
-
-            detections = np.zeros((20, 6), np.float32)
-
-            for i, object_detected in enumerate(ordered):
-                detections[i] = self.process_yolo(
-                    np.argmax(object_detected[5:]),
-                    object_detected[4],
-                    object_detected[:4],
-                )
-            return detections
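The removed branch post-processed raw YOLOv5 output by dropping rows whose objectness score was below 0.5 and keeping only the 20 highest-scoring rows, which were then converted into detection entries via `process_yolo`. A standalone sketch of that filter-and-top-k pattern on dummy data; the 85-column row layout (`x, y, w, h, objectness, 80 class scores`) is the usual YOLOv5 export shape and is assumed here:

```python
import numpy as np

# Dummy stand-in for a raw YOLOv5-style output tensor: one row per candidate
# box, columns [x, y, w, h, objectness, 80 class scores].
rng = np.random.default_rng(0)
output_data = rng.random((100, 85)).astype(np.float32)

# Keep rows whose objectness score (column 4) is at least 0.5...
conf_mask = (output_data[:, 4] >= 0.5).squeeze()
output_data = output_data[conf_mask]

# ...then keep at most the 20 best rows, sorted by objectness, best first.
ordered = output_data[output_data[:, 4].argsort()[::-1]][:20]

print(ordered.shape)  # at most (20, 85)
```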
@@ -103,7 +103,9 @@ class Rknn(DetectionApi):
                 "Error initializing rknn runtime. Do you run docker in privileged mode?"
             )

-        raise Exception("RKNN does not currently support any models. Please see the docs for more info.")
+        raise Exception(
+            "RKNN does not currently support any models. Please see the docs for more info."
+        )

     def __del__(self):
         self.rknn.release()
@@ -256,9 +256,9 @@ class AudioEventMaintainer(threading.Thread):

     def handle_detection(self, label: str, score: float) -> None:
         if self.detections.get(label):
-            self.detections[label][
-                "last_detection"
-            ] = datetime.datetime.now().timestamp()
+            self.detections[label]["last_detection"] = (
+                datetime.datetime.now().timestamp()
+            )
         else:
             self.inter_process_communicator.queue.put(
                 (f"{self.config.name}/audio/{label}", "ON")
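This hunk and essentially all of the remaining ones in the commit apply the same reflow: a subscript assignment whose long right-hand side previously forced the key onto its own line is rewritten so the subscript stays on one line and the value is wrapped in parentheses instead. The two layouts are equivalent at runtime; a minimal standalone illustration (the variable names below are made up for the example):

```python
import datetime

detections = {"speech": {}}
label = "speech"

# Old layout: the subscript was split across lines to satisfy the line length.
detections[label][
    "last_detection"
] = datetime.datetime.now().timestamp()

# New layout: the subscript stays intact and the value is parenthesized.
detections[label]["last_detection"] = (
    datetime.datetime.now().timestamp()
)
```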
@@ -700,9 +700,9 @@ def event_snapshot(id):
     else:
         response.headers["Cache-Control"] = "no-store"
     if download:
-        response.headers[
-            "Content-Disposition"
-        ] = f"attachment; filename=snapshot-{id}.jpg"
+        response.headers["Content-Disposition"] = (
+            f"attachment; filename=snapshot-{id}.jpg"
+        )
     return response

@@ -889,9 +889,9 @@ def event_clip(id):
     if download:
         response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name
         response.headers["Content-Length"] = os.path.getsize(clip_path)
-        response.headers[
-            "X-Accel-Redirect"
-        ] = f"/clips/{file_name}"  # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
+        response.headers["X-Accel-Redirect"] = (
+            f"/clips/{file_name}"  # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
+        )

     return response

@@ -1187,9 +1187,9 @@ def config():
     config["plus"] = {"enabled": current_app.plus_api.is_active()}

     for detector, detector_config in config["detectors"].items():
-        detector_config["model"][
-            "labelmap"
-        ] = current_app.frigate_config.model.merged_labelmap
+        detector_config["model"]["labelmap"] = (
+            current_app.frigate_config.model.merged_labelmap
+        )

     return jsonify(config)

@@ -1596,9 +1596,9 @@ def get_recordings_storage_usage():

     total_mb = recording_stats["total"]

-    camera_usages: dict[
-        str, dict
-    ] = current_app.storage_maintainer.calculate_camera_usages()
+    camera_usages: dict[str, dict] = (
+        current_app.storage_maintainer.calculate_camera_usages()
+    )

     for camera_name in camera_usages.keys():
         if camera_usages.get(camera_name, {}).get("usage"):
@@ -1785,9 +1785,9 @@ def recording_clip(camera_name, start_ts, end_ts):
     if download:
         response.headers["Content-Disposition"] = "attachment; filename=%s" % file_name
         response.headers["Content-Length"] = os.path.getsize(path)
-        response.headers[
-            "X-Accel-Redirect"
-        ] = f"/cache/{file_name}"  # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
+        response.headers["X-Accel-Redirect"] = (
+            f"/cache/{file_name}"  # nginx: https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers
+        )

     return response

@@ -297,12 +297,12 @@ class PtzAutoTracker:
             self.ptz_metrics[camera][
                 "ptz_max_zoom"
             ].value = camera_config.onvif.autotracking.movement_weights[1]
-            self.intercept[
-                camera
-            ] = camera_config.onvif.autotracking.movement_weights[2]
-            self.move_coefficients[
-                camera
-            ] = camera_config.onvif.autotracking.movement_weights[3:]
+            self.intercept[camera] = (
+                camera_config.onvif.autotracking.movement_weights[2]
+            )
+            self.move_coefficients[camera] = (
+                camera_config.onvif.autotracking.movement_weights[3:]
+            )
         else:
             camera_config.onvif.autotracking.enabled = False
             self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
@@ -566,9 +566,9 @@ class PtzAutoTracker:
         ) ** self.zoom_factor[camera]

         if "original_target_box" not in self.tracked_object_metrics[camera]:
-            self.tracked_object_metrics[camera][
-                "original_target_box"
-            ] = self.tracked_object_metrics[camera]["target_box"]
+            self.tracked_object_metrics[camera]["original_target_box"] = (
+                self.tracked_object_metrics[camera]["target_box"]
+            )

         (
             self.tracked_object_metrics[camera]["valid_velocity"],
@@ -123,9 +123,9 @@ class OnvifController:
         logger.debug(f"Onvif config for {camera_name}: {ptz_config}")

         service_capabilities_request = ptz.create_type("GetServiceCapabilities")
-        self.cams[camera_name][
-            "service_capabilities_request"
-        ] = service_capabilities_request
+        self.cams[camera_name]["service_capabilities_request"] = (
+            service_capabilities_request
+        )

         fov_space_id = next(
             (
@@ -241,9 +241,9 @@ class OnvifController:
             supported_features.append("zoom-r")
             try:
                 # get camera's zoom limits from onvif config
-                self.cams[camera_name][
-                    "relative_zoom_range"
-                ] = ptz_config.Spaces.RelativeZoomTranslationSpace[0]
+                self.cams[camera_name]["relative_zoom_range"] = (
+                    ptz_config.Spaces.RelativeZoomTranslationSpace[0]
+                )
             except Exception:
                 if (
                     self.config.cameras[camera_name].onvif.autotracking.zooming
@@ -260,9 +260,9 @@ class OnvifController:
             supported_features.append("zoom-a")
             try:
                 # get camera's zoom limits from onvif config
-                self.cams[camera_name][
-                    "absolute_zoom_range"
-                ] = ptz_config.Spaces.AbsoluteZoomPositionSpace[0]
+                self.cams[camera_name]["absolute_zoom_range"] = (
+                    ptz_config.Spaces.AbsoluteZoomPositionSpace[0]
+                )
                 self.cams[camera_name]["zoom_limits"] = configs.ZoomLimits
             except Exception:
                 if self.config.cameras[camera_name].onvif.autotracking.zooming:
@@ -279,9 +279,9 @@ class OnvifController:
             and configs.DefaultRelativePanTiltTranslationSpace is not None
         ):
             supported_features.append("pt-r-fov")
-            self.cams[camera_name][
-                "relative_fov_range"
-            ] = ptz_config.Spaces.RelativePanTiltTranslationSpace[fov_space_id]
+            self.cams[camera_name]["relative_fov_range"] = (
+                ptz_config.Spaces.RelativePanTiltTranslationSpace[fov_space_id]
+            )

         self.cams[camera_name]["features"] = supported_features

@@ -45,9 +45,9 @@ class TestFfmpegPresets(unittest.TestCase):
         assert self.default_ffmpeg == frigate_config.dict(exclude_unset=True)

     def test_ffmpeg_hwaccel_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "hwaccel_args"
-        ] = "preset-rpi-64-h264"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["hwaccel_args"] = (
+            "preset-rpi-64-h264"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "preset-rpi-64-h264" not in (
@@ -58,9 +58,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )

     def test_ffmpeg_hwaccel_not_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "hwaccel_args"
-        ] = "-other-hwaccel args"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["hwaccel_args"] = (
+            "-other-hwaccel args"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "-other-hwaccel args" in (
@@ -68,9 +68,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )

     def test_ffmpeg_hwaccel_scale_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "hwaccel_args"
-        ] = "preset-nvidia-h264"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["hwaccel_args"] = (
+            "preset-nvidia-h264"
+        )
         self.default_ffmpeg["cameras"]["back"]["detect"] = {
             "height": 1920,
             "width": 2560,
@@ -89,9 +89,9 @@ class TestFfmpegPresets(unittest.TestCase):
     def test_default_ffmpeg_input_arg_preset(self):
         frigate_config = FrigateConfig(**self.default_ffmpeg)

-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "input_args"
-        ] = "preset-rtsp-generic"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = (
+            "preset-rtsp-generic"
+        )
         frigate_preset_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         frigate_preset_config.cameras["back"].create_ffmpeg_cmds()
@@ -102,9 +102,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )

     def test_ffmpeg_input_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"][
-            "input_args"
-        ] = "preset-rtmp-generic"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["input_args"] = (
+            "preset-rtmp-generic"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "preset-rtmp-generic" not in (
@@ -135,9 +135,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )

     def test_ffmpeg_output_record_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
-            "record"
-        ] = "preset-record-generic-audio-aac"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"]["record"] = (
+            "preset-record-generic-audio-aac"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "preset-record-generic-audio-aac" not in (
@@ -148,9 +148,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )

     def test_ffmpeg_output_record_not_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
-            "record"
-        ] = "-some output"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"]["record"] = (
+            "-some output"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "-some output" in (
@@ -158,9 +158,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )

     def test_ffmpeg_output_rtmp_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
-            "rtmp"
-        ] = "preset-rtmp-jpeg"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"]["rtmp"] = (
+            "preset-rtmp-jpeg"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "preset-rtmp-jpeg" not in (
@@ -171,9 +171,9 @@ class TestFfmpegPresets(unittest.TestCase):
         )

     def test_ffmpeg_output_rtmp_not_preset(self):
-        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"][
-            "rtmp"
-        ] = "-some output"
+        self.default_ffmpeg["cameras"]["back"]["ffmpeg"]["output_args"]["rtmp"] = (
+            "-some output"
+        )
         frigate_config = FrigateConfig(**self.default_ffmpeg)
         frigate_config.cameras["back"].create_ffmpeg_cmds()
         assert "-some output" in (