Compare commits


No commits in common. "e6cbc93703e208416e448a807494a96dc4e4a4d7" and "8b293449f9789b34f0ed2c814a8f1af10cef7282" have entirely different histories.

10 changed files with 23 additions and 322 deletions

View File

@@ -173,31 +173,6 @@ jobs:
          set: |
            rk.tags=${{ steps.setup.outputs.image-name }}-rk
            *.cache-from=type=gha
  synaptics_build:
    runs-on: ubuntu-22.04-arm
    name: Synaptics Build
    needs:
      - arm64_build
    steps:
      - name: Check out code
        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Set up QEMU and Buildx
        id: setup
        uses: ./.github/actions/setup
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push Synaptics build
        uses: docker/bake-action@v6
        with:
          source: .
          push: true
          targets: synaptics
          files: docker/synaptics/synaptics.hcl
          set: |
            synaptics.tags=${{ steps.setup.outputs.image-name }}-synaptics
            *.cache-from=type=gha
  # The majority of users running arm64 are rpi users, so the rpi
  # build should be the primary arm64 image
  assemble_default_build:

View File

@@ -1,28 +0,0 @@
# syntax=docker/dockerfile:1.6
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
# Globally set pip break-system-packages option to avoid having to specify it every time
ARG PIP_BREAK_SYSTEM_PACKAGES=1
FROM wheels AS synap1680-wheels
ARG TARGETARCH
# Install dependencies
RUN wget -qO- "https://github.com/GaryHuang-ASUS/synaptics_astra_sdk/releases/download/v1.5.0/Synaptics-SL1680-v1.5.0-rt.tar" | tar -C / -xzf -
RUN wget -P /wheels/ "https://github.com/synaptics-synap/synap-python/releases/download/v0.0.4-preview/synap_python-0.0.4-cp311-cp311-manylinux_2_35_aarch64.whl"
FROM deps AS synap1680-deps
ARG TARGETARCH
ARG PIP_BREAK_SYSTEM_PACKAGES
RUN --mount=type=bind,from=synap1680-wheels,source=/wheels,target=/deps/synap-wheels \
    pip3 install --no-deps -U /deps/synap-wheels/*.whl
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY --from=synap1680-wheels /rootfs/usr/local/lib/*.so /usr/lib
ADD https://raw.githubusercontent.com/synaptics-astra/synap-release/v1.5.0/models/dolphin/object_detection/coco/model/mobilenet224_full80/model.synap /synaptics/mobilenet.synap

View File

@@ -1,27 +0,0 @@
target wheels {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64"]
  target = "wheels"
}

target deps {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64"]
  target = "deps"
}

target rootfs {
  dockerfile = "docker/main/Dockerfile"
  platforms = ["linux/arm64"]
  target = "rootfs"
}

target synaptics {
  dockerfile = "docker/synaptics/Dockerfile"
  contexts = {
    wheels = "target:wheels",
    deps = "target:deps",
    rootfs = "target:rootfs"
  }
  platforms = ["linux/arm64"]
}

View File

@@ -1,15 +0,0 @@
BOARDS += synaptics

local-synaptics: version
	docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
		--set synaptics.tags=frigate:latest-synaptics \
		--load

build-synaptics: version
	docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
		--set synaptics.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-synaptics

push-synaptics: build-synaptics
	docker buildx bake --file=docker/synaptics/synaptics.hcl synaptics \
		--set synaptics.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-synaptics \
		--push

View File

@@ -427,29 +427,3 @@ cameras:
```
:::
## Synaptics
Hardware accelerated video de-/encoding is supported on Synaptics SL-series SoCs.
### Prerequisites
Make sure to follow the [Synaptics specific installation instructions](/frigate/installation#synaptics).
### Configuration
Add the following FFmpeg configuration to your `config.yml` to enable hardware video processing:
```yaml
ffmpeg:
hwaccel_args: -c:v h264_v4l2m2m
input_args: preset-rtsp-restream
output_args:
record: preset-record-generic-audio-aac
```
:::warning
Make sure that your SoC supports hardware acceleration for your input stream's codec and that your input stream is h264 encoded. For example, if your camera streams h264, your SoC must be able to decode and encode h264. If you are unsure whether your SoC meets the requirements, take a look at its datasheet.
:::
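To confirm which codec your camera stream actually uses, you can probe it with `ffprobe` (a rough sketch; the RTSP URL is a placeholder for your camera's stream):
```
ffprobe -v error -select_streams v:0 \
  -show_entries stream=codec_name -of default=noprint_wrappers=1:nokey=1 \
  "rtsp://user:password@camera-ip:554/stream"
```
If this prints `h264`, the configuration above applies; for any other codec, check the SoC datasheet for supported formats.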

View File

@@ -43,10 +43,6 @@ Frigate supports multiple different detectors that work on different types of ha
- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.
**Synaptics**
- [Synaptics](#synaptics): Synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs.
**For Testing**
- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Uses the CPU to run a TFLite model; this is not recommended, and in most cases OpenVINO can be used in CPU mode with better results.
@@ -1053,41 +1049,6 @@ model:
height: 320 # MUST match the chosen model i.e yolov7-320 -> 320 yolov4-416 -> 416
```
## Synaptics
Hardware accelerated object detection is supported on the following SoCs:
- SL1680
This implementation uses the [Synaptics model conversion](https://synaptics-synap.github.io/doc/v/latest/docs/manual/introduction.html#offline-model-conversion) toolchain, version v3.1.0.
It is based on SDK `v1.5.0`.
See the [installation docs](../frigate/installation.md#synaptics) for information on configuring the SL-series NPU hardware.
### Configuration
When configuring the Synap detector, you have to specify the model as a local **path**.
#### SSD Mobilenet
A Synap model is provided in the container at `/synaptics/mobilenet.synap` and is used by this detector type by default. The model comes from the [synap-release GitHub repository](https://github.com/synaptics-astra/synap-release/tree/v1.5.0/models/dolphin/object_detection/coco/model/mobilenet224_full80).
Use the model configuration shown below when using the Synaptics detector with the default Synap model:
```yaml
detectors: # required
  synap_npu: # required
    type: synaptics # required

model: # required
  path: /synaptics/mobilenet.synap # required
  width: 224 # required
  height: 224 # required
  input_tensor: nhwc # default value (optional; required if you change the model)
  labelmap_path: /labelmap/coco-80.txt # required
```
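The bundled model works out of the box. If you convert your own `.synap` model, one possible approach (a sketch; `/opt/frigate-models` and `/models` are arbitrary example paths, and the URL shown is simply the bundled COCO mobilenet model) is to place the model on the host and bind-mount it into the container:
```
# Fetch or copy your converted model to the host
mkdir -p /opt/frigate-models
wget -O /opt/frigate-models/my_model.synap \
  "https://raw.githubusercontent.com/synaptics-astra/synap-release/v1.5.0/models/dolphin/object_detection/coco/model/mobilenet224_full80/model.synap"
```
Then add `-v /opt/frigate-models:/models` to your `docker run` command (or a matching `volumes:` entry in docker-compose) and set `path: /models/my_model.synap` in the `model` section above.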
## Rockchip platform
Hardware accelerated object detection is supported on the following SoCs:

View File

@@ -95,21 +95,8 @@ Frigate supports multiple different detectors that work on different types of ha
- Runs best with tiny or small size models
- Runs efficiently on low power hardware
**Synaptics**
- [Synaptics](#synaptics): Synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs to provide efficient object detection.
:::
### Synaptics
- **Synaptics**: Default model is **mobilenet**
| Name | Synaptics SL1680 Inference Time |
| ---------------- | ------------------------------- |
| ssd mobilenet | ~ 25 ms |
| yolov5m | ~ 118 ms |
### Hailo-8
Frigate supports both the Hailo-8 and Hailo-8L AI Acceleration Modules on compatible hardware platforms, including the Raspberry Pi 5 with the PCIe hat from the AI kit. The Hailo detector integration in Frigate automatically identifies your hardware type and selects the appropriate default model when a custom model isn't provided.

View File

@@ -256,37 +256,6 @@ or add these options to your `docker run` command:
Next, you should configure [hardware object detection](/configuration/object_detectors#rockchip-platform) and [hardware video processing](/configuration/hardware_acceleration_video#rockchip-platform).
### Synaptics
- SL1680
#### Setup
Follow Frigate's default installation instructions, but use a Docker image with the `-synaptics` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-synaptics`.
Next, you need to grant Docker permission to access your hardware:
- During the configuration process, you should run docker in privileged mode to avoid any errors due to insufficient permissions. To do so, add `privileged: true` to your `docker-compose.yml` file or the `--privileged` flag to your docker run command.
```yaml
devices:
- /dev/synap
- /dev/video0
- /dev/video1
```
or add these options to your `docker run` command:
```
--device /dev/synap \
--device /dev/video0 \
--device /dev/video1
```
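Putting the pieces together, a complete `docker run` invocation could look roughly like the sketch below (the container name and the `/path/to/your/config` mount are illustrative placeholders; adjust them for your setup):
```
docker run -d \
  --name frigate \
  --privileged \
  --device /dev/synap \
  --device /dev/video0 \
  --device /dev/video1 \
  -v /path/to/your/config:/config \
  ghcr.io/blakeblackshear/frigate:stable-synaptics
```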
#### Configuration
Next, you should configure [hardware object detection](/configuration/object_detectors#synaptics) and [hardware video processing](/configuration/hardware_acceleration_video#synaptics).
## Docker
Running through Docker with Docker Compose is the recommended install method.

View File

@@ -1,91 +0,0 @@
import logging
import os

import numpy as np
from synap import Network
from synap.postprocessor import Detector
from synap.preprocessor import Preprocessor
from synap.types import Layout, Shape
from typing_extensions import Literal

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
    BaseDetectorConfig,
    InputTensorEnum,
    ModelTypeEnum,
)

logger = logging.getLogger(__name__)

DETECTOR_KEY = "synaptics"


class SynapDetectorConfig(BaseDetectorConfig):
    type: Literal[DETECTOR_KEY]


class SynapDetector(DetectionApi):
    type_key = DETECTOR_KEY

    def __init__(self, detector_config: SynapDetectorConfig):
        try:
            _, ext = os.path.splitext(detector_config.model.path)
            if ext and ext != ".synap":
                raise ValueError("Model path for Synap1680 must point to a .synap file.")
            synap_network = Network(detector_config.model.path)
            logger.info(f"Synap NPU loaded model: {detector_config.model.path}")
        except ValueError as ve:
            logger.error(f"Invalid Synap1680 model config: {ve}")
            raise
        except Exception as e:
            logger.error(f"Failed to init Synap NPU: {e}")
            raise

        self.width = detector_config.model.width
        self.height = detector_config.model.height
        self.model_type = detector_config.model.model_type
        self.network = synap_network
        self.network_input_details = self.network.inputs[0]
        self.input_tensor_layout = detector_config.model.input_tensor

        # Create Inference Engine
        self.preprocessor = Preprocessor()
        self.detector = Detector(score_threshold=0.4, iou_threshold=0.4)

    def detect_raw(self, tensor_input: np.ndarray):
        # Only tested with the pre-converted mobilenet80 .tflite -> .synap model so far
        layout = Layout.nhwc  # default layout
        # Frigate expects up to 20 rows of [class_id, score, y1, x1, y2, x2] with normalized coordinates
        detections = np.zeros((20, 6), np.float32)

        if self.input_tensor_layout == InputTensorEnum.nhwc:
            layout = Layout.nhwc

        postprocess_data = self.preprocessor.assign(
            self.network.inputs, tensor_input, Shape(tensor_input.shape), layout
        )
        output_tensor_obj = self.network.predict()
        output = self.detector.process(output_tensor_obj, postprocess_data)

        if self.model_type == ModelTypeEnum.ssd:
            for i, item in enumerate(output.items):
                if i == 20:
                    break
                bb = item.bounding_box
                # Convert corner coordinates to normalized [0,1] range
                x1 = bb.origin.x / self.width  # Top-left X
                y1 = bb.origin.y / self.height  # Top-left Y
                x2 = (bb.origin.x + bb.size.x) / self.width  # Bottom-right X
                y2 = (bb.origin.y + bb.size.y) / self.height  # Bottom-right Y
                detections[i] = [
                    item.class_index,
                    float(item.confidence),
                    y1,
                    x1,
                    y2,
                    x2,
                ]
        else:
            logger.error(f"Unsupported model type: {self.model_type}")

        return detections

View File

@@ -330,18 +330,6 @@ class NorfairTracker(ObjectTracker):
        stationary: bool,
        yuv_frame: np.ndarray | None,
    ) -> bool:
        def reset_position(xmin: int, ymin: int, xmax: int, ymax: int) -> None:
            self.positions[id] = {
                "xmins": [xmin],
                "ymins": [ymin],
                "xmaxs": [xmax],
                "ymaxs": [ymax],
                "xmin": xmin,
                "ymin": ymin,
                "xmax": xmax,
                "ymax": ymax,
            }

        xmin, ymin, xmax, ymax = box
        position = self.positions[id]
        self.stationary_box_history[id].append(box)
@@ -372,11 +360,17 @@
                if not self.stationary_classifier.evaluate(
                    id, yuv_frame, cast(tuple[int, int, int, int], tuple(box))
                ):
                    reset_position(xmin, ymin, xmax, ymax)
                    self.positions[id] = {
                        "xmins": [xmin],
                        "ymins": [ymin],
                        "xmaxs": [xmax],
                        "ymaxs": [ymax],
                        "xmin": xmin,
                        "ymin": ymin,
                        "xmax": xmax,
                        "ymax": ymax,
                    }
                    return False
            else:
                reset_position(xmin, ymin, xmax, ymax)
                return False

        threshold = (
            THRESHOLD_STATIONARY_CHECK_IOU if stationary else THRESHOLD_ACTIVE_CHECK_IOU
@@ -397,20 +391,26 @@
        # if the median iou drops below the threshold
        # assume object is no longer stationary
        if median_iou < threshold:
            # If we have a yuv_frame to check before flipping to active, check with classifier if we have YUV frame
            # Before flipping to active, check with classifier if we have YUV frame
            if stationary and yuv_frame is not None:
                if not self.stationary_classifier.evaluate(
                    id, yuv_frame, cast(tuple[int, int, int, int], tuple(box))
                ):
                    reset_position(xmin, ymin, xmax, ymax)
                    self.positions[id] = {
                        "xmins": [xmin],
                        "ymins": [ymin],
                        "xmaxs": [xmax],
                        "ymaxs": [ymax],
                        "xmin": xmin,
                        "ymin": ymin,
                        "xmax": xmax,
                        "ymax": ymax,
                    }
                    return False
            else:
                reset_position(xmin, ymin, xmax, ymax)
                return False

        # if there are more than 5 and less than 10 entries for the position, add the bounding box
        # and recompute the position box
        if len(position["xmins"]) < 10:
        if 5 <= len(position["xmins"]) < 10:
            position["xmins"].append(xmin)
            position["ymins"].append(ymin)
            position["xmaxs"].append(xmax)
@@ -614,11 +614,7 @@
                self.tracked_objects[id]["estimate"] = new_obj["estimate"]
            # else update it
            else:
                self.update(
                    str(t.global_id),
                    new_obj,
                    yuv_frame if new_obj["label"] == "car" else None,
                )
                self.update(str(t.global_id), new_obj, yuv_frame)

        # clear expired tracks
        expired_ids = [k for k in self.track_id_map.keys() if k not in active_ids]