mirror of https://github.com/blakeblackshear/frigate.git
synced 2026-02-16 16:15:22 +03:00

Merge branch 'dev' into http-api-documentation

# Conflicts:
#	docs/docs/integrations/api.md
#	docs/sidebars.js

This commit is contained in:
commit e6a2db617e
2	.github/workflows/ci.yml vendored

@@ -205,7 +205,7 @@ jobs:
       with:
         string: ${{ github.repository }}
     - name: Log in to the Container registry
-      uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446
+      uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
       with:
         registry: ghcr.io
         username: ${{ github.actor }}
2	.github/workflows/release.yml vendored

@@ -16,7 +16,7 @@ jobs:
       with:
         string: ${{ github.repository }}
     - name: Log in to the Container registry
-      uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446
+      uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
       with:
         registry: ghcr.io
         username: ${{ github.actor }}
@@ -13,6 +13,7 @@ apt-get -qq install --no-install-recommends -y \
    python3.9 \
    python3-pip \
    curl \
+   lsof \
    jq \
    nethogs

@@ -44,7 +45,7 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then
    wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz"
    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
-   wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
+   wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-30-15-36/ffmpeg-n7.1-linux64-gpl-7.1.tar.xz"
    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
fi

@@ -56,7 +57,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
    wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz"
    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
-   wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
+   wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2024-09-30-15-36/ffmpeg-n7.1-linuxarm64-gpl-7.1.tar.xz"
    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
fi
@@ -83,6 +83,7 @@ ARG AMDGPU

COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share/miopen/db/
+COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-dist/ /
COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-39-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/
@@ -183,7 +183,7 @@ To do this:
3. Give `go2rtc` execute permission.
4. Restart Frigate and the custom version will be used; you can verify by checking the go2rtc logs.

-## Validating your config.yaml file updates
+## Validating your config.yml file updates

When Frigate starts up, it checks whether your config file is valid, and if it is not, the process exits. To minimize interruptions when updating your config, you have three options -- you can edit the config via the WebUI, which has built-in validation, use the config API, or validate on the command line using the Frigate docker container.

@@ -211,5 +211,5 @@ docker run \
  --entrypoint python3 \
  ghcr.io/blakeblackshear/frigate:stable \
  -u -m frigate \
-  --validate_config
+  --validate-config
```
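The hunk above shows only the tail of the validation command. As a hedged sketch, the same check could be run from Docker Compose; the service name and config path are assumptions, not part of this commit:

```yaml
# a sketch, assuming config.yml sits next to the compose file
services:
  frigate-validate:
    image: ghcr.io/blakeblackshear/frigate:stable
    entrypoint: python3
    command: ["-u", "-m", "frigate", "--validate-config"]
    volumes:
      - ./config.yml:/config/config.yml:ro
```

Invoking it with `docker compose run --rm frigate-validate` should print validation errors and exit non-zero when the config is invalid.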
@@ -9,6 +9,12 @@ This page makes use of presets of FFmpeg args. For more information on presets,

:::

+:::note
+
+Many cameras support encoding options which greatly affect the live view experience; see the [Live view](/configuration/live) page for more info.
+
+:::

## MJPEG Cameras

Note that MJPEG cameras require encoding the video into h264 for the record and restream roles. This will use significantly more CPU than if the cameras supported h264 feeds directly. It is recommended to use the restream role to create an h264 restream and then use that as the source for ffmpeg.
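A hedged sketch of that restream pattern; the camera name, MJPEG URL, and go2rtc ffmpeg source are assumptions, not from this commit:

```yaml
go2rtc:
  streams:
    mjpeg_cam:
      # transcode the MJPEG feed to h264 once, inside go2rtc
      - "ffmpeg:http://192.168.1.10/video.mjpg#video=h264"
cameras:
  mjpeg_cam:
    ffmpeg:
      inputs:
        # consume the h264 restream rather than the raw MJPEG feed
        - path: rtsp://127.0.0.1:8554/mjpeg_cam
          input_args: preset-rtsp-restream
          roles:
            - record
            - detect
```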
@@ -79,29 +79,41 @@ cameras:

If the ONVIF connection is successful, PTZ controls will be available in the camera's WebUI.

+:::tip
+
+If your ONVIF camera does not require authentication credentials, you may still need to specify an empty string for `user` and `password`, e.g. `user: ""` and `password: ""`.
+
+:::
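A minimal sketch of that empty-credential case (host, port, and camera name are assumptions):

```yaml
cameras:
  example_cam:
    onvif:
      host: 192.168.1.20
      port: 8000
      # explicitly empty credentials, per the tip above
      user: ""
      password: ""
```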

An ONVIF-capable camera that supports relative movement within the field of view (FOV) can also be configured to automatically track moving objects and keep them in the center of the frame. For autotracking setup, see the [autotracking](autotracking.md) docs.

## ONVIF PTZ camera recommendations

This list of working and non-working PTZ cameras is based on user feedback.

| Brand or specific camera | PTZ Controls | Autotracking | Notes |
| ------------------------ | :----------: | :----------: | ----- |
| Amcrest | ✅ | ✅ | ⛔️ Generally, Amcrest should work, but some older models (like the common IP2M-841) don't support autotracking |
-| Amcrest ASH21 | ❌ | ❌ | No ONVIF support |
+| Amcrest ASH21 | ✅ | ❌ | ONVIF service port: 80 |
+| Amcrest IP4M-S2112EW-AI | ✅ | ❌ | FOV relative movement not supported. |
+| Amcrest IP5M-1190EW | ✅ | ❌ | ONVIF Port: 80. FOV relative movement not supported. |
| Ctronics PTZ | ✅ | ❌ | |
| Dahua | ✅ | ✅ | |
+| Dahua DH-SD2A500HB | ✅ | ❌ | |
| Foscam R5 | ✅ | ❌ | |
| Hanwha XNP-6550RH | ✅ | ❌ | |
| Hikvision | ✅ | ❌ | Incomplete ONVIF support (MoveStatus won't update even on latest firmware) - reported with HWP-N4215IH-DE and DS-2DE3304W-DE, but likely others |
+| Hikvision DS-2DE3A404IWG-E/W | ✅ | ✅ | |
| Reolink 511WA | ✅ | ❌ | Zoom only |
| Reolink E1 Pro | ✅ | ❌ | |
| Reolink E1 Zoom | ✅ | ❌ | |
| Reolink RLC-823A 16x | ✅ | ❌ | |
+| Speco O8P32X | ✅ | ❌ | |
| Sunba 405-D20X | ✅ | ❌ | |
| Tapo | ✅ | ❌ | Many models supported, ONVIF Service Port: 2020 |
| Uniview IPC672LR-AX4DUPK | ✅ | ❌ | Firmware says FOV relative movement is supported, but camera doesn't actually move when sending ONVIF commands |
+| Uniview IPC6612SR-X33-VG | ✅ | ✅ | Leave `calibrate_on_startup` as `False`. A user has reported that zooming with `absolute` is working. |
| Vikylin PTZ-2804X-I2 | ❌ | ❌ | Incomplete ONVIF support |

## Setting up camera groups
@@ -100,6 +100,28 @@ genai:
  model: gpt-4o
```

+## Azure OpenAI
+
+Microsoft offers several vision models through Azure OpenAI. A subscription is required.
+
+### Supported Models
+
+You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.
+
+### Create Resource and Get API Key
+
+To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key and resource URL, which must include the `api-version` parameter (see the example below). The model field is not required in your configuration as the model is part of the deployment name you chose when deploying the resource.
+
+### Configuration
+
+```yaml
+genai:
+  enabled: True
+  provider: azure_openai
+  base_url: https://example-endpoint.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview
+  api_key: "{FRIGATE_OPENAI_API_KEY}"
+```

## Custom Prompts

Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows:
@@ -130,10 +152,13 @@ genai:

Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire. By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.

+Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the thumbnails collected over the object's lifetime to the model. Using a snapshot provides the AI with a higher-resolution image (typically downscaled by the AI itself), but the trade-off is that only a single image is used, which might limit the model's ability to determine object movement or direction.

```yaml
cameras:
  front_door:
    genai:
+     use_snapshot: True
      prompt: "Describe the {label} in these images from the {camera} security camera at the front door of a house, aimed outward toward the street."
      object_prompts:
        person: "Describe the main person in these images (gender, age, clothing, activity, etc). Do not include where the activity is occurring (sidewalk, concrete, driveway, etc). If delivering a package, include the company the package is from."
@@ -65,6 +65,8 @@ Or map in all the `/dev/video*` devices.

## Intel-based CPUs

+:::info
+
**Recommended hwaccel Preset**

| CPU Generation | Intel Driver | Recommended Preset | Notes |

@@ -74,11 +76,13 @@ Or map in all the `/dev/video*` devices.
| gen13+ | iHD / Xe | preset-intel-qsv-* | |
| Intel Arc GPU | iHD / Xe | preset-intel-qsv-* | |

+:::
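As a sketch of applying one of the presets from the table, assuming an h264 camera stream and a global `ffmpeg` block:

```yaml
ffmpeg:
  # for h265 streams the matching preset-intel-qsv-h265 would be used instead
  hwaccel_args: preset-intel-qsv-h264
```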

:::note

The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
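A minimal compose sketch of setting that variable (the service name is an assumption):

```yaml
services:
  frigate:
    environment:
      # force the i965 VA-API driver instead of the default iHD
      - LIBVA_DRIVER_NAME=i965
```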

-See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/000005505/processors.html to figure out what generation your CPU is.)
+See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/000005505/processors.html) to figure out what generation your CPU is.

:::
@@ -379,7 +383,7 @@ Make sure to follow the [Rockchip specific installation instructions](/frigate/i

### Configuration

-Add one of the following FFmpeg presets to your `config.yaml` to enable hardware video processing:
+Add one of the following FFmpeg presets to your `config.yml` to enable hardware video processing:

```yaml
# if you try to decode a h264 encoded stream
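The preset list itself is cut off at the hunk boundary above. For orientation only, a hedged sketch of what such a preset looks like in use; `preset-rk-h264` follows the Rockchip preset naming and is an assumption, not part of this commit:

```yaml
ffmpeg:
  # decode h264 camera streams on the Rockchip VPU
  hwaccel_args: preset-rk-h264
```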
@@ -11,11 +11,21 @@ Frigate intelligently uses three different streaming technologies to display you

The jsmpeg live view will use more browser and client GPU resources. Using go2rtc is highly recommended and will provide a superior experience.

-| Source | Latency | Frame Rate | Resolution | Audio | Requires go2rtc | Other Limitations |
-| ------ | ------- | ---------- | ---------- | ----- | --------------- | ----------------- |
-| jsmpeg | low | same as `detect -> fps`, capped at 10 | 720p | no | no | resolution is configurable, but go2rtc is recommended if you want higher resolutions |
-| mse | low | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only |
-| webrtc | lowest | native | native | yes (depends on audio codec) | yes | requires extra config, doesn't support h.265 |
+| Source | Frame Rate | Resolution | Audio | Requires go2rtc | Notes |
+| ------ | ---------- | ---------- | ----- | --------------- | ----- |
+| jsmpeg | same as `detect -> fps`, capped at 10 | 720p | no | no | Resolution is configurable, but go2rtc is recommended if you want higher resolutions and better frame rates. jsmpeg is Frigate's default without go2rtc configured. |
+| mse | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only. This is Frigate's default when go2rtc is configured. |
+| webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration, doesn't support h.265. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. |
+
+### Camera Settings Recommendations
+
+If you are using go2rtc, you should adjust the following settings in your camera's firmware for the best experience with Live view:
+
+- Video codec: **H.264** - provides the most compatible video codec with all Live view technologies and browsers. Avoid any kind of "smart codec" or "+" codec like _H.264+_ or _H.265+_, as these non-standard codecs remove keyframes (see below).
+- Audio codec: **AAC** - provides the most compatible audio codec with all Live view technologies and browsers that support audio.
+- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes.
+
+The default video and audio codec on your camera may not always be compatible with your browser, which is why setting them to H.264 and AAC is recommended. See the [go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#codecs-madness) for codec support information.

### Audio Support

@@ -32,6 +42,15 @@ go2rtc:
      - "ffmpeg:http_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus)
```

+If your camera does not have audio and you are having problems with Live view, you should have go2rtc send video only:
+
+```yaml
+go2rtc:
+  streams:
+    no_audio_camera:
+      - ffmpeg:rtsp://192.168.1.5:554/live0#video=copy
+```

### Setting Stream For Live UI

There may be some cameras that you would prefer to use the sub stream for live view, but the main stream for recording. This can be done via `live -> stream_name`.
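A brief sketch of that override (stream names and URLs are assumptions):

```yaml
go2rtc:
  streams:
    back:
      - rtsp://192.168.1.5:554/main
    back_sub:
      - rtsp://192.168.1.5:554/sub
cameras:
  back:
    live:
      # show the lighter sub stream in the Live UI
      stream_name: back_sub
```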
@@ -167,7 +167,7 @@ This detector also supports YOLOX. Frigate does not come with any YOLOX models p

#### YOLO-NAS

-[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).
+[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).

:::warning
24	docs/docs/configuration/pwa.md Normal file

@@ -0,0 +1,24 @@
+---
+id: pwa
+title: Installing Frigate App
+---
+
+Frigate supports being installed as a [Progressive Web App](https://web.dev/explore/progressive-web-apps) on Desktop, Android, and iOS.
+
+This adds features including the ability to deep link directly into the app.
+
+## Requirements
+
+In order to install Frigate as a PWA, the following requirements must be met:
+
+- Frigate must be accessed via a secure context (localhost, secure https, etc.)
+- On Android, Firefox, Chrome, Edge, Opera, and Samsung Internet Browser all support installing PWAs.
+- On iOS 16.4 and later, PWAs can be installed from the Share menu in Safari, Chrome, Edge, Firefox, and Orion.
+
+## Installation
+
+Installation varies slightly based on the device that is being used:
+
+- Desktop: Use the install button typically found in the right edge of the address bar
+- Android: Use the `Install as App` button in the more options menu
+- iOS: Use the `Add to Homescreen` button in the share menu
@@ -334,6 +334,9 @@ review:
      - car
      - person
    # Optional: required zones for an object to be marked as an alert (default: none)
+   # NOTE: when setting required zones globally, this zone must exist on all cameras
+   # or the config will be considered invalid. In that case the required_zones
+   # should be configured at the camera level.
    required_zones:
      - driveway
  # Optional: detections configuration

@@ -343,12 +346,20 @@ review:
      - car
      - person
    # Optional: required zones for an object to be marked as a detection (default: none)
+   # NOTE: when setting required zones globally, this zone must exist on all cameras
+   # or the config will be considered invalid. In that case the required_zones
+   # should be configured at the camera level.
    required_zones:
      - driveway

# Optional: Motion configuration
# NOTE: Can be overridden at the camera level
motion:
+  # Optional: enables detection for the camera (default: True)
+  # NOTE: Motion detection is required for object detection,
+  # setting this to False and leaving detect enabled
+  # will result in an error on startup.
+  enabled: False
  # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
  # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
  # The value should be between 1 and 255.
@@ -726,6 +737,8 @@ cameras:
    genai:
      # Optional: Enable AI description generation (default: shown below)
      enabled: False
+     # Optional: Use the object snapshot instead of thumbnails for description generation (default: shown below)
+     use_snapshot: False
      # Optional: The default prompt for generating descriptions. Can use replacement
      # variables like "label", "sub_label", "camera" to make more dynamic. (default: shown below)
      prompt: "Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background."
@@ -802,7 +815,7 @@ camera_groups:
      - side_cam
      - front_doorbell_cam
    # Required: icon used for group
-   icon: car
+   icon: LuCar
    # Required: index of this group
    order: 0
```
@@ -41,8 +41,6 @@ review:

By default all detections that do not qualify as an alert qualify as a detection. However, detections can further be filtered to only include certain labels or certain zones.

-By default a review item will only be marked as an alert if a person or car is detected. This can be configured to include any object or audio label using the following config:
-
```yaml
# can be overridden at the camera level
review:
@@ -69,6 +69,7 @@ Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known
| Intel i5 7500 | ~ 15 ms | Inference speeds on CPU were ~ 260 ms |
| Intel i5 1135G7 | 10 - 15 ms | |
| Intel i5 12600K | ~ 15 ms | Inference speeds on CPU were ~ 35 ms |
+| Intel Arc A750 | ~ 4 ms | |

### TensorRT - Nvidia GPU
@@ -13,7 +13,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect

# Setup a go2rtc stream

-First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#module-streams), not just rtsp.
+First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. For the best experience, you should set the stream name under go2rtc to match the name of your camera so that Frigate will automatically map it and be able to use better live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#module-streams), not just rtsp.

```yaml
go2rtc:
@@ -22,7 +22,7 @@ go2rtc:
      - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
```

-The easiest live view to get working is MSE. After adding this to the config, restart Frigate and try to watch the live stream by selecting MSE in the dropdown after clicking on the camera.
+After adding this to the config, restart Frigate and try to watch the live stream for a single camera by clicking on it from the dashboard. It should look much clearer and more fluid than the original jsmpeg stream.

### What if my video doesn't play?

@@ -46,7 +46,7 @@ The easiest live view to get working is MSE. After adding this to the config, re
  streams:
    back:
      - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
-     - "ffmpeg:back#video=h264"
+     - "ffmpeg:back#video=h264#hardware"
```

- Switch to FFmpeg if needed:

@@ -58,9 +58,8 @@ The easiest live view to get working is MSE. After adding this to the config, re
      - ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
```

-- If you can see the video but do not have audio, this is most likely because your
-camera's audio stream is not AAC.
-- If possible, update your camera's audio settings to AAC.
+- If you can see the video but do not have audio, this is most likely because your camera's audio stream codec is not AAC.
+- If possible, update your camera's audio settings to AAC in your camera's firmware.
- If your cameras do not support AAC audio, you will need to tell go2rtc to re-encode the audio to AAC on demand if you want audio. This will use additional CPU and add some latency. To add AAC audio on demand, you can update your go2rtc config as follows:

```yaml
go2rtc:
@@ -77,7 +76,7 @@ camera's audio stream is not AAC.
  streams:
    back:
      - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
-     - "ffmpeg:back#video=h264#audio=aac"
+     - "ffmpeg:back#video=h264#audio=aac#hardware"
```

When using the ffmpeg module, you would add AAC audio like this:

@@ -86,7 +85,7 @@ camera's audio stream is not AAC.
go2rtc:
  streams:
    back:
-     - "ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2#video=copy#audio=copy#audio=aac"
+     - "ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2#video=copy#audio=copy#audio=aac#hardware"
```

:::warning

@@ -102,4 +101,4 @@ section.
## Next steps

1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera); a brief sketch follows this list.
-1. You may also prefer to [setup WebRTC](/configuration/live#webrtc-extra-configuration) for slightly lower latency than MSE. Note that WebRTC only supports h264 and specific audio formats.
+2. You may also prefer to [setup WebRTC](/configuration/live#webrtc-extra-configuration) for slightly lower latency than MSE. Note that WebRTC only supports h264 and specific audio formats and may require opening ports on your router.
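A compressed sketch of that migration for the `back` camera used in the examples above; the `preset-rtsp-restream` input args follow the linked restream docs, and this is a sketch rather than the documented config:

```yaml
go2rtc:
  streams:
    back:
      - rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
cameras:
  back:
    ffmpeg:
      inputs:
        # pull from go2rtc's local RTSP restream instead of the camera directly
        - path: rtsp://127.0.0.1:8554/back
          input_args: preset-rtsp-restream
          roles:
            - record
            - detect
```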
@@ -3,25 +3,38 @@ id: reverse_proxy
title: Setting up a reverse proxy
---

-This guide outlines the basic configuration steps needed to expose your Frigate UI to the internet.
-A common way of accomplishing this is to use a reverse proxy webserver between your router and your Frigate instance.
-A reverse proxy accepts HTTP requests from the public internet and redirects them transparently to internal webserver(s) on your network.
+This guide outlines the basic configuration steps needed to set up a reverse proxy in front of your Frigate instance.

-The suggested steps are:
+A reverse proxy is typically needed if you want to set up Frigate on a custom URL, on a subdomain, or on a host serving multiple sites. It could also be used to set up your own authentication provider or for more advanced HTTP routing.

-- **Configure** a 'proxy' HTTP webserver (such as [Apache2](https://httpd.apache.org/docs/current/) or [NPM](https://github.com/NginxProxyManager/nginx-proxy-manager)) and only expose ports 80/443 from this webserver to the internet
-- **Encrypt** content from the proxy webserver by installing SSL (such as with [Let's Encrypt](https://letsencrypt.org/)). Note that SSL is then not required on your Frigate webserver as the proxy encrypts all requests for you
-- **Restrict** access to your Frigate instance at the proxy using, for example, password authentication
+Before setting up a reverse proxy, check if any of the built-in functionality in Frigate suits your needs:
+
+| Topic | Docs |
+| - | - |
+| TLS | Please see the `tls` [configuration option](../configuration/tls.md) |
+| Authentication | Please see the [authentication](../configuration/authentication.md) documentation |
+| IPv6 | [Enabling IPv6](../configuration/advanced.md#enabling-ipv6) |
+
+**Note about TLS**
+
+When using a reverse proxy, the TLS session is usually terminated at the proxy, sending the internal request over plain HTTP. If this is the desired behavior, TLS must first be disabled in Frigate, or you will encounter an HTTP 400 error: "The plain HTTP request was sent to HTTPS port."
+
+To disable TLS, set the following in your Frigate configuration:
+
+```yml
+tls:
+  enabled: false
+```

:::warning
-A reverse proxy can be used to secure access to an internal webserver but the user will be entirely reliant
-on the steps they have taken. You must ensure you are following security best practices.
+A reverse proxy can be used to secure access to an internal web server, but the user will be entirely reliant on the steps they have taken. You must ensure you are following security best practices.
This page does not attempt to outline the specific steps needed to secure your internal website.
Please use your own knowledge to assess and vet the reverse proxy software before you install anything on your system.
:::

-There are several technologies available to implement reverse proxies. This document currently suggests one, using Apache2,
-and the community is invited to document others through a contribution to this page.
+## Proxies
+
+There are many solutions available to implement reverse proxies and the community is invited to help out documenting others through a contribution to this page.
+
+* [Apache2](#apache2-reverse-proxy)
+* [Nginx](#nginx-reverse-proxy)
+* [Traefik](#traefik-reverse-proxy)

## Apache2 Reverse Proxy

@@ -141,3 +154,26 @@ The settings below enabled connection upgrade, sets up logging (optional) and pr
}

```

+## Traefik Reverse Proxy
+
+This example shows how to add a `label` to the Frigate Docker compose file, enabling Traefik to automatically discover your Frigate instance.
+Before using the example below, you must first set up Traefik with the [Docker provider](https://doc.traefik.io/traefik/providers/docker/).
+
+```yml
+services:
+  frigate:
+    container_name: frigate
+    image: ghcr.io/blakeblackshear/frigate:stable
+    ...
+    ...
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.services.frigate.loadbalancer.server.port=8971"
+      - "traefik.http.routers.frigate.rule=Host(`traefik.example.com`)"
+```
+
+The above configuration will create a "service" in Traefik, automatically adding your container's IP on port 8971 as a backend.
+It will also add a router, routing requests to "traefik.example.com" to your local container.
+
+Note that with this approach, you don't need to expose any ports for the Frigate instance since all traffic will be routed over the internal Docker network.
0	docs/docs/integrations/api.md Normal file
@@ -25,7 +25,7 @@ Available via HACS as a default repository. To install:
- Use [HACS](https://hacs.xyz/) to install the integration:

```
-Home Assistant > HACS > Integrations > "Explore & Add Integrations" > Frigate
+Home Assistant > HACS > Click in the Search bar and type "Frigate" > Frigate
```

- Restart Home Assistant.
@@ -11,7 +11,7 @@ These are the MQTT messages generated by Frigate. The default topic_prefix is `f

Designed to be used as an availability topic with Home Assistant. Possible messages are:
"online": published when Frigate is running (on startup)
-"offline": published right before Frigate stops
+"offline": published after Frigate has stopped
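As a sketch, a Home Assistant MQTT binary sensor built on this topic; this assumes the default `frigate/available` availability topic with the default prefix:

```yaml
mqtt:
  binary_sensor:
    - name: "Frigate status"
      state_topic: "frigate/available"
      payload_on: "online"
      payload_off: "offline"
      device_class: running
```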

### `frigate/restart`
@@ -23,7 +23,7 @@ In Frigate, you can use an environment variable or a docker secret named `PLUS_A

:::warning

-You cannot use the `environment_vars` section of your configuration file to set this environment variable.
+You cannot use the `environment_vars` section of your Frigate configuration file to set this environment variable. It must be defined as an environment variable in the docker config or HA addon config.

:::
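A minimal compose sketch of defining it at the container level (the key value is a placeholder):

```yaml
services:
  frigate:
    environment:
      # set the Frigate+ key outside the Frigate config file
      PLUS_API_KEY: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
```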
@@ -18,3 +18,7 @@ Please use your own knowledge to assess and vet them before you install anything
[Double Take](https://github.com/skrashevich/double-take) provides a unified UI and API for processing and training images for facial recognition.
It supports automatically setting the sub labels in Frigate for person objects that are detected and recognized.
This is a fork (with fixed errors and new features) of the [original Double Take](https://github.com/jakowenko/double-take) project which, unfortunately, is no longer maintained by its author.
+
+## [Frigate telegram](https://github.com/OldTyT/frigate-telegram)
+
+[Frigate telegram](https://github.com/OldTyT/frigate-telegram) makes it possible to send events from Frigate to Telegram. Events are sent as a message with a text description, video, and thumbnail.
@@ -28,6 +28,18 @@ The USB coral has different IDs when it is uninitialized and initialized.
- When running Frigate in a VM, Proxmox lxc, etc. you must ensure both device IDs are mapped.
- When running HA OS you may need to run the Full Access version of the Frigate addon with the `Protected Mode` switch disabled so that the coral can be accessed.

+### Synology 716+II running DSM 7.2.1-69057 Update 5
+
+Some users have reported that this older device runs an older kernel, causing issues with the coral not being detected. The following steps allowed it to be detected correctly:
+
+1. Plug the coral TPU into any of the USB ports on the NAS.
+2. Open the control panel - info screen. The coral TPU will be shown as a generic device.
+3. Start the docker container with the Coral TPU enabled in the config (a compose sketch follows this list).
+4. The TPU will be detected, but a few moments later it will disconnect.
+5. While leaving the TPU device plugged in, restart the NAS using the reboot command in the UI. Do NOT unplug the NAS or power it off.
+6. Open the control panel - info screen. The coral TPU will now be recognised as a USB Device - google inc.
+7. Start the frigate container. Everything should work now!
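For step 3, a hedged compose sketch of passing the Coral through to the container; the device path follows the usual USB Coral guidance, and the service name is an assumption:

```yaml
services:
  frigate:
    devices:
      # map the USB bus so both Coral device IDs are visible
      - /dev/bus/usb:/dev/bus/usb
```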

## USB Coral Detection Appears to be Stuck

The USB Coral can become stuck and need to be restarted; this can happen for a number of reasons depending on hardware and software setup. Some common reasons are:
0	docs/sidebars.js Normal file
BIN	docs/static/img/plus/send-to-plus.jpg vendored (binary file not shown; before: 57 KiB, after: 62 KiB)
BIN	docs/static/img/plus/submit-to-plus.jpg vendored (binary file not shown; before: 63 KiB, after: 49 KiB)
9	frigate/api/defs/regenerate_query_parameters.py Normal file

@@ -0,0 +1,9 @@
+from typing import Optional
+
+from pydantic import BaseModel
+
+from frigate.events.types import RegenerateDescriptionEnum
+
+
+class RegenerateQueryParameters(BaseModel):
+    source: Optional[RegenerateDescriptionEnum] = RegenerateDescriptionEnum.thumbnails
@@ -31,6 +31,9 @@ from frigate.api.defs.events_query_parameters import (
    EventsSearchQueryParams,
    EventsSummaryQueryParams,
)
+from frigate.api.defs.regenerate_query_parameters import (
+    RegenerateQueryParameters,
+)
from frigate.api.defs.tags import Tags
from frigate.const import (
    CLIPS_DIR,
@@ -996,7 +999,9 @@ def set_description(


@router.put("/events/{event_id}/description/regenerate")
-def regenerate_description(request: Request, event_id: str):
+def regenerate_description(
+    request: Request, event_id: str, params: RegenerateQueryParameters = Depends()
+):
    try:
        event: Event = Event.get(Event.id == event_id)
    except DoesNotExist:
@@ -1009,7 +1014,7 @@ def regenerate_description(request: Request, event_id: str):
        request.app.frigate_config.semantic_search.enabled
        and request.app.frigate_config.genai.enabled
    ):
-        request.app.event_metadata_updater.publish(event.id)
+        request.app.event_metadata_updater.publish((event.id, params.source))

    return JSONResponse(
        content=(
@@ -1017,7 +1022,8 @@ def regenerate_description(request: Request, event_id: str):
                "success": True,
                "message": "Event "
                + event_id
-                + " description regeneration has been requested.",
+                + " description regeneration has been requested using "
+                + params.source,
            }
        ),
        status_code=200,
@@ -1,6 +1,8 @@
"""Export apis."""

import logging
+import random
+import string
from pathlib import Path
from typing import Optional
@@ -72,8 +74,10 @@ def export_recording(
            status_code=400,
        )

+    export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}"
    exporter = RecordingExporter(
        request.app.frigate_config,
+        export_id,
        camera_name,
        friendly_name,
        existing_image,
@@ -91,6 +95,7 @@ def export_recording(
            {
                "success": True,
                "message": "Starting export of recording.",
+                "export_id": export_id,
            }
        ),
        status_code=200,
@@ -14,7 +14,7 @@ import numpy as np
import pytz
from fastapi import APIRouter, Path, Query, Request, Response
from fastapi.params import Depends
-from fastapi.responses import FileResponse, JSONResponse
+from fastapi.responses import FileResponse, JSONResponse, StreamingResponse
from pathvalidate import sanitize_filename
from peewee import DoesNotExist, fn
from tzlocal import get_localzone_name
@@ -44,7 +44,7 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.media])


-@router.get("{camera_name}")
+@router.get("/{camera_name}")
def mjpeg_feed(
    request: Request,
    camera_name: str,
@@ -60,7 +60,7 @@ def mjpeg_feed(
    }
    if camera_name in request.app.frigate_config.cameras:
        # return a multipart response
-        return Response(
+        return StreamingResponse(
            imagestream(
                request.app.detected_frames_processor,
                camera_name,
@@ -6,7 +6,7 @@ import secrets
import shutil
from multiprocessing import Queue
from multiprocessing.synchronize import Event as MpEvent
-from typing import Any, Optional
+from typing import Optional

import psutil
import uvicorn
@@ -29,11 +29,11 @@ from frigate.comms.mqtt import MqttClient
from frigate.comms.webpush import WebPushClient
from frigate.comms.ws import WebSocketClient
from frigate.comms.zmq_proxy import ZmqProxy
+from frigate.config.config import FrigateConfig
from frigate.const import (
    CACHE_DIR,
    CLIPS_DIR,
    CONFIG_DIR,
-    DEFAULT_DB_PATH,
    EXPORT_DIR,
    MODEL_CACHE_DIR,
    RECORD_DIR,
@@ -77,10 +77,8 @@ logger = logging.getLogger(__name__)


class FrigateApp:
-    audio_process: Optional[mp.Process] = None
-
-    # TODO: Fix FrigateConfig usage, so we can properly annotate it here without mypy erroring out.
-    def __init__(self, config: Any) -> None:
+    def __init__(self, config: FrigateConfig) -> None:
+        self.audio_process: Optional[mp.Process] = None
        self.stop_event: MpEvent = mp.Event()
        self.detection_queue: Queue = mp.Queue()
        self.detectors: dict[str, ObjectDetectProcess] = {}
@@ -149,13 +147,6 @@ class FrigateApp:
        except PermissionError:
            logger.error("Unable to write to /config to save DB state")

-        # Migrate DB location
-        old_db_path = DEFAULT_DB_PATH
-        if not os.path.isfile(self.config.database.path) and os.path.isfile(
-            old_db_path
-        ):
-            os.rename(old_db_path, self.config.database.path)
-
        # Migrate DB schema
        migrate_db = SqliteExtDatabase(self.config.database.path)
@@ -281,7 +272,7 @@ class FrigateApp:
        except PermissionError:
            logger.error("Unable to write to /config to save export state")

-        migrate_exports(self.config.ffmpeg, self.config.cameras.keys())
+        migrate_exports(self.config.ffmpeg, list(self.config.cameras.keys()))

    def init_external_event_processor(self) -> None:
        self.external_event_processor = ExternalEventProcessor(self.config)
@ -325,7 +316,9 @@ class FrigateApp:
         largest_frame = max(
             [
                 det.model.height * det.model.width * 3
-                for (name, det) in self.config.detectors.items()
+                if det.model is not None
+                else 320
+                for det in self.config.detectors.values()
             ]
         )
         shm_in = mp.shared_memory.SharedMemory(
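A quick sanity check of the rewritten comprehension (a standalone sketch with made-up detector stubs, not Frigate code): detectors whose model is None now contribute the 320-byte fallback instead of raising an attribute error.

# Standalone sketch; _Model and _Det are hypothetical stand-ins for detector configs.
class _Model:
    height, width = 320, 320

class _Det:
    def __init__(self, model):
        self.model = model

detectors = {"coral": _Det(_Model()), "cpu": _Det(None)}
largest_frame = max(
    det.model.height * det.model.width * 3 if det.model is not None else 320
    for det in detectors.values()
)
print(largest_frame)  # 307200 (320 * 320 * 3)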
@ -392,6 +385,7 @@ class FrigateApp:

         # create or update region grids for each camera
         for camera in self.config.cameras.values():
+            assert camera.name is not None
             self.region_grids[camera.name] = get_camera_regions_grid(
                 camera.name,
                 camera.detect,
@ -505,10 +499,10 @@ class FrigateApp:
             min_req_shm += 8

         available_shm = total_shm - min_req_shm
-        cam_total_frame_size = 0
+        cam_total_frame_size = 0.0

         for camera in self.config.cameras.values():
-            if camera.enabled:
+            if camera.enabled and camera.detect.width and camera.detect.height:
                 cam_total_frame_size += round(
                     (camera.detect.width * camera.detect.height * 1.5 + 270480)
                     / 1048576,
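For orientation, the per-camera estimate above is the YUV420 frame buffer (width x height x 1.5 bytes) plus a fixed overhead, converted to MiB. A worked example with a hypothetical 1280x720 detect resolution (the rounding precision is assumed; the diff truncates the round() call):

# Illustrative only; 1280x720 is a hypothetical detect resolution.
width, height = 1280, 720
frame_size_mib = round((width * height * 1.5 + 270480) / 1048576, 2)
print(frame_size_mib)  # 1.58 MiB per frame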
@ -4,6 +4,8 @@ import logging
 from enum import Enum
 from typing import Optional

+from frigate.events.types import RegenerateDescriptionEnum
+
 from .zmq_proxy import Publisher, Subscriber

 logger = logging.getLogger(__name__)
@ -23,6 +25,9 @@ class EventMetadataPublisher(Publisher):
             topic = topic.value
         super().__init__(topic)

+    def publish(self, payload: tuple[str, RegenerateDescriptionEnum]) -> None:
+        super().publish(payload)
+

 class EventMetadataSubscriber(Subscriber):
     """Simplifies receiving event metadata."""
@ -35,10 +40,12 @@ class EventMetadataSubscriber(Subscriber):

     def check_for_update(
         self, timeout: float = None
-    ) -> Optional[tuple[EventMetadataTypeEnum, any]]:
+    ) -> Optional[tuple[EventMetadataTypeEnum, str, RegenerateDescriptionEnum]]:
         return super().check_for_update(timeout)

     def _return_object(self, topic: str, payload: any) -> any:
         if payload is None:
-            return (None, None)
-        return (EventMetadataTypeEnum[topic[len(self.topic_base) :]], payload)
+            return (None, None, None)
+        topic = EventMetadataTypeEnum[topic[len(self.topic_base) :]]
+        event_id, source = payload
+        return (topic, event_id, RegenerateDescriptionEnum(source))
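The shape change above, in isolation (a minimal sketch; the event id is made up): the publisher now sends an (event_id, source) pair, and _return_object expands it into a three-part result with the source coerced to the enum.

from frigate.events.types import RegenerateDescriptionEnum

payload = ("1727785349.150633-408hal", "snapshot")  # hypothetical event id
event_id, source = payload
print((event_id, RegenerateDescriptionEnum(source)))
# ('1727785349.150633-408hal', <RegenerateDescriptionEnum.snapshot: 'snapshot'>)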
@ -12,8 +12,7 @@ SOCKET_SUB = "ipc:///tmp/cache/proxy_sub"

 class ZmqProxyRunner(threading.Thread):
     def __init__(self, context: zmq.Context[zmq.Socket]) -> None:
-        threading.Thread.__init__(self)
-        self.name = "detection_proxy"
+        super().__init__(name="detection_proxy")
         self.context = context

     def run(self) -> None:
@ -11,6 +11,7 @@ __all__ = ["GenAIConfig", "GenAICameraConfig", "GenAIProviderEnum"]

 class GenAIProviderEnum(str, Enum):
     openai = "openai"
+    azure_openai = "azure_openai"
     gemini = "gemini"
     ollama = "ollama"

@ -18,6 +19,9 @@ class GenAIProviderEnum(str, Enum):
 # uses BaseModel because some global attributes are not available at the camera level
 class GenAICameraConfig(BaseModel):
     enabled: bool = Field(default=False, title="Enable GenAI for camera.")
+    use_snapshot: bool = Field(
+        default=False, title="Use snapshots for generating descriptions."
+    )
     prompt: str = Field(
         default="Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background.",
         title="Default caption prompt.",
@ -289,7 +289,9 @@ class FrigateConfig(FrigateBaseModel):
         default_factory=dict, title="Frigate environment variables."
     )
     logger: LoggerConfig = Field(
-        default_factory=LoggerConfig, title="Logging configuration."
+        default_factory=LoggerConfig,
+        title="Logging configuration.",
+        validate_default=True,
     )

     # Global config
@ -26,9 +26,6 @@ DETECTOR_KEY = "tensorrt"
 if TRT_SUPPORT:

     class TrtLogger(trt.ILogger):
-        def __init__(self):
-            trt.ILogger.__init__(self)
-
         def log(self, severity, msg):
             logger.log(self.getSeverity(severity), msg)

@ -3,6 +3,7 @@
 import base64
 import io
 import logging
+import os
 import threading
 from multiprocessing.synchronize import Event as MpEvent
 from typing import Optional
@ -19,7 +20,7 @@ from frigate.comms.event_metadata_updater import (
 from frigate.comms.events_updater import EventEndSubscriber, EventUpdateSubscriber
 from frigate.comms.inter_process import InterProcessRequestor
 from frigate.config import FrigateConfig
-from frigate.const import UPDATE_EVENT_DESCRIPTION
+from frigate.const import CLIPS_DIR, UPDATE_EVENT_DESCRIPTION
 from frigate.events.types import EventTypeEnum
 from frigate.genai import get_genai_client
 from frigate.models import Event
@ -136,6 +137,44 @@ class EmbeddingMaintainer(threading.Thread):
                     or set(event.zones) & set(camera_config.genai.required_zones)
                 )
             ):
+                logger.debug(
+                    f"Description generation for {event}, has_snapshot: {event.has_snapshot}"
+                )
+                if event.has_snapshot and camera_config.genai.use_snapshot:
+                    with open(
+                        os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"),
+                        "rb",
+                    ) as image_file:
+                        snapshot_image = image_file.read()
+
+                    img = cv2.imdecode(
+                        np.frombuffer(snapshot_image, dtype=np.int8),
+                        cv2.IMREAD_COLOR,
+                    )
+
+                    # crop snapshot based on region before sending off to genai
+                    height, width = img.shape[:2]
+                    x1_rel, y1_rel, width_rel, height_rel = event.data["region"]
+
+                    x1, y1 = int(x1_rel * width), int(y1_rel * height)
+                    cropped_image = img[
+                        y1 : y1 + int(height_rel * height),
+                        x1 : x1 + int(width_rel * width),
+                    ]
+
+                    _, buffer = cv2.imencode(".jpg", cropped_image)
+                    snapshot_image = buffer.tobytes()
+
+                embed_image = (
+                    [snapshot_image]
+                    if event.has_snapshot and camera_config.genai.use_snapshot
+                    else (
+                        [thumbnail for data in self.tracked_events[event_id]]
+                        if len(self.tracked_events.get(event_id, [])) > 0
+                        else [thumbnail]
+                    )
+                )
+
                 # Generate the description. Call happens in a thread since it is network bound.
                 threading.Thread(
                     target=self._embed_description,
@ -143,12 +182,7 @@ class EmbeddingMaintainer(threading.Thread):
                     daemon=True,
                     args=(
                         event,
-                        [
-                            data["thumbnail"]
-                            for data in self.tracked_events[event_id]
-                        ]
-                        if len(self.tracked_events.get(event_id, [])) > 0
-                        else [thumbnail],
+                        embed_image,
                         metadata,
                     ),
                 ).start()
@ -159,13 +193,15 @@ class EmbeddingMaintainer(threading.Thread):

     def _process_event_metadata(self):
         # Check for regenerate description requests
-        (topic, event_id) = self.event_metadata_subscriber.check_for_update(timeout=1)
+        (topic, event_id, source) = self.event_metadata_subscriber.check_for_update(
+            timeout=1
+        )

         if topic is None:
             return

         if event_id:
-            self.handle_regenerate_description(event_id)
+            self.handle_regenerate_description(event_id, source)

     def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]:
         """Return jpg thumbnail of a region of the frame."""
@ -228,7 +264,7 @@ class EmbeddingMaintainer(threading.Thread):
             description,
         )

-    def handle_regenerate_description(self, event_id: str) -> None:
+    def handle_regenerate_description(self, event_id: str, source: str) -> None:
         try:
             event: Event = Event.get(Event.id == event_id)
         except DoesNotExist:
@ -243,4 +279,40 @@ class EmbeddingMaintainer(threading.Thread):
         metadata = get_metadata(event)
         thumbnail = base64.b64decode(event.thumbnail)

-        self._embed_description(event, [thumbnail], metadata)
+        logger.debug(
+            f"Trying {source} regeneration for {event}, has_snapshot: {event.has_snapshot}"
+        )
+
+        if event.has_snapshot and source == "snapshot":
+            with open(
+                os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"),
+                "rb",
+            ) as image_file:
+                snapshot_image = image_file.read()
+            img = cv2.imdecode(
+                np.frombuffer(snapshot_image, dtype=np.int8), cv2.IMREAD_COLOR
+            )
+
+            # crop snapshot based on region before sending off to genai
+            height, width = img.shape[:2]
+            x1_rel, y1_rel, width_rel, height_rel = event.data["region"]
+
+            x1, y1 = int(x1_rel * width), int(y1_rel * height)
+            cropped_image = img[
+                y1 : y1 + int(height_rel * height), x1 : x1 + int(width_rel * width)
+            ]
+
+            _, buffer = cv2.imencode(".jpg", cropped_image)
+            snapshot_image = buffer.tobytes()
+
+        embed_image = (
+            [snapshot_image]
+            if event.has_snapshot and source == "snapshot"
+            else (
+                [thumbnail for data in self.tracked_events[event_id]]
+                if len(self.tracked_events.get(event_id, [])) > 0
+                else [thumbnail]
+            )
+        )
+
+        self._embed_description(event, embed_image, metadata)
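A worked example of the relative-region crop used in both code paths above (the numbers are illustrative; the *_rel names in the diff indicate the region is stored as fractions of the frame):

# Illustrative values only.
height, width = 720, 1280                       # frame shape
x1_rel, y1_rel, width_rel, height_rel = (0.25, 0.10, 0.50, 0.60)

x1, y1 = int(x1_rel * width), int(y1_rel * height)   # (320, 72)
x2 = x1 + int(width_rel * width)                     # 960
y2 = y1 + int(height_rel * height)                   # 504
# img[y1:y2, x1:x2] would yield a 432x640 crop to send to GenAI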
@ -2,8 +2,6 @@

 import datetime
 import logging
-import signal
-import sys
 import threading
 import time
 from typing import Tuple
@ -73,46 +71,42 @@ class AudioProcessor(util.Process):
     ):
         super().__init__(name="frigate.audio_manager", daemon=True)

-        self.logger = logging.getLogger(self.name)
         self.camera_metrics = camera_metrics
         self.cameras = cameras

     def run(self) -> None:
-        stop_event = threading.Event()
         audio_threads: list[AudioEventMaintainer] = []

         threading.current_thread().name = "process:audio_manager"
-        signal.signal(signal.SIGTERM, lambda sig, frame: sys.exit())

         if len(self.cameras) == 0:
             return

-        try:
-            for camera in self.cameras:
-                audio_thread = AudioEventMaintainer(
-                    camera,
-                    self.camera_metrics,
-                    stop_event,
-                )
-                audio_threads.append(audio_thread)
-                audio_thread.start()
+        for camera in self.cameras:
+            audio_thread = AudioEventMaintainer(
+                camera,
+                self.camera_metrics,
+                self.stop_event,
+            )
+            audio_threads.append(audio_thread)
+            audio_thread.start()

-            self.logger.info(f"Audio processor started (pid: {self.pid})")
+        self.logger.info(f"Audio processor started (pid: {self.pid})")

-            while True:
-                signal.pause()
-        finally:
-            stop_event.set()
-            for thread in audio_threads:
-                thread.join(1)
-                if thread.is_alive():
-                    self.logger.info(f"Waiting for thread {thread.name:s} to exit")
-                    thread.join(10)
+        while not self.stop_event.wait():
+            pass

-            for thread in audio_threads:
-                if thread.is_alive():
-                    self.logger.warning(f"Thread {thread.name} is still alive")
+        for thread in audio_threads:
+            thread.join(1)
+            if thread.is_alive():
+                self.logger.info(f"Waiting for thread {thread.name:s} to exit")
+                thread.join(10)

-            self.logger.info("Exiting audio processor")
+        for thread in audio_threads:
+            if thread.is_alive():
+                self.logger.warning(f"Thread {thread.name} is still alive")
+
+        self.logger.info("Exiting audio processor")


 class AudioEventMaintainer(threading.Thread):
@ -23,8 +23,7 @@ class EventCleanupType(str, Enum):

 class EventCleanup(threading.Thread):
     def __init__(self, config: FrigateConfig, stop_event: MpEvent):
-        threading.Thread.__init__(self)
-        self.name = "event_cleanup"
+        super().__init__(name="event_cleanup")
         self.config = config
         self.stop_event = stop_event
         self.camera_keys = list(self.config.cameras.keys())
@ -54,8 +54,7 @@ class EventProcessor(threading.Thread):
         timeline_queue: Queue,
         stop_event: MpEvent,
     ):
-        threading.Thread.__init__(self)
-        self.name = "event_processor"
+        super().__init__(name="event_processor")
         self.config = config
         self.timeline_queue = timeline_queue
         self.events_in_process: Dict[str, Event] = {}
@ -125,6 +124,9 @@ class EventProcessor(threading.Thread):
         updated_db = False

         # if this is the first message, just store it and continue, its not time to insert it in the db
+        if event_type == EventStateEnum.start:
+            self.events_in_process[event_data["id"]] = event_data
+
         if should_update_db(self.events_in_process[event_data["id"]], event_data):
             updated_db = True
             camera_config = self.config.cameras[camera]
@ -12,3 +12,8 @@ class EventStateEnum(str, Enum):
     start = "start"
     update = "update"
     end = "end"
+
+
+class RegenerateDescriptionEnum(str, Enum):
+    thumbnails = "thumbnails"
+    snapshot = "snapshot"
@ -32,7 +32,7 @@ class LibvaGpuSelector:
         devices = list(filter(lambda d: d.startswith("render"), os.listdir("/dev/dri")))

         if len(devices) < 2:
-            self._selected_gpu = "/dev/dri/renderD128"
+            self._selected_gpu = f"/dev/dri/{devices[0]}"
             return self._selected_gpu

         for device in devices:
@ -91,10 +91,10 @@ PRESETS_HW_ACCEL_DECODE["preset-nvidia-mjpeg"] = PRESETS_HW_ACCEL_DECODE[
 PRESETS_HW_ACCEL_SCALE = {
     "preset-rpi-64-h264": "-r {0} -vf fps={0},scale={1}:{2}",
     "preset-rpi-64-h265": "-r {0} -vf fps={0},scale={1}:{2}",
-    FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.05",
+    FFMPEG_HWACCEL_VAAPI: "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5",
     "preset-intel-qsv-h264": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
     "preset-intel-qsv-h265": "-r {0} -vf vpp_qsv=framerate={0}:w={1}:h={2}:format=nv12,hwdownload,format=nv12,format=yuv420p",
-    FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.05",
+    FFMPEG_HWACCEL_NVIDIA: "-r {0} -vf fps={0},scale_cuda=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5",
     "preset-jetson-h264": "-r {0}",  # scaled in decoder
     "preset-jetson-h265": "-r {0}",  # scaled in decoder
     "preset-rk-h264": "-r {0} -vf scale_rkrga=w={1}:h={2}:format=yuv420p:force_original_aspect_ratio=0,hwmap=mode=read,format=yuv420p",
@ -186,11 +186,11 @@ def parse_preset_hardware_acceleration_scale(
     scale = PRESETS_HW_ACCEL_SCALE.get(arg, PRESETS_HW_ACCEL_SCALE["default"])

     if (
-        ",hwdownload,format=nv12,eq=gamma=1.05" in scale
+        ",hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5" in scale
         and os.environ.get("FFMPEG_DISABLE_GAMMA_EQUALIZER") is not None
     ):
         scale.replace(
-            ",hwdownload,format=nv12,eq=gamma=1.05",
+            ",hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5",
             ":format=nv12,hwdownload,format=nv12,format=yuv420p",
         )

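To see what one of the updated presets expands to (an illustrative substitution, not part of the commit), the placeholders are {0}=fps, {1}=width, {2}=height:

preset = "-r {0} -vf fps={0},scale_vaapi=w={1}:h={2},hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5"
print(preset.format(5, 1280, 720))
# -r 5 -vf fps=5,scale_vaapi=w=1280:h=720,hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5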
frigate/genai/azure-openai.py (new file, 73 lines)
@ -0,0 +1,73 @@
+"""Azure OpenAI Provider for Frigate AI."""
+
+import base64
+import logging
+from typing import Optional
+from urllib.parse import parse_qs, urlparse
+
+from openai import AzureOpenAI
+
+from frigate.config import GenAIProviderEnum
+from frigate.genai import GenAIClient, register_genai_provider
+
+logger = logging.getLogger(__name__)
+
+
+@register_genai_provider(GenAIProviderEnum.azure_openai)
+class OpenAIClient(GenAIClient):
+    """Generative AI client for Frigate using Azure OpenAI."""
+
+    provider: AzureOpenAI
+
+    def _init_provider(self):
+        """Initialize the client."""
+        try:
+            parsed_url = urlparse(self.genai_config.base_url)
+            query_params = parse_qs(parsed_url.query)
+            api_version = query_params.get("api-version", [None])[0]
+            azure_endpoint = f"{parsed_url.scheme}://{parsed_url.netloc}/"
+
+            if not api_version:
+                logger.warning("Azure OpenAI url is missing API version.")
+                return None
+
+        except Exception as e:
+            logger.warning("Error parsing Azure OpenAI url: %s", str(e))
+            return None
+
+        return AzureOpenAI(
+            api_key=self.genai_config.api_key,
+            api_version=api_version,
+            azure_endpoint=azure_endpoint,
+        )
+
+    def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
+        """Submit a request to Azure OpenAI."""
+        encoded_images = [base64.b64encode(image).decode("utf-8") for image in images]
+        try:
+            result = self.provider.chat.completions.create(
+                model=self.genai_config.model,
+                messages=[
+                    {
+                        "role": "user",
+                        "content": [{"type": "text", "text": prompt}]
+                        + [
+                            {
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": f"data:image/jpeg;base64,{image}",
+                                    "detail": "low",
+                                },
+                            }
+                            for image in encoded_images
+                        ],
+                    },
+                ],
+                timeout=self.timeout,
+            )
+        except Exception as e:
+            logger.warning("Azure OpenAI returned an error: %s", str(e))
+            return None
+        if len(result.choices) > 0:
+            return result.choices[0].message.content.strip()
+        return None
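For reference, _init_provider above splits the configured base_url like this (the URL is a made-up example; only the api-version query parameter is required by the parsing logic):

from urllib.parse import parse_qs, urlparse

base_url = "https://my-resource.openai.azure.com/?api-version=2023-05-15"  # hypothetical
parsed = urlparse(base_url)
api_version = parse_qs(parsed.query).get("api-version", [None])[0]
azure_endpoint = f"{parsed.scheme}://{parsed.netloc}/"
print(api_version, azure_endpoint)
# 2023-05-15 https://my-resource.openai.azure.com/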
@ -70,8 +70,7 @@ os.register_at_fork(after_in_child=reopen_std_streams)
 class LogPipe(threading.Thread):
     def __init__(self, log_name: str):
         """Setup the object with a logger and start the thread"""
-        threading.Thread.__init__(self)
-        self.daemon = False
+        super().__init__(daemon=False)
         self.logger = logging.getLogger(log_name)
         self.level = logging.ERROR
         self.deque: Deque[str] = deque(maxlen=100)
@ -12,7 +12,8 @@ from setproctitle import setproctitle

 import frigate.util as util
 from frigate.detectors import create_detector
-from frigate.detectors.detector_config import InputTensorEnum
+from frigate.detectors.detector_config import BaseDetectorConfig, InputTensorEnum
+from frigate.detectors.plugins.rocm import DETECTOR_KEY as ROCM_DETECTOR_KEY
 from frigate.util.builtin import EventsPerSecond, load_labels
 from frigate.util.image import SharedMemoryFrameManager
 from frigate.util.services import listen
@ -22,11 +23,11 @@ logger = logging.getLogger(__name__)

 class ObjectDetector(ABC):
     @abstractmethod
-    def detect(self, tensor_input, threshold=0.4):
+    def detect(self, tensor_input, threshold: float = 0.4):
         pass


-def tensor_transform(desired_shape):
+def tensor_transform(desired_shape: InputTensorEnum):
     # Currently this function only supports BHWC permutations
     if desired_shape == InputTensorEnum.nhwc:
         return None
@ -37,8 +38,8 @@ def tensor_transform(desired_shape):
 class LocalObjectDetector(ObjectDetector):
     def __init__(
         self,
-        detector_config=None,
-        labels=None,
+        detector_config: BaseDetectorConfig = None,
+        labels: str = None,
     ):
         self.fps = EventsPerSecond()
         if labels is None:
@ -47,7 +48,13 @@ class LocalObjectDetector(ObjectDetector):
             self.labels = load_labels(labels)

         if detector_config:
-            self.input_transform = tensor_transform(detector_config.model.input_tensor)
+            if detector_config.type == ROCM_DETECTOR_KEY:
+                # ROCm requires NHWC as input
+                self.input_transform = None
+            else:
+                self.input_transform = tensor_transform(
+                    detector_config.model.input_tensor
+                )
         else:
             self.input_transform = None

@ -108,7 +108,12 @@ def is_better_thumbnail(label, current_thumb, new_obj, frame_shape) -> bool:

 class TrackedObject:
     def __init__(
-        self, camera, colormap, camera_config: CameraConfig, frame_cache, obj_data
+        self,
+        camera,
+        colormap,
+        camera_config: CameraConfig,
+        frame_cache,
+        obj_data: dict[str, any],
     ):
         # set the score history then remove as it is not part of object state
         self.score_history = obj_data["score_history"]
@ -227,8 +232,8 @@ class TrackedObject:
             if self.attributes[attr["label"]] < attr["score"]:
                 self.attributes[attr["label"]] = attr["score"]

-        # populate the sub_label for car with highest scoring logo
-        if self.obj_data["label"] == "car":
+        # populate the sub_label for object with highest scoring logo
+        if self.obj_data["label"] in ["car", "package", "person"]:
             recognized_logos = {
                 k: self.attributes[k]
                 for k in ["ups", "fedex", "amazon"]
@ -236,7 +241,13 @@ class TrackedObject:
             }
             if len(recognized_logos) > 0:
                 max_logo = max(recognized_logos, key=recognized_logos.get)
-                self.obj_data["sub_label"] = (max_logo, recognized_logos[max_logo])
+
+                # don't overwrite sub label if it is already set
+                if (
+                    self.obj_data.get("sub_label") is None
+                    or self.obj_data["sub_label"][0] == max_logo
+                ):
+                    self.obj_data["sub_label"] = (max_logo, recognized_logos[max_logo])

         # check for significant change
         if not self.false_positive:
@ -921,8 +932,7 @@ class TrackedObjectProcessor(threading.Thread):
         ptz_autotracker_thread,
         stop_event,
     ):
-        threading.Thread.__init__(self)
-        self.name = "detected_frames_processor"
+        super().__init__(name="detected_frames_processor")
         self.config = config
         self.dispatcher = dispatcher
         self.tracked_objects_queue = tracked_objects_queue
@ -122,8 +122,7 @@ class FFMpegConverter(threading.Thread):
         quality: int,
         birdseye_rtsp: bool = False,
     ):
-        threading.Thread.__init__(self)
-        self.name = "birdseye_output_converter"
+        super().__init__(name="birdseye_output_converter")
         self.camera = "birdseye"
         self.input_queue = input_queue
         self.stop_event = stop_event
@ -235,7 +234,7 @@ class BroadcastThread(threading.Thread):
         websocket_server,
         stop_event: mp.Event,
     ):
-        super(BroadcastThread, self).__init__()
+        super().__init__()
         self.camera = camera
         self.converter = converter
         self.websocket_server = websocket_server
@ -24,8 +24,7 @@ class FFMpegConverter(threading.Thread):
         out_height: int,
         quality: int,
     ):
-        threading.Thread.__init__(self)
-        self.name = f"{camera}_output_converter"
+        super().__init__(name=f"{camera}_output_converter")
         self.camera = camera
         self.input_queue = input_queue
         self.stop_event = stop_event
@ -102,7 +101,7 @@ class BroadcastThread(threading.Thread):
         websocket_server,
         stop_event: mp.Event,
     ):
-        super(BroadcastThread, self).__init__()
+        super().__init__()
         self.camera = camera
         self.converter = converter
         self.websocket_server = websocket_server
@ -66,8 +66,7 @@ class FFMpegConverter(threading.Thread):
         frame_times: list[float],
         requestor: InterProcessRequestor,
     ):
-        threading.Thread.__init__(self)
-        self.name = f"{config.name}_preview_converter"
+        super().__init__(name=f"{config.name}_preview_converter")
         self.config = config
         self.frame_times = frame_times
         self.requestor = requestor
@ -149,8 +149,7 @@ class PtzAutoTrackerThread(threading.Thread):
         dispatcher: Dispatcher,
         stop_event: MpEvent,
     ) -> None:
-        threading.Thread.__init__(self)
-        self.name = "ptz_autotracker"
+        super().__init__(name="ptz_autotracker")
         self.ptz_autotracker = PtzAutoTracker(
             config, onvif, ptz_metrics, dispatcher, stop_event
         )
@ -325,6 +324,12 @@ class PtzAutoTracker:
     def _write_config(self, camera):
         config_file = os.environ.get("CONFIG_FILE", f"{CONFIG_DIR}/config.yml")

+        # Check if we can use .yaml instead of .yml
+        config_file_yaml = config_file.replace(".yml", ".yaml")
+
+        if os.path.isfile(config_file_yaml):
+            config_file = config_file_yaml
+
         logger.debug(
             f"{camera}: Writing new config with autotracker motion coefficients: {self.config.cameras[camera].onvif.autotracking.movement_weights}"
         )
@ -406,19 +406,19 @@ class OnvifController:
         # The onvif spec says this can report as +INF and -INF, so this may need to be modified
         pan = numpy.interp(
             pan,
-            [-1, 1],
             [
                 self.cams[camera_name]["relative_fov_range"]["XRange"]["Min"],
                 self.cams[camera_name]["relative_fov_range"]["XRange"]["Max"],
             ],
+            [-1, 1],
         )
         tilt = numpy.interp(
             tilt,
-            [-1, 1],
             [
                 self.cams[camera_name]["relative_fov_range"]["YRange"]["Min"],
                 self.cams[camera_name]["relative_fov_range"]["YRange"]["Max"],
             ],
+            [-1, 1],
         )

         move_request.Speed = {
@ -531,11 +531,11 @@ class OnvifController:
         # function takes in 0 to 1 for zoom, interpolate to the values of the camera.
         zoom = numpy.interp(
             zoom,
-            [0, 1],
             [
                 self.cams[camera_name]["absolute_zoom_range"]["XRange"]["Min"],
                 self.cams[camera_name]["absolute_zoom_range"]["XRange"]["Max"],
             ],
+            [0, 1],
         )

         move_request.Speed = {"Zoom": speed}
@ -686,11 +686,11 @@ class OnvifController:
         # store absolute zoom level as 0 to 1 interpolated from the values of the camera
         self.ptz_metrics[camera_name].zoom_level.value = numpy.interp(
             round(status.Position.Zoom.x, 2),
-            [0, 1],
             [
                 self.cams[camera_name]["absolute_zoom_range"]["XRange"]["Min"],
                 self.cams[camera_name]["absolute_zoom_range"]["XRange"]["Max"],
             ],
+            [0, 1],
         )
         logger.debug(
             f"{camera_name}: Camera zoom level: {self.ptz_metrics[camera_name].zoom_level.value}"
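The three hunks above all make the same fix: numpy.interp(x, xp, fp) maps x from the domain xp onto the range fp, so swapping the two arrays reverses the direction of the conversion. A quick check against the zoom-level hunk, with a hypothetical camera zoom range:

import numpy

zoom_min, zoom_max = 0.0, 0.7  # hypothetical camera zoom range

# camera-absolute position -> normalized 0..1 (the corrected argument order)
print(numpy.interp(0.35, [zoom_min, zoom_max], [0, 1]))  # 0.5
# the old order treated the camera range as the output instead
print(numpy.interp(0.35, [0, 1], [zoom_min, zoom_max]))  # 0.245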
@ -23,8 +23,7 @@ class RecordingCleanup(threading.Thread):
     """Cleanup existing recordings based on retention config."""

     def __init__(self, config: FrigateConfig, stop_event: MpEvent) -> None:
-        threading.Thread.__init__(self)
-        self.name = "recording_cleanup"
+        super().__init__(name="recording_cleanup")
         self.config = config
         self.stop_event = stop_event

@ -49,6 +49,7 @@ class RecordingExporter(threading.Thread):
     def __init__(
         self,
         config: FrigateConfig,
+        id: str,
         camera: str,
         name: Optional[str],
         image: Optional[str],
@ -56,8 +57,9 @@ class RecordingExporter(threading.Thread):
         end_time: int,
         playback_factor: PlaybackFactorEnum,
     ) -> None:
-        threading.Thread.__init__(self)
+        super().__init__()
         self.config = config
+        self.export_id = id
         self.camera = camera
         self.user_provided_name = name
         self.user_provided_image = image
@ -172,18 +174,17 @@ class RecordingExporter(threading.Thread):
         logger.debug(
             f"Beginning export for {self.camera} from {self.start_time} to {self.end_time}"
         )
-        export_id = f"{self.camera}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}"
         export_name = (
             self.user_provided_name
             or f"{self.camera.replace('_', ' ')} {self.get_datetime_from_timestamp(self.start_time)} {self.get_datetime_from_timestamp(self.end_time)}"
         )
-        video_path = f"{EXPORT_DIR}/{export_id}.mp4"
+        video_path = f"{EXPORT_DIR}/{self.export_id}.mp4"

-        thumb_path = self.save_thumbnail(export_id)
+        thumb_path = self.save_thumbnail(self.export_id)

         Export.insert(
             {
-                Export.id: export_id,
+                Export.id: self.export_id,
                 Export.camera: self.camera,
                 Export.name: export_name,
                 Export.date: self.start_time,
@ -257,12 +258,12 @@ class RecordingExporter(threading.Thread):
             )
             logger.error(p.stderr)
             Path(video_path).unlink(missing_ok=True)
-            Export.delete().where(Export.id == export_id).execute()
+            Export.delete().where(Export.id == self.export_id).execute()
             Path(thumb_path).unlink(missing_ok=True)
             return
         else:
             Export.update({Export.in_progress: False}).where(
-                Export.id == export_id
+                Export.id == self.export_id
             ).execute()

         logger.debug(f"Finished exporting {video_path}")
@ -62,8 +62,7 @@ class SegmentInfo:

 class RecordingMaintainer(threading.Thread):
     def __init__(self, config: FrigateConfig, stop_event: MpEvent):
-        threading.Thread.__init__(self)
-        self.name = "recording_maintainer"
+        super().__init__(name="recording_maintainer")
         self.config = config

         # create communication for retained recordings
@ -129,10 +128,23 @@ class RecordingMaintainer(threading.Thread):
                 grouped_recordings[camera], key=lambda s: s["start_time"]
             )

-            segment_count = len(grouped_recordings[camera])
-            if segment_count > keep_count:
+            camera_info = self.object_recordings_info[camera]
+            most_recently_processed_frame_time = (
+                camera_info[-1][0] if len(camera_info) > 0 else 0
+            )
+
+            processed_segment_count = len(
+                list(
+                    filter(
+                        lambda r: r["start_time"].timestamp()
+                        < most_recently_processed_frame_time,
+                        grouped_recordings[camera],
+                    )
+                )
+            )
+            if processed_segment_count > keep_count:
                 logger.warning(
-                    f"Unable to keep up with recording segments in cache for {camera}. Keeping the {keep_count} most recent segments out of {segment_count} and discarding the rest..."
+                    f"Unable to keep up with recording segments in cache for {camera}. Keeping the {keep_count} most recent segments out of {processed_segment_count} and discarding the rest..."
                 )
                 to_remove = grouped_recordings[camera][:-keep_count]
                 for rec in to_remove:
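An illustrative check of the new filter (made-up timestamps): only segments older than the most recently processed frame count toward the keep-up warning, so segments that simply have not been processed yet no longer trigger it.

from datetime import datetime

most_recently_processed_frame_time = 1727785394.0  # hypothetical
grouped = [
    {"start_time": datetime.fromtimestamp(1727785300.0)},  # already processed
    {"start_time": datetime.fromtimestamp(1727785380.0)},  # already processed
    {"start_time": datetime.fromtimestamp(1727785400.0)},  # not processed yet
]
processed = [
    s for s in grouped
    if s["start_time"].timestamp() < most_recently_processed_frame_time
]
print(len(processed))  # 2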
@ -146,8 +146,7 @@ class ReviewSegmentMaintainer(threading.Thread):
     """Maintain review segments."""

     def __init__(self, config: FrigateConfig, stop_event: MpEvent):
-        threading.Thread.__init__(self)
-        self.name = "review_segment_maintainer"
+        super().__init__(name="review_segment_maintainer")
         self.config = config
         self.active_review_segments: dict[str, Optional[PendingReviewSegment]] = {}
         self.frame_manager = SharedMemoryFrameManager()
@ -27,8 +27,7 @@ class StatsEmitter(threading.Thread):
         stats_tracking: StatsTrackingTypes,
         stop_event: MpEvent,
     ):
-        threading.Thread.__init__(self)
-        self.name = "frigate_stats_emitter"
+        super().__init__(name="frigate_stats_emitter")
         self.config = config
         self.stats_tracking = stats_tracking
         self.stop_event = stop_event
@ -22,8 +22,7 @@ class StorageMaintainer(threading.Thread):
     """Maintain frigates recording storage."""

     def __init__(self, config: FrigateConfig, stop_event) -> None:
-        threading.Thread.__init__(self)
-        self.name = "storage_maintainer"
+        super().__init__(name="storage_maintainer")
         self.config = config
         self.stop_event = stop_event
         self.camera_storage_stats: dict[str, dict] = {}
@ -74,7 +74,7 @@ class TestFfmpegPresets(unittest.TestCase):
             " ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"])
         )
         assert (
-            "fps=10,scale_cuda=w=2560:h=1920,hwdownload,format=nv12,eq=gamma=1.05"
+            "fps=10,scale_cuda=w=2560:h=1920,hwdownload,format=nv12,eq=gamma=1.4:gamma_weight=0.5"
             in (" ".join(frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]))
         )

@ -39,7 +39,8 @@ class TestGpuStats(unittest.TestCase):
         process.stdout = self.intel_results
         sp.return_value = process
         intel_stats = get_intel_gpu_stats()
+        print(f"the intel stats are {intel_stats}")
         assert intel_stats == {
-            "gpu": "1.34%",
+            "gpu": "1.13%",
             "mem": "-%",
         }
frigate/test/test_obects.py (new file, 50 lines)
@ -0,0 +1,50 @@
+import unittest
+
+from frigate.track.object_attribute import ObjectAttribute
+
+
+class TestAttribute(unittest.TestCase):
+    def test_overlapping_object_selection(self) -> None:
+        attribute = ObjectAttribute(
+            (
+                "amazon",
+                0.80078125,
+                (847, 242, 883, 255),
+                468,
+                2.769230769230769,
+                (702, 134, 1050, 482),
+            )
+        )
+        objects = [
+            {
+                "label": "car",
+                "score": 0.98828125,
+                "box": (728, 223, 1266, 719),
+                "area": 266848,
+                "ratio": 1.0846774193548387,
+                "region": (349, 0, 1397, 1048),
+                "frame_time": 1727785394.498972,
+                "centroid": (997, 471),
+                "id": "1727785349.150633-408hal",
+                "start_time": 1727785349.150633,
+                "motionless_count": 362,
+                "position_changes": 0,
+                "score_history": [0.98828125, 0.95703125, 0.98828125, 0.98828125],
+            },
+            {
+                "label": "person",
+                "score": 0.76953125,
+                "box": (826, 172, 939, 417),
+                "area": 27685,
+                "ratio": 0.46122448979591835,
+                "region": (702, 134, 1050, 482),
+                "frame_time": 1727785394.498972,
+                "centroid": (882, 294),
+                "id": "1727785390.499768-9fbhem",
+                "start_time": 1727785390.499768,
+                "motionless_count": 2,
+                "position_changes": 1,
+                "score_history": [0.8828125, 0.83984375, 0.91796875, 0.94140625],
+            },
+        ]
+        assert attribute.find_best_object(objects) == "1727785390.499768-9fbhem"
@ -7,7 +7,7 @@ from multiprocessing import Queue
 from multiprocessing.synchronize import Event as MpEvent

 from frigate.config import FrigateConfig
-from frigate.events.maintainer import EventTypeEnum
+from frigate.events.maintainer import EventStateEnum, EventTypeEnum
 from frigate.models import Timeline
 from frigate.util.builtin import to_relative_box

@ -23,8 +23,7 @@ class TimelineProcessor(threading.Thread):
         queue: Queue,
         stop_event: MpEvent,
     ) -> None:
-        threading.Thread.__init__(self)
-        self.name = "timeline_processor"
+        super().__init__(name="timeline_processor")
         self.config = config
         self.queue = queue
         self.stop_event = stop_event
@ -44,10 +43,13 @@ class TimelineProcessor(threading.Thread):
                 continue

             if input_type == EventTypeEnum.tracked_object:
-                if prev_event_data is not None and event_data is not None:
-                    self.handle_object_detection(
-                        camera, event_type, prev_event_data, event_data
-                    )
+                # None prev_event_data is only allowed for the start of an event
+                if event_type != EventStateEnum.start and prev_event_data is None:
+                    continue
+
+                self.handle_object_detection(
+                    camera, event_type, prev_event_data, event_data
+                )
             elif input_type == EventTypeEnum.api:
                 self.handle_api_entry(camera, event_type, event_data)

@ -118,10 +120,10 @@ class TimelineProcessor(threading.Thread):
             for e in self.pre_event_cache[event_id]:
                 e[Timeline.data]["sub_label"] = event_data["sub_label"]

-        if event_type == "start":
+        if event_type == EventStateEnum.start:
             timeline_entry[Timeline.class_type] = "visible"
             save = True
-        elif event_type == "update":
+        elif event_type == EventStateEnum.update:
             if (
                 len(prev_event_data["current_zones"]) < len(event_data["current_zones"])
                 and not event_data["stationary"]
@ -140,7 +142,7 @@ class TimelineProcessor(threading.Thread):
                 event_data["attributes"].keys()
             )[0]
             save = True
-        elif event_type == "end":
+        elif event_type == EventStateEnum.end:
             timeline_entry[Timeline.class_type] = "gone"
             save = True

frigate/track/object_attribute.py (new file, 44 lines)
@ -0,0 +1,44 @@
+"""Object attribute."""
+
+from frigate.util.object import area, box_inside
+
+
+class ObjectAttribute:
+    def __init__(self, raw_data: tuple) -> None:
+        self.label = raw_data[0]
+        self.score = raw_data[1]
+        self.box = raw_data[2]
+        self.area = raw_data[3]
+        self.ratio = raw_data[4]
+        self.region = raw_data[5]
+
+    def get_tracking_data(self) -> dict[str, any]:
+        """Return data saved to the object."""
+        return {
+            "label": self.label,
+            "score": self.score,
+            "box": self.box,
+        }
+
+    def find_best_object(self, objects: list[dict[str, any]]) -> str:
+        """Find the best attribute for each object and return its ID."""
+        best_object_area = None
+        best_object_id = None
+
+        for obj in objects:
+            if not box_inside(obj["box"], self.box):
+                continue
+
+            object_area = area(obj["box"])
+
+            # if multiple objects have the same attribute then they
+            # are overlapping, it is most likely that the smaller object
+            # is the one with the attribute
+            if best_object_area is None:
+                best_object_area = object_area
+                best_object_id = obj["id"]
+            elif object_area < best_object_area:
+                best_object_area = object_area
+                best_object_id = obj["id"]
+
+        return best_object_id
@ -198,12 +198,23 @@ def update_yaml_from_url(file_path, url):
 def update_yaml_file(file_path, key_path, new_value):
     yaml = YAML()
     yaml.indent(mapping=2, sequence=4, offset=2)
-    with open(file_path, "r") as f:
-        data = yaml.load(f)
+    try:
+        with open(file_path, "r") as f:
+            data = yaml.load(f)
+    except FileNotFoundError:
+        logger.error(
+            f"Unable to read from Frigate config file {file_path}. Make sure it exists and is readable."
+        )
+        return
+
     data = update_yaml(data, key_path, new_value)
-    with open(file_path, "w") as f:
-        yaml.dump(data, f)
+    try:
+        with open(file_path, "w") as f:
+            yaml.dump(data, f)
+    except Exception as e:
+        logger.error(f"Unable to write to Frigate config file {file_path}: {e}")


 def update_yaml(data, key_path, new_value):
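A hypothetical call to the hardened helper, assuming key_path is a list of keys walking down the YAML tree (the helper's exact contract is not shown in this diff):

# Hypothetical usage; the key_path format is assumed, not confirmed here.
update_yaml_file(
    "/config/config.yml",
    ["cameras", "back", "detect", "enabled"],
    False,
)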
@ -1,15 +1,29 @@
+import faulthandler
 import logging
 import multiprocessing as mp
+import signal
+import sys
+import threading
 from functools import wraps
 from logging.handlers import QueueHandler
-from typing import Any
+from typing import Any, Callable, Optional

 import frigate.log


 class BaseProcess(mp.Process):
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
+    def __init__(
+        self,
+        *,
+        name: Optional[str] = None,
+        target: Optional[Callable] = None,
+        args: tuple = (),
+        kwargs: dict = {},
+        daemon: Optional[bool] = None,
+    ):
+        super().__init__(
+            name=name, target=target, args=args, kwargs=kwargs, daemon=daemon
+        )

     def start(self, *args, **kwargs):
         self.before_start()
@@ -46,10 +60,36 @@ class BaseProcess(mp.Process):


 class Process(BaseProcess):
+    logger: logging.Logger
+
+    @property
+    def stop_event(self) -> threading.Event:
+        # Lazily create the stop_event. This allows the signal handler to tell if anyone is
+        # monitoring the stop event, and to raise a SystemExit if not.
+        if "stop_event" not in self.__dict__:
+            self.__dict__["stop_event"] = threading.Event()
+        return self.__dict__["stop_event"]
+
     def before_start(self) -> None:
         self.__log_queue = frigate.log.log_listener.queue

     def before_run(self) -> None:
-        if self.__log_queue:
-            logging.basicConfig(handlers=[], force=True)
-            logging.getLogger().addHandler(QueueHandler(self.__log_queue))
+        faulthandler.enable()
+
+        def receiveSignal(signalNumber, frame):
+            # Get the stop_event through the dict to bypass lazy initialization.
+            stop_event = self.__dict__.get("stop_event")
+            if stop_event is not None:
+                # Someone is monitoring stop_event. We should set it.
+                stop_event.set()
+            else:
+                # Nobody is monitoring stop_event. We should raise SystemExit.
+                sys.exit()
+
+        signal.signal(signal.SIGTERM, receiveSignal)
+        signal.signal(signal.SIGINT, receiveSignal)
+
+        self.logger = logging.getLogger(self.name)
+
+        logging.basicConfig(handlers=[], force=True)
+        logging.getLogger().addHandler(QueueHandler(self.__log_queue))

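Reviewer note: a hedged sketch of the contract this sets up (`Worker` is illustrative, not from the commit): a child process that polls `self.stop_event` sees a clean `Event.set()` on SIGTERM/SIGINT, while one that never touches the property is terminated through `SystemExit`, because the lazy `__dict__` check tells the handler nobody is waiting on the event.

class Worker(Process):  # the Process class defined above
    def run(self) -> None:
        # touching self.stop_event creates it, opting in to graceful shutdown;
        # assumes before_run() has already installed self.logger
        while not self.stop_event.wait(timeout=1.0):
            self.logger.debug("still running")
        self.logger.info("stop_event set, exiting cleanly")

# worker = Worker(name="worker", daemon=True)
# worker.start()  # BaseProcess.start() calls before_start() first
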
@@ -279,35 +279,61 @@ def get_intel_gpu_stats() -> dict[str, str]:
         logger.error(f"Unable to poll intel GPU stats: {p.stderr}")
         return None
     else:
-        reading = "".join(p.stdout.split())
+        data = json.loads(f'[{"".join(p.stdout.split())}]')
         results: dict[str, str] = {}
+        render = {"global": []}
+        video = {"global": []}

-        # render is used for qsv
-        render = []
-        for result in re.findall(r'"Render/3D/0":{[a-z":\d.,%]+}', reading):
-            packet = json.loads(result[14:])
-            single = packet.get("busy", 0.0)
-            render.append(float(single))
-
-        if render:
-            render_avg = sum(render) / len(render)
-        else:
-            render_avg = 1
-
-        # video is used for vaapi
-        video = []
-        for result in re.findall(r'"Video/\d":{[a-z":\d.,%]+}', reading):
-            packet = json.loads(result[10:])
-            single = packet.get("busy", 0.0)
-            video.append(float(single))
-
-        if video:
-            video_avg = sum(video) / len(video)
-        else:
-            video_avg = 1
-
-        results["gpu"] = f"{round((video_avg + render_avg) / 2, 2)}%"
+        for block in data:
+            global_engine = block.get("engines")
+
+            if global_engine:
+                render_frame = global_engine.get("Render/3D/0", {}).get("busy")
+                video_frame = global_engine.get("Video/0", {}).get("busy")
+
+                if render_frame is not None:
+                    render["global"].append(float(render_frame))
+
+                if video_frame is not None:
+                    video["global"].append(float(video_frame))
+
+            clients = block.get("clients", {})
+
+            if clients and len(clients):
+                for client_block in clients.values():
+                    key = client_block["pid"]
+
+                    if render.get(key) is None:
+                        render[key] = []
+                        video[key] = []
+
+                    client_engine = client_block.get("engine-classes", {})
+
+                    render_frame = client_engine.get("Render/3D", {}).get("busy")
+                    video_frame = client_engine.get("Video", {}).get("busy")
+
+                    if render_frame is not None:
+                        render[key].append(float(render_frame))
+
+                    if video_frame is not None:
+                        video[key].append(float(video_frame))
+
+        results["gpu"] = (
+            f"{round(((sum(render['global']) / len(render['global'])) + (sum(video['global']) / len(video['global']))) / 2, 2)}%"
+        )
         results["mem"] = "-%"
+
+        if len(render.keys()) > 1:
+            results["clients"] = {}
+
+            for key in render.keys():
+                if key == "global":
+                    continue
+
+                results["clients"][key] = (
+                    f"{round(((sum(render[key]) / len(render[key])) + (sum(video[key]) / len(video[key]))) / 2, 2)}%"
+                )
+
         return results

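Reviewer note: intel_gpu_top's JSON mode emits a stream of JSON blocks; stripping whitespace and wrapping stdout in `[...]` turns that stream into one parseable array, which is why the regex scraping above could be dropped. A hedged sketch of the shape the new parser assumes (field names taken from the lookups above, values invented):

import json

sample = """
{
  "engines": {"Render/3D/0": {"busy": 10.0}, "Video/0": {"busy": 4.0}},
  "clients": {
    "1": {
      "pid": "1234",
      "engine-classes": {"Render/3D": {"busy": "8.0"}, "Video": {"busy": "2.0"}}
    }
  }
}
"""

# same trick as the diff: strip whitespace, wrap in [...] to get a JSON array
data = json.loads(f'[{"".join(sample.split())}]')
block = data[0]
render = float(block["engines"]["Render/3D/0"]["busy"])
video = float(block["engines"]["Video/0"]["busy"])
print(f"gpu: {round((render + video) / 2, 2)}%")  # gpu: 7.0%
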
@@ -27,6 +27,7 @@ from frigate.object_detection import RemoteObjectDetector
 from frigate.ptz.autotrack import ptz_moving_at_frame_time
 from frigate.track import ObjectTracker
 from frigate.track.norfair_tracker import NorfairTracker
+from frigate.track.object_attribute import ObjectAttribute
 from frigate.util.builtin import EventsPerSecond, get_tomorrow_at_time
 from frigate.util.image import (
     FrameManager,

@@ -34,7 +35,6 @@ from frigate.util.image import (
     draw_box_with_label,
 )
 from frigate.util.object import (
-    box_inside,
     create_tensor_input,
     get_cluster_candidates,
     get_cluster_region,

@@ -94,6 +94,7 @@ def capture_frames(
     ffmpeg_process,
     config: CameraConfig,
     shm_frame_count: int,
+    shm_frames: list[str],
     frame_shape,
     frame_manager: FrameManager,
     frame_queue,

@@ -108,8 +109,6 @@ def capture_frames(
     skipped_eps = EventsPerSecond()
     skipped_eps.start()

-    shm_frames: list[str] = []
-
     while True:
         fps.value = frame_rate.eps()
         skipped_fps.value = skipped_eps.eps()

@@ -154,10 +153,6 @@ def capture_frames(
             # if the queue is full, skip this frame
             skipped_eps.update()

-    # clear out frames
-    for frame in shm_frames:
-        frame_manager.delete(frame)
-

 class CameraWatchdog(threading.Thread):
     def __init__(

@@ -176,6 +171,7 @@ class CameraWatchdog(threading.Thread):
         self.camera_name = camera_name
         self.config = config
         self.shm_frame_count = shm_frame_count
+        self.shm_frames: list[str] = []
         self.capture_thread = None
         self.ffmpeg_detect_process = None
         self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect")

@@ -308,6 +304,7 @@ class CameraWatchdog(threading.Thread):
         self.capture_thread = CameraCapture(
             self.config,
             self.shm_frame_count,
+            self.shm_frames,
             self.ffmpeg_detect_process,
             self.frame_shape,
             self.frame_queue,

@@ -348,6 +345,7 @@ class CameraCapture(threading.Thread):
         self,
         config: CameraConfig,
         shm_frame_count: int,
+        shm_frames: list[str],
         ffmpeg_process,
         frame_shape,
         frame_queue,

@@ -359,6 +357,7 @@ class CameraCapture(threading.Thread):
         self.name = f"capture:{config.name}"
         self.config = config
         self.shm_frame_count = shm_frame_count
+        self.shm_frames = shm_frames
         self.frame_shape = frame_shape
         self.frame_queue = frame_queue
         self.fps = fps

@@ -374,6 +373,7 @@ class CameraCapture(threading.Thread):
             self.ffmpeg_process,
             self.config,
             self.shm_frame_count,
+            self.shm_frames,
             self.frame_shape,
             self.frame_manager,
             self.frame_queue,

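Reviewer note: the thread running through these hunks is that `shm_frames` stops being a local inside `capture_frames` (which previously deleted the frames itself on exit) and becomes state owned by `CameraWatchdog`, passed by reference into every `CameraCapture` it spawns, so allocated shared-memory frame names survive a capture restart. A toy illustration of the sharing pattern (names illustrative, not from the commit):

class ToyWatchdog:
    def __init__(self) -> None:
        self.shm_frames: list[str] = []  # single owner of the frame names

    def spawn_capture(self) -> "ToyCapture":
        return ToyCapture(self.shm_frames)  # shared reference, not a copy

class ToyCapture:
    def __init__(self, shm_frames: list[str]) -> None:
        self.shm_frames = shm_frames

w = ToyWatchdog()
w.spawn_capture().shm_frames.append("front_cam_frame_0")
assert w.shm_frames == ["front_cam_frame_0"]  # still visible after the capture dies
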
@@ -734,29 +734,34 @@ def process_frames(
         object_tracker.update_frame_times(frame_time)

         # group the attribute detections based on what label they apply to
-        attribute_detections = {}
+        attribute_detections: dict[str, list[ObjectAttribute]] = {}
         for label, attribute_labels in model_config.attributes_map.items():
             attribute_detections[label] = [
-                d for d in consolidated_detections if d[0] in attribute_labels
+                ObjectAttribute(d)
+                for d in consolidated_detections
+                if d[0] in attribute_labels
             ]

-        # build detections and add attributes
+        # build detections
         detections = {}
         for obj in object_tracker.tracked_objects.values():
-            attributes = []
-            # if the objects label has associated attribute detections
-            if obj["label"] in attribute_detections.keys():
-                # add them to attributes if they intersect
-                for attribute_detection in attribute_detections[obj["label"]]:
-                    if box_inside(obj["box"], (attribute_detection[2])):
-                        attributes.append(
-                            {
-                                "label": attribute_detection[0],
-                                "score": attribute_detection[1],
-                                "box": attribute_detection[2],
-                            }
-                        )
-            detections[obj["id"]] = {**obj, "attributes": attributes}
+            detections[obj["id"]] = {**obj, "attributes": []}
+
+        # find the best object for each attribute to be assigned to
+        all_objects: list[dict[str, any]] = object_tracker.tracked_objects.values()
+        for attributes in attribute_detections.values():
+            for attribute in attributes:
+                filtered_objects = filter(
+                    lambda o: attribute.label
+                    in model_config.attributes_map.get(o["label"], []),
+                    all_objects,
+                )
+                selected_object_id = attribute.find_best_object(filtered_objects)
+
+                if selected_object_id is not None:
+                    detections[selected_object_id]["attributes"].append(
+                        attribute.get_tracking_data()
+                    )

         # debug object tracking
         if False:

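Reviewer note: combined with the `ObjectAttribute` fragment at the top of this diff, the selection rule appears to be: among tracked objects whose label accepts the attribute and whose box fully contains the attribute's box, the smallest object wins. A hedged reconstruction with stand-in `box_inside`/`area` helpers (the real helpers live in frigate's util modules):

from typing import Optional

def find_best_object(attribute_box, objects) -> Optional[str]:
    def area(box) -> int:
        return (box[2] - box[0]) * (box[3] - box[1])

    def box_inside(outer, inner) -> bool:
        return (
            inner[0] >= outer[0] and inner[1] >= outer[1]
            and inner[2] <= outer[2] and inner[3] <= outer[3]
        )

    best_object_area = None
    best_object_id = None

    for obj in objects:
        # only consider objects whose box fully contains the attribute
        if not box_inside(obj["box"], attribute_box):
            continue

        object_area = area(obj["box"])

        # overlapping candidates: the tightest box most likely owns the attribute
        if best_object_area is None or object_area < best_object_area:
            best_object_area = object_area
            best_object_id = obj["id"]

    return best_object_id

objects = [
    {"id": "person-1", "box": (0, 0, 100, 200)},
    {"id": "person-2", "box": (10, 10, 80, 120)},
]
print(find_best_object((20, 20, 40, 40), objects))  # person-2 (smaller box)
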
@@ -12,8 +12,7 @@ logger = logging.getLogger(__name__)

 class FrigateWatchdog(threading.Thread):
     def __init__(self, detectors: dict[str, ObjectDetectProcess], stop_event: MpEvent):
-        threading.Thread.__init__(self)
-        self.name = "frigate_watchdog"
+        super().__init__(name="frigate_watchdog")
         self.detectors = detectors
         self.stop_event = stop_event

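Reviewer note: minor cleanup above, passing `name=` through `super().__init__()` instead of assigning `self.name` afterward. Standalone illustration:

import threading

class NamedThread(threading.Thread):
    def __init__(self) -> None:
        super().__init__(name="frigate_watchdog")

print(NamedThread().name)  # frigate_watchdog
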
@@ -17,6 +17,7 @@ import { usePersistence } from "@/hooks/use-persistence";
 import { Skeleton } from "../ui/skeleton";
 import { Button } from "../ui/button";
 import { FaCircleCheck } from "react-icons/fa6";
+import { cn } from "@/lib/utils";

 type AnimatedEventCardProps = {
   event: ReviewSegment;

@@ -107,9 +108,9 @@ export function AnimatedEventCard({
     <Tooltip>
       <TooltipTrigger asChild>
         <div
-          className="relative h-24 4k:h-32"
+          className="relative h-24 flex-shrink-0 overflow-hidden rounded md:rounded-lg 4k:h-32"
           style={{
-            aspectRatio: aspectRatio,
+            aspectRatio: alertVideos ? aspectRatio : undefined,
           }}
           onMouseEnter={isDesktop ? () => setIsHovered(true) : undefined}
           onMouseLeave={isDesktop ? () => setIsHovered(false) : undefined}

@@ -133,7 +134,7 @@ export function AnimatedEventCard({
           )}
           {previews != undefined && (
             <div
-              className="size-full cursor-pointer overflow-hidden rounded md:rounded-lg"
+              className="size-full cursor-pointer"
               onClick={onOpenReview}
               onAuxClick={(e) => {
                 if (e.button === 1) {

@@ -145,7 +146,10 @@ export function AnimatedEventCard({
             >
               {!alertVideos ? (
                 <img
-                  className="size-full select-none"
+                  className={cn(
+                    "h-full w-auto min-w-10 select-none object-contain",
+                    isSafari && !isLoaded ? "hidden" : "visible",
+                  )}
                   src={`${apiHost}${event.thumb_path.replace("/media/frigate/", "")}`}
                   loading={isSafari ? "eager" : "lazy"}
                   onLoad={() => setIsLoaded(true)}

@@ -200,7 +204,14 @@ export function AnimatedEventCard({
             </div>
           </div>
         )}
-        {!isLoaded && <Skeleton className="absolute inset-0" />}
+        {!isLoaded && (
+          <Skeleton
+            style={{
+              aspectRatio: alertVideos ? aspectRatio : 16 / 9,
+            }}
+            className="size-full"
+          />
+        )}
       </div>
     </TooltipTrigger>
     <TooltipContent>

@@ -79,7 +79,7 @@ export default function SearchFilterGroup({
     return [...labels].sort();
   }, [config, filterList, filter]);

-  const { data: allSubLabels } = useSWR("sub_labels");
+  const { data: allSubLabels } = useSWR(["sub_labels", { split_joined: 1 }]);

   const allZones = useMemo<string[]>(() => {
     if (filterList?.zones) {

@@ -14,7 +14,7 @@ export default function ImageLoadingIndicator({
   }

   return isSafari ? (
-    <div className={cn("pointer-events-none bg-gray-300", className)} />
+    <div className={cn("pointer-events-none bg-background_alt", className)} />
   ) : (
     <Skeleton className={cn("pointer-events-none", className)} />
   );

@@ -77,6 +77,17 @@ export default function ObjectLifecycle({
   const [showControls, setShowControls] = useState(false);
   const [showZones, setShowZones] = useState(true);

+  const aspectRatio = useMemo(() => {
+    if (!config) {
+      return 16 / 9;
+    }
+
+    return (
+      config.cameras[event.camera].detect.width /
+      config.cameras[event.camera].detect.height
+    );
+  }, [config, event]);
+
   const getZoneColor = useCallback(
     (zoneName: string) => {
       const zoneColor =

@@ -240,7 +251,15 @@ export default function ObjectLifecycle({
         </div>
       )}

-      <div className="relative flex flex-row justify-center">
+      <div
+        className={cn(
+          "relative mx-auto flex max-h-[50dvh] flex-row justify-center",
+          !imgLoaded && aspectRatio < 16 / 9 && "h-full",
+        )}
+        style={{
+          aspectRatio: !imgLoaded ? aspectRatio : undefined,
+        }}
+      >
         <ImageLoadingIndicator
           className="absolute inset-0"
           imgLoaded={imgLoaded}

@@ -263,7 +282,7 @@ export default function ObjectLifecycle({
           key={event.id}
           ref={imgRef}
           className={cn(
-            "max-h-[50dvh] max-w-full select-none rounded-lg object-contain transition-opacity",
+            "max-h-[50dvh] max-w-full select-none rounded-lg object-contain",
           )}
           loading={isSafari ? "eager" : "lazy"}
           style={

@@ -334,7 +353,9 @@ export default function ObjectLifecycle({
               />
             </Button>
           </TooltipTrigger>
-          <TooltipContent>Adjust annotation settings</TooltipContent>
+          <TooltipPortal>
+            <TooltipContent>Adjust annotation settings</TooltipContent>
+          </TooltipPortal>
         </Tooltip>
       </div>
     </div>

@@ -37,6 +37,7 @@ import {
   MobilePageHeader,
   MobilePageTitle,
 } from "@/components/mobile/MobilePage";
+import { useOverlayState } from "@/hooks/use-overlay-state";

 type ReviewDetailDialogProps = {
   review?: ReviewSegment;

@@ -83,10 +84,15 @@ export default function ReviewDetailDialog({

   // dialog and mobile page

-  const [isOpen, setIsOpen] = useState(review != undefined);
+  const [isOpen, setIsOpen] = useOverlayState(
+    "reviewPane",
+    review != undefined,
+  );

   useEffect(() => {
     setIsOpen(review != undefined);
+    // we know that these deps are correct
+    // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [review]);

   const Overlay = isDesktop ? Sheet : MobilePage;

@@ -102,7 +108,7 @@ export default function ReviewDetailDialog({
   return (
     <>
       <Overlay
-        open={isOpen}
+        open={isOpen ?? false}
         onOpenChange={(open) => {
           if (!open) {
             setReview(undefined);

@@ -27,7 +27,13 @@ import { baseUrl } from "@/api/baseUrl";
 import { cn } from "@/lib/utils";
 import ActivityIndicator from "@/components/indicators/activity-indicator";
 import { ASPECT_VERTICAL_LAYOUT, ASPECT_WIDE_LAYOUT } from "@/types/record";
-import { FaHistory, FaImage, FaRegListAlt, FaVideo } from "react-icons/fa";
+import {
+  FaChevronDown,
+  FaHistory,
+  FaImage,
+  FaRegListAlt,
+  FaVideo,
+} from "react-icons/fa";
 import { FaRotate } from "react-icons/fa6";
 import ObjectLifecycle from "./ObjectLifecycle";
 import {

@@ -45,8 +51,14 @@ import {
 import { ReviewSegment } from "@/types/review";
 import { useNavigate } from "react-router-dom";
 import Chip from "@/components/indicators/Chip";
-import { capitalizeFirstLetter } from "@/utils/stringUtil";
+import { capitalizeAll } from "@/utils/stringUtil";
 import useGlobalMutation from "@/hooks/use-global-mutate";
+import {
+  DropdownMenu,
+  DropdownMenuContent,
+  DropdownMenuItem,
+  DropdownMenuTrigger,
+} from "@/components/ui/dropdown-menu";

 const SEARCH_TABS = [
   "details",

@@ -309,33 +321,36 @@ function ObjectDetailsTab({
     });
   }, [desc, search, mutate]);

-  const regenerateDescription = useCallback(() => {
-    if (!search) {
-      return;
-    }
+  const regenerateDescription = useCallback(
+    (source: "snapshot" | "thumbnails") => {
+      if (!search) {
+        return;
+      }

-    axios
-      .put(`events/${search.id}/description/regenerate`)
-      .then((resp) => {
-        if (resp.status == 200) {
-          toast.success(
-            `A new description has been requested from ${capitalizeFirstLetter(config?.genai.provider ?? "Generative AI")}. Depending on the speed of your provider, the new description may take some time to regenerate.`,
-            {
-              position: "top-center",
-              duration: 7000,
-            },
-          );
-        }
-      })
-      .catch(() => {
-        toast.error(
-          `Failed to call ${capitalizeFirstLetter(config?.genai.provider ?? "Generative AI")} for a new description`,
-          {
-            position: "top-center",
-          },
-        );
-      });
-  }, [search, config]);
+      axios
+        .put(`events/${search.id}/description/regenerate?source=${source}`)
+        .then((resp) => {
+          if (resp.status == 200) {
+            toast.success(
+              `A new description has been requested from ${capitalizeAll(config?.genai.provider.replaceAll("_", " ") ?? "Generative AI")}. Depending on the speed of your provider, the new description may take some time to regenerate.`,
+              {
+                position: "top-center",
+                duration: 7000,
+              },
+            );
+          }
+        })
+        .catch(() => {
+          toast.error(
+            `Failed to call ${capitalizeAll(config?.genai.provider.replaceAll("_", " ") ?? "Generative AI")} for a new description`,
+            {
+              position: "top-center",
+            },
+          );
+        });
+    },
+    [search, config],
+  );

   return (
     <div className="flex flex-col gap-5">

@@ -403,7 +418,37 @@ function ObjectDetailsTab({
         />
         <div className="flex w-full flex-row justify-end gap-2">
           {config?.genai.enabled && (
-            <Button onClick={regenerateDescription}>Regenerate</Button>
+            <div className="flex items-center">
+              <Button
+                className="rounded-r-none border-r-0"
+                onClick={() => regenerateDescription("thumbnails")}
+              >
+                Regenerate
+              </Button>
+              {search.has_snapshot && (
+                <DropdownMenu>
+                  <DropdownMenuTrigger asChild>
+                    <Button className="rounded-l-none border-l-0 px-2">
+                      <FaChevronDown className="size-3" />
+                    </Button>
+                  </DropdownMenuTrigger>
+                  <DropdownMenuContent>
+                    <DropdownMenuItem
+                      className="cursor-pointer"
+                      onClick={() => regenerateDescription("snapshot")}
+                    >
+                      Regenerate from Snapshot
+                    </DropdownMenuItem>
+                    <DropdownMenuItem
+                      className="cursor-pointer"
+                      onClick={() => regenerateDescription("thumbnails")}
+                    >
+                      Regenerate from Thumbnails
+                    </DropdownMenuItem>
+                  </DropdownMenuContent>
+                </DropdownMenu>
+              )}
+            </div>
           )}
           <Button variant="select" onClick={updateDescription}>
             Save

@@ -12,7 +12,7 @@ import {
 import { Event } from "@/types/event";
 import { FrigateConfig } from "@/types/frigateConfig";
 import axios from "axios";
-import { useCallback, useMemo, useState } from "react";
+import { useCallback, useEffect, useMemo, useState } from "react";
 import { isDesktop } from "react-device-detect";
 import { TransformWrapper, TransformComponent } from "react-zoom-pan-pinch";
 import useSWR from "swr";

@@ -62,6 +62,11 @@ export function FrigatePlusDialog({
     upload?.plus_id ? "submitted" : "reviewing",
   );

+  useEffect(
+    () => setState(upload?.plus_id ? "submitted" : "reviewing"),
+    [upload],
+  );
+
   const onSubmitToPlus = useCallback(
     async (falsePositive: boolean) => {
       if (!upload) {

@@ -14,7 +14,8 @@ export default function useKeyboardListener(
 ) {
   const keyDownListener = useCallback(
     (e: KeyboardEvent) => {
-      if (!e) {
+      // @ts-expect-error we know this field exists
+      if (!e || e.target.tagName == "INPUT") {
         return;
       }

@@ -1,4 +1,5 @@
 import { useFullscreen } from "@/hooks/use-fullscreen";
+import useKeyboardListener from "@/hooks/use-keyboard-listener";
 import {
   useHashState,
   usePersistedOverlayState,

@@ -43,6 +44,18 @@ function Live() {
   const { fullscreen, toggleFullscreen, supportsFullScreen } =
     useFullscreen(mainRef);

+  useKeyboardListener(["f"], (key, modifiers) => {
+    if (!modifiers.down) {
+      return;
+    }
+
+    switch (key) {
+      case "f":
+        toggleFullscreen();
+        break;
+    }
+  });
+
   // document title

   useEffect(() => {

@@ -1,3 +1,10 @@
 export const capitalizeFirstLetter = (text: string): string => {
   return text.charAt(0).toUpperCase() + text.slice(1);
 };
+
+export const capitalizeAll = (text: string): string => {
+  return text
+    .split(" ")
+    .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
+    .join(" ");
+};

@@ -611,23 +611,40 @@ function DetectionReview({

   // keyboard

-  useKeyboardListener(["a", "r"], (key, modifiers) => {
+  useKeyboardListener(["a", "r", "PageDown", "PageUp"], (key, modifiers) => {
     if (modifiers.repeat || !modifiers.down) {
       return;
     }

-    if (key == "a" && modifiers.ctrl) {
-      onSelectAllReviews();
-    }
-
-    if (key == "r" && selectedReviews.length > 0) {
-      currentItems?.forEach((item) => {
-        if (selectedReviews.includes(item.id)) {
-          item.has_been_reviewed = true;
-          markItemAsReviewed(item);
-        }
-      });
-      setSelectedReviews([]);
+    switch (key) {
+      case "a":
+        if (modifiers.ctrl) {
+          onSelectAllReviews();
+        }
+        break;
+      case "r":
+        if (selectedReviews.length > 0) {
+          currentItems?.forEach((item) => {
+            if (selectedReviews.includes(item.id)) {
+              item.has_been_reviewed = true;
+              markItemAsReviewed(item);
+            }
+          });
+          setSelectedReviews([]);
+        }
+        break;
+      case "PageDown":
+        contentRef.current?.scrollBy({
+          top: contentRef.current.clientHeight / 2,
+          behavior: "smooth",
+        });
+        break;
+      case "PageUp":
+        contentRef.current?.scrollBy({
+          top: -contentRef.current.clientHeight / 2,
+          behavior: "smooth",
+        });
+        break;
     }
   });

@@ -35,7 +35,12 @@ export default function ExploreView({

   // data

-  const { data: events, mutate } = useSWR<SearchResult[]>(
+  const {
+    data: events,
+    mutate,
+    isLoading,
+    isValidating,
+  } = useSWR<SearchResult[]>(
     [
       "events/explore",
       {

@@ -81,7 +86,7 @@ export default function ExploreView({
     }
   }, [events, searchDetail, setSearchDetail]);

-  if (!events) {
+  if (isLoading) {
     return (
       <ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
     );

@@ -93,6 +98,7 @@ export default function ExploreView({
         <ThumbnailRow
           key={label}
           searchResults={filteredEvents}
+          isValidating={isValidating}
           objectType={label}
           setSearchDetail={setSearchDetail}
         />

@@ -104,12 +110,14 @@ export default function ExploreView({
 type ThumbnailRowType = {
   objectType: string;
   searchResults?: SearchResult[];
+  isValidating: boolean;
   setSearchDetail: (search: SearchResult | undefined) => void;
 };

 function ThumbnailRow({
   objectType,
   searchResults,
+  isValidating,
   setSearchDetail,
 }: ThumbnailRowType) {
   const navigate = useNavigate();

@@ -123,7 +131,7 @@ function ThumbnailRow({

   return (
     <div className="rounded-lg bg-background_alt p-2 md:px-4">
-      <div className="text-lg capitalize">
+      <div className="flex flex-row items-center text-lg capitalize">
         {objectType.replaceAll("_", " ")}
         {searchResults && (
           <span className="ml-3 text-sm text-secondary-foreground">

@@ -135,6 +143,7 @@ function ThumbnailRow({
             tracked objects){" "}
           </span>
         )}
+        {isValidating && <ActivityIndicator className="ml-2 size-4" />}
       </div>
       <div className="flex flex-row items-center space-x-2 py-2">
         {searchResults?.map((event) => (

@@ -191,7 +200,7 @@ function ExploreThumbnailImage({
     <img
       ref={imgRef}
       className={cn(
-        "absolute h-full w-full cursor-pointer rounded-lg object-cover transition-all duration-300 ease-in-out md:rounded-2xl",
+        "absolute h-full w-full cursor-pointer rounded-lg object-cover transition-all duration-300 ease-in-out lg:rounded-2xl",
       )}
       style={
         isIOS

@@ -236,6 +236,25 @@ export default function LiveCameraView({
     return "mse";
   }, [lowBandwidth, mic, webRTC, isRestreamed]);

+  useKeyboardListener(["m"], (key, modifiers) => {
+    if (!modifiers.down) {
+      return;
+    }
+
+    switch (key) {
+      case "m":
+        if (supportsAudioOutput) {
+          setAudio(!audio);
+        }
+        break;
+      case "t":
+        if (supports2WayTalk) {
+          setMic(!mic);
+        }
+        break;
+    }
+  });
+
   // layout state

   const windowAspectRatio = useMemo(() => {

@@ -13,8 +13,8 @@ import { cn } from "@/lib/utils";
 import { FrigateConfig } from "@/types/frigateConfig";
 import { SearchFilter, SearchResult, SearchSource } from "@/types/search";
 import { useCallback, useEffect, useMemo, useRef, useState } from "react";
-import { isMobileOnly } from "react-device-detect";
-import { LuImage, LuSearchX, LuText } from "react-icons/lu";
+import { isDesktop, isMobileOnly } from "react-device-detect";
+import { LuColumns, LuImage, LuSearchX, LuText } from "react-icons/lu";
 import useSWR from "swr";
 import ExploreView from "../explore/ExploreView";
 import useKeyboardListener, {

@@ -26,6 +26,13 @@ import { ScrollArea, ScrollBar } from "@/components/ui/scroll-area";
 import { isEqual } from "lodash";
 import { formatDateToLocaleString } from "@/utils/dateUtil";
 import { TooltipPortal } from "@radix-ui/react-tooltip";
+import { Slider } from "@/components/ui/slider";
+import {
+  Popover,
+  PopoverContent,
+  PopoverTrigger,
+} from "@/components/ui/popover";
+import { usePersistence } from "@/hooks/use-persistence";

 type SearchViewProps = {
   search: string;

@@ -53,10 +60,26 @@ export default function SearchView({
   loadMore,
   hasMore,
 }: SearchViewProps) {
+  const contentRef = useRef<HTMLDivElement | null>(null);
   const { data: config } = useSWR<FrigateConfig>("config", {
     revalidateOnFocus: false,
   });

+  // grid
+
+  const [columnCount, setColumnCount] = usePersistence("exploreGridColumns", 4);
+  const effectiveColumnCount = useMemo(() => columnCount ?? 4, [columnCount]);
+
+  const gridClassName = cn("grid w-full gap-2 px-1 gap-2 lg:gap-4 md:mx-2", {
+    "sm:grid-cols-2": effectiveColumnCount <= 2,
+    "sm:grid-cols-3": effectiveColumnCount === 3,
+    "sm:grid-cols-4": effectiveColumnCount === 4,
+    "sm:grid-cols-5": effectiveColumnCount === 5,
+    "sm:grid-cols-6": effectiveColumnCount === 6,
+    "sm:grid-cols-7": effectiveColumnCount === 7,
+    "sm:grid-cols-8": effectiveColumnCount >= 8,
+  });
+
   // suggestions values

   const allLabels = useMemo<string[]>(() => {

@@ -217,13 +240,25 @@ export default function SearchView({
             return newIndex;
           });
           break;
+        case "PageDown":
+          contentRef.current?.scrollBy({
+            top: contentRef.current.clientHeight / 2,
+            behavior: "smooth",
+          });
+          break;
+        case "PageUp":
+          contentRef.current?.scrollBy({
+            top: -contentRef.current.clientHeight / 2,
+            behavior: "smooth",
+          });
+          break;
       }
     },
     [uniqueResults, inputFocused],
   );

   useKeyboardListener(
-    ["ArrowLeft", "ArrowRight"],
+    ["ArrowLeft", "ArrowRight", "PageDown", "PageUp"],
     onKeyboardShortcut,
     !inputFocused,
   );

@@ -324,7 +359,10 @@ export default function SearchView({
         )}
       </div>

-      <div className="no-scrollbar flex flex-1 flex-wrap content-start gap-2 overflow-y-auto">
+      <div
+        ref={contentRef}
+        className="no-scrollbar flex flex-1 flex-wrap content-start gap-2 overflow-y-auto"
+      >
         {uniqueResults?.length == 0 && !isLoading && (
           <div className="absolute left-1/2 top-1/2 flex -translate-x-1/2 -translate-y-1/2 flex-col items-center justify-center text-center">
             <LuSearchX className="size-16" />

@@ -340,7 +378,7 @@ export default function SearchView({
         )}

         {uniqueResults && (
-          <div className="grid w-full gap-2 px-1 sm:grid-cols-2 md:mx-2 md:grid-cols-4 md:gap-4 3xl:grid-cols-6">
+          <div className={gridClassName}>
             {uniqueResults &&
               uniqueResults.map((value, index) => {
                 const selected = selectedIndex === index;

@@ -409,6 +447,47 @@ export default function SearchView({
               <div className="flex h-12 w-full justify-center">
                 {hasMore && isLoading && <ActivityIndicator />}
               </div>
+
+              {isDesktop && columnCount && (
+                <div
+                  className={cn(
+                    "fixed bottom-12 right-3 z-50 flex flex-row gap-2 lg:bottom-9",
+                  )}
+                >
+                  <Popover>
+                    <Tooltip>
+                      <TooltipTrigger asChild>
+                        <PopoverTrigger asChild>
+                          <div className="cursor-pointer rounded-lg bg-secondary text-secondary-foreground opacity-75 transition-all duration-300 hover:bg-muted hover:opacity-100">
+                            <LuColumns className="size-5 md:m-[6px]" />
+                          </div>
+                        </PopoverTrigger>
+                      </TooltipTrigger>
+                      <TooltipContent>Adjust Grid Columns</TooltipContent>
+                    </Tooltip>
+                    <PopoverContent className="mr-2 w-80">
+                      <div className="space-y-4">
+                        <div className="font-medium leading-none">
+                          Grid Columns
+                        </div>
+                        <div className="flex items-center space-x-4">
+                          <Slider
+                            value={[effectiveColumnCount]}
+                            onValueChange={([value]) => setColumnCount(value)}
+                            max={8}
+                            min={2}
+                            step={1}
+                            className="flex-grow"
+                          />
+                          <span className="w-9 text-center text-sm font-medium">
+                            {effectiveColumnCount}
+                          </span>
+                        </div>
+                      </div>
+                    </PopoverContent>
+                  </Popover>
+                </div>
+              )}
             </>
           )}
         </div>