diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run
index 677126a6d..273182930 100755
--- a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run
+++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run
@@ -79,6 +79,11 @@ if [ ! \( -f "$letsencrypt_path/privkey.pem" -a -f "$letsencrypt_path/fullchain.
-keyout "$letsencrypt_path/privkey.pem" -out "$letsencrypt_path/fullchain.pem" 2>/dev/null
fi
+# build templates for optional FRIGATE_BASE_PATH environment variable
+python3 /usr/local/nginx/get_base_path.py | \
+ tempio -template /usr/local/nginx/templates/base_path.gotmpl \
+ -out /usr/local/nginx/conf/base_path.conf
+
# build templates for optional TLS support
python3 /usr/local/nginx/get_tls_settings.py | \
tempio -template /usr/local/nginx/templates/listen.gotmpl \
diff --git a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf
index 8a98da1f2..64d6396b2 100644
--- a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf
+++ b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf
@@ -96,6 +96,7 @@ http {
gzip_types application/vnd.apple.mpegurl;
include auth_location.conf;
+ include base_path.conf;
location /vod/ {
include auth_request.conf;
@@ -299,6 +300,18 @@ http {
add_header Cache-Control "public";
}
+ location ~ ^/.*-([A-Za-z0-9]+)\.webmanifest$ {
+ access_log off;
+ expires 1y;
+ add_header Cache-Control "public";
+ default_type application/json;
+ proxy_set_header Accept-Encoding "";
+ sub_filter_once off;
+ sub_filter_types application/json;
+ sub_filter '"start_url": "/"' '"start_url" : "$http_x_ingress_path"';
+ sub_filter '"src": "/' '"src": "$http_x_ingress_path/';
+ }
+
sub_filter 'href="/BASE_PATH/' 'href="$http_x_ingress_path/';
sub_filter 'url(/BASE_PATH/' 'url($http_x_ingress_path/';
sub_filter '"/BASE_PATH/dist/' '"$http_x_ingress_path/dist/';
diff --git a/docker/main/rootfs/usr/local/nginx/get_base_path.py b/docker/main/rootfs/usr/local/nginx/get_base_path.py
new file mode 100644
index 000000000..e6fc8cfc6
--- /dev/null
+++ b/docker/main/rootfs/usr/local/nginx/get_base_path.py
@@ -0,0 +1,10 @@
+"""Prints the base path as json to stdout."""
+
+import json
+import os
+
+base_path = os.environ.get("FRIGATE_BASE_PATH", "")
+
+result: dict[str, str] = {"base_path": base_path}
+
+print(json.dumps(result))
diff --git a/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl b/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl
new file mode 100644
index 000000000..ace4443ee
--- /dev/null
+++ b/docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl
@@ -0,0 +1,19 @@
+{{ if .base_path }}
+location = {{ .base_path }} {
+ return 302 {{ .base_path }}/;
+}
+
+location ^~ {{ .base_path }}/ {
+ # remove base_url from the path before passing upstream
+ rewrite ^{{ .base_path }}/(.*) /$1 break;
+
+ proxy_pass $scheme://127.0.0.1:8971;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header Host $host;
+ proxy_set_header X-Ingress-Path {{ .base_path }};
+
+ access_log off;
+}
+{{ end }}
diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md
index c889d2d26..1e128e0e3 100644
--- a/docs/docs/configuration/advanced.md
+++ b/docs/docs/configuration/advanced.md
@@ -172,6 +172,38 @@ listen [::]:8971 ipv6only=off ssl;
listen [::]:5000 ipv6only=off;
```
+## Base path
+
+By default, Frigate runs at the root path (`/`). However, some setups require running Frigate under a custom path prefix (e.g. `/frigate`), especially when Frigate is located behind a reverse proxy that requires path-based routing.
+
+### Set Base Path via HTTP Header
+The preferred way to configure the base path is through the `X-Ingress-Path` HTTP header, which needs to be set to the desired base path in an upstream reverse proxy.
+
+For example, in Nginx:
+```
+location /frigate {
+ proxy_set_header X-Ingress-Path /frigate;
+ proxy_pass http://frigate_backend;
+}
+```
+
+### Set Base Path via Environment Variable
+When it is not feasible to set the base path via an HTTP header, it can also be set via the `FRIGATE_BASE_PATH` environment variable in the Docker Compose file.
+
+For example:
+```
+services:
+ frigate:
+ image: blakeblackshear/frigate:latest
+ environment:
+ - FRIGATE_BASE_PATH=/frigate
+```
+
+For example, this can be used to access Frigate via a Tailscale agent (HTTPS) by simply forwarding all requests to the base path (HTTP):
+```
+tailscale serve --https=443 --bg --set-path /frigate http://localhost:5000/frigate
+```
+
## Custom Dependencies
### Custom ffmpeg build
diff --git a/docs/docs/plus/faq.md b/docs/docs/plus/faq.md
index fb0cd2512..151eb3f60 100644
--- a/docs/docs/plus/faq.md
+++ b/docs/docs/plus/faq.md
@@ -22,3 +22,13 @@ Yes. Models and metadata are stored in the `model_cache` directory within the co
### Can I keep using my Frigate+ models even if I do not renew my subscription?
Yes. Subscriptions to Frigate+ provide access to the infrastructure used to train the models. Models trained with your subscription are yours to keep and use forever. However, do note that the terms and conditions prohibit you from sharing, reselling, or creating derivative products from the models.
+
+### Why can't I submit images to Frigate+?
+
+If you've configured your API key and the Frigate+ Settings page in the UI shows that the key is active, you need to ensure that you've enabled both snapshots and `clean_copy` snapshots for the cameras you'd like to submit images for. Note that `clean_copy` is enabled by default when snapshots are enabled.
+
+```yaml
+snapshots:
+ enabled: true
+ clean_copy: true
+```
diff --git a/frigate/api/app.py b/frigate/api/app.py
index 05013ed12..f19070a3a 100644
--- a/frigate/api/app.py
+++ b/frigate/api/app.py
@@ -9,6 +9,7 @@ import traceback
from datetime import datetime, timedelta
from functools import reduce
from io import StringIO
+from pathlib import Path as FilePath
from typing import Any, Optional
import aiofiles
@@ -174,6 +175,22 @@ def config(request: Request):
config["model"]["all_attributes"] = config_obj.model.all_attributes
config["model"]["non_logo_attributes"] = config_obj.model.non_logo_attributes
+ # Add model plus data if plus is enabled
+ if config["plus"]["enabled"]:
+ model_path = config.get("model", {}).get("path")
+ if model_path:
+ model_json_path = FilePath(model_path).with_suffix(".json")
+ try:
+ with open(model_json_path, "r") as f:
+ model_plus_data = json.load(f)
+ config["model"]["plus"] = model_plus_data
+ except FileNotFoundError:
+ config["model"]["plus"] = None
+ except json.JSONDecodeError:
+ config["model"]["plus"] = None
+ else:
+ config["model"]["plus"] = None
+
# use merged labelamp
for detector_config in config["detectors"].values():
detector_config["model"]["labelmap"] = (
diff --git a/frigate/api/classification.py b/frigate/api/classification.py
index 85b604379..df804f34a 100644
--- a/frigate/api/classification.py
+++ b/frigate/api/classification.py
@@ -6,6 +6,7 @@ import random
import shutil
import string
+import cv2
from fastapi import APIRouter, Depends, Request, UploadFile
from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filename
@@ -14,9 +15,11 @@ from playhouse.shortcuts import model_to_dict
from frigate.api.auth import require_role
from frigate.api.defs.tags import Tags
+from frigate.config.camera import DetectConfig
from frigate.const import FACE_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event
+from frigate.util.path import get_event_snapshot
logger = logging.getLogger(__name__)
@@ -87,16 +90,27 @@ def train_face(request: Request, name: str, body: dict = None):
)
json: dict[str, any] = body or {}
- training_file = os.path.join(
- FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}"
- )
+ training_file_name = sanitize_filename(json.get("training_file", ""))
+ training_file = os.path.join(FACE_DIR, f"train/{training_file_name}")
+ event_id = json.get("event_id")
- if not training_file or not os.path.isfile(training_file):
+ if not training_file_name and not event_id:
return JSONResponse(
content=(
{
"success": False,
- "message": f"Invalid filename or no file exists: {training_file}",
+ "message": "A training file or event_id must be passed.",
+ }
+ ),
+ status_code=400,
+ )
+
+ if training_file_name and not os.path.isfile(training_file):
+ return JSONResponse(
+ content=(
+ {
+ "success": False,
+ "message": f"Invalid filename or no file exists: {training_file_name}",
}
),
status_code=404,
@@ -106,7 +120,36 @@ def train_face(request: Request, name: str, body: dict = None):
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
new_name = f"{sanitized_name}-{rand_id}.webp"
new_file = os.path.join(FACE_DIR, f"{sanitized_name}/{new_name}")
- shutil.move(training_file, new_file)
+
+ if training_file_name:
+ shutil.move(training_file, new_file)
+ else:
+ try:
+ event: Event = Event.get(Event.id == event_id)
+        except Event.DoesNotExist:
+ return JSONResponse(
+ content=(
+ {
+ "success": False,
+ "message": f"Invalid event_id or no event exists: {event_id}",
+ }
+ ),
+ status_code=404,
+ )
+
+ snapshot = get_event_snapshot(event)
+ face_box = event.data["attributes"][0]["box"]
+ detect_config: DetectConfig = request.app.frigate_config.cameras[
+ event.camera
+ ].detect
+
+ # crop onto the face box minus the bounding box itself
+ x1 = int(face_box[0] * detect_config.width) + 2
+ y1 = int(face_box[1] * detect_config.height) + 2
+ x2 = x1 + int(face_box[2] * detect_config.width) - 4
+ y2 = y1 + int(face_box[3] * detect_config.height) - 4
+ face = snapshot[y1:y2, x1:x2]
+ cv2.imwrite(new_file, face)
context: EmbeddingsContext = request.app.embeddings
context.clear_face_classifier()
@@ -115,7 +158,7 @@ def train_face(request: Request, name: str, body: dict = None):
content=(
{
"success": True,
- "message": f"Successfully saved {training_file} as {new_name}.",
+ "message": f"Successfully saved {training_file_name} as {new_name}.",
}
),
status_code=200,
diff --git a/frigate/api/event.py b/frigate/api/event.py
index 88a865318..c4c763bf7 100644
--- a/frigate/api/event.py
+++ b/frigate/api/event.py
@@ -701,6 +701,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
for k, v in event["data"].items()
if k
in [
+ "attributes",
"type",
"score",
"top_score",
diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py
index 7d97f8586..acb891449 100644
--- a/frigate/data_processing/real_time/face.py
+++ b/frigate/data_processing/real_time/face.py
@@ -27,6 +27,8 @@ from .api import RealTimeProcessorApi
logger = logging.getLogger(__name__)
+MAX_DETECTION_HEIGHT = 1080
+MAX_FACE_ATTEMPTS = 100
MIN_MATCHING_FACES = 2
@@ -88,7 +90,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
os.path.join(MODEL_CACHE_DIR, "facedet/facedet.onnx"),
config="",
input_size=(320, 320),
- score_threshold=self.face_config.detection_threshold,
+ score_threshold=0.5,
nms_threshold=0.3,
)
self.landmark_detector = cv2.face.createFacemarkLBF()
@@ -212,11 +214,23 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
self.face_recognizer = None
self.label_map = {}
- def __detect_face(self, input: np.ndarray) -> tuple[int, int, int, int]:
+ def __detect_face(
+ self, input: np.ndarray, threshold: float
+ ) -> tuple[int, int, int, int]:
"""Detect faces in input image."""
if not self.face_detector:
return None
+ # YN face detector fails at extreme definitions
+ # this rescales to a size that can properly detect faces
+ # still retaining plenty of detail
+ if input.shape[0] > MAX_DETECTION_HEIGHT:
+ scale_factor = MAX_DETECTION_HEIGHT / input.shape[0]
+ new_width = int(scale_factor * input.shape[1])
+ input = cv2.resize(input, (new_width, MAX_DETECTION_HEIGHT))
+ else:
+ scale_factor = 1
+
self.face_detector.setInputSize((input.shape[1], input.shape[0]))
faces = self.face_detector.detect(input)
@@ -226,11 +240,14 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
face = None
for _, potential_face in enumerate(faces[1]):
+ if potential_face[-1] < threshold:
+ continue
+
raw_bbox = potential_face[0:4].astype(np.uint16)
- x: int = max(raw_bbox[0], 0)
- y: int = max(raw_bbox[1], 0)
- w: int = raw_bbox[2]
- h: int = raw_bbox[3]
+ x: int = int(max(raw_bbox[0], 0) / scale_factor)
+ y: int = int(max(raw_bbox[1], 0) / scale_factor)
+ w: int = int(raw_bbox[2] / scale_factor)
+ h: int = int(raw_bbox[3] / scale_factor)
bbox = (x, y, x + w, y + h)
if face is None or area(bbox) > area(face):
@@ -300,7 +317,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
left, top, right, bottom = person_box
person = rgb[top:bottom, left:right]
- face_box = self.__detect_face(person)
+ face_box = self.__detect_face(person, self.face_config.detection_threshold)
if not face_box:
logger.debug("Detected no faces for person object.")
@@ -406,7 +423,10 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
),
cv2.IMREAD_COLOR,
)
- face_box = self.__detect_face(img)
+
+ # detect faces with lower confidence since we expect the face
+ # to be visible in uploaded images
+ face_box = self.__detect_face(img, 0.5)
if not face_box:
return {
@@ -463,6 +483,16 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
)
shutil.move(current_file, new_file)
+ files = sorted(
+ os.listdir(folder),
+ key=lambda f: os.path.getctime(os.path.join(folder, f)),
+ reverse=True,
+ )
+
+ # delete oldest face image if maximum is reached
+ if len(files) > MAX_FACE_ATTEMPTS:
+ os.unlink(os.path.join(folder, files[-1]))
+
def expire_object(self, object_id: str):
if object_id in self.detected_faces:
self.detected_faces.pop(object_id)
diff --git a/frigate/util/path.py b/frigate/util/path.py
index dbe51abe5..565f5a357 100644
--- a/frigate/util/path.py
+++ b/frigate/util/path.py
@@ -4,6 +4,9 @@ import base64
import os
from pathlib import Path
+import cv2
+from numpy import ndarray
+
from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.models import Event
@@ -21,6 +24,11 @@ def get_event_thumbnail_bytes(event: Event) -> bytes | None:
return None
+def get_event_snapshot(event: Event) -> ndarray | None:
+ media_name = f"{event.camera}-{event.id}"
+ return cv2.imread(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
+
+
### Deletion
diff --git a/web/public/locales/en/common.json b/web/public/locales/en/common.json
index 4ddd9244e..14b88f707 100644
--- a/web/public/locales/en/common.json
+++ b/web/public/locales/en/common.json
@@ -64,6 +64,7 @@
"button": {
"apply": "Apply",
"reset": "Reset",
+ "done": "Done",
"enabled": "Enabled",
"enable": "Enable",
"disabled": "Disabled",
@@ -94,7 +95,8 @@
"play": "Play",
"unselect": "Unselect",
"export": "Export",
- "deleteNow": "Delete Now"
+ "deleteNow": "Delete Now",
+ "next": "Next"
},
"menu": {
"system": "System",
diff --git a/web/public/locales/en/views/faceLibrary.json b/web/public/locales/en/views/faceLibrary.json
index b95f744d7..46842b7ea 100644
--- a/web/public/locales/en/views/faceLibrary.json
+++ b/web/public/locales/en/views/faceLibrary.json
@@ -1,4 +1,7 @@
{
+ "description": {
+ "addFace": "Walk through adding a new face to the Face Library."
+ },
"documentTitle": "Face Library - Frigate",
"uploadFaceImage": {
"title": "Upload Face Image",
@@ -6,7 +9,8 @@
},
"createFaceLibrary": {
"title": "Create Face Library",
- "desc": "Create a new face library"
+ "desc": "Create a new face library",
+ "nextSteps": "It is recommended to use the Train tab to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle."
},
"train": {
"title": "Train",
@@ -19,12 +23,13 @@
"uploadImage": "Upload Image",
"reprocessFace": "Reprocess Face"
},
+ "readTheDocs": "Read the documentation to view more details on refining images for the Face Library",
"trainFaceAs": "Train Face as:",
- "trainFaceAsPerson": "Train Face as Person",
+ "trainFace": "Train Face",
"toast": {
"success": {
"uploadedImage": "Successfully uploaded image.",
- "addFaceLibrary": "Successfully add face library.",
+ "addFaceLibrary": "{{name}} has successfully been added to the Face Library!",
"deletedFace": "Successfully deleted face.",
"trainedFace": "Successfully trained face.",
"updatedFaceScore": "Successfully updated face score."
diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json
index f19ac5ee6..3d25b92c1 100644
--- a/web/public/locales/en/views/settings.json
+++ b/web/public/locales/en/views/settings.json
@@ -7,7 +7,8 @@
"masksAndZones": "Mask and Zone Editor - Frigate",
"motionTuner": "Motion Tuner - Frigate",
"object": "Object Settings - Frigate",
- "general": "General Settings - Frigate"
+ "general": "General Settings - Frigate",
+ "frigatePlus": "Frigate+ Settings - Frigate"
},
"menu": {
"uiSettings": "UI Settings",
@@ -17,7 +18,8 @@
"motionTuner": "Motion Tuner",
"debug": "Debug",
"users": "Users",
- "notifications": "Notifications"
+ "notifications": "Notifications",
+ "frigateplus": "Frigate+"
},
"dialog": {
"unsavedChanges": {
@@ -515,5 +517,36 @@
"registerFailed": "Failed to save notification registration."
}
}
+ },
+ "frigatePlus": {
+ "title": "Frigate+ Settings",
+ "apiKey": {
+ "title": "Frigate+ API Key",
+ "validated": "Frigate+ API key is detected and validated",
+ "notValidated": "Frigate+ API key is not detected or not validated",
+ "desc": "The Frigate+ API key enables integration with the Frigate+ service.",
+ "plusLink": "Read more about Frigate+"
+ },
+ "snapshotConfig": {
+ "title": "Snapshot Configuration",
+ "desc": "Submitting to Frigate+ requires both snapshots and clean_copy snapshots to be enabled in your config.",
+ "documentation": "Read the documentation",
+ "cleanCopyWarning": "Some cameras have snapshots enabled but have the clean copy disabled. You need to enable clean_copy in your snapshot config to be able to submit images from these cameras to Frigate+.",
+ "table": {
+ "camera": "Camera",
+ "snapshots": "Snapshots",
+ "cleanCopySnapshots": "clean_copy Snapshots"
+ }
+ },
+ "modelInfo": {
+ "title": "Model Information",
+ "modelType": "Model Type",
+ "trainDate": "Train Date",
+ "baseModel": "Base Model",
+ "supportedDetectors": "Supported Detectors",
+ "cameras": "Cameras",
+ "loading": "Loading model information...",
+ "error": "Failed to load model information"
+ }
}
}
diff --git a/web/src/components/indicators/StepIndicator.tsx b/web/src/components/indicators/StepIndicator.tsx
new file mode 100644
index 000000000..641ae32ca
--- /dev/null
+++ b/web/src/components/indicators/StepIndicator.tsx
@@ -0,0 +1,28 @@
+import { cn } from "@/lib/utils";
+
+type StepIndicatorProps = {
+ steps: string[];
+ currentStep: number;
+};
+export default function StepIndicator({
+ steps,
+ currentStep,
+}: StepIndicatorProps) {
+ return (
+
+ {t("createFaceLibrary.nextSteps")} +
+{t("frigatePlus.apiKey.desc")}
+ {!config?.model.plus && ( + <> ++ {t("frigatePlus.modelInfo.loading")} +
+ )} + {config?.model?.plus === null && ( ++ {t("frigatePlus.modelInfo.error")} +
+ )} + {config?.model?.plus && ( +{config.model.plus.name}
++ {new Date( + config.model.plus.trainDate, + ).toLocaleString()} +
+{config.model.plus.baseModel}
++ {config.model.plus.supportedDetectors.join(", ")} +
+
+
| + {t("frigatePlus.snapshotConfig.table.camera")} + | ++ {t("frigatePlus.snapshotConfig.table.snapshots")} + | +
+ |
+
|---|---|---|
| {name} | +
+ {camera.snapshots.enabled ? (
+ |
+
+ {camera.snapshots?.enabled &&
+ camera.snapshots?.clean_copy ? (
+ |
+