diff --git a/.gitignore b/.gitignore
index c9db2929f..7c97a23a0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,3 +22,8 @@ core
!/web/**/*.ts
.idea/*
.ipynb_checkpoints
+
+# Auto-generated Docker Compose Generator config files
+docs/src/components/DockerComposeGenerator/config/devices.ts
+docs/src/components/DockerComposeGenerator/config/hardware.ts
+docs/src/components/DockerComposeGenerator/config/ports.ts
diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md
index 192f4b4c5..e4999c6e8 100644
--- a/docs/docs/configuration/face_recognition.md
+++ b/docs/docs/configuration/face_recognition.md
@@ -19,7 +19,7 @@ Face recognition requires a one-time internet connection to download detection a
### Face Detection
-When running a Frigate+ model (or any custom model that natively detects faces) should ensure that `face` is added to the [list of objects to track](../plus/#available-label-types) either globally or for a specific camera. This will allow face detection to run at the same time as object detection and be more efficient.
+When running a Frigate+ model (or any custom model that natively detects faces), ensure that `face` is added to the [list of objects to track](../plus/index.md#available-label-types) either globally or for a specific camera. This allows face detection to run at the same time as object detection, which is more efficient.
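+
+For example, when running a Frigate+ model, the tracked objects might be configured like this (a minimal sketch; keep any other labels you already track):
+
+```yaml
+objects:
+  track:
+    - person
+    - face
+```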
When running a default COCO model or another model that does not include `face` as a detectable label, face detection will run via CV2 using a lightweight DNN model that runs on the CPU. In this case, you should _not_ define `face` in your list of objects to track.
diff --git a/docs/docs/configuration/genai/config.md b/docs/docs/configuration/genai/config.md
index a02a313ba..a512943c9 100644
--- a/docs/docs/configuration/genai/config.md
+++ b/docs/docs/configuration/genai/config.md
@@ -201,7 +201,7 @@ Cloud Generative AI providers require an active internet connection to send imag
### Ollama Cloud
-Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).
+Ollama also supports [cloud models](https://ollama.com/cloud), where model inference is performed in the cloud. You can connect directly to Ollama Cloud by setting `base_url` to `https://ollama.com` and providing an API key. Alternatively, you can run Ollama locally and use a cloud model name so your local instance forwards requests to the cloud. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).
#### Configuration
@@ -210,7 +210,8 @@ Ollama also supports [cloud models](https://ollama.com/cloud), where your local
1. Navigate to .
- Set **Provider** to `ollama`
- - Set **Base URL** to your local Ollama address (e.g., `http://localhost:11434`)
+ - Set **Base URL** to your local Ollama address (e.g., `http://localhost:11434`) or `https://ollama.com` for direct cloud inference
+ - Set **API key** if required by your endpoint (e.g., when using `https://ollama.com`)
- Set **Model** to the cloud model name
@@ -223,6 +224,16 @@ genai:
model: cloud-model-name
```
+or when using Ollama Cloud directly:
+
+```yaml
+genai:
+ provider: ollama
+ base_url: https://ollama.com
+ model: cloud-model-name
+ api_key: your-api-key
+```
+
diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md
index 2821fb7a2..7519f8e8f 100644
--- a/docs/docs/configuration/object_detectors.md
+++ b/docs/docs/configuration/object_detectors.md
@@ -494,7 +494,7 @@ detectors:
| [YOLO-NAS](#yolo-nas) | ✅ | ✅ | |
| [MobileNet v2](#ssdlite-mobilenet-v2) | ✅ | ✅ | Fast and lightweight model, less accurate than larger models |
| [YOLOX](#yolox) | ✅ | ? | |
-| [D-FINE](#d-fine) | ❌ | ❌ | |
+| [D-FINE / DEIMv2](#d-fine--deimv2) | ❌ | ❌ | |
#### SSDLite MobileNet v2
@@ -710,13 +710,13 @@ model:
-#### D-FINE
+#### D-FINE / DEIMv2
-[D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
+[D-FINE](https://github.com/Peterande/D-FINE) and [DEIMv2](https://github.com/Intellindust-AI-Lab/DEIMv2) are DETR based models that share the same ONNX input/output format. The ONNX exported models are supported, but not included by default. See the models section for downloading [D-FINE](#downloading-d-fine-model) or [DEIMv2](#downloading-deimv2-model) for use in Frigate.
:::warning
-Currently D-FINE models only run on OpenVINO in CPU mode, GPUs currently fail to compile the model
+Currently, D-FINE / DEIMv2 models only run on OpenVINO in CPU mode; GPUs fail to compile the model.
:::
@@ -766,6 +766,31 @@ Note that the labelmap uses a subset of the complete COCO label set that has onl
+
+ DEIMv2 Setup & Config
+
+After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
+
+```yaml
+detectors:
+ ov:
+ type: openvino
+ device: CPU
+
+model:
+ model_type: dfine
+ width: 640
+ height: 640
+ input_tensor: nchw
+ input_dtype: float
+ path: /config/model_cache/deimv2_hgnetv2_n.onnx
+ labelmap_path: /labelmap/coco-80.txt
+```
+
+Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
+
+
+
## Apple Silicon detector
The NPU in Apple Silicon can't be accessed from within a container, so the [Apple Silicon detector client](https://github.com/frigate-nvr/apple-silicon-detector) must first be setup. It is recommended to use the Frigate docker image with `-standard-arm64` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-standard-arm64`.
@@ -947,7 +972,7 @@ The AMD GPU kernel is known problematic especially when converting models to mxr
See [ONNX supported models](#supported-models) for supported models, there are some caveats:
-- D-FINE models are not supported
+- D-FINE / DEIMv2 models are not supported
- YOLO-NAS models are known to not run well on integrated GPUs
## ONNX
@@ -1003,7 +1028,7 @@ detectors:
| [RF-DETR](#rf-detr) | ✅ | ❌ | Supports CUDA Graphs for optimal Nvidia performance |
| [YOLO-NAS](#yolo-nas-1) | ⚠️ | ⚠️ | Not supported by CUDA Graphs |
| [YOLOX](#yolox-1) | ✅ | ✅ | Supports CUDA Graphs for optimal Nvidia performance |
-| [D-FINE](#d-fine) | ⚠️ | ❌ | Not supported by CUDA Graphs |
+| [D-FINE / DEIMv2](#d-fine--deimv2-1) | ⚠️ | ❌ | Not supported by CUDA Graphs |
There is no default model provided, the following formats are supported:
@@ -1215,9 +1240,9 @@ model:
-#### D-FINE
+#### D-FINE / DEIMv2
-[D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
+[D-FINE](https://github.com/Peterande/D-FINE) and [DEIMv2](https://github.com/Intellindust-AI-Lab/DEIMv2) are DETR based models that share the same ONNX input/output format. The ONNX exported models are supported, but not included by default. See the models section for downloading [D-FINE](#downloading-d-fine-model) or [DEIMv2](#downloading-deimv2-model) for use in Frigate.
D-FINE Setup & Config
@@ -1262,6 +1287,28 @@ model:
+
+ DEIMv2 Setup & Config
+
+After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
+
+```yaml
+detectors:
+ onnx:
+ type: onnx
+
+model:
+ model_type: dfine
+ width: 640
+ height: 640
+ input_tensor: nchw
+ input_dtype: float
+ path: /config/model_cache/deimv2_hgnetv2_n.onnx
+ labelmap_path: /labelmap/coco-80.txt
+```
+
+
+
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## CPU Detector (not recommended)
@@ -1405,7 +1452,7 @@ MemryX `.dfp` models are automatically downloaded at runtime, if enabled, to the
#### YOLO-NAS
-The [YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) model included in this detector is downloaded from the [Models Section](#downloading-yolo-nas-model) and compiled to DFP with [mx_nc](https://developer.memryx.com/tools/neural_compiler.html#usage).
+The [YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) model included in this detector is downloaded from the [Models Section](#downloading-yolo-nas-model) and compiled to DFP with [mx_nc](https://developer.memryx.com/2p1/tools/neural_compiler.html#usage).
**Note:** The default model for the MemryX detector is YOLO-NAS 320x320.
@@ -1459,7 +1506,7 @@ model:
#### YOLOv9
-The YOLOv9s model included in this detector is downloaded from [the original GitHub](https://github.com/WongKinYiu/yolov9) like in the [Models Section](#yolov9-1) and compiled to DFP with [mx_nc](https://developer.memryx.com/tools/neural_compiler.html#usage).
+The YOLOv9s model included in this detector is downloaded from [the original GitHub](https://github.com/WongKinYiu/yolov9) like in the [Models Section](#yolov9-1) and compiled to DFP with [mx_nc](https://developer.memryx.com/2p1/tools/neural_compiler.html#usage).
##### Configuration
@@ -1601,19 +1648,39 @@ model:
#### Using a Custom Model
-To use your own model:
+To use your own custom model, first compile it into a [.dfp](https://developer.memryx.com/2p1/specs/files.html#dataflow-program) file, which is the format used by MemryX.
-1. Package your compiled model into a `.zip` file.
+##### Compile the Model
-2. The `.zip` must contain the compiled `.dfp` file.
+Custom models must be compiled using **MemryX SDK 2.1**.
-3. Depending on the model, the compiler may also generate a cropped post-processing network. If present, it will be named with the suffix `_post.onnx`.
+Before compiling your model, install the MemryX Neural Compiler tools from the
+[Install Tools](https://developer.memryx.com/2p1/get_started/install_tools.html) page on the **host**.
-4. Bind-mount the `.zip` file into the container and specify its path using `model.path` in your config.
+:::note
+
+It is recommended to compile the model on the host machine (or another separate machine) rather than inside the Frigate Docker container, since installing the compiler inside the container may conflict with its packages. Create a Python virtual environment on the host and install the compiler there.
+
+:::
-5. Update the `labelmap_path` to match your custom model's labels.
+Once the SDK 2.1 environment is set up, follow the
+[MemryX Compiler](https://developer.memryx.com/2p1/tools/neural_compiler.html#usage) documentation to compile your model.
-For detailed instructions on compiling models, refer to the [MemryX Compiler](https://developer.memryx.com/tools/neural_compiler.html#usage) docs and [Tutorials](https://developer.memryx.com/tutorials/tutorials.html).
+Example:
+
+```bash
+mx_nc -m yolonas.onnx -c 4 --autocrop -v --dfp_fname yolonas.dfp
+```
+
+For detailed instructions on compiling models, refer to the [MemryX Compiler](https://developer.memryx.com/2p1/tools/neural_compiler.html#usage) docs and [Tutorials](https://developer.memryx.com/2p1/tutorials/tutorials.html).
+
+##### Package the Compiled Model
+
+1. Package your compiled model into a `.zip` file.
+
+2. The `.zip` file must contain the compiled `.dfp` file.
+
+3. Depending on the model, the compiler may also generate a cropped post-processing network. If present, it will be named with the suffix `_post.onnx`.
+
+4. Bind-mount the `.zip` file into the container and specify its path using `model.path` in your config (see the example mount after this list).
+
+5. Update `labelmap_path` to match your custom model's labels.
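+
+As an illustration of step 4, a Docker Compose bind mount for the packaged model might look like this (the file name and in-container path are placeholders; any path visible to Frigate works as long as it matches `model.path`):
+
+```yaml
+services:
+  frigate:
+    volumes:
+      - ./my_custom_model.zip:/config/model_cache/my_custom_model.zip
+```
+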
```yaml
# The detector automatically selects the default model if nothing is provided in the config.
@@ -2274,6 +2341,49 @@ COPY --from=build /dfine/output/dfine_${MODEL_SIZE}_obj2coco.onnx /dfine-${MODEL
EOF
```
+### Downloading DEIMv2 Model
+
+[DEIMv2](https://github.com/Intellindust-AI-Lab/DEIMv2) can be exported as ONNX by running the command below. Pretrained weights are available on Hugging Face for two backbone families:
+
+- **HGNetv2** (smaller/faster): `atto`, `femto`, `pico`, `n`
+- **DINOv3** (larger/more accurate): `s`, `m`, `l`, `x`
+
+Set `BACKBONE` and `MODEL_SIZE` in the first line to match your desired variant, using the lowercase form for both build args. Hugging Face model names are capitalized (e.g. `HGNetv2_N`, `DINOv3_S`), while the config files use lowercase (e.g. `hgnetv2_n`, `dinov3_s`); the build command below handles this conversion.
+
+```sh
+docker build . --rm --build-arg BACKBONE=hgnetv2 --build-arg MODEL_SIZE=n --output . -f- <<'EOF'
+FROM python:3.11-slim AS build
+RUN apt-get update && apt-get install --no-install-recommends -y git libgl1 libglib2.0-0 && rm -rf /var/lib/apt/lists/*
+COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
+WORKDIR /deimv2
+RUN git clone https://github.com/Intellindust-AI-Lab/DEIMv2.git .
+# Install CPU-only PyTorch first to avoid pulling CUDA variant
+RUN uv pip install --no-cache --system torch torchvision --index-url https://download.pytorch.org/whl/cpu
+RUN uv pip install --no-cache --system -r requirements.txt
+RUN uv pip install --no-cache --system onnx safetensors huggingface_hub
+RUN mkdir -p output
+ARG BACKBONE
+ARG MODEL_SIZE
+# Download from Hugging Face and convert safetensors to pth
+RUN python3 -c "\
+from huggingface_hub import hf_hub_download; \
+from safetensors.torch import load_file; \
+import torch; \
+backbone = '${BACKBONE}'.replace('hgnetv2','HGNetv2').replace('dinov3','DINOv3'); \
+size = '${MODEL_SIZE}'.upper(); \
+st = load_file(hf_hub_download('Intellindust/DEIMv2_' + backbone + '_' + size + '_COCO', 'model.safetensors')); \
+torch.save({'model': st}, 'output/deimv2.pth')"
+RUN sed -i "s/data = torch.rand(2/data = torch.rand(1/" tools/deployment/export_onnx.py
+# HuggingFace safetensors omits frozen constants that the model constructor initializes
+RUN sed -i "s/cfg.model.load_state_dict(state)/cfg.model.load_state_dict(state, strict=False)/" tools/deployment/export_onnx.py
+RUN python3 tools/deployment/export_onnx.py -c configs/deimv2/deimv2_${BACKBONE}_${MODEL_SIZE}_coco.yml -r output/deimv2.pth
+FROM scratch
+ARG BACKBONE
+ARG MODEL_SIZE
+COPY --from=build /deimv2/output/deimv2.onnx /deimv2_${BACKBONE}_${MODEL_SIZE}.onnx
+EOF
+```
+
### Downloading RF-DETR Model
RF-DETR can be exported as ONNX by running the command below. You can copy and paste the whole thing to your terminal and execute, altering `MODEL_SIZE=Nano` in the first line to `Nano`, `Small`, or `Medium` size.
diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md
index 614beafed..3d5ef35ba 100644
--- a/docs/docs/configuration/record.md
+++ b/docs/docs/configuration/record.md
@@ -195,7 +195,7 @@ Pre and post capture footage is included in the **recording timeline**, visible
## Will Frigate delete old recordings if my storage runs out?
-As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 hours of recordings will be deleted.
+If there is less than an hour left of storage, the oldest hour of recordings will be deleted and a message will be printed in the Frigate logs. This emergency cleanup deletes the oldest recordings first regardless of retention settings to reclaim space as quickly as possible.
## Configuring Recording Retention
diff --git a/docs/docs/configuration/restream.md b/docs/docs/configuration/restream.md
index af4d635c6..d488c5410 100644
--- a/docs/docs/configuration/restream.md
+++ b/docs/docs/configuration/restream.md
@@ -236,7 +236,7 @@ Enabling arbitrary exec sources allows execution of arbitrary commands through g
## Advanced Restream Configurations
-The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
+The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands and other applications. An example is below:
:::warning
@@ -244,16 +244,11 @@ The `exec:`, `echo:`, and `expr:` sources are disabled by default for security.
:::
-:::warning
-
-The `exec:`, `echo:`, and `expr:` sources are disabled by default for security. You must set `GO2RTC_ALLOW_ARBITRARY_EXEC=true` to use them. See [Security: Restricted Stream Sources](#security-restricted-stream-sources) for more information.
-
-:::
-
-NOTE: The output will need to be passed with two curly braces `{{output}}`
+NOTE: RTSP output will need to be passed with two curly braces `{{output}}`, whereas pipe output must be passed without curly braces.
```yaml
go2rtc:
streams:
stream1: exec:ffmpeg -hide_banner -re -stream_loop -1 -i /media/BigBuckBunny.mp4 -c copy -rtsp_transport tcp -f rtsp {{output}}
+ stream2: exec:rpicam-vid -t 0 --libav-format h264 -o -
```
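+
+Streams defined this way are consumed like any other go2rtc stream. For example, a camera could be pointed at the restream with a config along these lines (a minimal sketch, assuming go2rtc's default RTSP port of 8554):
+
+```yaml
+cameras:
+  bunny:
+    ffmpeg:
+      inputs:
+        - path: rtsp://127.0.0.1:8554/stream1
+          roles:
+            - detect
+```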
diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md
index 5d228a609..485a735d8 100644
--- a/docs/docs/frigate/installation.md
+++ b/docs/docs/frigate/installation.md
@@ -4,12 +4,15 @@ title: Installation
---
import ShmCalculator from '@site/src/components/ShmCalculator'
+import DockerComposeGenerator from '@site/src/components/DockerComposeGenerator'
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
Frigate is a Docker container that can be run on any Docker host including as a [Home Assistant App](https://www.home-assistant.io/apps/). Note that the Home Assistant App is **not** the same thing as the integration. The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant, whether you are running Frigate as a standalone Docker container or as a Home Assistant App.
:::tip
-If you already have Frigate installed as a Home Assistant App, check out the [getting started guide](../guides/getting_started#configuring-frigate) to configure Frigate.
+If you already have Frigate installed as a Home Assistant App, check out the [getting started guide](../guides/getting_started.md#configuring-frigate) to configure Frigate.
:::
@@ -286,7 +289,7 @@ The MemryX MX3 Accelerator is available in the M.2 2280 form factor (like an NVM
#### Installation
-To get started with MX3 hardware setup for your system, refer to the [Hardware Setup Guide](https://developer.memryx.com/get_started/hardware_setup.html).
+To get started with MX3 hardware setup for your system, refer to the [Hardware Setup Guide](https://developer.memryx.com/2p1/get_started/install_hardware.html).
Then follow these steps for installing the correct driver/runtime configuration:
@@ -295,6 +298,12 @@ Then follow these steps for installing the correct driver/runtime configuration:
3. Run the script with `./user_installation.sh`
4. **Restart your computer** to complete driver installation.
+:::warning
+
+For manual setup, use **MemryX SDK 2.1** only. Other SDK versions are not supported for this setup. See the [SDK 2.1 documentation](https://developer.memryx.com/2p1/index.html).
+
+:::
+
#### Setup
To set up Frigate, follow the default installation instructions, for example: `ghcr.io/blakeblackshear/frigate:stable`
@@ -468,6 +477,16 @@ Finally, configure [hardware object detection](/configuration/object_detectors#a
Running through Docker with Docker Compose is the recommended install method.
+
+
+
+Generate a Frigate Docker Compose configuration based on your hardware and requirements.
+
+
+
+
+
+
```yaml
services:
frigate:
@@ -501,6 +520,10 @@ services:
environment:
FRIGATE_RTSP_PASSWORD: "password"
```
+
+
+
+**Docker CLI**
If you can't use Docker Compose, you can run the container with something similar to this:
diff --git a/docs/package-lock.json b/docs/package-lock.json
index ed766c1ab..222ae031a 100644
--- a/docs/package-lock.json
+++ b/docs/package-lock.json
@@ -14,9 +14,11 @@
"@docusaurus/theme-mermaid": "^3.7.0",
"@inkeep/docusaurus": "^2.0.16",
"@mdx-js/react": "^3.1.0",
+ "@types/js-yaml": "^4.0.9",
"clsx": "^2.1.1",
"docusaurus-plugin-openapi-docs": "^4.5.1",
"docusaurus-theme-openapi-docs": "^4.5.1",
+ "js-yaml": "^4.1.1",
"prism-react-renderer": "^2.4.1",
"raw-loader": "^4.0.2",
"react": "^18.3.1",
@@ -5747,6 +5749,11 @@
"@types/istanbul-lib-report": "*"
}
},
+ "node_modules/@types/js-yaml": {
+ "version": "4.0.9",
+ "resolved": "https://mirrors.tencent.com/npm/@types/js-yaml/-/js-yaml-4.0.9.tgz",
+ "integrity": "sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg=="
+ },
"node_modules/@types/json-schema": {
"version": "7.0.15",
"resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz",
@@ -12883,7 +12890,7 @@
},
"node_modules/js-yaml": {
"version": "4.1.1",
- "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
+ "resolved": "https://mirrors.tencent.com/npm/js-yaml/-/js-yaml-4.1.1.tgz",
"integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"license": "MIT",
"dependencies": {
diff --git a/docs/package.json b/docs/package.json
index 0ff76c473..e57d7a154 100644
--- a/docs/package.json
+++ b/docs/package.json
@@ -3,9 +3,10 @@
"version": "0.0.0",
"private": true,
"scripts": {
+ "build:config": "node scripts/build-config.mjs",
"docusaurus": "docusaurus",
- "start": "npm run regen-docs && docusaurus start --host 0.0.0.0",
- "build": "npm run regen-docs && docusaurus build",
+ "start": "npm run build:config && npm run regen-docs && docusaurus start --host 0.0.0.0",
+ "build": "npm run build:config && npm run regen-docs && docusaurus build",
"swizzle": "docusaurus swizzle",
"deploy": "docusaurus deploy",
"clear": "docusaurus clear",
@@ -23,9 +24,11 @@
"@docusaurus/theme-mermaid": "^3.7.0",
"@inkeep/docusaurus": "^2.0.16",
"@mdx-js/react": "^3.1.0",
+ "@types/js-yaml": "^4.0.9",
"clsx": "^2.1.1",
"docusaurus-plugin-openapi-docs": "^4.5.1",
"docusaurus-theme-openapi-docs": "^4.5.1",
+ "js-yaml": "^4.1.1",
"prism-react-renderer": "^2.4.1",
"raw-loader": "^4.0.2",
"react": "^18.3.1",
diff --git a/docs/scripts/build-config.mjs b/docs/scripts/build-config.mjs
new file mode 100644
index 000000000..78926bed5
--- /dev/null
+++ b/docs/scripts/build-config.mjs
@@ -0,0 +1,64 @@
+#!/usr/bin/env node
+
+/**
+ * Build script: reads config.yaml and generates TypeScript files
+ * for the Docker Compose Generator.
+ *
+ * Usage: node scripts/build-config.mjs
+ */
+
+import fs from "node:fs";
+import path from "node:path";
+import { fileURLToPath } from "node:url";
+import yaml from "js-yaml";
+
+const __dirname = path.dirname(fileURLToPath(import.meta.url));
+const CONFIG_DIR = path.resolve(__dirname, "../src/components/DockerComposeGenerator/config");
+const YAML_PATH = path.join(CONFIG_DIR, "config.yaml");
+
+// Read & parse YAML
+const raw = fs.readFileSync(YAML_PATH, "utf8");
+const config = yaml.load(raw);
+
+if (!config.devices || !config.hardware || !config.ports) {
+ console.error("config.yaml must contain 'devices', 'hardware', and 'ports' sections.");
+ process.exit(1);
+}
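+
+// Assumed shape of config.yaml (illustrative; the authoritative field definitions live in ./config/types):
+//   devices:  [{ id: "...", ... }]
+//   hardware: [{ id: "...", ... }]
+//   ports:    [{ id: "...", ... }]
+// Each entry needs at least an `id`, since the generated lookup maps are keyed on item.id.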
+
+/**
+ * Generate a .ts file from a section of the YAML config.
+ */
+function generateTsFile(sectionName, items, typeName, varName, mapVarName, yamlFilename) {
+ const jsonItems = JSON.stringify(items, null, 2);
+ // Indent JSON to fit inside the array literal
+ const indented = jsonItems
+ .split("\n")
+ .map((line, i) => (i === 0 ? line : " " + line))
+ .join("\n");
+
+ const content = `/**
+ * AUTO-GENERATED FILE — do not edit directly.
+ * Source: ${yamlFilename}
+ * To update, edit the YAML file and run: npm run build:config
+ */
+
+import type { ${typeName} } from "./types";
+
+export const ${varName}: ${typeName}[] = ${indented};
+
+/** Lookup map for quick access by ID */
+export const ${mapVarName}: Map<string, ${typeName}> = new Map(${varName}.map((item) => [item.id, item]));
+`;
+
+ const outPath = path.join(CONFIG_DIR, `${sectionName}.ts`);
+ fs.writeFileSync(outPath, content, "utf8");
+ console.log(` ✓ Generated ${sectionName}.ts (${items.length} items)`);
+}
+
+console.log("Building config from config.yaml...");
+
+generateTsFile("devices", config.devices, "DeviceConfig", "devices", "deviceMap", "config.yaml");
+generateTsFile("hardware", config.hardware, "HardwareOption", "hardwareOptions", "hardwareMap", "config.yaml");
+generateTsFile("ports", config.ports, "PortConfig", "ports", "portMap", "config.yaml");
+
+console.log("Done!");
diff --git a/docs/src/components/DockerComposeGenerator/DockerComposeGenerator.tsx b/docs/src/components/DockerComposeGenerator/DockerComposeGenerator.tsx
new file mode 100644
index 000000000..b8a8a8fc8
--- /dev/null
+++ b/docs/src/components/DockerComposeGenerator/DockerComposeGenerator.tsx
@@ -0,0 +1,108 @@
+import React from "react";
+import Admonition from "@theme/Admonition";
+import DeviceSelector from "./components/DeviceSelector";
+import HardwareOptions from "./components/HardwareOptions";
+import PortConfigSection from "./components/PortConfig";
+import StoragePaths from "./components/StoragePaths";
+import NvidiaGpuConfig from "./components/NvidiaGpuConfig";
+import OtherOptions from "./components/OtherOptions";
+import GeneratedOutput from "./components/GeneratedOutput";
+import { useConfigGenerator } from "./hooks/useConfigGenerator";
+import styles from "./styles.module.css";
+
+/**
+ * Simple markdown-link-to-React renderer for help text.
+ * Only supports [text](url) syntax — no nested brackets.
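+ * Example: renderHelpText("See the [docs](https://docs.frigate.video).")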
+ */
+function renderHelpText(text: string): React.ReactNode {
+ const parts = text.split(/(\[[^\]]+\]\([^)]+\))/g);
+ return parts.map((part, i) => {
+ const match = part.match(/^\[([^\]]+)\]\(([^)]+)\)$/);
+ if (match) {
+      return (
+        <a key={i} href={match[2]}>
+          {match[1]}
+        </a>
+      );
+    }
+    return <React.Fragment key={i}>{part}</React.Fragment>;
+ });
+}
+
+export default function DockerComposeGenerator() {
+ const {
+ deviceId, device, hardwareEnabled,
+ portEnabled,
+ nvidiaGpuCount, nvidiaGpuDeviceId,
+ configPath, mediaPath, rtspPassword, timezone, shmSize,
+ shmSizeError, gpuDeviceIdError, configPathError, mediaPathError,
+ hasAnyHardware, generatedYaml,
+ selectDevice, toggleHardware, togglePort,
+ handleShmSizeChange, handleConfigPathChange, handleMediaPathChange,
+ handleNvidiaGpuCountChange, handleNvidiaGpuDeviceIdChange,
+ setRtspPassword, setTimezone, isHardwareDisabled,
+ } = useConfigGenerator();
+
+ return (
+