mirror of https://github.com/blakeblackshear/frigate.git
synced 2026-03-25 09:38:22 +03:00

Compare commits: 10 commits, 6dfc9cbf0f...67b40d3e04

Commits in range (newest first): 67b40d3e04, dd1297b66b, 2361e4ed08, a2f57c81cb, 0f9f9bc443, 8b7cc127bf, bf01852197, a4ece9dae3, f862ef5d0c, f74df040bb
@@ -55,7 +55,7 @@ function setup_homekit_config() {
     if [[ ! -f "${config_path}" ]]; then
         echo "[INFO] Creating empty HomeKit config file..."
-        echo '{}' > "${config_path}"
+        echo 'homekit: {}' > "${config_path}"
     fi

     # Convert YAML to JSON for jq processing
@@ -70,12 +70,14 @@ function setup_homekit_config() {
     jq '
         # Keep only the homekit section if it exists, otherwise empty object
         if has("homekit") then {homekit: .homekit} else {homekit: {}} end
-    ' "${temp_json}" > "${cleaned_json}" 2>/dev/null || echo '{"homekit": {}}' > "${cleaned_json}"
+    ' "${temp_json}" > "${cleaned_json}" 2>/dev/null || {
+        echo '{"homekit": {}}' > "${cleaned_json}"
+    }

     # Convert back to YAML and write to the config file
     yq eval -P "${cleaned_json}" > "${config_path}" 2>/dev/null || {
         echo "[WARNING] Failed to convert cleaned config to YAML, creating minimal config"
-        echo '{"homekit": {}}' > "${config_path}"
+        echo 'homekit: {}' > "${config_path}"
     }

     # Clean up temp files
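For readers less familiar with the jq/yq pipeline above, here is a minimal Python sketch of the same sanitization step, assuming PyYAML is available; the function name and error handling are illustrative, not part of the patch:

```python
import yaml  # PyYAML, assumed available


def sanitize_homekit_config(config_path: str) -> None:
    # Load the existing YAML config, falling back to an empty mapping on any error
    try:
        with open(config_path) as f:
            data = yaml.safe_load(f) or {}
    except (OSError, yaml.YAMLError):
        data = {}
    # Keep only the homekit section if it exists, otherwise an empty mapping,
    # mirroring the jq filter in the hunk above
    homekit = data.get("homekit", {}) if isinstance(data, dict) else {}
    with open(config_path, "w") as f:
        yaml.safe_dump({"homekit": homekit}, f)
```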
@@ -39,7 +39,7 @@ For object classification:

 :::note

-A tracked object can only have a single sub label. If you are using Face Recognition and you configure an object classification model for `person` using the sub label type, your sub label may not be assigned correctly as it depends on which enrichment completes its analysis first. Consider using the `attribute` type instead.
+A tracked object can only have a single sub label. If you are using Triggers or Face Recognition and you configure an object classification model for `person` using the sub label type, your sub label may not be assigned correctly as it depends on which enrichment completes its analysis first. Consider using the `attribute` type instead.

 :::
@@ -89,9 +89,9 @@ Creating and training the model is done within the Frigate UI using the `Classif

 ### Step 1: Name and Define

-Enter a name for your model, select the object label to classify (e.g., `person`, `dog`, `car`), choose the classification type (sub label or attribute), and define your classes. Include a `none` class for objects that don't fit any specific category.
+Enter a name for your model, select the object label to classify (e.g., `person`, `dog`, `car`), choose the classification type (sub label or attribute), and define your classes. Frigate will automatically include a `none` class for objects that don't fit any specific category.

-For example: To classify your two cats, create a model named "Our Cats" and create two classes, "Charlie" and "Leo". Create a third class, "none", for other neighborhood cats that are not your own.
+For example: To classify your two cats, create a model named "Our Cats" and create two classes, "Charlie" and "Leo". A third class, "none", will be created automatically for other neighborhood cats that are not your own.

 ### Step 2: Assign Training Examples
@@ -16,12 +16,13 @@ Review summaries provide structured JSON responses that are saved for each revie
 ```
 - `title` (string): A concise, direct title that describes the purpose or overall action (e.g., "Person taking out trash", "Joe walking dog").
 - `scene` (string): A narrative description of what happens across the sequence from start to finish, including setting, detected objects, and their observable actions.
+- `shortSummary` (string): A brief 2-sentence summary of the scene, suitable for notifications. This is a condensed version of the scene description.
 - `confidence` (float): 0-1 confidence in the analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous.
 - `other_concerns` (list): List of user-defined concerns that may need additional investigation.
 - `potential_threat_level` (integer): 0, 1, or 2 as defined below.
 ```

-This will show in multiple places in the UI to give additional context about each activity, and allow viewing more details when extra attention is required. Frigate's built in notifications will also automatically show the title and description when the data is available.
+This will show in multiple places in the UI to give additional context about each activity, and allow viewing more details when extra attention is required. Frigate's built in notifications will automatically show the title and `shortSummary` when the data is available, while the full `scene` description is available in the UI for detailed review.

 ### Defining Typical Activity
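For concreteness, a hedged example of what a saved summary might look like with the new field; every value below is invented purely to illustrate the documented shape:

```python
# Hypothetical review summary payload matching the fields documented above
example_summary = {
    "title": "Person taking out trash",
    "scene": (
        "A person exits the side door carrying a trash bag, walks down the "
        "driveway, places the bag in the bin, and returns inside."
    ),
    "shortSummary": (
        "A person carried a trash bag to the curbside bin. They returned "
        "inside shortly afterward."
    ),
    "confidence": 0.92,
    "other_concerns": [],
    "potential_threat_level": 0,
}
```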
@@ -5,7 +5,7 @@ title: Updating

 # Updating Frigate

-The current stable version of Frigate is **0.16.2**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.2).
+The current stable version of Frigate is **0.17.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.17.0).

 Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups.
@@ -33,21 +33,21 @@ If you’re running Frigate via Docker (recommended method), follow these steps:
 2. **Update and Pull the Latest Image**:

    - If using Docker Compose:
-     - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.2` instead of `0.15.2`). For example:
+     - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.17.0` instead of `0.16.3`). For example:
       ```yaml
       services:
         frigate:
-          image: ghcr.io/blakeblackshear/frigate:0.16.2
+          image: ghcr.io/blakeblackshear/frigate:0.17.0
       ```
     - Then pull the image:
       ```bash
-      docker pull ghcr.io/blakeblackshear/frigate:0.16.2
+      docker pull ghcr.io/blakeblackshear/frigate:0.17.0
       ```
     - **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you don’t need to update the tag manually. The `stable` tag always points to the latest stable release after pulling.
    - If using `docker run`:
-     - Pull the image with the appropriate tag (e.g., `0.16.2`, `0.16.2-tensorrt`, or `stable`):
+     - Pull the image with the appropriate tag (e.g., `0.17.0`, `0.17.0-tensorrt`, or `stable`):
       ```bash
-      docker pull ghcr.io/blakeblackshear/frigate:0.16.2
+      docker pull ghcr.io/blakeblackshear/frigate:0.17.0
       ```

 3. **Start the Container**:
@@ -105,8 +105,8 @@ If an update causes issues:
 1. Stop Frigate.
 2. Restore your backed-up config file and database.
 3. Revert to the previous image version:
-   - For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.15.2`) in your `docker run` command.
-   - For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.15.2`), and re-run `docker compose up -d`.
+   - For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.3`) in your `docker run` command.
+   - For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.3`), and re-run `docker compose up -d`.
    - For Home Assistant: Reinstall the previous addon version manually via the repository if needed and restart the addon.
 4. Verify the old version is running again.
@@ -245,6 +245,12 @@ To load a preview gif of a review item:
 https://HA_URL/api/frigate/notifications/<review-id>/review_preview.gif
 ```

+To load the thumbnail of a review item:
+
+```
+https://HA_URL/api/frigate/notifications/<review-id>/<camera>/review_thumbnail.webp
+```
+
 <a name="streams"></a>

 ## RTSP stream
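A minimal sketch of fetching the new thumbnail endpoint from a script; the host, token, review id, and camera name below are all placeholders, and whether the header is needed depends on your Home Assistant auth setup:

```python
import requests

HA_URL = "https://homeassistant.local:8123"  # placeholder host
TOKEN = "YOUR_LONG_LIVED_ACCESS_TOKEN"  # placeholder token

review_id = "1718744940.123456-abc123"  # placeholder review id
camera = "front_door"  # placeholder camera name

# Request the review thumbnail and save it to disk
resp = requests.get(
    f"{HA_URL}/api/frigate/notifications/{review_id}/{camera}/review_thumbnail.webp",
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=10,
)
resp.raise_for_status()
with open("review_thumbnail.webp", "wb") as f:
    f.write(resp.content)
```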
@@ -38,3 +38,7 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht
 ## [Periscope](https://github.com/maksz42/periscope)

 [Periscope](https://github.com/maksz42/periscope) is a lightweight Android app that turns old devices into live viewers for Frigate. It works on Android 2.2 and above, including Android TV. It supports authentication and HTTPS.
+
+## [Scrypted - Frigate bridge plugin](https://github.com/apocaliss92/scrypted-frigate-bridge)
+
+[Scrypted - Frigate bridge](https://github.com/apocaliss92/scrypted-frigate-bridge) is a plugin that ingests Frigate detections, motion events, and video clips into Scrypted, and provides templates for exporting rebroadcast configurations to Frigate.
@@ -15,13 +15,11 @@ There are three model types offered in Frigate+, `mobiledet`, `yolonas`, and `yo

 Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). You can test model types for compatibility and speed on your hardware by using the base models.

-| Model Type  | Description |
-| ----------- | ----------- |
-| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
-| `yolonas`   | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
-| `yolov9`    | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX\*, Apple Silicon\*, and Rockchip NPUs. |
-
-_\* Support coming in 0.17_
+| Model Type  | Description |
+| ----------- | ----------- |
+| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
+| `yolonas`   | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
+| `yolov9`    | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX, Apple Silicon, and Rockchip NPUs. |

 ### YOLOv9 Details
@@ -39,7 +37,7 @@ If you have a Hailo device, you will need to specify the hardware you have when

 #### Rockchip (RKNN) Support

-For 0.16, YOLOv9 onnx models will need to be manually converted. First, you will need to configure Frigate to use the model id for your YOLOv9 onnx model so it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it. Automatic conversion is coming in 0.17.
+For 0.16, YOLOv9 onnx models will need to be manually converted. First, you will need to configure Frigate to use the model id for your YOLOv9 onnx model so it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it. Automatic conversion is available in 0.17 and later.

 ## Supported detector types
@@ -55,7 +53,7 @@ Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVi
 | [Hailo8/Hailo8L/Hailo8R](/configuration/object_detectors#hailo-8) | `hailo8l` | `yolov9` |
 | [Rockchip NPU](/configuration/object_detectors#rockchip-platform)\* | `rknn` | `yolov9` |

-_\* Requires manual conversion in 0.16. Automatic conversion coming in 0.17._
+_\* Requires manual conversion in 0.16. Automatic conversion available in 0.17 and later._

 ## Improving your model
@@ -1,4 +1,4 @@
-from typing import Union
+from typing import Optional, Union

 from pydantic import BaseModel, Field
 from pydantic.json_schema import SkipJsonSchema
@@ -16,5 +16,5 @@ class ExportRecordingsBody(BaseModel):
     source: PlaybackSourceEnum = Field(
         default=PlaybackSourceEnum.recordings, title="Playback source"
     )
-    name: str = Field(title="Friendly name", default=None, max_length=256)
+    name: Optional[str] = Field(title="Friendly name", default=None, max_length=256)
     image_path: Union[str, SkipJsonSchema[None]] = None
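Why this matters: with `name: str` and `default=None`, the annotation and the default disagree, which stricter pydantic and OpenAPI tooling flags. A minimal sketch of the corrected field in isolation (the class name here is illustrative):

```python
from typing import Optional

from pydantic import BaseModel, Field


class ExportBodySketch(BaseModel):
    # Optional[str] makes the None default consistent with the annotation;
    # max_length still applies whenever a string is actually provided
    name: Optional[str] = Field(title="Friendly name", default=None, max_length=256)


body = ExportBodySketch()  # name may legitimately be omitted
assert body.name is None
```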
@@ -388,7 +388,7 @@ class WebPushClient(Communicator):
             else:
                 title = base_title

-            message = payload["after"]["data"]["metadata"]["scene"]
+            message = payload["after"]["data"]["metadata"]["shortSummary"]
         else:
             zone_names = payload["after"]["data"]["zones"]
             formatted_zone_names = []
@@ -28,6 +28,7 @@ from frigate.util.builtin import (
     get_ffmpeg_arg_list,
 )
 from frigate.util.config import (
+    CURRENT_CONFIG_VERSION,
     StreamInfoRetriever,
     convert_area_to_pixels,
     find_config_file,
@@ -76,11 +77,12 @@ logger = logging.getLogger(__name__)

 yaml = YAML()

-DEFAULT_CONFIG = """
+DEFAULT_CONFIG = f"""
 mqtt:
   enabled: False

-cameras: {} # No cameras defined, UI wizard should be used
+cameras: {{}} # No cameras defined, UI wizard should be used
+version: {CURRENT_CONFIG_VERSION}
 """

 DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
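Turning the template into an f-string means literal braces must be doubled; a minimal illustration (the version value below is an invented stand-in for the real constant):

```python
CURRENT_CONFIG_VERSION = "0.17-0"  # illustrative stand-in for the real constant

DEFAULT_CONFIG = f"""
mqtt:
  enabled: False

cameras: {{}} # No cameras defined, UI wizard should be used
version: {CURRENT_CONFIG_VERSION}
"""

# {{}} renders as a literal {} while {CURRENT_CONFIG_VERSION} interpolates
assert "cameras: {}" in DEFAULT_CONFIG
assert "version: 0.17-0" in DEFAULT_CONFIG
```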
@@ -753,8 +755,7 @@ class FrigateConfig(FrigateBaseModel):
             if new_config and f.tell() == 0:
                 f.write(DEFAULT_CONFIG)
                 logger.info(
-                    "Created default config file, see the getting started docs \
-                    for configuration https://docs.frigate.video/guides/getting_started"
+                    "Created default config file, see the getting started docs for configuration: https://docs.frigate.video/guides/getting_started"
                 )

             f.seek(0)
@@ -8,6 +8,9 @@ class ReviewMetadata(BaseModel):
     scene: str = Field(
         description="A comprehensive description of the setting and entities, including relevant context and plausible inferences if supported by visual evidence."
     )
+    shortSummary: str = Field(
+        description="A brief 2-sentence summary of the scene, suitable for notifications. Should capture the key activity and context without full detail."
+    )
     confidence: float = Field(
         description="A float between 0 and 1 representing your overall confidence in this analysis."
     )
@@ -203,7 +203,9 @@ class EmbeddingMaintainer(threading.Thread):
         # post processors
         self.post_processors: list[PostProcessorApi] = []

-        if any(c.review.genai.enabled_in_config for c in self.config.cameras.values()):
+        if self.genai_client is not None and any(
+            c.review.genai.enabled_in_config for c in self.config.cameras.values()
+        ):
             self.post_processors.append(
                 ReviewDescriptionProcessor(
                     self.config, self.requestor, self.metrics, self.genai_client
@@ -244,7 +246,9 @@ class EmbeddingMaintainer(threading.Thread):
             )
             self.post_processors.append(semantic_trigger_processor)

-        if any(c.objects.genai.enabled_in_config for c in self.config.cameras.values()):
+        if self.genai_client is not None and any(
+            c.objects.genai.enabled_in_config for c in self.config.cameras.values()
+        ):
             self.post_processors.append(
                 ObjectDescriptionProcessor(
                     self.config,
@@ -629,7 +633,7 @@ class EmbeddingMaintainer(threading.Thread):

         camera, frame_name, _, _, motion_boxes, _ = data

-        if not camera or len(motion_boxes) == 0:
+        if not camera or len(motion_boxes) == 0 or camera not in self.config.cameras:
             return

         camera_config = self.config.cameras[camera]
@@ -101,6 +101,7 @@ When forming your description:
 Your response MUST be a flat JSON object with:
 - `title` (string): A concise, direct title that describes the primary action or event in the sequence, not just what you literally see. Use spatial context when available to make titles more meaningful. When multiple objects/actions are present, prioritize whichever is most prominent or occurs first. Use names from "Objects in Scene" based on what you visually observe. If you see both a name and an unidentified object of the same type but visually observe only one person/object, use ONLY the name. Examples: "Joe walking dog", "Person taking out trash", "Vehicle arriving in driveway", "Joe accessing vehicle", "Person leaving porch for driveway".
 - `scene` (string): A narrative description of what happens across the sequence from start to finish, in chronological order. Start by describing how the sequence begins, then describe the progression of events. **Describe all significant movements and actions in the order they occur.** For example, if a vehicle arrives and then a person exits, describe both actions sequentially. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign.
+- `shortSummary` (string): A brief 2-sentence summary of the scene, suitable for notifications. Should capture the key activity and context without full detail. This should be a condensed version of the scene description above.
 - `confidence` (float): 0-1 confidence in your analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous. Lower confidence when the sequence is unclear, objects are partially obscured, or context is ambiguous.
 - `potential_threat_level` (integer): 0, 1, or 2 as defined in "Normal Activity Patterns for This Property" above. Your threat level must be consistent with your scene description and the guidance above.
 {get_concern_prompt()}
@@ -192,6 +193,8 @@ Input format: Each event is a JSON object with:
 - "title", "scene", "confidence", "potential_threat_level" (0-2), "other_concerns", "camera", "time", "start_time", "end_time"
 - "context": array of related events from other cameras that occurred during overlapping time periods

+**Note: Use the "scene" field for event descriptions in the report. Ignore any "shortSummary" field if present.**
+
 Report Structure - Use this EXACT format:

 # Security Summary - {time_range}
@@ -3,7 +3,7 @@

 import logging
 from typing import Any, Optional

-from httpx import TimeoutException
+from httpx import RemoteProtocolError, TimeoutException
 from ollama import Client as ApiClient
 from ollama import ResponseError
@@ -68,7 +68,12 @@ class OllamaClient(GenAIClient):
                 f"Ollama tokens used: eval_count={result.get('eval_count')}, prompt_eval_count={result.get('prompt_eval_count')}"
             )
             return result["response"].strip()
-        except (TimeoutException, ResponseError, ConnectionError) as e:
+        except (
+            TimeoutException,
+            ResponseError,
+            RemoteProtocolError,
+            ConnectionError,
+        ) as e:
             logger.warning("Ollama returned an error: %s", str(e))
             return None
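For context, `RemoteProtocolError` is what httpx raises when the server drops the connection mid-response, which previously escaped this handler. A condensed sketch of the broadened pattern, with the client and model name as placeholders:

```python
import logging

from httpx import RemoteProtocolError, TimeoutException
from ollama import ResponseError

logger = logging.getLogger(__name__)


def send_prompt(client, prompt: str):
    # Treat dropped connections the same as timeouts and API errors: log and return None
    try:
        result = client.generate(model="llava", prompt=prompt)  # model is a placeholder
        return result["response"].strip()
    except (TimeoutException, ResponseError, RemoteProtocolError, ConnectionError) as e:
        logger.warning("Ollama returned an error: %s", str(e))
        return None
```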
@@ -139,9 +139,11 @@ class OutputProcess(FrigateProcess):
             if CameraConfigUpdateEnum.add in updates:
                 for camera in updates["add"]:
                     jsmpeg_cameras[camera] = JsmpegCamera(
-                        cam_config, self.stop_event, websocket_server
+                        self.config.cameras[camera], self.stop_event, websocket_server
                     )
-                    preview_recorders[camera] = PreviewRecorder(cam_config)
+                    preview_recorders[camera] = PreviewRecorder(
+                        self.config.cameras[camera]
+                    )
                     preview_write_times[camera] = 0

             if (
@@ -42,11 +42,10 @@ def get_latest_version(config: FrigateConfig) -> str:
             "https://api.github.com/repos/blakeblackshear/frigate/releases/latest",
             timeout=10,
         )
+        response = request.json()
     except (RequestException, JSONDecodeError):
         return "unknown"

-    response = request.json()
-
     if request.ok and response and "tag_name" in response:
         return str(response.get("tag_name").replace("v", ""))
     else:
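The point of the move: `request.json()` can itself raise `JSONDecodeError`, so it must run inside the `try` for the except clause to actually catch it. A self-contained sketch of the corrected flow (the import sources here are assumptions about the surrounding module):

```python
from json import JSONDecodeError

import requests
from requests import RequestException


def get_latest_version_sketch() -> str:
    # json() can raise JSONDecodeError, so it must sit inside the try block
    try:
        request = requests.get(
            "https://api.github.com/repos/blakeblackshear/frigate/releases/latest",
            timeout=10,
        )
        response = request.json()
    except (RequestException, JSONDecodeError):
        return "unknown"

    if request.ok and response and "tag_name" in response:
        return str(response.get("tag_name").replace("v", ""))
    return "unknown"
```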
@@ -137,6 +137,11 @@ export default function ClassificationModelWizardDialog({
     onClose();
   };

+  const handleSuccessClose = () => {
+    dispatch({ type: "RESET" });
+    onClose();
+  };
+
   return (
     <Dialog
       open={open}
@@ -207,7 +212,7 @@
               step1Data={wizardState.step1Data}
               step2Data={wizardState.step2Data}
               initialData={wizardState.step3Data}
-              onClose={onClose}
+              onClose={handleSuccessClose}
               onBack={handleBack}
             />
           )}
@@ -18,6 +18,7 @@ import PlatformAwareDialog from "../overlay/dialog/PlatformAwareDialog";
 import { useTranslation } from "react-i18next";
 import useSWR from "swr";
 import { FrigateConfig } from "@/types/frigateConfig";
+import { useUserPersistence } from "@/hooks/use-user-persistence";

 type CalendarFilterButtonProps = {
   reviewSummary?: ReviewSummary;
@@ -105,6 +106,7 @@ export function CalendarRangeFilterButton({
   const { t } = useTranslation(["components/filter"]);
   const { data: config } = useSWR<FrigateConfig>("config");
   const timezone = useTimezone(config);
+  const [weekStartsOn] = useUserPersistence("weekStartsOn", 0);
   const [open, setOpen] = useState(false);

   const selectedDate = useFormattedRange(
@@ -138,6 +140,7 @@
           initialDateTo={range?.to}
           timezone={timezone}
           showCompare={false}
+          weekStartsOn={weekStartsOn}
           onUpdate={(range) => {
             updateSelectedRange(range.range);
             setOpen(false);
@@ -13,6 +13,7 @@ import { Drawer, DrawerContent, DrawerTrigger } from "../ui/drawer";
 import FilterSwitch from "./FilterSwitch";
 import { FaVideo } from "react-icons/fa";
 import { useTranslation } from "react-i18next";
+import { useAllowedCameras } from "@/hooks/use-allowed-cameras";

 type CameraFilterButtonProps = {
   allCameras: string[];
@@ -35,6 +36,30 @@ export function CamerasFilterButton({
   const [currentCameras, setCurrentCameras] = useState<string[] | undefined>(
     selectedCameras,
   );
+  const allowedCameras = useAllowedCameras();
+
+  // Filter cameras to only include those the user has access to
+  const filteredCameras = useMemo(
+    () => allCameras.filter((camera) => allowedCameras.includes(camera)),
+    [allCameras, allowedCameras],
+  );
+
+  // Filter groups to only include those with at least one allowed camera
+  const filteredGroups = useMemo(
+    () =>
+      groups
+        .map(([name, config]) => {
+          const allowedGroupCameras = config.cameras.filter((camera) =>
+            allowedCameras.includes(camera),
+          );
+          return [name, { ...config, cameras: allowedGroupCameras }] as [
+            string,
+            CameraGroupConfig,
+          ];
+        })
+        .filter(([, config]) => config.cameras.length > 0),
+    [groups, allowedCameras],
+  );

   const buttonText = useMemo(() => {
     if (isMobile) {
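The same narrowing logic, sketched in Python for clarity; the data shapes are simplified here (the real code works on `[name, CameraGroupConfig]` tuples):

```python
def filter_groups(groups: dict[str, list[str]], allowed: set[str]) -> dict[str, list[str]]:
    # Keep only allowed cameras within each group, then drop groups left empty,
    # mirroring the filteredGroups memo above
    narrowed = {name: [c for c in cams if c in allowed] for name, cams in groups.items()}
    return {name: cams for name, cams in narrowed.items() if cams}


assert filter_groups(
    {"outdoor": ["front_door", "backyard"], "indoor": ["kitchen"]},
    allowed={"front_door"},
) == {"outdoor": ["front_door"]}
```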
@@ -79,8 +104,8 @@ export function CamerasFilterButton({
   );
   const content = (
     <CamerasFilterContent
-      allCameras={allCameras}
-      groups={groups}
+      allCameras={filteredCameras}
+      groups={filteredGroups}
       currentCameras={currentCameras}
       mainCamera={mainCamera}
       setCurrentCameras={setCurrentCameras}
@@ -54,7 +54,7 @@ export default function SetPasswordDialog({
     config?.auth?.refresh_time ?? undefined;
   const refreshTimeLabel = refreshSeconds
     ? formatSecondsToDuration(refreshSeconds)
-    : "30 minutes";
+    : t("time.30minutes", { ns: "common" });

   // visibility toggles for password fields
   const [showOldPassword, setShowOldPassword] = useState<boolean>(false);
@@ -260,7 +260,7 @@ function MSEPlayer({
         // @ts-expect-error for typing
         value: codecs(MediaSource.isTypeSupported),
       },
-      3000,
+      (fallbackTimeout ?? 3) * 1000,
     ).catch(() => {
       if (wsRef.current) {
         onDisconnect();
@@ -290,7 +290,7 @@ function MSEPlayer({
         type: "mse",
         value: codecs(MediaSource.isTypeSupported),
       },
-      3000,
+      (fallbackTimeout ?? 3) * 1000,
     ).catch(() => {
       if (wsRef.current) {
         onDisconnect();
@@ -31,7 +31,8 @@ type WizardState = {
 type WizardAction =
   | { type: "UPDATE_DATA"; payload: Partial<WizardFormData> }
   | { type: "UPDATE_AND_NEXT"; payload: Partial<WizardFormData> }
-  | { type: "RESET_NAVIGATE" };
+  | { type: "RESET_NAVIGATE" }
+  | { type: "RESET_ALL" };

 const wizardReducer = (
   state: WizardState,
@@ -50,6 +51,11 @@ const wizardReducer = (
       };
     case "RESET_NAVIGATE":
       return { ...state, shouldNavigateNext: false };
+    case "RESET_ALL":
+      return {
+        wizardData: { streams: [] },
+        shouldNavigateNext: false,
+      };
     default:
       return state;
   }
@@ -84,13 +90,13 @@ export default function CameraWizardDialog({
   useEffect(() => {
     if (open) {
       setCurrentStep(0);
-      dispatch({ type: "UPDATE_DATA", payload: { streams: [] } });
+      dispatch({ type: "RESET_ALL" });
     }
   }, [open]);

   const handleClose = useCallback(() => {
     setCurrentStep(0);
-    dispatch({ type: "UPDATE_DATA", payload: { streams: [] } });
+    dispatch({ type: "RESET_ALL" });
     onClose();
   }, [onClose]);
@@ -35,6 +35,8 @@ export interface DateRangePickerProps {
   showCompare?: boolean;
   /** timezone */
   timezone?: string;
+  /** First day of the week: 0 = Sunday, 1 = Monday */
+  weekStartsOn?: number;
 }

 const getDateAdjustedForTimezone = (
@@ -91,6 +93,7 @@ export function DateRangePicker({
   onUpdate,
   onReset,
   showCompare = true,
+  weekStartsOn = 0,
 }: DateRangePickerProps) {
   const [isOpen, setIsOpen] = useState(false);
@@ -150,7 +153,9 @@
     if (!preset) throw new Error(`Unknown date range preset: ${presetName}`);
     const from = new TZDate(new Date(), timezone);
     const to = new TZDate(new Date(), timezone);
-    const first = from.getDate() - from.getDay();
+    const dayOfWeek = from.getDay();
+    const daysFromWeekStart = (dayOfWeek - weekStartsOn + 7) % 7;
+    const first = from.getDate() - daysFromWeekStart;

     switch (preset.name) {
       case "today":
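The modulo expression generalizes "days since the start of the week" to any configured first day. A small worked example (JavaScript's `getDay()` uses Sunday=0, mirrored here):

```python
def days_from_week_start(day_of_week: int, week_starts_on: int) -> int:
    # day_of_week and week_starts_on use Sunday=0 .. Saturday=6, as in JS getDay()
    return (day_of_week - week_starts_on + 7) % 7


assert days_from_week_start(0, 0) == 0  # Sunday, with the week starting Sunday
assert days_from_week_start(0, 1) == 6  # Sunday is 6 days into a Monday-start week
assert days_from_week_start(3, 1) == 2  # Wednesday is 2 days after a Monday start
```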
@@ -184,8 +189,8 @@
         to.setHours(23, 59, 59, 999);
         break;
       case "lastWeek":
-        from.setDate(from.getDate() - 7 - from.getDay());
-        to.setDate(to.getDate() - to.getDay() - 1);
+        from.setDate(first - 7);
+        to.setDate(first - 1);
         from.setHours(0, 0, 0, 0);
         to.setHours(23, 59, 59, 999);
         break;
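With `first` now respecting `weekStartsOn`, last week is simply the seven days ending the day before `first`. A hedged Python check of the arithmetic (note Python's `weekday()` is Monday=0, unlike JS `getDay()`):

```python
import datetime


def last_week_range(today: datetime.date, week_starts_on: int) -> tuple[datetime.date, datetime.date]:
    # "first" is the start of the current week, so last week
    # spans [first - 7 days, first - 1 day], as in the hunk above
    days_from_week_start = (today.weekday() - week_starts_on) % 7  # weekday(): Monday=0
    first = today - datetime.timedelta(days=days_from_week_start)
    return first - datetime.timedelta(days=7), first - datetime.timedelta(days=1)


# With Monday as the week start and today = Wednesday 2024-06-19,
# last week runs Monday 2024-06-10 through Sunday 2024-06-16
assert last_week_range(datetime.date(2024, 6, 19), 0) == (
    datetime.date(2024, 6, 10),
    datetime.date(2024, 6, 16),
)
```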
@@ -23,5 +23,6 @@ export const supportedLanguageKeys = [
   "lt",
   "uk",
   "cs",
+  "sk",
   "hu",
 ];
@@ -219,12 +219,12 @@ function Exports() {
             />
           ))}
         </div>
-      ) : (
+      ) : exports !== undefined ? (
         <div className="absolute left-1/2 top-1/2 flex -translate-x-1/2 -translate-y-1/2 flex-col items-center justify-center text-center">
           <LuFolderX className="size-16" />
           {t("noExports")}
         </div>
-      )}
+      ) : null}
       </div>
     </div>
   );
@@ -22,6 +22,7 @@ import { SearchTab } from "@/components/overlay/detail/SearchDetailDialog";
 import { FrigateConfig } from "@/types/frigateConfig";
 import { useTranslation } from "react-i18next";
 import { getTranslatedLabel } from "@/utils/i18n";
+import { LuSearchX } from "react-icons/lu";

 type ExploreViewProps = {
   setSearchDetail: (search: SearchResult | undefined) => void;
@@ -86,6 +87,15 @@ export default function ExploreView({
     );
   }

+  if (eventsByLabel && Object.keys(eventsByLabel).length == 0 && !isLoading) {
+    return (
+      <div className="absolute left-1/2 top-1/2 flex -translate-x-1/2 -translate-y-1/2 flex-col items-center justify-center text-center">
+        <LuSearchX className="size-16" />
+        {t("noTrackedObjects")}
+      </div>
+    );
+  }
+
   return (
     <div className="mx-2 space-y-4">
       {Object.entries(eventsByLabel).map(([label, filteredEvents]) => (