diff --git a/frigate/api/auth.py b/frigate/api/auth.py
index 95ee4f9dc..d3b50067c 100644
--- a/frigate/api/auth.py
+++ b/frigate/api/auth.py
@@ -893,13 +893,9 @@ async def update_password(
     except DoesNotExist:
         return JSONResponse(content={"message": "User not found"}, status_code=404)
 
-    # Require old_password when:
-    # 1. Non-admin user is changing another user's password (admin only action)
-    # 2. Any user is changing their own password
-    is_changing_own_password = current_username == username
-    is_non_admin = current_role != "admin"
-
-    if is_changing_own_password or is_non_admin:
+    # Require old_password when non-admin user is changing any password
+    # Admin users changing passwords do NOT need to provide the current password
+    if current_role != "admin":
         if not body.old_password:
             return JSONResponse(
                 content={"message": "Current password is required"},
diff --git a/frigate/data_processing/real_time/bird.py b/frigate/data_processing/real_time/bird.py
index e599ab0fb..8d6e1b2dc 100644
--- a/frigate/data_processing/real_time/bird.py
+++ b/frigate/data_processing/real_time/bird.py
@@ -19,11 +19,6 @@ from frigate.util.object import calculate_region
 from ..types import DataProcessorMetrics
 from .api import RealTimeProcessorApi
 
-try:
-    from tflite_runtime.interpreter import Interpreter
-except ModuleNotFoundError:
-    from tensorflow.lite.python.interpreter import Interpreter
-
 logger = logging.getLogger(__name__)
 
 
@@ -35,7 +30,7 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
         metrics: DataProcessorMetrics,
     ):
         super().__init__(config, metrics)
-        self.interpreter: Interpreter = None
+        self.interpreter: Any | None = None
         self.sub_label_publisher = sub_label_publisher
         self.tensor_input_details: dict[str, Any] = None
         self.tensor_output_details: dict[str, Any] = None
@@ -82,6 +77,11 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
 
     @redirect_output_to_logger(logger, logging.DEBUG)
     def __build_detector(self) -> None:
+        try:
+            from tflite_runtime.interpreter import Interpreter
+        except ModuleNotFoundError:
+            from tensorflow.lite.python.interpreter import Interpreter
+
         self.interpreter = Interpreter(
             model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"),
             num_threads=2,
diff --git a/frigate/data_processing/real_time/custom_classification.py b/frigate/data_processing/real_time/custom_classification.py
index 25ec3bb86..dd011b48e 100644
--- a/frigate/data_processing/real_time/custom_classification.py
+++ b/frigate/data_processing/real_time/custom_classification.py
@@ -29,11 +29,6 @@ from frigate.util.object import box_overlaps, calculate_region
 from ..types import DataProcessorMetrics
 from .api import RealTimeProcessorApi
 
-try:
-    from tflite_runtime.interpreter import Interpreter
-except ModuleNotFoundError:
-    from tensorflow.lite.python.interpreter import Interpreter
-
 logger = logging.getLogger(__name__)
 
 MAX_OBJECT_CLASSIFICATIONS = 16
@@ -52,7 +47,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         self.requestor = requestor
         self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
         self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
-        self.interpreter: Interpreter | None = None
+        self.interpreter: Any | None = None
         self.tensor_input_details: dict[str, Any] | None = None
         self.tensor_output_details: dict[str, Any] | None = None
         self.labelmap: dict[int, str] = {}
@@ -74,6 +69,11 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
 
     @redirect_output_to_logger(logger, logging.DEBUG)
     def __build_detector(self) -> None:
+        try:
+            from tflite_runtime.interpreter import Interpreter
+        except ModuleNotFoundError:
+            from tensorflow.lite.python.interpreter import Interpreter
+
         model_path = os.path.join(self.model_dir, "model.tflite")
         labelmap_path = os.path.join(self.model_dir, "labelmap.txt")
 
@@ -345,7 +345,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         self.model_config = model_config
         self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
         self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
-        self.interpreter: Interpreter | None = None
+        self.interpreter: Any | None = None
         self.sub_label_publisher = sub_label_publisher
         self.requestor = requestor
         self.tensor_input_details: dict[str, Any] | None = None
@@ -368,6 +368,11 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
 
     @redirect_output_to_logger(logger, logging.DEBUG)
     def __build_detector(self) -> None:
+        try:
+            from tflite_runtime.interpreter import Interpreter
+        except ModuleNotFoundError:
+            from tensorflow.lite.python.interpreter import Interpreter
+
         model_path = os.path.join(self.model_dir, "model.tflite")
         labelmap_path = os.path.join(self.model_dir, "labelmap.txt")
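The three processor hunks above all make the same move: the `tflite_runtime`/`tensorflow` fallback import leaves module scope and runs inside `__build_detector`, so merely importing these modules no longer pulls in TensorFlow. A minimal standalone sketch of the deferred-import pattern (the class name and model path here are illustrative, not Frigate's actual API):

```python
# Minimal sketch of the deferred-import pattern from the hunks above.
# LazyTFLiteModel and the model path are illustrative, not Frigate code.
from typing import Any


class LazyTFLiteModel:
    def __init__(self, model_path: str) -> None:
        self.model_path = model_path
        # Annotated as Any on purpose: the Interpreter class is not imported yet.
        self.interpreter: Any | None = None

    def load(self) -> None:
        # The heavyweight import happens on first use, not at module import time.
        try:
            from tflite_runtime.interpreter import Interpreter
        except ModuleNotFoundError:
            from tensorflow.lite.python.interpreter import Interpreter

        self.interpreter = Interpreter(model_path=self.model_path, num_threads=2)
        self.interpreter.allocate_tensors()
```

This is also why the annotations in the diff change from `Interpreter | None` to `Any | None`: once the import is deferred, the `Interpreter` name is no longer available at class-definition time.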
diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py
index 78a251c42..33d09dcc3 100644
--- a/frigate/embeddings/maintainer.py
+++ b/frigate/embeddings/maintainer.py
@@ -146,6 +146,29 @@ class EmbeddingMaintainer(threading.Thread):
         self.detected_license_plates: dict[str, dict[str, Any]] = {}
         self.genai_client = get_genai_client(config)
 
+        # Pre-import TensorFlow/tflite on main thread to avoid atexit registration issues
+        # when importing from worker threads later (e.g., during dynamic config updates)
+        if (
+            self.config.classification.bird.enabled
+            or len(self.config.classification.custom) > 0
+        ):
+            try:
+                from tflite_runtime.interpreter import Interpreter  # noqa: F401
+            except ModuleNotFoundError:
+                try:
+                    from tensorflow.lite.python.interpreter import (  # noqa: F401
+                        Interpreter,
+                    )
+
+                    logger.debug(
+                        "Pre-imported TensorFlow Interpreter on main thread for classification models"
+                    )
+                except Exception as e:
+                    logger.warning(
+                        f"Failed to pre-import TensorFlow Interpreter: {e}. "
+                        "Classification models may fail to load if added dynamically."
+                    )
+
         # model runners to share between realtime and post processors
         if self.config.lpr.enabled:
             lpr_model_runner = LicensePlateModelRunner(
diff --git a/frigate/ffmpeg_presets.py b/frigate/ffmpeg_presets.py
index 36f7828fc..43272a6d1 100644
--- a/frigate/ffmpeg_presets.py
+++ b/frigate/ffmpeg_presets.py
@@ -153,7 +153,7 @@ PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
     FFMPEG_HWACCEL_VAAPI: "{0} -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {3} {1} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {2}",
     "preset-intel-qsv-h264": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
     "preset-intel-qsv-h265": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v main -level:v 4.1 -async_depth:v 1 {2}",
-    FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner {1} -hwaccel cuda -hwaccel_device {3} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {2}",
+    FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner {1} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {2}",
     "preset-jetson-h264": "{0} -hide_banner {1} -c:v h264_nvmpi -profile high {2}",
     "preset-jetson-h265": "{0} -hide_banner {1} -c:v h264_nvmpi -profile main {2}",
     FFMPEG_HWACCEL_RKMPP: "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}",
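The pre-import in `EmbeddingMaintainer.__init__` works because Python caches modules in `sys.modules`: once the main thread has performed the import, a later import from a worker thread is a cache hit and never re-runs TensorFlow's first-import side effects (including its atexit registration). A rough sketch of the idea, with hypothetical function names:

```python
# Rough sketch: import an expensive module on the main thread so worker
# threads only ever hit the sys.modules cache. Function names are hypothetical.
import sys
import threading


def preload_tflite() -> bool:
    """Run on the main thread, before any worker threads exist."""
    try:
        from tflite_runtime.interpreter import Interpreter  # noqa: F401

        return True
    except ModuleNotFoundError:
        return False


def worker() -> None:
    # Cache hit: no filesystem work, no first-import side effects.
    assert "tflite_runtime.interpreter" in sys.modules
    from tflite_runtime.interpreter import Interpreter  # noqa: F401


if preload_tflite():
    t = threading.Thread(target=worker)
    t.start()
    t.join()
```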
diff --git a/frigate/util/classification.py b/frigate/util/classification.py
index 03229cc73..7777af51c 100644
--- a/frigate/util/classification.py
+++ b/frigate/util/classification.py
@@ -499,6 +499,10 @@ def _extract_keyframes(
     """
     Extract keyframes from recordings at specified timestamps and crop to specified regions.
 
+    This implementation batches work by running multiple ffmpeg snapshot commands
+    concurrently, which significantly reduces total runtime compared to
+    processing each timestamp serially.
+
     Args:
         ffmpeg_path: Path to ffmpeg binary
         timestamps: List of timestamp dicts from _select_balanced_timestamps
@@ -508,15 +512,21 @@
     Returns:
         List of paths to successfully extracted and cropped keyframe images
     """
-    keyframe_paths = []
+    from concurrent.futures import ThreadPoolExecutor, as_completed
 
-    for idx, ts_info in enumerate(timestamps):
+    if not timestamps:
+        return []
+
+    # Limit the number of concurrent ffmpeg processes so we don't overload the host.
+    max_workers = min(5, len(timestamps))
+
+    def _process_timestamp(idx: int, ts_info: dict) -> tuple[int, str | None]:
        camera = ts_info["camera"]
        timestamp = ts_info["timestamp"]

        if camera not in camera_crops:
            logger.warning(f"No crop coordinates for camera {camera}")
-            continue
+            return idx, None

        norm_x1, norm_y1, norm_x2, norm_y2 = camera_crops[camera]
@@ -533,7 +543,7 @@
                .get()
            )
        except Exception:
-            continue
+            return idx, None

        relative_time = timestamp - recording.start_time
@@ -547,38 +557,57 @@
                height=None,
            )

-            if image_data:
-                nparr = np.frombuffer(image_data, np.uint8)
-                img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+            if not image_data:
+                return idx, None

-                if img is not None:
-                    height, width = img.shape[:2]
+            nparr = np.frombuffer(image_data, np.uint8)
+            img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

-                    x1 = int(norm_x1 * width)
-                    y1 = int(norm_y1 * height)
-                    x2 = int(norm_x2 * width)
-                    y2 = int(norm_y2 * height)
+            if img is None:
+                return idx, None

-                    x1_clipped = max(0, min(x1, width))
-                    y1_clipped = max(0, min(y1, height))
-                    x2_clipped = max(0, min(x2, width))
-                    y2_clipped = max(0, min(y2, height))
+            height, width = img.shape[:2]

-                    if x2_clipped > x1_clipped and y2_clipped > y1_clipped:
-                        cropped = img[y1_clipped:y2_clipped, x1_clipped:x2_clipped]
-                        resized = cv2.resize(cropped, (224, 224))
+            x1 = int(norm_x1 * width)
+            y1 = int(norm_y1 * height)
+            x2 = int(norm_x2 * width)
+            y2 = int(norm_y2 * height)

-                        output_path = os.path.join(output_dir, f"frame_{idx:04d}.jpg")
-                        cv2.imwrite(output_path, resized)
-                        keyframe_paths.append(output_path)
+            x1_clipped = max(0, min(x1, width))
+            y1_clipped = max(0, min(y1, height))
+            x2_clipped = max(0, min(x2, width))
+            y2_clipped = max(0, min(y2, height))

+            if x2_clipped <= x1_clipped or y2_clipped <= y1_clipped:
+                return idx, None
+
+            cropped = img[y1_clipped:y2_clipped, x1_clipped:x2_clipped]
+            resized = cv2.resize(cropped, (224, 224))
+
+            output_path = os.path.join(output_dir, f"frame_{idx:04d}.jpg")
+            cv2.imwrite(output_path, resized)
+            return idx, output_path
        except Exception as e:
            logger.debug(
                f"Failed to extract frame from {recording.path} at {relative_time}s: {e}"
            )
-            continue
+            return idx, None

-    return keyframe_paths
+    keyframes_with_index: list[tuple[int, str]] = []
+
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        future_to_idx = {
+            executor.submit(_process_timestamp, idx, ts_info): idx
+            for idx, ts_info in enumerate(timestamps)
+        }
+
+        for future in as_completed(future_to_idx):
+            _, path = future.result()
+            if path:
+                keyframes_with_index.append((future_to_idx[future], path))
+
+    keyframes_with_index.sort(key=lambda item: item[0])
+    return [path for _, path in keyframes_with_index]


 def _select_distinct_images(
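The rewritten `_extract_keyframes` is an instance of a common fan-out pattern: submit one task per item, tag each result with its input index, and sort by that index at the end so output order is deterministic even though `as_completed` yields in completion order. The same pattern in isolation, where `slow_task` is a stand-in for the per-timestamp ffmpeg work:

```python
# Standalone sketch of the fan-out/sort-back pattern used in _extract_keyframes.
# slow_task is a placeholder for the per-timestamp ffmpeg extraction.
from concurrent.futures import ThreadPoolExecutor, as_completed


def slow_task(idx: int, item: str) -> tuple[int, str | None]:
    # Failures return None so callers can filter them out, mirroring the
    # (idx, None) convention in the diff.
    return idx, (item.upper() if item else None)


def run_all(items: list[str], limit: int = 5) -> list[str]:
    if not items:
        return []

    results: list[tuple[int, str]] = []
    with ThreadPoolExecutor(max_workers=min(limit, len(items))) as executor:
        futures = {
            executor.submit(slow_task, idx, item): idx
            for idx, item in enumerate(items)
        }
        for future in as_completed(futures):
            idx, value = future.result()
            if value is not None:
                results.append((idx, value))

    # as_completed yields in completion order; sort to restore input order.
    results.sort(key=lambda pair: pair[0])
    return [value for _, value in results]


print(run_all(["a", "b", "c"]))  # ['A', 'B', 'C']
```

Capping `max_workers` at 5 bounds the number of simultaneous ffmpeg processes, which matters more than CPU count here since each task shells out to an external process.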
diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json
index 2adcc53ff..1946a1c62 100644
--- a/web/public/locales/en/views/settings.json
+++ b/web/public/locales/en/views/settings.json
@@ -679,7 +679,7 @@
       "desc": "Manage this Frigate instance's user accounts."
     },
     "addUser": "Add User",
-    "updatePassword": "Update Password",
+    "updatePassword": "Reset Password",
     "toast": {
       "success": {
         "createUser": "User {{user}} created successfully",
@@ -700,7 +700,7 @@
       "role": "Role",
       "noUsers": "No users found.",
       "changeRole": "Change user role",
-      "password": "Password",
+      "password": "Reset Password",
       "deleteUser": "Delete user"
     },
     "dialog": {
diff --git a/web/src/App.tsx b/web/src/App.tsx
index 2fbfa4c99..b458d9ec3 100644
--- a/web/src/App.tsx
+++ b/web/src/App.tsx
@@ -14,6 +14,7 @@ import ProtectedRoute from "@/components/auth/ProtectedRoute";
 import { AuthProvider } from "@/context/auth-context";
 import useSWR from "swr";
 import { FrigateConfig } from "./types/frigateConfig";
+import ActivityIndicator from "@/components/indicators/activity-indicator";
 
 const Live = lazy(() => import("@/pages/Live"));
 const Events = lazy(() => import("@/pages/Events"));
@@ -50,6 +51,13 @@ function DefaultAppView() {
   const { data: config } = useSWR<FrigateConfig>("config", {
     revalidateOnFocus: false,
   });
+
+  // Compute required roles for main routes, ensuring we have config first
+  // to prevent race condition where custom roles are temporarily unavailable
+  const mainRouteRoles = config?.auth?.roles
+    ? Object.keys(config.auth.roles)
+    : undefined;
+
   return (
@@ -68,13 +76,11 @@
           element={
+            mainRouteRoles ? (
+              <ProtectedRoute requiredRoles={mainRouteRoles} />
+            ) : (
+              <ActivityIndicator />
+            )
           }
         >
           <Route index element={<Live />} />
diff --git a/web/src/components/classification/wizard/Step3ChooseExamples.tsx b/web/src/components/classification/wizard/Step3ChooseExamples.tsx
index c4978a1b8..d15e45b8c 100644
--- a/web/src/components/classification/wizard/Step3ChooseExamples.tsx
+++ b/web/src/components/classification/wizard/Step3ChooseExamples.tsx
@@ -141,7 +141,37 @@
     );
     await Promise.all(categorizePromises);
 
-    // Step 2.5: Create empty folders for classes that don't have any images
+    // Step 2.5: Delete any unselected images from train folder
+    // For state models, all images must be classified, so unselected images should be removed
+    // For object models, unselected images are assigned to "none" so they're already categorized
+    if (step1Data.modelType === "state") {
+      try {
+        // Fetch current train images to see what's left after categorization
+        const trainImagesResponse = await axios.get(
+          `/classification/${step1Data.modelName}/train`,
+        );
+        const remainingTrainImages = trainImagesResponse.data || [];
+
+        const categorizedImageNames = new Set(Object.keys(classifications));
+        const unselectedImages = remainingTrainImages.filter(
+          (imageName) => !categorizedImageNames.has(imageName),
+        );
+
+        if (unselectedImages.length > 0) {
+          await axios.post(
+            `/classification/${step1Data.modelName}/train/delete`,
+            {
+              ids: unselectedImages,
+            },
+          );
+        }
+      } catch (error) {
+        // Silently fail - unselected images will remain but won't cause issues
+        // since the frontend filters out images that don't match expected format
+      }
+    }
+
+    // Step 2.6: Create empty folders for classes that don't have any images
     // This ensures all classes are available in the dataset view later
     const classesWithImages = new Set(
       Object.values(classifications).filter((c) => c && c !== "none"),
diff --git a/web/src/components/overlay/detail/DetailActionsMenu.tsx b/web/src/components/overlay/detail/DetailActionsMenu.tsx
index ee4184d0f..87f77eaf8 100644
--- a/web/src/components/overlay/detail/DetailActionsMenu.tsx
+++ b/web/src/components/overlay/detail/DetailActionsMenu.tsx
@@ -49,6 +49,29 @@
     search.data?.type === "audio" ? null : [`review/event/${search.id}`],
   );
 
+  // don't render menu at all if no options are available
+  const hasSemanticSearchOption =
+    config?.semantic_search.enabled &&
+    setSimilarity !== undefined &&
+    search.data?.type === "object";
+
+  const hasReviewItem = !!(reviewItem && reviewItem.id);
+
+  const hasAdminTriggerOption =
+    isAdmin &&
+    config?.semantic_search.enabled &&
+    search.data?.type === "object";
+
+  if (
+    !search.has_snapshot &&
+    !search.has_clip &&
+    !hasSemanticSearchOption &&
+    !hasReviewItem &&
+    !hasAdminTriggerOption
+  ) {
+    return null;
+  }
+
   return (
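The wizard change boils down to a set difference: whatever is still in the train folder but absent from the user's classification map gets deleted. A Python paraphrase of that flow (the `/classification` endpoints mirror the diff; the base URL and the use of `requests` are assumptions for the sketch, not Frigate's frontend code):

```python
# Python paraphrase of the state-model cleanup step in Step3ChooseExamples.
# The /classification endpoints mirror the diff; BASE is an assumption.
import requests

BASE = "http://frigate.local:5000/api"


def delete_unselected(model_name: str, classifications: dict[str, str]) -> None:
    resp = requests.get(f"{BASE}/classification/{model_name}/train")
    remaining = resp.json() or []

    categorized = set(classifications)  # keys are the categorized image names
    unselected = [name for name in remaining if name not in categorized]

    if unselected:
        requests.post(
            f"{BASE}/classification/{model_name}/train/delete",
            json={"ids": unselected},
        )
```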
diff --git a/web/src/views/classification/ModelTrainingView.tsx b/web/src/views/classification/ModelTrainingView.tsx
index 02abc021b..ea9facd08 100644
--- a/web/src/views/classification/ModelTrainingView.tsx
+++ b/web/src/views/classification/ModelTrainingView.tsx
@@ -866,6 +866,12 @@
       };
     })
     .filter((data) => {
+      // Ignore images that don't match the expected format (event-camera-timestamp-state-score.webp)
+      // Expected format has 5 parts when split by "-", and score should be a valid number
+      if (data.score === undefined || isNaN(data.score) || !data.name) {
+        return false;
+      }
+
       if (!trainFilter) {
         return true;
       }
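The `TrainGrid` filter assumes image names of the form `event-camera-timestamp-state-score.webp` and drops anything whose score field doesn't parse. A hypothetical Python rendering of that validation (the actual parsing happens in the `.map()` above the hunk, which this diff doesn't show):

```python
# Hypothetical re-implementation of TrainGrid's filename sanity check,
# assuming names like "event-camera-timestamp-state-score.webp".
def is_valid_train_image(name: str) -> bool:
    if not name.endswith(".webp"):
        return False

    parts = name.removesuffix(".webp").split("-")
    if len(parts) != 5:
        return False

    try:
        float(parts[4])  # the score must be a valid number
    except ValueError:
        return False
    return True


assert is_valid_train_image("evt1-doorbell-1700000000.123-open-0.87.webp")
assert not is_valid_train_image("notes.txt")
```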