Miscellaneous Fixes (0.17 beta) (#21301)

* Wait for config to load before evaluating route access

Fix a race condition where custom-role users are temporarily denied access after login while the config is still loading. Defer route rendering in DefaultAppView until the config is available so the complete role list is known before ProtectedRoute evaluates permissions.

* Use batching for state classification generation

* Ignore incorrectly scored images if they make it through the deletion step

* Delete unclassified images

* mitigate tensorflow atexit crash by pre-importing tflite/tensorflow on the main thread

Pre-import the Interpreter in the embeddings maintainer and add defensive lazy imports in the classification processors so TensorFlow is never first imported from a worker thread, where it can raise "can't register atexit after shutdown".
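The pattern, reduced to a standalone sketch (the helper name is hypothetical, and the atexit rationale follows the commit's own description rather than TensorFlow documentation):

import logging
import threading

logger = logging.getLogger(__name__)


def preload_tflite_interpreter() -> None:
    # TensorFlow registers cleanup hooks via atexit when it is first imported.
    # If that first import happens on a short-lived worker thread (e.g. during
    # a dynamic config update), the registration can race interpreter shutdown
    # and raise "can't register atexit after shutdown". Importing once on the
    # main thread makes every later worker-thread import a cheap cache hit.
    try:
        from tflite_runtime.interpreter import Interpreter  # noqa: F401
    except ModuleNotFoundError:
        try:
            from tensorflow.lite.python.interpreter import Interpreter  # noqa: F401
        except Exception as e:
            logger.warning(f"Could not pre-import TensorFlow Interpreter: {e}")


if threading.current_thread() is threading.main_thread():
    preload_tflite_interpreter()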

* don't require old password for users with admin role when changing passwords

* don't render actions menu if no options are available

* Remove hwaccel arg as it is not used for encoding

* change password button text

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
Josh Hawkins, 2025-12-16 08:11:53 -06:00 (committed by GitHub)
parent 818cccb2e3
commit e7d047715d
11 changed files with 174 additions and 56 deletions

View File

@@ -893,13 +893,9 @@ async def update_password(
     except DoesNotExist:
         return JSONResponse(content={"message": "User not found"}, status_code=404)

-    # Require old_password when:
-    # 1. Non-admin user is changing another user's password (admin only action)
-    # 2. Any user is changing their own password
-    is_changing_own_password = current_username == username
-    is_non_admin = current_role != "admin"
-    if is_changing_own_password or is_non_admin:
+    # Require old_password when non-admin user is changing any password
+    # Admin users changing passwords do NOT need to provide the current password
+    if current_role != "admin":
         if not body.old_password:
             return JSONResponse(
                 content={"message": "Current password is required"},

View File

@@ -19,11 +19,6 @@ from frigate.util.object import calculate_region
 from ..types import DataProcessorMetrics
 from .api import RealTimeProcessorApi

-try:
-    from tflite_runtime.interpreter import Interpreter
-except ModuleNotFoundError:
-    from tensorflow.lite.python.interpreter import Interpreter
-
 logger = logging.getLogger(__name__)

@@ -35,7 +30,7 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
         metrics: DataProcessorMetrics,
     ):
         super().__init__(config, metrics)
-        self.interpreter: Interpreter = None
+        self.interpreter: Any | None = None
         self.sub_label_publisher = sub_label_publisher
         self.tensor_input_details: dict[str, Any] = None
         self.tensor_output_details: dict[str, Any] = None

@@ -82,6 +77,11 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
     @redirect_output_to_logger(logger, logging.DEBUG)
     def __build_detector(self) -> None:
+        try:
+            from tflite_runtime.interpreter import Interpreter
+        except ModuleNotFoundError:
+            from tensorflow.lite.python.interpreter import Interpreter
+
         self.interpreter = Interpreter(
             model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"),
             num_threads=2,

View File

@@ -29,11 +29,6 @@ from frigate.util.object import box_overlaps, calculate_region
 from ..types import DataProcessorMetrics
 from .api import RealTimeProcessorApi

-try:
-    from tflite_runtime.interpreter import Interpreter
-except ModuleNotFoundError:
-    from tensorflow.lite.python.interpreter import Interpreter
-
 logger = logging.getLogger(__name__)

 MAX_OBJECT_CLASSIFICATIONS = 16

@@ -52,7 +47,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         self.requestor = requestor
         self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
         self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
-        self.interpreter: Interpreter | None = None
+        self.interpreter: Any | None = None
         self.tensor_input_details: dict[str, Any] | None = None
         self.tensor_output_details: dict[str, Any] | None = None
         self.labelmap: dict[int, str] = {}

@@ -74,6 +69,11 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
     @redirect_output_to_logger(logger, logging.DEBUG)
     def __build_detector(self) -> None:
+        try:
+            from tflite_runtime.interpreter import Interpreter
+        except ModuleNotFoundError:
+            from tensorflow.lite.python.interpreter import Interpreter
+
         model_path = os.path.join(self.model_dir, "model.tflite")
         labelmap_path = os.path.join(self.model_dir, "labelmap.txt")

@@ -345,7 +345,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         self.model_config = model_config
         self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
         self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
-        self.interpreter: Interpreter | None = None
+        self.interpreter: Any | None = None
         self.sub_label_publisher = sub_label_publisher
         self.requestor = requestor
         self.tensor_input_details: dict[str, Any] | None = None

@@ -368,6 +368,11 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
     @redirect_output_to_logger(logger, logging.DEBUG)
     def __build_detector(self) -> None:
+        try:
+            from tflite_runtime.interpreter import Interpreter
+        except ModuleNotFoundError:
+            from tensorflow.lite.python.interpreter import Interpreter
+
         model_path = os.path.join(self.model_dir, "model.tflite")
         labelmap_path = os.path.join(self.model_dir, "labelmap.txt")

View File

@@ -146,6 +146,29 @@ class EmbeddingMaintainer(threading.Thread):
         self.detected_license_plates: dict[str, dict[str, Any]] = {}
         self.genai_client = get_genai_client(config)

+        # Pre-import TensorFlow/tflite on main thread to avoid atexit registration issues
+        # when importing from worker threads later (e.g., during dynamic config updates)
+        if (
+            self.config.classification.bird.enabled
+            or len(self.config.classification.custom) > 0
+        ):
+            try:
+                from tflite_runtime.interpreter import Interpreter  # noqa: F401
+            except ModuleNotFoundError:
+                try:
+                    from tensorflow.lite.python.interpreter import (  # noqa: F401
+                        Interpreter,
+                    )
+
+                    logger.debug(
+                        "Pre-imported TensorFlow Interpreter on main thread for classification models"
+                    )
+                except Exception as e:
+                    logger.warning(
+                        f"Failed to pre-import TensorFlow Interpreter: {e}. "
+                        "Classification models may fail to load if added dynamically."
+                    )
+
         # model runners to share between realtime and post processors
         if self.config.lpr.enabled:
             lpr_model_runner = LicensePlateModelRunner(

View File

@@ -153,7 +153,7 @@ PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
     FFMPEG_HWACCEL_VAAPI: "{0} -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {3} {1} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {2}",
     "preset-intel-qsv-h264": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
     "preset-intel-qsv-h265": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v main -level:v 4.1 -async_depth:v 1 {2}",
-    FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner {1} -hwaccel cuda -hwaccel_device {3} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {2}",
+    FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner {1} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {2}",
     "preset-jetson-h264": "{0} -hide_banner {1} -c:v h264_nvmpi -profile high {2}",
     "preset-jetson-h265": "{0} -hide_banner {1} -c:v h264_nvmpi -profile main {2}",
     FFMPEG_HWACCEL_RKMPP: "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}",

View File

@@ -499,6 +499,10 @@ def _extract_keyframes(
     """
     Extract keyframes from recordings at specified timestamps and crop to specified regions.

+    This implementation batches work by running multiple ffmpeg snapshot commands
+    concurrently, which significantly reduces total runtime compared to
+    processing each timestamp serially.
+
     Args:
         ffmpeg_path: Path to ffmpeg binary
         timestamps: List of timestamp dicts from _select_balanced_timestamps

@@ -508,15 +512,21 @@ def _extract_keyframes(
     Returns:
         List of paths to successfully extracted and cropped keyframe images
     """
-    keyframe_paths = []
+    from concurrent.futures import ThreadPoolExecutor, as_completed

-    for idx, ts_info in enumerate(timestamps):
+    if not timestamps:
+        return []
+
+    # Limit the number of concurrent ffmpeg processes so we don't overload the host.
+    max_workers = min(5, len(timestamps))
+
+    def _process_timestamp(idx: int, ts_info: dict) -> tuple[int, str | None]:
         camera = ts_info["camera"]
         timestamp = ts_info["timestamp"]

         if camera not in camera_crops:
             logger.warning(f"No crop coordinates for camera {camera}")
-            continue
+            return idx, None

         norm_x1, norm_y1, norm_x2, norm_y2 = camera_crops[camera]

@@ -533,7 +543,7 @@ def _extract_keyframes(
                 .get()
             )
         except Exception:
-            continue
+            return idx, None

         relative_time = timestamp - recording.start_time

@@ -547,38 +557,57 @@ def _extract_keyframes(
                 height=None,
             )

-            if image_data:
-                nparr = np.frombuffer(image_data, np.uint8)
-                img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+            if not image_data:
+                return idx, None

-                if img is not None:
-                    height, width = img.shape[:2]
+            nparr = np.frombuffer(image_data, np.uint8)
+            img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

-                    x1 = int(norm_x1 * width)
-                    y1 = int(norm_y1 * height)
-                    x2 = int(norm_x2 * width)
-                    y2 = int(norm_y2 * height)
+            if img is None:
+                return idx, None

-                    x1_clipped = max(0, min(x1, width))
-                    y1_clipped = max(0, min(y1, height))
-                    x2_clipped = max(0, min(x2, width))
-                    y2_clipped = max(0, min(y2, height))
+            height, width = img.shape[:2]

-                    if x2_clipped > x1_clipped and y2_clipped > y1_clipped:
-                        cropped = img[y1_clipped:y2_clipped, x1_clipped:x2_clipped]
-                        resized = cv2.resize(cropped, (224, 224))
+            x1 = int(norm_x1 * width)
+            y1 = int(norm_y1 * height)
+            x2 = int(norm_x2 * width)
+            y2 = int(norm_y2 * height)

-                        output_path = os.path.join(output_dir, f"frame_{idx:04d}.jpg")
-                        cv2.imwrite(output_path, resized)
-                        keyframe_paths.append(output_path)
+            x1_clipped = max(0, min(x1, width))
+            y1_clipped = max(0, min(y1, height))
+            x2_clipped = max(0, min(x2, width))
+            y2_clipped = max(0, min(y2, height))
+
+            if x2_clipped <= x1_clipped or y2_clipped <= y1_clipped:
+                return idx, None
+
+            cropped = img[y1_clipped:y2_clipped, x1_clipped:x2_clipped]
+            resized = cv2.resize(cropped, (224, 224))
+
+            output_path = os.path.join(output_dir, f"frame_{idx:04d}.jpg")
+            cv2.imwrite(output_path, resized)
+            return idx, output_path
         except Exception as e:
             logger.debug(
                 f"Failed to extract frame from {recording.path} at {relative_time}s: {e}"
             )
-            continue
+            return idx, None

-    return keyframe_paths
+    keyframes_with_index: list[tuple[int, str]] = []
+
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        future_to_idx = {
+            executor.submit(_process_timestamp, idx, ts_info): idx
+            for idx, ts_info in enumerate(timestamps)
+        }
+        for future in as_completed(future_to_idx):
+            _, path = future.result()
+            if path:
+                keyframes_with_index.append((future_to_idx[future], path))
+
+    keyframes_with_index.sort(key=lambda item: item[0])
+    return [path for _, path in keyframes_with_index]


 def _select_distinct_images(
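Stripped of the cropping details, the batching change above is a bounded ThreadPoolExecutor fan-out that carries each item's index so results can be re-sorted into submission order. A minimal standalone sketch under those assumptions (snapshot_all and its subprocess payload are illustrative; only the five-worker cap and the index-carrying pattern mirror the diff):

import subprocess
from concurrent.futures import ThreadPoolExecutor, as_completed


def snapshot_all(commands: list[list[str]]) -> list[int]:
    # Run up to five ffmpeg-style commands concurrently; as_completed yields
    # results in completion order, so each worker returns its index and the
    # list is sorted back into submission order at the end.
    if not commands:
        return []

    def run(idx: int, cmd: list[str]) -> tuple[int, int]:
        return idx, subprocess.run(cmd, capture_output=True).returncode

    results: list[tuple[int, int]] = []
    with ThreadPoolExecutor(max_workers=min(5, len(commands))) as pool:
        futures = [pool.submit(run, i, c) for i, c in enumerate(commands)]
        for future in as_completed(futures):
            results.append(future.result())

    results.sort(key=lambda item: item[0])
    return [code for _, code in results]

Carrying the index through each future's result, rather than trusting completion order, is what lets failed extractions drop out in the real function while the surviving frames keep their original ordering.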

View File

@@ -679,7 +679,7 @@
       "desc": "Manage this Frigate instance's user accounts."
     },
     "addUser": "Add User",
-    "updatePassword": "Update Password",
+    "updatePassword": "Reset Password",
     "toast": {
       "success": {
         "createUser": "User {{user}} created successfully",

@@ -700,7 +700,7 @@
     "role": "Role",
     "noUsers": "No users found.",
     "changeRole": "Change user role",
-    "password": "Password",
+    "password": "Reset Password",
     "deleteUser": "Delete user"
   },
   "dialog": {

View File

@@ -14,6 +14,7 @@ import ProtectedRoute from "@/components/auth/ProtectedRoute";
 import { AuthProvider } from "@/context/auth-context";
 import useSWR from "swr";
 import { FrigateConfig } from "./types/frigateConfig";
+import ActivityIndicator from "@/components/indicators/activity-indicator";

 const Live = lazy(() => import("@/pages/Live"));
 const Events = lazy(() => import("@/pages/Events"));

@@ -50,6 +51,13 @@ function DefaultAppView() {
   const { data: config } = useSWR<FrigateConfig>("config", {
     revalidateOnFocus: false,
   });

+  // Compute required roles for main routes, ensuring we have config first
+  // to prevent race condition where custom roles are temporarily unavailable
+  const mainRouteRoles = config?.auth?.roles
+    ? Object.keys(config.auth.roles)
+    : undefined;
+
   return (
     <div className="size-full overflow-hidden">
       {isDesktop && <Sidebar />}

@@ -68,13 +76,11 @@ function DefaultAppView() {
           <Routes>
             <Route
               element={
-                <ProtectedRoute
-                  requiredRoles={
-                    config?.auth.roles
-                      ? Object.keys(config.auth.roles)
-                      : ["admin", "viewer"]
-                  }
-                />
+                mainRouteRoles ? (
+                  <ProtectedRoute requiredRoles={mainRouteRoles} />
+                ) : (
+                  <ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
+                )
               }
             >
               <Route index element={<Live />} />

View File

@@ -141,7 +141,37 @@ export default function Step3ChooseExamples({
       );
       await Promise.all(categorizePromises);

-      // Step 2.5: Create empty folders for classes that don't have any images
+      // Step 2.5: Delete any unselected images from train folder
+      // For state models, all images must be classified, so unselected images should be removed
+      // For object models, unselected images are assigned to "none" so they're already categorized
+      if (step1Data.modelType === "state") {
+        try {
+          // Fetch current train images to see what's left after categorization
+          const trainImagesResponse = await axios.get<string[]>(
+            `/classification/${step1Data.modelName}/train`,
+          );
+          const remainingTrainImages = trainImagesResponse.data || [];
+          const categorizedImageNames = new Set(Object.keys(classifications));
+          const unselectedImages = remainingTrainImages.filter(
+            (imageName) => !categorizedImageNames.has(imageName),
+          );
+
+          if (unselectedImages.length > 0) {
+            await axios.post(
+              `/classification/${step1Data.modelName}/train/delete`,
+              {
+                ids: unselectedImages,
+              },
+            );
+          }
+        } catch (error) {
+          // Silently fail - unselected images will remain but won't cause issues
+          // since the frontend filters out images that don't match expected format
+        }
+      }
+
+      // Step 2.6: Create empty folders for classes that don't have any images
       // This ensures all classes are available in the dataset view later
       const classesWithImages = new Set(
         Object.values(classifications).filter((c) => c && c !== "none"),
View File

@@ -49,6 +49,29 @@ export default function DetailActionsMenu({
     search.data?.type === "audio" ? null : [`review/event/${search.id}`],
   );

+  // don't render menu at all if no options are available
+  const hasSemanticSearchOption =
+    config?.semantic_search.enabled &&
+    setSimilarity !== undefined &&
+    search.data?.type === "object";
+  const hasReviewItem = !!(reviewItem && reviewItem.id);
+  const hasAdminTriggerOption =
+    isAdmin &&
+    config?.semantic_search.enabled &&
+    search.data?.type === "object";
+
+  if (
+    !search.has_snapshot &&
+    !search.has_clip &&
+    !hasSemanticSearchOption &&
+    !hasReviewItem &&
+    !hasAdminTriggerOption
+  ) {
+    return null;
+  }
+
   return (
     <DropdownMenu open={isOpen} onOpenChange={setIsOpen}>
       <DropdownMenuTrigger>

View File

@@ -866,6 +866,12 @@ function TrainGrid({
         };
       })
       .filter((data) => {
+        // Ignore images that don't match the expected format (event-camera-timestamp-state-score.webp)
+        // Expected format has 5 parts when split by "-", and score should be a valid number
+        if (data.score === undefined || isNaN(data.score) || !data.name) {
+          return false;
+        }
+
         if (!trainFilter) {
           return true;
         }