Mirror of https://github.com/blakeblackshear/frigate.git
Various Tweaks (#20713)

* Adjust for commutes
* Tweaks
* Don't show no models view in grid
* Add text-md to inputs
* Adjust train title for mobile
* Cleanup prompt more
* Use i18n functions for tooltip
* Fix model complexity causing crash
* Cleanup

parent 61549a0151
commit 29bc213c04
@@ -108,7 +108,7 @@ class GenAIReviewConfig(FrigateBaseModel):
         default="""### Normal Activity Indicators (Level 0)
 - Known/verified people in any zone
 - People with pets in residential areas
-- Brief activity near vehicles: approaching vehicles, brief standing, then leaving or entering vehicle (unloading, loading, checking something)
+- Brief activity near vehicles: approaching vehicles, brief standing, then leaving or entering vehicle (unloading, loading, checking something). Very short sequences (under 15 seconds) of vehicle access during typical hours (6 AM - 10 PM) are almost always normal.
 - Deliveries or services: brief approach to doors/porches, standing briefly, placing or retrieving items, then leaving
 - Access to private areas: entering back yards, garages, or homes (with or without visible purpose in frame)
 - Brief movement through semi-public areas (driveways, front yards) with items or approaching structure/vehicle
@@ -121,7 +121,7 @@ class GenAIReviewConfig(FrigateBaseModel):
 - Climbing or jumping fences/barriers to access property
 - Attempting to conceal actions or items from view
 - Prolonged presence without purpose: remaining in same area (near vehicles, private zones) throughout most/all of the sequence without clear activity or task. Brief stops (a few seconds of standing) are normal; sustained presence (most of the duration) without interaction is concerning.
-- Activity at unusual hours (very late night/early morning) combined with suspicious behavior patterns
+- Activity at unusual hours (11 PM - 5 AM) combined with suspicious behavior patterns. Normal commute/daytime hours (6 AM - 6 PM) do not increase suspicion by themselves.

 ### Critical Threat Indicators (Level 2)
 - Holding break-in tools (crowbars, pry bars, bolt cutters)
@@ -131,9 +131,11 @@ class GenAIReviewConfig(FrigateBaseModel):
 - Active property damage or theft

 ### Assessment Guidance
-When evaluating activity, first check if it matches Normal Activity Indicators. If it clearly matches normal patterns (brief vehicle access, delivery behavior, known people, pet activity), assign Level 0. Only consider Level 1 if the activity shows clear suspicious behaviors that don't fit normal patterns (testing access, stealing items, lingering across many frames without task, forced entry attempts).
+**Default to Level 0** for brief activity during normal hours. When evaluating, first check if it matches Normal Activity Indicators. Very short sequences (under 15 seconds) of vehicle access, deliveries, or movement through property during typical hours (6 AM - 11 PM) should be Level 0 unless there are clear suspicious actions visible (testing doors, stealing, climbing barriers).

-These patterns are guidance, not rigid rules. Consider the complete context: time, zone, objects, and sequence of actions. Brief activity with apparent purpose is generally normal. Sustained problematic behavior or clear security violations warrant elevation.""",
+Only assign Level 1 if the activity shows clear suspicious behaviors: testing access points, stealing items, lingering throughout most of the sequence without task, climbing barriers, or other explicit violations. Brief activity with apparent purpose (approaching vehicle, delivery, passing through) is Level 0.
+
+Consider duration, time, zone, and actions holistically. Brief is normal; sustained suspicious behavior is concerning.""",
         title="Custom activity context prompt defining normal and suspicious activity patterns for this property.",
     )

@@ -120,7 +120,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):

         thumbs = self.get_recording_frames(
             camera,
-            final_data["start_time"],
+            final_data["start_time"] - buffer_extension,
             final_data["end_time"] + buffer_extension,
             height=480,  # Use 480p for good balance between quality and token usage
         )
@@ -421,13 +421,12 @@ def run_analysis(
             name = sub_labels_list[i].replace("_", " ").title()
             unified_objects.append(f"{name} ({object_type})")

-    # Add non-verified objects as "Unrecognized (type)"
     for label in objects_list:
         if "-verified" in label:
             continue
         elif label in labelmap_objects:
-            object_type = label.replace("_", " ")
-            unified_objects.append(f"Unrecognized ({object_type})")
+            object_type = label.replace("_", " ").title()
+            unified_objects.append(object_type)

     analytics_data["unified_objects"] = unified_objects

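A minimal sketch of the changed loop above, with hypothetical inputs (the real function also pairs verified detections with their recognized sub labels earlier in run_analysis):

    # Verified detections keep the "Name (type)" form from the earlier pass;
    # plain detections are now appended as a title-cased label instead of the
    # old "Unrecognized (type)" form.
    labelmap_objects = ["person", "car"]
    objects_list = ["person-verified", "car"]
    unified_objects = ["Joe (person)"]  # hypothetical result of the verified pass

    for label in objects_list:
        if "-verified" in label:
            continue
        elif label in labelmap_objects:
            unified_objects.append(label.replace("_", " ").title())

    # unified_objects -> ["Joe (person)", "Car"]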
@@ -21,22 +21,26 @@ def is_arm64_platform() -> bool:
     return machine in ("aarch64", "arm64", "armv8", "armv7l")


-def get_ort_session_options() -> ort.SessionOptions | None:
+def get_ort_session_options(
+    is_complex_model: bool = False,
+) -> ort.SessionOptions | None:
     """Get ONNX Runtime session options with appropriate settings.

-    On ARM/RKNN platforms, use basic optimizations to avoid graph fusion issues
-    that can break certain models. On amd64, use default optimizations for better performance.
-    """
-    sess_options = None
+    Args:
+        is_complex_model: Whether the model needs basic optimization to avoid graph fusion issues.

-    if is_arm64_platform():
+    Returns:
+        SessionOptions with appropriate optimization level, or None for default settings.
+    """
+    if is_complex_model:
         sess_options = ort.SessionOptions()
         sess_options.graph_optimization_level = (
             ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
         )

         return sess_options

     return None


 # Import OpenVINO only when needed to avoid circular dependencies
 try:
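A rough usage sketch of the helper defined in the hunk above (assuming onnxruntime is installed; the model path and provider list are hypothetical, not Frigate's actual call site):

    import onnxruntime as ort

    # Complex models opt into conservative graph optimization to avoid fusion bugs;
    # everything else keeps onnxruntime's defaults (options stay None).
    opts = get_ort_session_options(is_complex_model=True)
    assert opts.graph_optimization_level == ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
    assert get_ort_session_options() is None

    session = ort.InferenceSession(
        "model.onnx",  # hypothetical path
        sess_options=opts,
        providers=["CPUExecutionProvider"],
    )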
@@ -103,6 +107,21 @@ class BaseModelRunner(ABC):
 class ONNXModelRunner(BaseModelRunner):
     """Run ONNX models using ONNX Runtime."""

+    @staticmethod
+    def is_cpu_complex_model(model_type: str) -> bool:
+        """Check if model needs basic optimization level to avoid graph fusion issues.
+
+        Some models (like Jina-CLIP) have issues with aggressive optimizations like
+        SimplifiedLayerNormFusion that create or expect nodes that don't exist.
+        """
+        # Import here to avoid circular imports
+        from frigate.embeddings.types import EnrichmentModelTypeEnum
+
+        return model_type in [
+            EnrichmentModelTypeEnum.jina_v1.value,
+            EnrichmentModelTypeEnum.jina_v2.value,
+        ]
+
     @staticmethod
     def is_migraphx_complex_model(model_type: str) -> bool:
         # Import here to avoid circular imports
@@ -496,7 +515,9 @@ def get_optimized_runner(
     return ONNXModelRunner(
         ort.InferenceSession(
             model_path,
-            sess_options=get_ort_session_options(),
+            sess_options=get_ort_session_options(
+                ONNXModelRunner.is_cpu_complex_model(model_type)
+            ),
             providers=providers,
             provider_options=options,
         )

@@ -94,12 +94,13 @@ When forming your description:
 - Note visible details such as clothing, items being carried or placed, tools or equipment present, and how they interact with the property or objects.
 - Consider the full sequence chronologically: what happens from start to finish, how duration and actions relate to the location and objects involved.
 - **Use the actual timestamp provided in "Activity started at"** below for time of day context—do not infer time from image brightness or darkness. Unusual hours (late night/early morning) should increase suspicion when the observable behavior itself appears questionable. However, recognize that some legitimate activities can occur at any hour.
-- **Weigh all evidence holistically**: Match the activity against both the normal and suspicious patterns above, then evaluate based on the complete context (zone, objects, time, actions). Activities matching normal patterns should be Level 0. Activities matching suspicious indicators should be Level 1. Use your judgment for edge cases.
+- **Consider duration as a primary factor**: Very short sequences (under 15 seconds) during normal hours (6 AM - 11 PM) are almost always Level 0 unless explicit suspicious actions are visible (testing doors, stealing, climbing). Brief activity with apparent purpose (vehicle access, deliveries, passing through) is normal.
+- **Weigh all evidence holistically**: Match the activity against both the normal and suspicious patterns above, then evaluate based on the complete context (zone, objects, time, actions, duration). Activities matching normal patterns should be Level 0. Activities matching suspicious indicators should be Level 1. Use your judgment for edge cases.

 ## Response Format

 Your response MUST be a flat JSON object with:
-- `title` (string): A concise, one-sentence title that captures the main activity. Use names from "Objects in Scene" based on what you visually observe. If you see both a recognized name and "Unrecognized" for the same type but visually observe only one person/object, use ONLY the recognized name. Examples: "Joe walking dog in backyard", "Britt near vehicle in driveway", "Joe and an unrecognized person on front porch".
+- `title` (string): A concise, one-sentence title that captures the main activity. Use names from "Objects in Scene" based on what you visually observe. If you see both a name and an unidentified object of the same type but visually observe only one person/object, use ONLY the name. Examples: "Joe walking dog in backyard", "Joe near vehicle in driveway", "Joe and a person on front porch".
 - `scene` (string): A narrative description of what happens across the sequence from start to finish. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign.
 - `confidence` (float): 0-1 confidence in your analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous. Lower confidence when the sequence is unclear, objects are partially obscured, or context is ambiguous.
 - `potential_threat_level` (integer): 0, 1, or 2 as defined below. Your threat level must be consistent with your scene description and the guidance above.
@@ -107,8 +108,8 @@ Your response MUST be a flat JSON object with:

 ## Threat Level Definitions

-- 0 — **Normal activity**: The observable activity matches Normal Activity Indicators (brief vehicle access, deliveries, known people, pet activity, services). The evidence supports a benign explanation when considering zone, objects, time, and actions together. **Brief activities with apparent legitimate purpose are generally Level 0.**
-- 1 — **Potentially suspicious**: The observable activity matches Suspicious Activity Indicators (testing access, stealing items, climbing barriers, lingering without interaction across multiple frames, unusual hours with suspicious behavior). The activity shows concerning patterns that warrant human review. **Requires clear suspicious behavior, not just ambiguity.**
+- 0 — **Normal activity**: The observable activity matches Normal Activity Indicators (brief vehicle access, deliveries, known people, pet activity, services). **Very short sequences (under 15 seconds) during normal hours (6 AM - 11 PM) with apparent purpose (vehicle access, deliveries, passing through) are Level 0.** Brief activities are generally normal.
+- 1 — **Potentially suspicious**: The observable activity matches Suspicious Activity Indicators (testing access, stealing items, climbing barriers, lingering throughout most of sequence without task, unusual hours 11 PM - 5 AM with suspicious behavior). **Requires clear suspicious actions visible in frames, not just ambiguity or brief presence.**
 - 2 — **Immediate threat**: Clear evidence of active criminal activity, forced entry, break-in, vandalism, aggression, weapons, theft in progress, or property damage.

 ## Sequence Details
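For reference, a response that satisfies the format above might look like the following (hypothetical values, written as a Python dict for illustration):

    example_response = {
        "title": "Joe near vehicle in driveway",
        "scene": (
            "A person identified as Joe approaches the parked car in the driveway, "
            "places a bag inside, and walks back toward the house."
        ),
        "confidence": 0.85,
        "potential_threat_level": 0,
    }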
@@ -119,11 +120,11 @@ Your response MUST be a flat JSON object with:

 ## Objects in Scene

-Each line represents a detection state, not necessarily unique individuals. Named objects are recognized/verified identities; "Unrecognized" indicates objects detected but not identified.
+Each line represents a detection state, not necessarily unique individuals. Objects with names in parentheses (e.g., "Name (person)") are verified identities. Objects without names (e.g., "Person") are detected but not identified.

-**CRITICAL: When you see both recognized and unrecognized entries of the same type (e.g., "Name (person)" and "Unrecognized (person)"), visually count how many distinct people/objects you actually see based on appearance and clothing. If you observe only ONE person throughout the sequence, use ONLY the recognized name (e.g., "Name"), not "Unrecognized". The same person may be recognized in some frames but not others. Only describe both recognized and unrecognized if you visually see MULTIPLE distinct people with clearly different appearances.**
+**CRITICAL: When you see both recognized and unrecognized entries of the same type (e.g., "Joe (person)" and "Person"), visually count how many distinct people/objects you actually see based on appearance and clothing. If you observe only ONE person throughout the sequence, use ONLY the recognized name (e.g., "Joe"). The same person may be recognized in some frames but not others. Only describe both if you visually see MULTIPLE distinct people with clearly different appearances.**

-**Note: "Unrecognized" is NOT an indicator of suspicious activity—it simply means the system hasn't identified that object.**
+**Note: Unidentified objects (without names) are NOT indicators of suspicious activity—they simply mean the system hasn't identified that object.**
 {get_objects_list()}

 ## Important Notes
@@ -164,10 +165,8 @@ Each line represents a detection state, not necessarily unique individuals. Name
         try:
             metadata = ReviewMetadata.model_validate_json(clean_json)

-            if any(
-                not obj.startswith("Unrecognized")
-                for obj in review_data["unified_objects"]
-            ):
+            # If any verified objects (contain parentheses with name), set to 0
+            if any("(" in obj for obj in review_data["unified_objects"]):
                 metadata.potential_threat_level = 0

             metadata.time = review_data["start"]

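A minimal sketch of the new guard (hypothetical data): verified identities are rendered as "Name (type)", so a parenthesis in any entry means a recognized identity was present and the model's threat level is pulled back to 0.

    unified_objects = ["Joe (person)", "Car"]  # hypothetical detections
    model_threat_level = 1                     # hypothetical LLM output

    # "(" only appears when a verified name was attached to the detection.
    if any("(" in obj for obj in unified_objects):
        model_threat_level = 0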
@@ -43,6 +43,7 @@
   },
   "train": {
     "title": "Recent Classifications",
+    "titleShort": "Recent",
     "aria": "Select Recent Classifications"
   },
   "categories": "Classes",

@@ -207,7 +207,7 @@ export default function Step1NameAndDefine({
               </FormLabel>
               <FormControl>
                 <Input
-                  className="h-8"
+                  className="text-md h-8"
                   placeholder={t("wizard.step1.namePlaceholder")}
                   {...field}
                 />
@@ -448,7 +448,7 @@ export default function Step1NameAndDefine({
               <FormControl>
                 <div className="flex items-center gap-2">
                   <Input
-                    className="h-8"
+                    className="text-md h-8"
                     placeholder={t("wizard.step1.classPlaceholder")}
                     {...field}
                   />

@@ -16,7 +16,6 @@ import ImageLoadingIndicator from "../indicators/ImageLoadingIndicator";
 import useContextMenu from "@/hooks/use-contextmenu";
 import ActivityIndicator from "../indicators/activity-indicator";
 import { TimeRange } from "@/types/timeline";
-import { capitalizeFirstLetter } from "@/utils/stringUtil";
 import { cn } from "@/lib/utils";
 import { InProgressPreview, VideoPreview } from "../preview/ScrubbablePreview";
 import { Preview } from "@/types/preview";
@@ -24,6 +23,7 @@ import { baseUrl } from "@/api/baseUrl";
 import { useTranslation } from "react-i18next";
 import { FaExclamationTriangle } from "react-icons/fa";
 import { MdOutlinePersonSearch } from "react-icons/md";
+import { getTranslatedLabel } from "@/utils/i18n";

 type PreviewPlayerProps = {
   review: ReviewSegment;
@@ -270,7 +270,9 @@ export default function PreviewThumbnailPlayer({
             </TooltipTrigger>
           </div>
           <TooltipContent className="smart-capitalize">
-            {[
+            {review.data.metadata
+              ? review.data.metadata.title
+              : [
               ...new Set([
                 ...(review.data.objects || []),
                 ...(review.data.sub_labels || []),
@@ -278,12 +280,12 @@ export default function PreviewThumbnailPlayer({
               ]),
             ]
               .filter(
-                (item) => item !== undefined && !item.includes("-verified"),
+                (item) =>
+                  item !== undefined && !item.includes("-verified"),
               )
-              .map((text) => capitalizeFirstLetter(text))
+              .map((text) => getTranslatedLabel(text))
               .sort()
-              .join(", ")
-              .replaceAll("-verified", "")}
+              .join(", ")}
           </TooltipContent>
         </Tooltip>
         {!!(

@@ -130,22 +130,22 @@ export default function ModelSelectionView({
           </Button>
         </div>
       </div>
-      <div className="grid auto-rows-max grid-cols-2 gap-2 overflow-y-auto p-2 md:grid-cols-4 lg:grid-cols-5 xl:grid-cols-6 2xl:grid-cols-8 3xl:grid-cols-10">
       {selectedClassificationConfigs.length === 0 ? (
         <NoModelsView
           onCreateModel={() => setNewModel(true)}
           modelType={pageToggle}
         />
       ) : (
-        selectedClassificationConfigs.map((config) => (
+        <div className="grid auto-rows-max grid-cols-2 gap-2 overflow-y-auto p-2 md:grid-cols-4 lg:grid-cols-5 xl:grid-cols-6 2xl:grid-cols-8 3xl:grid-cols-10">
+          {selectedClassificationConfigs.map((config) => (
             <ModelCard
               key={config.name}
               config={config}
               onClick={() => onClick(config)}
             />
-          ))
-        )}
-      </div>
+          ))}
+        </div>
+      )}
     </div>
   );
 }

@@ -44,7 +44,7 @@ import {
   useRef,
   useState,
 } from "react";
-import { isDesktop } from "react-device-detect";
+import { isDesktop, isMobileOnly } from "react-device-detect";
 import { Trans, useTranslation } from "react-i18next";
 import { LuPencil, LuTrash2 } from "react-icons/lu";
 import { toast } from "sonner";
@@ -436,8 +436,24 @@ function LibrarySelector({
   onRename,
 }: LibrarySelectorProps) {
   const { t } = useTranslation(["views/classificationModel"]);

+  // data
+
   const [confirmDelete, setConfirmDelete] = useState<string | null>(null);
   const [renameClass, setRenameFace] = useState<string | null>(null);
+  const pageTitle = useMemo(() => {
+    if (pageToggle != "train") {
+      return pageToggle;
+    }
+
+    if (isMobileOnly) {
+      return t("train.titleShort");
+    }
+
+    return t("train.title");
+  }, [pageToggle, t]);
+
+  // interaction

   const handleDeleteFace = useCallback(
     (name: string) => {
@@ -507,7 +523,7 @@ function LibrarySelector({
     <DropdownMenu>
       <DropdownMenuTrigger asChild>
         <Button className="flex justify-between smart-capitalize">
-          {pageToggle == "train" ? t("train.title") : pageToggle}
+          {pageTitle}
           <span className="ml-2 text-primary-variant">
             (
             {(pageToggle &&