Miscellaneous fixes (0.17 Beta) (#21443)

* Use a thread lock around the JinaV2 call, as it sets multiple internal fields while being called

* Fix audio label translation in the Explore filter

* Show the event in all cases, even when there is no non-"none" match

* Improve i18n key fallback when translation files aren't loaded

Display a valid time instead of "invalid time"

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Nicolas Mowen 2025-12-29 08:31:54 -07:00 committed by GitHub
parent 3655b9269d
commit e2a1208c90
5 changed files with 96 additions and 58 deletions

@@ -3,6 +3,7 @@
 import io
 import logging
 import os
+import threading

 import numpy as np
 from PIL import Image
@@ -53,6 +54,11 @@ class JinaV2Embedding(BaseEmbedding):
         self.tokenizer = None
         self.image_processor = None
         self.runner = None
+
+        # Lock to prevent concurrent calls (text and vision share this instance)
+        self._call_lock = threading.Lock()
+
+        # download the model and tokenizer
         files_names = list(self.download_urls.keys()) + [self.tokenizer_file]
         if not all(
             os.path.exists(os.path.join(self.download_path, n)) for n in files_names
@@ -200,37 +206,40 @@ class JinaV2Embedding(BaseEmbedding):
     def __call__(
         self, inputs: list[str] | list[Image.Image] | list[str], embedding_type=None
     ) -> list[np.ndarray]:
-        self.embedding_type = embedding_type
-        if not self.embedding_type:
-            raise ValueError(
-                "embedding_type must be specified either in __init__ or __call__"
-            )
-
-        self._load_model_and_utils()
-        processed = self._preprocess_inputs(inputs)
-        batch_size = len(processed)
-
-        # Prepare ONNX inputs with matching batch sizes
-        onnx_inputs = {}
-        if self.embedding_type == "text":
-            onnx_inputs["input_ids"] = np.stack([x[0] for x in processed])
-            onnx_inputs["pixel_values"] = np.zeros(
-                (batch_size, 3, 512, 512), dtype=np.float32
-            )
-        elif self.embedding_type == "vision":
-            onnx_inputs["input_ids"] = np.zeros((batch_size, 16), dtype=np.int64)
-            onnx_inputs["pixel_values"] = np.stack([x[0] for x in processed])
-        else:
-            raise ValueError("Invalid embedding type")
-
-        # Run inference
-        outputs = self.runner.run(onnx_inputs)
-
-        if self.embedding_type == "text":
-            embeddings = outputs[2]  # text embeddings
-        elif self.embedding_type == "vision":
-            embeddings = outputs[3]  # image embeddings
-        else:
-            raise ValueError("Invalid embedding type")
-
-        embeddings = self._postprocess_outputs(embeddings)
-        return [embedding for embedding in embeddings]
+        # Lock the entire call to prevent race conditions when text and vision
+        # embeddings are called concurrently from different threads
+        with self._call_lock:
+            self.embedding_type = embedding_type
+            if not self.embedding_type:
+                raise ValueError(
+                    "embedding_type must be specified either in __init__ or __call__"
+                )
+
+            self._load_model_and_utils()
+            processed = self._preprocess_inputs(inputs)
+            batch_size = len(processed)
+
+            # Prepare ONNX inputs with matching batch sizes
+            onnx_inputs = {}
+            if self.embedding_type == "text":
+                onnx_inputs["input_ids"] = np.stack([x[0] for x in processed])
+                onnx_inputs["pixel_values"] = np.zeros(
+                    (batch_size, 3, 512, 512), dtype=np.float32
+                )
+            elif self.embedding_type == "vision":
+                onnx_inputs["input_ids"] = np.zeros((batch_size, 16), dtype=np.int64)
+                onnx_inputs["pixel_values"] = np.stack([x[0] for x in processed])
+            else:
+                raise ValueError("Invalid embedding type")
+
+            # Run inference
+            outputs = self.runner.run(onnx_inputs)
+
+            if self.embedding_type == "text":
+                embeddings = outputs[2]  # text embeddings
+            elif self.embedding_type == "vision":
+                embeddings = outputs[3]  # image embeddings
+            else:
+                raise ValueError("Invalid embedding type")
+
+            embeddings = self._postprocess_outputs(embeddings)
+            return [embedding for embedding in embeddings]
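Editor's note: a minimal, illustrative sketch (not Frigate code) of the race the new lock guards against. Because __call__ stores embedding_type on the shared instance before branching on it, two threads calling the same object can interleave between "set the field" and "read the field back", so one call can run with the other call's type. The SharedEmbedder class and its behavior here are invented for illustration only.

# Hypothetical standalone example; SharedEmbedder only mimics the pattern in
# JinaV2Embedding where __call__ mutates instance state (embedding_type).
import threading


class SharedEmbedder:
    def __init__(self):
        self.embedding_type = None
        self._call_lock = threading.Lock()

    def __call__(self, inputs, embedding_type):
        # Holding the lock for the whole call keeps setting the field and
        # branching on it atomic with respect to other threads.
        with self._call_lock:
            self.embedding_type = embedding_type
            if self.embedding_type == "text":
                return [f"text:{x}" for x in inputs]
            return [f"vision:{x}" for x in inputs]


embedder = SharedEmbedder()
threads = [
    threading.Thread(target=embedder, args=(["a"], "text")),
    threading.Thread(target=embedder, args=(["b"], "vision")),
]
for t in threads:
    t.start()
for t in threads:
    t.join()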

@@ -251,11 +251,30 @@ function GeneralFilterButton({
   updateLabelFilter,
 }: GeneralFilterButtonProps) {
   const { t } = useTranslation(["components/filter"]);
+  const { data: config } = useSWR<FrigateConfig>("config", {
+    revalidateOnFocus: false,
+  });
+
   const [open, setOpen] = useState(false);
   const [currentLabels, setCurrentLabels] = useState<string[] | undefined>(
     selectedLabels,
   );

+  const allAudioListenLabels = useMemo<Set<string>>(() => {
+    if (!config) {
+      return new Set<string>();
+    }
+
+    const labels = new Set<string>();
+    Object.values(config.cameras).forEach((camera) => {
+      if (camera?.audio?.enabled) {
+        camera.audio.listen.forEach((label) => {
+          labels.add(label);
+        });
+      }
+    });
+
+    return labels;
+  }, [config]);
+
   const buttonText = useMemo(() => {
     if (isMobile) {
       return t("labels.all.short");
@@ -266,13 +285,17 @@ function GeneralFilterButton({
     }

     if (selectedLabels.length == 1) {
-      return getTranslatedLabel(selectedLabels[0]);
+      const label = selectedLabels[0];
+      return getTranslatedLabel(
+        label,
+        allAudioListenLabels.has(label) ? "audio" : "object",
+      );
     }

     return t("labels.count", {
       count: selectedLabels.length,
     });
-  }, [selectedLabels, t]);
+  }, [selectedLabels, allAudioListenLabels, t]);

   // ui
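Editor's note: a hypothetical TypeScript sketch of why the label-type hint matters. The helper and translation table below are invented for illustration; the object.* / audio.* key layout is taken from the i18n fallback handler changed later in this commit.

// Hypothetical helper, not part of this commit: decide which translation
// namespace a label belongs to, given the set of audio "listen" labels
// collected from the camera config.
type LabelType = "object" | "audio";

const translations: Record<string, string> = {
  "object.person": "Person",
  "audio.speech": "Speech",
};

function translateLabel(label: string, audioListenLabels: Set<string>): string {
  const type: LabelType = audioListenLabels.has(label) ? "audio" : "object";
  return translations[`${type}.${label}`] ?? label;
}

// "speech" only exists under audio.*, so without the hint the lookup would
// miss and fall back to the raw label, which is the bug the filter fix addresses.
const audioLabels = new Set(["speech", "scream", "bark"]);
console.log(translateLabel("person", audioLabels)); // "Person"
console.log(translateLabel("speech", audioLabels)); // "Speech"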

@@ -925,11 +925,11 @@ function FaceAttemptGroup({
     [onRefresh, t],
   );

+  // Create ClassifiedEvent from Event (face recognition uses sub_label)
   const classifiedEvent: ClassifiedEvent | undefined = useMemo(() => {
-    if (!event || !event.sub_label || event.sub_label === "none") {
+    if (!event) {
       return undefined;
     }

     return {
       id: event.id,
       label: event.sub_label,

@@ -79,6 +79,24 @@ i18n
     parseMissingKeyHandler: (key: string) => {
       const parts = key.split(".");

+      // eslint-disable-next-line no-console
+      console.warn(`Missing translation key: ${key}`);
+
+      if (parts[0] === "time" && parts[1]?.includes("formattedTimestamp")) {
+        // Extract the format type from the last part (12hour, 24hour)
+        const formatType = parts[parts.length - 1];
+
+        // Return actual date-fns format strings as fallbacks
+        const formatDefaults: Record<string, string> = {
+          "12hour": "h:mm aaa",
+          "24hour": "HH:mm",
+        };
+
+        if (formatDefaults[formatType]) {
+          return formatDefaults[formatType];
+        }
+      }
+
       // Handle special cases for objects and audio
       if (parts[0] === "object" || parts[0] === "audio") {
         return (
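Editor's note: a small usage sketch (assumed, not code from this commit) of why the fallback returns real date-fns pattern strings. If the bundle for a time.formattedTimestamp.* key has not loaded yet, the handler now yields "h:mm aaa" or "HH:mm", which date-fns can format directly instead of the UI rendering "invalid time". parseMissingKeyFallback is a hypothetical stand-in for the handler shown above.

// Sketch of the fallback path; mirrors the diff above in a standalone function.
import { format } from "date-fns";

function parseMissingKeyFallback(key: string): string {
  const parts = key.split(".");
  if (parts[0] === "time" && parts[1]?.includes("formattedTimestamp")) {
    const formatDefaults: Record<string, string> = {
      "12hour": "h:mm aaa",
      "24hour": "HH:mm",
    };
    const formatType = parts[parts.length - 1];
    if (formatDefaults[formatType]) {
      return formatDefaults[formatType];
    }
  }
  return key;
}

// With translations missing, the key still resolves to a usable pattern:
const pattern = parseMissingKeyFallback("time.formattedTimestamp.12hour");
console.log(format(new Date(2025, 11, 29, 8, 31), pattern)); // "8:31 am"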

@@ -1043,34 +1043,22 @@ function ObjectTrainGrid({
        return undefined;
      }

-      const classificationType = model.object_config.classification_type;
-
-      if (classificationType === "attribute") {
-        // For attribute type, look at event.data[model.name]
-        const attributeValue = event.data[model.name] as string | undefined;
-        const attributeScore = event.data[`${model.name}_score`] as
-          | number
-          | undefined;
-
-        if (attributeValue && attributeValue !== "none") {
-          return {
-            id: event.id,
-            label: attributeValue,
-            score: attributeScore,
-          };
-        }
-      } else {
-        // For sub_label type, use event.sub_label
-        if (event.sub_label && event.sub_label !== "none") {
-          return {
-            id: event.id,
-            label: event.sub_label,
-            score: event.data?.sub_label_score,
-          };
-        }
-      }
-
-      return undefined;
+      let label: string | undefined = undefined;
+      let score: number | undefined = undefined;
+
+      if (model.object_config.classification_type === "attribute") {
+        label = event.data[model.name] as string | undefined;
+        score = event.data[`${model.name}_score`] as number | undefined;
+      } else {
+        label = event.sub_label;
+        score = event.data.sub_label_score;
+      }
+
+      return {
+        id: event.id,
+        label: label,
+        score: score,
+      };
     },
     [model],
   );
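Editor's note: the callback above now always returns a ClassifiedEvent, even when the label is missing or "none", which is what lets the Explore/train grid show the event in all cases. A minimal behavioral sketch, with trimmed-down stand-in types (only the id/label/score fields from the diff are assumed):

// Illustrative sketch only; TrackedEvent and ClassifiedEvent are simplified
// stand-ins for the real Frigate types.
type TrackedEvent = {
  id: string;
  sub_label?: string;
  data: Record<string, unknown>;
};

type ClassifiedEvent = {
  id: string;
  label?: string;
  score?: number;
};

// Mirrors the new logic: no early "return undefined" when the label is "none",
// so an unclassified event still maps to a ClassifiedEvent and gets rendered.
function toClassifiedEvent(event: TrackedEvent): ClassifiedEvent {
  return {
    id: event.id,
    label: event.sub_label,
    score: event.data.sub_label_score as number | undefined,
  };
}

console.log(toClassifiedEvent({ id: "1", sub_label: "none", data: {} }));
// { id: "1", label: "none", score: undefined }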