mirror of https://github.com/blakeblackshear/frigate.git
Miscellaneous fixes (0.17 Beta) (#21443)
* Use a thread lock for the JinaV2 call, as it sets multiple internal fields while being called
* Fix audio label translation in the explore filter
* Show the event in all cases, even without a non-none match
* Improve the i18n key fallback when translation files aren't loaded: display a valid time instead of "invalid time"

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
This commit is contained in:
parent 3655b9269d
commit e2a1208c90
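The first change below serializes calls to the shared JinaV2 embedding instance: __call__ sets several instance fields (such as embedding_type) while it runs, and the text and vision paths share one object, so concurrent calls can clobber each other's state. The following is a minimal, self-contained sketch of that locking pattern, not Frigate's actual code; the SharedEmbedder class, its field names, and the toy workload are invented for illustration, and only the threading.Lock-around-__call__ idea mirrors the diff.

import threading
import time


class SharedEmbedder:
    """Toy stand-in for an embedder whose __call__ mutates instance state."""

    def __init__(self) -> None:
        # One lock shared by every caller of this instance; text and vision
        # requests go through the same object, so they must serialize here.
        self._call_lock = threading.Lock()
        self.embedding_type = None

    def __call__(self, inputs, embedding_type):
        # Hold the lock for the entire call so self.embedding_type cannot be
        # overwritten by a concurrent caller between the assignment below and
        # the point where it is read back.
        with self._call_lock:
            self.embedding_type = embedding_type
            time.sleep(0.01)  # simulate model inference
            return [f"{self.embedding_type}:{item}" for item in inputs]


if __name__ == "__main__":
    embedder = SharedEmbedder()
    results = []

    def worker(kind):
        results.append(embedder(["sample"], embedding_type=kind))

    threads = [threading.Thread(target=worker, args=(k,)) for k in ("text", "vision")]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(results)  # each result carries the embedding_type its caller passed in

Holding the lock for the whole call serializes text and vision embedding work on the shared instance, trading a little throughput for consistency of the shared fields, which matches the intent of the comment added in the diff.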
@@ -3,6 +3,7 @@
 import io
 import logging
 import os
+import threading
 
 import numpy as np
 from PIL import Image
@@ -53,6 +54,11 @@ class JinaV2Embedding(BaseEmbedding):
         self.tokenizer = None
         self.image_processor = None
         self.runner = None
+
+        # Lock to prevent concurrent calls (text and vision share this instance)
+        self._call_lock = threading.Lock()
+
+        # download the model and tokenizer
         files_names = list(self.download_urls.keys()) + [self.tokenizer_file]
         if not all(
             os.path.exists(os.path.join(self.download_path, n)) for n in files_names
@@ -200,6 +206,9 @@ class JinaV2Embedding(BaseEmbedding):
     def __call__(
         self, inputs: list[str] | list[Image.Image] | list[str], embedding_type=None
     ) -> list[np.ndarray]:
+        # Lock the entire call to prevent race conditions when text and vision
+        # embeddings are called concurrently from different threads
+        with self._call_lock:
             self.embedding_type = embedding_type
             if not self.embedding_type:
                 raise ValueError(
@@ -251,11 +251,30 @@ function GeneralFilterButton({
   updateLabelFilter,
 }: GeneralFilterButtonProps) {
   const { t } = useTranslation(["components/filter"]);
+  const { data: config } = useSWR<FrigateConfig>("config", {
+    revalidateOnFocus: false,
+  });
   const [open, setOpen] = useState(false);
   const [currentLabels, setCurrentLabels] = useState<string[] | undefined>(
     selectedLabels,
   );
+
+  const allAudioListenLabels = useMemo<Set<string>>(() => {
+    if (!config) {
+      return new Set<string>();
+    }
+
+    const labels = new Set<string>();
+    Object.values(config.cameras).forEach((camera) => {
+      if (camera?.audio?.enabled) {
+        camera.audio.listen.forEach((label) => {
+          labels.add(label);
+        });
+      }
+    });
+    return labels;
+  }, [config]);
 
   const buttonText = useMemo(() => {
     if (isMobile) {
       return t("labels.all.short");
@@ -266,13 +285,17 @@ function GeneralFilterButton({
     }
 
     if (selectedLabels.length == 1) {
-      return getTranslatedLabel(selectedLabels[0]);
+      const label = selectedLabels[0];
+      return getTranslatedLabel(
+        label,
+        allAudioListenLabels.has(label) ? "audio" : "object",
+      );
     }
 
     return t("labels.count", {
       count: selectedLabels.length,
     });
-  }, [selectedLabels, t]);
+  }, [selectedLabels, allAudioListenLabels, t]);
 
   // ui
 
@@ -925,11 +925,11 @@ function FaceAttemptGroup({
     [onRefresh, t],
   );
 
-  // Create ClassifiedEvent from Event (face recognition uses sub_label)
   const classifiedEvent: ClassifiedEvent | undefined = useMemo(() => {
-    if (!event || !event.sub_label || event.sub_label === "none") {
+    if (!event) {
       return undefined;
     }
 
     return {
       id: event.id,
       label: event.sub_label,
@@ -79,6 +79,24 @@ i18n
     parseMissingKeyHandler: (key: string) => {
       const parts = key.split(".");
+
+      // eslint-disable-next-line no-console
+      console.warn(`Missing translation key: ${key}`);
+
+      if (parts[0] === "time" && parts[1]?.includes("formattedTimestamp")) {
+        // Extract the format type from the last part (12hour, 24hour)
+        const formatType = parts[parts.length - 1];
+
+        // Return actual date-fns format strings as fallbacks
+        const formatDefaults: Record<string, string> = {
+          "12hour": "h:mm aaa",
+          "24hour": "HH:mm",
+        };
+
+        if (formatDefaults[formatType]) {
+          return formatDefaults[formatType];
+        }
+      }
 
       // Handle special cases for objects and audio
       if (parts[0] === "object" || parts[0] === "audio") {
         return (
@@ -1043,34 +1043,22 @@ function ObjectTrainGrid({
        return undefined;
      }
 
-      const classificationType = model.object_config.classification_type;
+      let label: string | undefined = undefined;
+      let score: number | undefined = undefined;
 
-      if (classificationType === "attribute") {
-        // For attribute type, look at event.data[model.name]
-        const attributeValue = event.data[model.name] as string | undefined;
-        const attributeScore = event.data[`${model.name}_score`] as
-          | number
-          | undefined;
-
-        if (attributeValue && attributeValue !== "none") {
-          return {
-            id: event.id,
-            label: attributeValue,
-            score: attributeScore,
-          };
-        }
+      if (model.object_config.classification_type === "attribute") {
+        label = event.data[model.name] as string | undefined;
+        score = event.data[`${model.name}_score`] as number | undefined;
       } else {
-        // For sub_label type, use event.sub_label
-        if (event.sub_label && event.sub_label !== "none") {
-          return {
-            id: event.id,
-            label: event.sub_label,
-            score: event.data?.sub_label_score,
-          };
-        }
+        label = event.sub_label;
+        score = event.data.sub_label_score;
       }
 
-      return undefined;
+      return {
+        id: event.id,
+        label: label,
+        score: score,
+      };
     },
     [model],
   );