From 29de723267bd4b16684d8bdd582eb1accb044bfd Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Tue, 24 Aug 2021 06:50:04 -0500 Subject: [PATCH 001/132] limit legacy expiration to files after the oldest recording in the db --- frigate/record.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/frigate/record.py b/frigate/record.py index c5150103d..e1aed08f1 100644 --- a/frigate/record.py +++ b/frigate/record.py @@ -245,23 +245,32 @@ class RecordingCleanup(threading.Thread): def expire_files(self): logger.debug("Start expire files (legacy).") - shortest_retention = self.config.record.retain_days default_expire = ( datetime.datetime.now().timestamp() - SECONDS_IN_DAY * self.config.record.retain_days ) delete_before = {} + for name, camera in self.config.cameras.items(): delete_before[name] = ( datetime.datetime.now().timestamp() - SECONDS_IN_DAY * camera.record.retain_days ) - if camera.record.retain_days < shortest_retention: - shortest_retention = camera.record.retain_days - logger.debug(f"Shortest retention: {shortest_retention}") + # find all the recordings older than the oldest recording in the db + oldest_recording = ( + Recordings.select().order_by(Recordings.start_time.desc()).get() + ) + + oldest_timestamp = ( + oldest_recording.start_time + if oldest_recording + else datetime.datetime.now().timestamp() + ) + + logger.debug(f"Oldest recording in the db: {oldest_timestamp}") process = sp.run( - ["find", RECORD_DIR, "-type", "f", "-mtime", f"+{shortest_retention}"], + ["find", RECORD_DIR, "-type", "f", "-newermt", f"@{oldest_timestamp}"], capture_output=True, text=True, ) @@ -269,9 +278,6 @@ class RecordingCleanup(threading.Thread): for f in files_to_check: p = Path(f) - # Ignore files that have a record in the recordings DB - if Recordings.select().where(Recordings.path == str(p)).count(): - continue if p.stat().st_mtime < delete_before.get(p.parent.name, default_expire): p.unlink(missing_ok=True) From 
bddde74c06806552188772f8c3cc1d1bfa583ff1 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Tue, 24 Aug 2021 07:01:29 -0500 Subject: [PATCH 002/132] Update issue templates --- .github/ISSUE_TEMPLATE/feature_request.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..57f76d308 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: enhancement +assignees: '' + +--- + +**Describe what you are trying to accomplish and why in non technical terms** +I want to be able to ... so that I can ... + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. 
From 10ab70080ad4f8b741a481dc68d5d90c00426913 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E1=97=AA=D1=94=CE=BD=CE=B9=CE=B7=20=E1=97=B7=CF=85=D0=BD?= =?UTF-8?q?=CA=9F?= Date: Tue, 24 Aug 2021 08:59:31 -0400 Subject: [PATCH 003/132] fix: consistent error logging to mqtt connection issues (#1578) --- frigate/mqtt.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/frigate/mqtt.py b/frigate/mqtt.py index 78b6590a9..dd34cd2b0 100644 --- a/frigate/mqtt.py +++ b/frigate/mqtt.py @@ -96,14 +96,14 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics): threading.current_thread().name = "mqtt" if rc != 0: if rc == 3: - logger.error("MQTT Server unavailable") + logger.error("Unable to connect to MQTT server: MQTT Server unavailable") elif rc == 4: - logger.error("MQTT Bad username or password") + logger.error("Unable to connect to MQTT server: MQTT Bad username or password") elif rc == 5: - logger.error("MQTT Not authorized") + logger.error("Unable to connect to MQTT server: MQTT Not authorized") else: logger.error( - "Unable to connect to MQTT: Connection refused. Error code: " + "Unable to connect to MQTT server: Connection refused. Error code: " + str(rc) ) From 4efc5848168e5454aa32c9030852b19693d9760a Mon Sep 17 00:00:00 2001 From: Bernt Christian Egeland Date: Thu, 26 Aug 2021 13:54:36 +0200 Subject: [PATCH 004/132] Move event-view to events table. (#1596) * fixed position for Dialog * added eventId to deleted item * removed page route redirect + New Close Button * event component added to events list. 
New delete reducer * removed event route * moved delete reducer to event page * removed redundant event details * keep aspect ratio * keep aspect ratio * removed old buttons - repositioned to top * removed console.log * event view function * removed clip header * top position * centered image if no clips avail * comments * linting * lint * added scrollIntoView when event has been mounted * added Clip header * added scrollIntoView to test * lint * useRef to scroll event into view * removed unused functions * reverted changes to event.test * scroll into view * moved delete reducer * removed commented code * styling * moved close button to right side * Added new close svg icon Co-authored-by: Bernt Christian Egeland --- web/src/App.jsx | 1 - web/src/api/index.jsx | 23 ++--- web/src/components/Dialog.jsx | 2 +- web/src/icons/Close.jsx | 13 +++ web/src/index.css | 9 ++ web/src/routes/Event.jsx | 161 ++++++++++++++-------------------- web/src/routes/Events.jsx | 157 +++++++++++++++++++++------------ 7 files changed, 192 insertions(+), 174 deletions(-) create mode 100644 web/src/icons/Close.jsx diff --git a/web/src/App.jsx b/web/src/App.jsx index 4ddedece5..f6dd29451 100644 --- a/web/src/App.jsx +++ b/web/src/App.jsx @@ -28,7 +28,6 @@ export default function App() { - diff --git a/web/src/api/index.jsx b/web/src/api/index.jsx index 3b68a3a87..f6ef556d8 100644 --- a/web/src/api/index.jsx +++ b/web/src/api/index.jsx @@ -18,7 +18,7 @@ const initialState = Object.freeze({ const Api = createContext(initialState); -function reducer(state, { type, payload, meta }) { +function reducer(state, { type, payload }) { switch (type) { case 'REQUEST': { const { url, fetchId } = payload; @@ -36,22 +36,9 @@ function reducer(state, { type, payload, meta }) { } case 'DELETE': { const { eventId } = payload; - return produce(state, (draftState) => { - Object.keys(draftState.queries).map((url, index) => { - // If data has no array length then just return state. 
- if (!('data' in draftState.queries[url]) || !draftState.queries[url].data.length) return state; - - //Find the index to remove - const removeIndex = draftState.queries[url].data.map((event) => event.id).indexOf(eventId); - if (removeIndex === -1) return state; - - // We need to keep track of deleted items, This will be used to re-calculate "ReachEnd" for auto load new events. Events.jsx - const totDeleted = state.queries[url].deleted || 0; - - // Splice the deleted index. - draftState.queries[url].data.splice(removeIndex, 1); - draftState.queries[url].deleted = totDeleted + 1; + Object.keys(draftState.queries).map((url) => { + draftState.queries[url].deletedId = eventId; }); }); } @@ -111,9 +98,9 @@ export function useFetch(url, fetchId) { const data = state.queries[url].data || null; const status = state.queries[url].status; - const deleted = state.queries[url].deleted || 0; + const deletedId = state.queries[url].deletedId || 0; - return { data, status, deleted }; + return { data, status, deletedId }; } export function useDelete() { diff --git a/web/src/components/Dialog.jsx b/web/src/components/Dialog.jsx index aefc323b4..472dc3e92 100644 --- a/web/src/components/Dialog.jsx +++ b/web/src/components/Dialog.jsx @@ -19,7 +19,7 @@ export default function Dialog({ actions = [], portalRootID = 'dialogs', title,
+ + + + ); +} + +export default memo(Close); diff --git a/web/src/index.css b/web/src/index.css index 1ccb2fad7..2278ef964 100644 --- a/web/src/index.css +++ b/web/src/index.css @@ -29,3 +29,12 @@ .jsmpeg canvas { position: static !important; } + +/* +Event.js +Maintain aspect ratio and scale down the video container +Could not find a proper tailwind css. +*/ +.outer-max-width { + max-width: 60%; +} diff --git a/web/src/routes/Event.jsx b/web/src/routes/Event.jsx index 3cbe4e60f..06025d75e 100644 --- a/web/src/routes/Event.jsx +++ b/web/src/routes/Event.jsx @@ -1,25 +1,32 @@ import { h, Fragment } from 'preact'; -import { useCallback, useState } from 'preact/hooks'; -import { route } from 'preact-router'; +import { useCallback, useState, useEffect } from 'preact/hooks'; import ActivityIndicator from '../components/ActivityIndicator'; import Button from '../components/Button'; import Clip from '../icons/Clip'; +import Close from '../icons/Close'; import Delete from '../icons/Delete'; import Snapshot from '../icons/Snapshot'; import Dialog from '../components/Dialog'; import Heading from '../components/Heading'; -import Link from '../components/Link'; import VideoPlayer from '../components/VideoPlayer'; import { FetchStatus, useApiHost, useEvent, useDelete } from '../api'; -import { Table, Thead, Tbody, Th, Tr, Td } from '../components/Table'; -export default function Event({ eventId }) { +export default function Event({ eventId, close, scrollRef }) { const apiHost = useApiHost(); const { data, status } = useEvent(eventId); const [showDialog, setShowDialog] = useState(false); + const [shouldScroll, setShouldScroll] = useState(true); const [deleteStatus, setDeleteStatus] = useState(FetchStatus.NONE); const setDeleteEvent = useDelete(); + useEffect(() => { + // Scroll event into view when component has been mounted. 
+ if (shouldScroll && scrollRef && scrollRef[eventId]) { + scrollRef[eventId].scrollIntoView(); + setShouldScroll(false); + } + }, [data, scrollRef, eventId, shouldScroll]); + const handleClickDelete = () => { setShowDialog(true); }; @@ -40,7 +47,6 @@ export default function Event({ eventId }) { if (success) { setDeleteStatus(FetchStatus.LOADED); setShowDialog(false); - route('/events', true); } }, [eventId, setShowDialog, setDeleteEvent]); @@ -48,18 +54,25 @@ export default function Event({ eventId }) { return ; } - const startime = new Date(data.start_time * 1000); - const endtime = new Date(data.end_time * 1000); - return (
-
- - {data.camera} {data.label} {startime.toLocaleString()} - - +
+
+ + +
+
+ + +
{showDialog ? ( ) : null}
- - - - - - - - - - - - - - - - - - - - - - - - -
KeyValue
Camera - {data.camera} -
Timeframe - {startime.toLocaleString()} – {endtime.toLocaleString()} -
Score{(data.top_score * 100).toFixed(2)}%
Zones{data.zones.join(', ')}
- - {data.has_clip ? ( - - Clip - {}} - /> -
- - -
-
- ) : ( - - {data.has_snapshot ? 'Best Image' : 'Thumbnail'} - {`${data.label} - - )} +
+
+ {data.has_clip ? ( + + Clip + {}} + /> + + ) : ( + + {data.has_snapshot ? 'Best Image' : 'Thumbnail'} + {`${data.label} + + )} +
+
); } diff --git a/web/src/routes/Events.jsx b/web/src/routes/Events.jsx index e74bbadc4..4db9413df 100644 --- a/web/src/routes/Events.jsx +++ b/web/src/routes/Events.jsx @@ -1,10 +1,11 @@ -import { h } from 'preact'; +import { h, Fragment } from 'preact'; import ActivityIndicator from '../components/ActivityIndicator'; import Heading from '../components/Heading'; import Link from '../components/Link'; import Select from '../components/Select'; import produce from 'immer'; import { route } from 'preact-router'; +import Event from './Event'; import { useIntersectionObserver } from '../hooks'; import { FetchStatus, useApiHost, useConfig, useEvents } from '../api'; import { Table, Thead, Tbody, Tfoot, Th, Tr, Td } from '../components/Table'; @@ -12,9 +13,20 @@ import { useCallback, useEffect, useMemo, useReducer, useState } from 'preact/ho const API_LIMIT = 25; -const initialState = Object.freeze({ events: [], reachedEnd: false, searchStrings: {} }); +const initialState = Object.freeze({ events: [], reachedEnd: false, searchStrings: {}, deleted: 0 }); const reducer = (state = initialState, action) => { switch (action.type) { + case 'DELETE_EVENT': { + const { deletedId } = action; + + return produce(state, (draftState) => { + const idx = draftState.events.findIndex((e) => e.id === deletedId); + if (idx === -1) return state; + + draftState.events.splice(idx, 1); + draftState.deleted++; + }); + } case 'APPEND_EVENTS': { const { meta: { searchString }, @@ -24,6 +36,7 @@ const reducer = (state = initialState, action) => { return produce(state, (draftState) => { draftState.searchStrings[searchString] = true; draftState.events.push(...payload); + draftState.deleted = 0; }); } @@ -54,11 +67,13 @@ function removeDefaultSearchKeys(searchParams) { export default function Events({ path: pathname, limit = API_LIMIT } = {}) { const apiHost = useApiHost(); - const [{ events, reachedEnd, searchStrings }, dispatch] = useReducer(reducer, initialState); + const [{ events, reachedEnd, 
searchStrings, deleted }, dispatch] = useReducer(reducer, initialState); const { searchParams: initialSearchParams } = new URL(window.location); + const [viewEvent, setViewEvent] = useState(null); const [searchString, setSearchString] = useState(`${defaultSearchString(limit)}&${initialSearchParams.toString()}`); - const { data, status, deleted } = useEvents(searchString); + const { data, status, deletedId } = useEvents(searchString); + const scrollToRef = {}; useEffect(() => { if (data && !(searchString in searchStrings)) { dispatch({ type: 'APPEND_EVENTS', payload: data, meta: { searchString } }); @@ -67,7 +82,11 @@ export default function Events({ path: pathname, limit = API_LIMIT } = {}) { if (data && Array.isArray(data) && data.length + deleted < limit) { dispatch({ type: 'REACHED_END', meta: { searchString } }); } - }, [data, limit, searchString, searchStrings, deleted]); + + if (deletedId) { + dispatch({ type: 'DELETE_EVENT', deletedId }); + } + }, [data, limit, searchString, searchStrings, deleted, deletedId]); const [entry, setIntersectNode] = useIntersectionObserver(); @@ -100,7 +119,16 @@ export default function Events({ path: pathname, limit = API_LIMIT } = {}) { [limit, pathname, setSearchString] ); + const viewEventHandler = (id) => { + //Toggle event view + if (viewEvent === id) return setViewEvent(null); + + //Set event id to be rendered. + setViewEvent(id); + }; + const searchParams = useMemo(() => new URLSearchParams(searchString), [searchString]); + return (
Events @@ -123,70 +151,83 @@ export default function Events({ path: pathname, limit = API_LIMIT } = {}) { {events.map( - ( - { camera, id, label, start_time: startTime, end_time: endTime, thumbnail, top_score: score, zones }, - i - ) => { + ({ camera, id, label, start_time: startTime, end_time: endTime, top_score: score, zones }, i) => { const start = new Date(parseInt(startTime * 1000, 10)); const end = new Date(parseInt(endTime * 1000, 10)); const ref = i === events.length - 1 ? lastCellRef : undefined; return ( - - - - + + + viewEventHandler(id)} + ref={ref} + data-start-time={startTime} + data-reached-end={reachedEnd} + > + (scrollToRef[id] = el)} + width="150" + height="150" + className="cursor-pointer" + style="min-height: 48px; min-width: 48px;" + src={`${apiHost}/api/events/${id}/thumbnail.jpg`} + /> + + + + - - - - - - - - - {(score * 100).toFixed(2)}% - -
    - {zones.map((zone) => ( -
  • - -
  • - ))} -
- - {start.toLocaleDateString()} - {start.toLocaleTimeString()} - {end.toLocaleTimeString()} - + + + + + {(score * 100).toFixed(2)}% + +
    + {zones.map((zone) => ( +
  • + +
  • + ))} +
+ + {start.toLocaleDateString()} + {start.toLocaleTimeString()} + {end.toLocaleTimeString()} + + {viewEvent === id ? ( + + + setViewEvent(null)} scrollRef={scrollToRef} /> + + + ) : null} + ); } )} - + {status === FetchStatus.LOADING ? : reachedEnd ? 'No more events' : null} From 3d6dad7e7eae538a4e380bef978f7fd7cfb454ed Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Fri, 27 Aug 2021 07:26:11 -0500 Subject: [PATCH 005/132] reverse sort within a day for recordings --- web/src/components/RecordingPlaylist.jsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/src/components/RecordingPlaylist.jsx b/web/src/components/RecordingPlaylist.jsx index 69efb94cc..2ad389d85 100644 --- a/web/src/components/RecordingPlaylist.jsx +++ b/web/src/components/RecordingPlaylist.jsx @@ -21,7 +21,7 @@ export default function RecordingPlaylist({ camera, recordings, selectedDate, se events={recording.events} selected={recording.date === selectedDate} > - {recording.recordings.map((item, i) => ( + {recording.recordings.slice().reverse().map((item, i) => (
Date: Sat, 28 Aug 2021 07:42:30 -0500 Subject: [PATCH 006/132] fix match for websocket url (fixes #1633) --- frigate/output.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/frigate/output.py b/frigate/output.py index 3ae840b59..8da9d4803 100644 --- a/frigate/output.py +++ b/frigate/output.py @@ -75,8 +75,9 @@ class BroadcastThread(threading.Thread): ws_iter = iter(websockets.values()) for ws in ws_iter: - if not ws.terminated and ws.environ["PATH_INFO"].endswith( - self.camera + if ( + not ws.terminated + and ws.environ["PATH_INFO"] == f"/{self.camera}" ): try: ws.send(buf, binary=True) From 6ccff71408c0c7a8aca1074e97ecc8b4a6f7cf06 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sat, 28 Aug 2021 07:43:51 -0500 Subject: [PATCH 007/132] handle missing camera names --- frigate/record.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/frigate/record.py b/frigate/record.py index e1aed08f1..2ceda3652 100644 --- a/frigate/record.py +++ b/frigate/record.py @@ -78,7 +78,10 @@ class RecordingMaintainer(threading.Thread): start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S") # Just delete files if recordings are turned off - if not self.config.cameras[camera].record.enabled: + if ( + not camera in self.config.cameras + or not self.config.cameras[camera].record.enabled + ): Path(cache_path).unlink(missing_ok=True) continue From 0d352f3d8ac0d1e8d0213ae04480b37c9140442b Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sat, 28 Aug 2021 08:04:29 -0500 Subject: [PATCH 008/132] use model from frogfish release --- docker/Dockerfile.base | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/Dockerfile.base b/docker/Dockerfile.base index 8b025a23c..ec1b5a0aa 100644 --- a/docker/Dockerfile.base +++ b/docker/Dockerfile.base @@ -40,8 +40,8 @@ COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/ # get model and labels COPY labelmap.txt /labelmap.txt -RUN wget -q 
https://github.com/google-coral/test_data/raw/master/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite -O /edgetpu_model.tflite -RUN wget -q https://github.com/google-coral/test_data/raw/master/ssdlite_mobiledet_coco_qat_postprocess.tflite -O /cpu_model.tflite +RUN wget -q https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite -O /edgetpu_model.tflite +RUN wget -q https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite -O /cpu_model.tflite WORKDIR /opt/frigate/ ADD frigate frigate/ From 11c425a7eb35735fc487bb89978131d1e20fefd4 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sat, 28 Aug 2021 08:16:25 -0500 Subject: [PATCH 009/132] error on invalid role --- frigate/config.py | 8 +++++++- frigate/test/test_config.py | 29 +++++++++++++++++++++++++++-- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/frigate/config.py b/frigate/config.py index ea6ea3280..66035bcce 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -332,9 +332,15 @@ class FfmpegConfig(BaseModel): ) +class CameraRoleEnum(str, Enum): + record = "record" + rtmp = "rtmp" + detect = "detect" + + class CameraInput(BaseModel): path: str = Field(title="Camera input path.") - roles: List[str] = Field(title="Roles assigned to this input.") + roles: List[CameraRoleEnum] = Field(title="Roles assigned to this input.") global_args: Union[str, List[str]] = Field( default_factory=list, title="FFmpeg global arguments." 
) diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py index 20fda3a05..20cab822e 100644 --- a/frigate/test/test_config.py +++ b/frigate/test/test_config.py @@ -32,8 +32,8 @@ class TestConfig(unittest.TestCase): assert self.minimal == frigate_config.dict(exclude_unset=True) runtime_config = frigate_config.runtime_config - assert "coral" in runtime_config.detectors.keys() - assert runtime_config.detectors["coral"].type == DetectorTypeEnum.edgetpu + assert "cpu" in runtime_config.detectors.keys() + assert runtime_config.detectors["cpu"].type == DetectorTypeEnum.cpu def test_invalid_mqtt_config(self): config = { @@ -692,6 +692,31 @@ class TestConfig(unittest.TestCase): runtime_config = frigate_config.runtime_config assert runtime_config.model.merged_labelmap[0] == "person" + def test_fails_on_invalid_role(self): + + config = { + "mqtt": {"host": "mqtt"}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect", "clips"], + }, + ] + }, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, + } + }, + } + + self.assertRaises(ValidationError, lambda: FrigateConfig(**config)) + if __name__ == "__main__": unittest.main(verbosity=2) From fa5ec8d0193b104ca6d2e18d8128c06d0785614d Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sat, 28 Aug 2021 08:51:29 -0500 Subject: [PATCH 010/132] cleanup global and camera detect config (fixes #1615) --- docs/docs/configuration/cameras.md | 6 +-- frigate/config.py | 23 ++++---- frigate/test/test_config.py | 85 ++++++++++++++++++++++++++++++ 3 files changed, 102 insertions(+), 12 deletions(-) diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md index 7592a5560..b95c8caca 100644 --- a/docs/docs/configuration/cameras.md +++ b/docs/docs/configuration/cameras.md @@ -284,11 +284,11 @@ cameras: # Required: Camera level detect settings detect: - # Required: width of the frame for the input with the detect role + # 
Optional: width of the frame for the input with the detect role (default: shown below) width: 1280 - # Required: height of the frame for the input with the detect role + # Optional: height of the frame for the input with the detect role (default: shown below) height: 720 - # Required: desired fps for your camera for the input with the detect role + # Optional: desired fps for your camera for the input with the detect role (default: shown below) # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera. fps: 5 # Optional: enables detection for the camera (default: True) diff --git a/frigate/config.py b/frigate/config.py index 66035bcce..ae280b267 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -149,9 +149,11 @@ class RuntimeMotionConfig(MotionConfig): class DetectConfig(BaseModel): - height: int = Field(title="Height of the stream for the detect role.") - width: int = Field(title="Width of the stream for the detect role.") - fps: int = Field(title="Number of frames per second to process through detection.") + height: int = Field(default=720, title="Height of the stream for the detect role.") + width: int = Field(default=1280, title="Width of the stream for the detect role.") + fps: int = Field( + default=5, title="Number of frames per second to process through detection." + ) enabled: bool = Field(default=True, title="Detection Enabled.") max_disappeared: Optional[int] = Field( title="Maximum number of frames the object can dissapear before detection ends." @@ -466,7 +468,7 @@ class CameraConfig(BaseModel): default_factory=ObjectConfig, title="Object configuration." ) motion: Optional[MotionConfig] = Field(title="Motion detection configuration.") - detect: DetectConfig = Field(title="Object detection configuration.") + detect: Optional[DetectConfig] = Field(title="Object detection configuration.") timestamp_style: TimestampStyleConfig = Field( default_factory=TimestampStyleConfig, title="Timestamp style configuration." 
) @@ -701,6 +703,14 @@ class FrigateConfig(BaseModel): {"name": name, **merged_config} ) + # Default detect configuration + if camera_config.detect is None: + camera_config.detect = DetectConfig() + + max_disappeared = camera_config.detect.fps * 5 + if camera_config.detect.max_disappeared is None: + camera_config.detect.max_disappeared = max_disappeared + # FFMPEG input substitution for input in camera_config.ffmpeg.inputs: input.path = input.path.format(**FRIGATE_ENV_VARS) @@ -748,11 +758,6 @@ class FrigateConfig(BaseModel): **camera_config.motion.dict(exclude_unset=True), ) - # Default detect configuration - max_disappeared = camera_config.detect.fps * 5 - if camera_config.detect.max_disappeared is None: - camera_config.detect.max_disappeared = max_disappeared - # Default live configuration if camera_config.live is None: camera_config.live = CameraLiveConfig() diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py index 20cab822e..9e71e3d27 100644 --- a/frigate/test/test_config.py +++ b/frigate/test/test_config.py @@ -717,6 +717,91 @@ class TestConfig(unittest.TestCase): self.assertRaises(ValidationError, lambda: FrigateConfig(**config)) + def test_global_detect(self): + + config = { + "mqtt": {"host": "mqtt"}, + "detect": {"max_disappeared": 1}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + }, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, + } + }, + } + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.cameras["back"].detect.max_disappeared == 1 + assert runtime_config.cameras["back"].detect.height == 1080 + + def test_default_detect(self): + + config = { + "mqtt": {"host": "mqtt"}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + } + } + 
}, + } + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.cameras["back"].detect.max_disappeared == 25 + assert runtime_config.cameras["back"].detect.height == 720 + + def test_global_detect_merge(self): + + config = { + "mqtt": {"host": "mqtt"}, + "detect": {"max_disappeared": 1, "height": 720}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + }, + "detect": { + "height": 1080, + "width": 1920, + "fps": 5, + }, + } + }, + } + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.cameras["back"].detect.max_disappeared == 1 + assert runtime_config.cameras["back"].detect.height == 1080 + assert runtime_config.cameras["back"].detect.width == 1920 + if __name__ == "__main__": unittest.main(verbosity=2) From fbea51372f3d5afb2ebb7bddda4178f38cbdc067 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sat, 28 Aug 2021 09:14:00 -0500 Subject: [PATCH 011/132] sync global snapshot options (fixes #1621) --- docs/docs/configuration/index.md | 15 ------ frigate/config.py | 33 +++++-------- frigate/test/test_config.py | 82 ++++++++++++++++++++++++++++++++ 3 files changed, 94 insertions(+), 36 deletions(-) diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index 9128fead5..74bacde50 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -167,21 +167,6 @@ record: person: 15 ``` -## `snapshots` - -Can be overridden at the camera level. Global snapshot retention settings. 
- -```yaml -# Optional: Configuration for the jpg snapshots written to the clips directory for each event -snapshots: - retain: - # Required: Default retention days (default: shown below) - default: 10 - # Optional: Per object retention days - objects: - person: 15 -``` - ### `ffmpeg` Can be overridden at the camera level. diff --git a/frigate/config.py b/frigate/config.py index ae280b267..910e78d27 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -371,7 +371,7 @@ class CameraFfmpegConfig(FfmpegConfig): return v -class CameraSnapshotsConfig(BaseModel): +class SnapshotsConfig(BaseModel): enabled: bool = Field(default=False, title="Snapshots enabled.") clean_copy: bool = Field( default=True, title="Create a clean copy of the snapshot image." @@ -457,9 +457,11 @@ class CameraConfig(BaseModel): rtmp: CameraRtmpConfig = Field( default_factory=CameraRtmpConfig, title="RTMP restreaming configuration." ) - live: Optional[CameraLiveConfig] = Field(title="Live playback settings.") - snapshots: CameraSnapshotsConfig = Field( - default_factory=CameraSnapshotsConfig, title="Snapshot configuration." + live: CameraLiveConfig = Field( + default_factory=CameraLiveConfig, title="Live playback settings." + ) + snapshots: SnapshotsConfig = Field( + default_factory=SnapshotsConfig, title="Snapshot configuration." ) mqtt: CameraMqttConfig = Field( default_factory=CameraMqttConfig, title="MQTT configuration." @@ -468,7 +470,9 @@ class CameraConfig(BaseModel): default_factory=ObjectConfig, title="Object configuration." ) motion: Optional[MotionConfig] = Field(title="Motion detection configuration.") - detect: Optional[DetectConfig] = Field(title="Object detection configuration.") + detect: DetectConfig = Field( + default_factory=DetectConfig, title="Object detection configuration." + ) timestamp_style: TimestampStyleConfig = Field( default_factory=TimestampStyleConfig, title="Timestamp style configuration." 
) @@ -628,12 +632,6 @@ class LoggerConfig(BaseModel): ) -class SnapshotsConfig(BaseModel): - retain: RetainConfig = Field( - default_factory=RetainConfig, title="Global snapshot retention configuration." - ) - - class FrigateConfig(BaseModel): mqtt: MqttConfig = Field(title="MQTT Configuration.") database: DatabaseConfig = Field( @@ -670,8 +668,8 @@ class FrigateConfig(BaseModel): motion: Optional[MotionConfig] = Field( title="Global motion detection configuration." ) - detect: Optional[DetectConfig] = Field( - title="Global object tracking configuration." + detect: DetectConfig = Field( + default_factory=DetectConfig, title="Global object tracking configuration." ) cameras: Dict[str, CameraConfig] = Field(title="Camera configuration.") @@ -703,10 +701,7 @@ class FrigateConfig(BaseModel): {"name": name, **merged_config} ) - # Default detect configuration - if camera_config.detect is None: - camera_config.detect = DetectConfig() - + # Default max_disappeared configuration max_disappeared = camera_config.detect.fps * 5 if camera_config.detect.max_disappeared is None: camera_config.detect.max_disappeared = max_disappeared @@ -758,10 +753,6 @@ class FrigateConfig(BaseModel): **camera_config.motion.dict(exclude_unset=True), ) - # Default live configuration - if camera_config.live is None: - camera_config.live = CameraLiveConfig() - config.cameras[name] = camera_config return config diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py index 9e71e3d27..e0d09d191 100644 --- a/frigate/test/test_config.py +++ b/frigate/test/test_config.py @@ -802,6 +802,88 @@ class TestConfig(unittest.TestCase): assert runtime_config.cameras["back"].detect.height == 1080 assert runtime_config.cameras["back"].detect.width == 1920 + def test_global_snapshots(self): + + config = { + "mqtt": {"host": "mqtt"}, + "snapshots": {"enabled": True}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + }, + 
"snapshots": { + "height": 100, + }, + } + }, + } + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.cameras["back"].snapshots.enabled + assert runtime_config.cameras["back"].snapshots.height == 100 + + def test_default_snapshots(self): + + config = { + "mqtt": {"host": "mqtt"}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + } + } + }, + } + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.cameras["back"].snapshots.bounding_box + assert runtime_config.cameras["back"].snapshots.quality == 70 + + def test_global_snapshots_merge(self): + + config = { + "mqtt": {"host": "mqtt"}, + "snapshots": {"bounding_box": False, "height": 300}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + }, + "snapshots": { + "height": 150, + "enabled": True, + }, + } + }, + } + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.cameras["back"].snapshots.bounding_box == False + assert runtime_config.cameras["back"].snapshots.height == 150 + assert runtime_config.cameras["back"].snapshots.enabled + if __name__ == "__main__": unittest.main(verbosity=2) From 46fe06e779bd4aa15dfcf30c0d76e39e45e1ee6c Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sat, 28 Aug 2021 21:26:23 -0500 Subject: [PATCH 012/132] tweak vod settings for varying iframe intervals --- docker/rootfs/usr/local/nginx/conf/nginx.conf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/rootfs/usr/local/nginx/conf/nginx.conf b/docker/rootfs/usr/local/nginx/conf/nginx.conf index 
259d2668a..dc40f4883 100644 --- a/docker/rootfs/usr/local/nginx/conf/nginx.conf +++ b/docker/rootfs/usr/local/nginx/conf/nginx.conf @@ -52,6 +52,8 @@ http { vod_mode mapped; vod_max_mapping_response_size 1m; vod_upstream_location /api; + vod_align_segments_to_key_frames on; + vod_manifest_segment_durations_mode accurate; # vod caches vod_metadata_cache metadata_cache 512m; From d74021af4733d7d59c5c8eb0e4fbc50cac9bf109 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sun, 29 Aug 2021 07:46:09 -0500 Subject: [PATCH 013/132] reverse sort events within hour --- web/src/components/RecordingPlaylist.jsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/src/components/RecordingPlaylist.jsx b/web/src/components/RecordingPlaylist.jsx index 2ad389d85..da04e1770 100644 --- a/web/src/components/RecordingPlaylist.jsx +++ b/web/src/components/RecordingPlaylist.jsx @@ -35,7 +35,7 @@ export default function RecordingPlaylist({ camera, recordings, selectedDate, se
{item.events.length} Events
- {item.events.map((event) => ( + {item.events.slice().reverse().map((event) => ( ))}
From 7d65c059941038f297ad65f6e349243e7ef6e114 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Mon, 30 Aug 2021 06:58:50 -0500 Subject: [PATCH 014/132] properly handle scenario with no recordings --- frigate/record.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/frigate/record.py b/frigate/record.py index 2ceda3652..65cf8e2cc 100644 --- a/frigate/record.py +++ b/frigate/record.py @@ -10,8 +10,7 @@ import threading from pathlib import Path import psutil - -from peewee import JOIN +from peewee import JOIN, DoesNotExist from frigate.config import FrigateConfig from frigate.const import CACHE_DIR, RECORD_DIR @@ -261,15 +260,14 @@ class RecordingCleanup(threading.Thread): ) # find all the recordings older than the oldest recording in the db - oldest_recording = ( - Recordings.select().order_by(Recordings.start_time.desc()).get() - ) + try: + oldest_recording = ( + Recordings.select().order_by(Recordings.start_time.desc()).get() + ) - oldest_timestamp = ( - oldest_recording.start_time - if oldest_recording - else datetime.datetime.now().timestamp() - ) + oldest_timestamp = oldest_recording.start_time + except DoesNotExist: + oldest_timestamp = datetime.datetime.now().timestamp() logger.debug(f"Oldest recording in the db: {oldest_timestamp}") process = sp.run( From 8cc834633edd796119accf0c1e772238d569c6b4 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Wed, 1 Sep 2021 06:44:05 -0500 Subject: [PATCH 015/132] reduce db queries for recording cleanup --- frigate/record.py | 104 +++++++++++++++++++++++++--------------------- 1 file changed, 57 insertions(+), 47 deletions(-) diff --git a/frigate/record.py b/frigate/record.py index 65cf8e2cc..260374b8f 100644 --- a/frigate/record.py +++ b/frigate/record.py @@ -157,18 +157,18 @@ class RecordingCleanup(threading.Thread): logger.debug("Start deleted cameras.") # Handle deleted cameras + expire_days = self.config.record.retain_days + expire_before = ( + datetime.datetime.now() - 
datetime.timedelta(days=expire_days) + ).timestamp() no_camera_recordings: Recordings = Recordings.select().where( Recordings.camera.not_in(list(self.config.cameras.keys())), + Recordings.end_time < expire_before, ) for recording in no_camera_recordings: - expire_days = self.config.record.retain_days - expire_before = ( - datetime.datetime.now() - datetime.timedelta(days=expire_days) - ).timestamp() - if recording.end_time < expire_before: - Path(recording.path).unlink(missing_ok=True) - Recordings.delete_by_id(recording.id) + Path(recording.path).unlink(missing_ok=True) + Recordings.delete_by_id(recording.id) logger.debug("End deleted cameras.") logger.debug("Start all cameras.") @@ -185,59 +185,69 @@ class RecordingCleanup(threading.Thread): ).timestamp() expire_date = min(min_end, expire_before) - # Get recordings to remove + # Get recordings to check for expiration recordings: Recordings = Recordings.select().where( Recordings.camera == camera, Recordings.end_time < expire_date, ) - for recording in recordings: - # See if there are any associated events - events: Event = Event.select().where( - Event.camera == recording.camera, - ( - Event.start_time.between( - recording.start_time, recording.end_time - ) - | Event.end_time.between( - recording.start_time, recording.end_time - ) - | ( - (recording.start_time > Event.start_time) - & (recording.end_time < Event.end_time) - ) - ), - ) - keep = False - event_ids = set() + # Get all the events to check against + events: Event = Event.select().where( + Event.camera == camera, Event.end_time < expire_date, Event.has_clip + ) - event: Event - for event in events: - event_ids.add(event.id) - # Check event/label retention and keep the recording if within window - expire_days_event = ( - 0 - if not config.record.events.enabled - else config.record.events.retain.objects.get( - event.label, config.record.events.retain.default - ) + # mark has_clip false for all expired events + expired_event_ids = set() + for event in 
events: + # get the date that this event should expire + expire_days_event = ( + 0 + if not config.record.events.enabled + else config.record.events.retain.objects.get( + event.label, config.record.events.retain.default ) - expire_before_event = ( - datetime.datetime.now() - - datetime.timedelta(days=expire_days_event) - ).timestamp() - if recording.end_time >= expire_before_event: + ) + expire_before_event = ( + datetime.datetime.now() - datetime.timedelta(days=expire_days_event) + ).timestamp() + # if the event is expired + if event.start_time < expire_before_event: + event.has_clip = False + expired_event_ids.add(event.id) + + if expired_event_ids: + # Update associated events + Event.update(has_clip=False).where( + Event.id.in_(list(expired_event_ids)) + ).execute() + + # loop over recordings and see if they overlap with any non-expired events + for recording in recordings: + keep = False + for event in events: + if not event.has_clip: + continue + if ( + ( # event starts in this segment + event.start_time > recording.start_time + and event.start_time < recording.end_time + ) + or ( # event ends in this segment + event.end_time > recording.start_time + and event.end_time < recording.end_time + ) + or ( # event spans this segment + recording.start_time > event.start_time + and recording.end_time < event.end_time + ) + ): keep = True + break # Delete recordings outside of the retention window if not keep: Path(recording.path).unlink(missing_ok=True) Recordings.delete_by_id(recording.id) - if event_ids: - # Update associated events - Event.update(has_clip=False).where( - Event.id.in_(list(event_ids)) - ).execute() logger.debug(f"End camera: {camera}.") From a1e52c51b1b9cc479d895aa4f564a401be483d7c Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Wed, 1 Sep 2021 07:06:52 -0500 Subject: [PATCH 016/132] dont expire events in two places --- frigate/record.py | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/frigate/record.py 
b/frigate/record.py index 260374b8f..c4e132e1c 100644 --- a/frigate/record.py +++ b/frigate/record.py @@ -196,37 +196,10 @@ class RecordingCleanup(threading.Thread): Event.camera == camera, Event.end_time < expire_date, Event.has_clip ) - # mark has_clip false for all expired events - expired_event_ids = set() - for event in events: - # get the date that this event should expire - expire_days_event = ( - 0 - if not config.record.events.enabled - else config.record.events.retain.objects.get( - event.label, config.record.events.retain.default - ) - ) - expire_before_event = ( - datetime.datetime.now() - datetime.timedelta(days=expire_days_event) - ).timestamp() - # if the event is expired - if event.start_time < expire_before_event: - event.has_clip = False - expired_event_ids.add(event.id) - - if expired_event_ids: - # Update associated events - Event.update(has_clip=False).where( - Event.id.in_(list(expired_event_ids)) - ).execute() - # loop over recordings and see if they overlap with any non-expired events for recording in recordings: keep = False for event in events: - if not event.has_clip: - continue if ( ( # event starts in this segment event.start_time > recording.start_time From 8e1c15291d264079098862af1addd8d38b9acd73 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Thu, 2 Sep 2021 08:24:53 -0500 Subject: [PATCH 017/132] optimize checking recordings for events sorts events and recordings so you can avoid a cartesian product of checking all events against all recordings --- frigate/record.py | 34 ++++++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/frigate/record.py b/frigate/record.py index c4e132e1c..cb00d59a3 100644 --- a/frigate/record.py +++ b/frigate/record.py @@ -186,20 +186,38 @@ class RecordingCleanup(threading.Thread): expire_date = min(min_end, expire_before) # Get recordings to check for expiration - recordings: Recordings = Recordings.select().where( - Recordings.camera == camera, - 
Recordings.end_time < expire_date, + recordings: Recordings = ( + Recordings.select() + .where( + Recordings.camera == camera, + Recordings.end_time < expire_date, + ) + .order_by(Recordings.start_time.desc()) + .objects() ) # Get all the events to check against - events: Event = Event.select().where( - Event.camera == camera, Event.end_time < expire_date, Event.has_clip + events: Event = ( + Event.select() + .where( + Event.camera == camera, Event.end_time < expire_date, Event.has_clip + ) + .order_by(Event.start_time.desc()) + .objects() ) # loop over recordings and see if they overlap with any non-expired events + event_start = 0 + logger.debug( + f"Checking {len(recordings)} recordings against {len(events)} events" + ) for recording in recordings: keep = False - for event in events: + # since the events and recordings are sorted, we can skip events + # that start after the previous recording segment ended + for idx in range(event_start, len(events)): + event = events[idx] + # logger.debug(f"Checking event {event.id}") if ( ( # event starts in this segment event.start_time > recording.start_time @@ -217,6 +235,10 @@ class RecordingCleanup(threading.Thread): keep = True break + # if the event starts after the current recording, skip it next time + if event.start_time > recording.end_time: + event_start = idx + # Delete recordings outside of the retention window if not keep: Path(recording.path).unlink(missing_ok=True) From 56480dc1ef422898e08f95ae69092de83160be1f Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Thu, 2 Sep 2021 20:40:38 -0500 Subject: [PATCH 018/132] bulk delete recordings --- frigate/record.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/frigate/record.py b/frigate/record.py index cb00d59a3..536d72a82 100644 --- a/frigate/record.py +++ b/frigate/record.py @@ -193,7 +193,6 @@ class RecordingCleanup(threading.Thread): Recordings.end_time < expire_date, ) .order_by(Recordings.start_time.desc()) - .objects() ) # 
Get all the events to check against @@ -208,10 +207,8 @@ class RecordingCleanup(threading.Thread): # loop over recordings and see if they overlap with any non-expired events event_start = 0 - logger.debug( - f"Checking {len(recordings)} recordings against {len(events)} events" - ) - for recording in recordings: + deleted_recordings = set() + for recording in recordings.objects().iterator(): keep = False # since the events and recordings are sorted, we can skip events # that start after the previous recording segment ended @@ -242,7 +239,9 @@ class RecordingCleanup(threading.Thread): # Delete recordings outside of the retention window if not keep: Path(recording.path).unlink(missing_ok=True) - Recordings.delete_by_id(recording.id) + deleted_recordings.add(recording.id) + + (Recordings.delete().where(Recordings.id << deleted_recordings).execute()) logger.debug(f"End camera: {camera}.") From 6c28613defd8e52a46d9cfdb99bf706556309479 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Fri, 3 Sep 2021 06:31:06 -0500 Subject: [PATCH 019/132] moar speed --- frigate/record.py | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/frigate/record.py b/frigate/record.py index 536d72a82..c2c97f18b 100644 --- a/frigate/record.py +++ b/frigate/record.py @@ -214,33 +214,24 @@ class RecordingCleanup(threading.Thread): # that start after the previous recording segment ended for idx in range(event_start, len(events)): event = events[idx] - # logger.debug(f"Checking event {event.id}") - if ( - ( # event starts in this segment - event.start_time > recording.start_time - and event.start_time < recording.end_time - ) - or ( # event ends in this segment - event.end_time > recording.start_time - and event.end_time < recording.end_time - ) - or ( # event spans this segment - recording.start_time > event.start_time - and recording.end_time < event.end_time - ) - ): - keep = True + + # if the next event ends before this segment starts, break + if 
event.end_time < recording.start_time: break - # if the event starts after the current recording, skip it next time + # if the next event starts after the current segment ends, skip it if event.start_time > recording.end_time: event_start = idx + continue + + keep = True # Delete recordings outside of the retention window if not keep: Path(recording.path).unlink(missing_ok=True) deleted_recordings.add(recording.id) + logger.debug(f"Expiring {len(deleted_recordings)} recordings") (Recordings.delete().where(Recordings.id << deleted_recordings).execute()) logger.debug(f"End camera: {camera}.") From 65855e23d9455c201c090f1b6c82422ed1e04934 Mon Sep 17 00:00:00 2001 From: drinfernoo <2319508+drinfernoo@users.noreply.github.com> Date: Fri, 3 Sep 2021 05:03:36 -0700 Subject: [PATCH 020/132] Add RTMP and timestamp style to global config (#1674) * :memo::white_check_mark::wrench: - Make RTMP config global Fixes #1671 * :memo::white_check_mark::wrench: - Make timestamp style config global Fixes #1656 * fix test function names * formatter Co-authored-by: Blake Blackshear --- docs/docs/configuration/cameras.md | 8 +- docs/docs/configuration/index.md | 41 ++++++++ frigate/config.py | 15 ++- frigate/test/test_config.py | 150 +++++++++++++++++++++++++++++ 4 files changed, 210 insertions(+), 4 deletions(-) diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md index b95c8caca..50af4697f 100644 --- a/docs/docs/configuration/cameras.md +++ b/docs/docs/configuration/cameras.md @@ -215,6 +215,12 @@ Frigate can re-stream your video feed as a RTMP feed for other applications such Some video feeds are not compatible with RTMP. If you are experiencing issues, check to make sure your camera feed is h264 with AAC audio. If your camera doesn't support a compatible format for RTMP, you can use the ffmpeg args to re-encode it on the fly at the expense of increased CPU utilization. 
+```yaml +rtmp: + # Optional: Enable the RTMP stream (default: True) + enabled: True +``` + ## Timestamp style configuration For the debug view and snapshots it is possible to embed a timestamp in the feed. In some instances the default position obstructs important space, visibility or contrast is too low because of color or the datetime format does not match ones desire. @@ -357,7 +363,7 @@ cameras: # Optional: RTMP re-stream configuration rtmp: - # Required: Enable the RTMP stream (default: True) + # Optional: Enable the RTMP stream (default: True) enabled: True # Optional: Live stream configuration for WebUI diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index 74bacde50..9fb6f738b 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -233,3 +233,44 @@ birdseye: # continuous - all cameras are included always mode: objects ``` + +### `rtmp` + +Can be overridden at the camera level. See the [cameras configuration page](cameras.md) for more information about RTMP streaming. + +```yaml +rtmp: + # Optional: Enable the RTMP stream (default: True) + enabled: True +``` + +## `timestamp_style` + +Can be overridden at the camera level. See the [cameras configuration page](cameras.md) for more information about timestamp styling. 
+ +```yaml +# Optional: in-feed timestamp style configuration +timestamp_style: + # Optional: Position of the timestamp (default: shown below) + # "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right) + position: "tl" + # Optional: Format specifier conform to the Python package "datetime" (default: shown below) + # Additional Examples: + # german: "%d.%m.%Y %H:%M:%S" + format: "%m/%d/%Y %H:%M:%S" + # Optional: Color of font + color: + # All Required when color is specified (default: shown below) + red: 255 + green: 255 + blue: 255 + # Optional: Scale factor for font (default: shown below) + scale: 1.0 + # Optional: Line thickness of font (default: shown below) + thickness: 2 + # Optional: Effect of lettering (default: shown below) + # None (No effect), + # "solid" (solid background in inverse color of font) + # "shadow" (shadow for font) + effect: None +``` \ No newline at end of file diff --git a/frigate/config.py b/frigate/config.py index 910e78d27..cea3aecf3 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -432,7 +432,7 @@ class CameraMqttConfig(BaseModel): ) -class CameraRtmpConfig(BaseModel): +class RtmpConfig(BaseModel): enabled: bool = Field(default=True, title="RTMP restreaming enabled.") @@ -454,8 +454,8 @@ class CameraConfig(BaseModel): record: RecordConfig = Field( default_factory=RecordConfig, title="Record configuration." ) - rtmp: CameraRtmpConfig = Field( - default_factory=CameraRtmpConfig, title="RTMP restreaming configuration." + rtmp: RtmpConfig = Field( + default_factory=RtmpConfig, title="RTMP restreaming configuration." ) live: CameraLiveConfig = Field( default_factory=CameraLiveConfig, title="Live playback settings." @@ -656,6 +656,9 @@ class FrigateConfig(BaseModel): snapshots: SnapshotsConfig = Field( default_factory=SnapshotsConfig, title="Global snapshots configuration." ) + rtmp: RtmpConfig = Field( + default_factory=RtmpConfig, title="Global RTMP restreaming configuration." 
+ ) birdseye: BirdseyeConfig = Field( default_factory=BirdseyeConfig, title="Birdseye configuration." ) @@ -672,6 +675,10 @@ class FrigateConfig(BaseModel): default_factory=DetectConfig, title="Global object tracking configuration." ) cameras: Dict[str, CameraConfig] = Field(title="Camera configuration.") + timestamp_style: TimestampStyleConfig = Field( + default_factory=TimestampStyleConfig, + title="Global timestamp style configuration.", + ) @property def runtime_config(self) -> FrigateConfig: @@ -687,10 +694,12 @@ class FrigateConfig(BaseModel): include={ "record": ..., "snapshots": ..., + "rtmp": ..., "objects": ..., "motion": ..., "detect": ..., "ffmpeg": ..., + "timestamp_style": ..., }, exclude_unset=True, ) diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py index e0d09d191..22f27ca4d 100644 --- a/frigate/test/test_config.py +++ b/frigate/test/test_config.py @@ -884,6 +884,156 @@ class TestConfig(unittest.TestCase): assert runtime_config.cameras["back"].snapshots.height == 150 assert runtime_config.cameras["back"].snapshots.enabled + def test_global_rtmp(self): + + config = { + "mqtt": {"host": "mqtt"}, + "rtmp": {"enabled": True}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + }, + } + }, + } + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.cameras["back"].rtmp.enabled + + def test_default_rtmp(self): + + config = { + "mqtt": {"host": "mqtt"}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + } + } + }, + } + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.cameras["back"].rtmp.enabled + + def test_global_rtmp_merge(self): 
+ + config = { + "mqtt": {"host": "mqtt"}, + "rtmp": {"enabled": False}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + }, + "rtmp": { + "enabled": True, + }, + } + }, + } + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.cameras["back"].rtmp.enabled + + def test_global_timestamp_style(self): + + config = { + "mqtt": {"host": "mqtt"}, + "timestamp_style": {"position": "bl", "scale": 1.5}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + }, + } + }, + } + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.cameras["back"].timestamp_style.position == "bl" + assert runtime_config.cameras["back"].timestamp_style.scale == 1.5 + + def test_default_timestamp_style(self): + + config = { + "mqtt": {"host": "mqtt"}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + } + } + }, + } + frigate_config = FrigateConfig(**config) + assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.cameras["back"].timestamp_style.position == "tl" + assert runtime_config.cameras["back"].timestamp_style.scale == 1.0 + + def test_global_timestamp_style_merge(self): + + config = { + "mqtt": {"host": "mqtt"}, + "rtmp": {"enabled": False}, + "timestamp_style": {"position": "br", "scale": 2.0}, + "cameras": { + "back": { + "ffmpeg": { + "inputs": [ + { + "path": "rtsp://10.0.0.1:554/video", + "roles": ["detect"], + }, + ] + }, + "timestamp_style": {"position": "bl", "scale": 1.5}, + } + }, + } + frigate_config = FrigateConfig(**config) 
+ assert config == frigate_config.dict(exclude_unset=True) + + runtime_config = frigate_config.runtime_config + assert runtime_config.cameras["back"].timestamp_style.position == "bl" + assert runtime_config.cameras["back"].timestamp_style.scale == 1.5 + if __name__ == "__main__": unittest.main(verbosity=2) From faf103152ade0b6de7c65857a5391947b558fb44 Mon Sep 17 00:00:00 2001 From: Peter Campion-Bye Date: Tue, 31 Aug 2021 16:45:21 +0100 Subject: [PATCH 021/132] Update optimizing.md Need note about increasing GPU memory on Pi - otherwise ffmpeg hwaccel won't work --- docs/docs/configuration/optimizing.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/docs/configuration/optimizing.md b/docs/docs/configuration/optimizing.md index 8700b2e52..a17862eff 100644 --- a/docs/docs/configuration/optimizing.md +++ b/docs/docs/configuration/optimizing.md @@ -14,6 +14,7 @@ title: Optimizing performance Frigate works on Raspberry Pi 3b/4 and x86 machines. It is recommended to update your configuration to enable hardware accelerated decoding in ffmpeg. Depending on your system, these parameters may not be compatible. Raspberry Pi 3/4 (32-bit OS) +Ensure you increase the allocated RAM for your GPU to at least 128 (raspi-config > Performance Options > GPU Memory). **NOTICE**: If you are using the addon, ensure you turn off `Protection mode` for hardware acceleration. 
```yaml From b8df419badcde5a137c76bf71be958ada4e5abb3 Mon Sep 17 00:00:00 2001 From: Bernt Christian Egeland Date: Mon, 30 Aug 2021 18:21:20 +0200 Subject: [PATCH 022/132] hide birdseye nav if not enabled --- web/src/Sidebar.jsx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/src/Sidebar.jsx b/web/src/Sidebar.jsx index cb97c8f05..2d684f1fe 100644 --- a/web/src/Sidebar.jsx +++ b/web/src/Sidebar.jsx @@ -10,6 +10,7 @@ import NavigationDrawer, { Destination, Separator } from './components/Navigatio export default function Sidebar() { const { data: config } = useConfig(); const cameras = useMemo(() => Object.entries(config.cameras), [config]); + const { birdseye } = config; return ( }> @@ -49,7 +50,7 @@ export default function Sidebar() { ) : null } - + {birdseye?.enabled ? : null} From 00ff76a0b982bffc894bb817c23acfcd1621979b Mon Sep 17 00:00:00 2001 From: Bernt Christian Egeland Date: Fri, 3 Sep 2021 14:11:23 +0200 Subject: [PATCH 023/132] Events performance (#1645) * rearrange event route and splitted into several components * useIntersectionObserver * re-arrange * searchstring improvement * added xs tailwind breakpoint * useOuterClick hook * cleaned up * removed some video controls for mobile devices * lint * moved hooks to global folder * moved buttons for small devices * added button groups Co-authored-by: Bernt Christian Egeland --- web/src/components/AppBar.jsx | 3 +- web/src/components/Table.jsx | 11 +- web/src/components/VideoPlayer.jsx | 2 +- web/src/hooks/useClickOutside.jsx | 22 ++ web/src/hooks/useSearchString.jsx | 25 ++ web/src/index.css | 17 +- web/src/routes/Event.jsx | 119 ++++++- web/src/routes/Events.jsx | 326 ------------------ web/src/routes/Events/components/filter.jsx | 31 ++ .../routes/Events/components/filterable.jsx | 32 ++ web/src/routes/Events/components/filters.jsx | 39 +++ web/src/routes/Events/components/index.jsx | 3 + .../routes/Events/components/tableHead.jsx | 18 + web/src/routes/Events/components/tableRow.jsx 
| 119 +++++++ web/src/routes/Events/index.jsx | 107 ++++++ web/src/routes/Events/reducer.jsx | 47 +++ web/src/routes/index.js | 2 +- web/tailwind.config.js | 1 + 18 files changed, 572 insertions(+), 352 deletions(-) create mode 100644 web/src/hooks/useClickOutside.jsx create mode 100644 web/src/hooks/useSearchString.jsx delete mode 100644 web/src/routes/Events.jsx create mode 100644 web/src/routes/Events/components/filter.jsx create mode 100644 web/src/routes/Events/components/filterable.jsx create mode 100644 web/src/routes/Events/components/filters.jsx create mode 100644 web/src/routes/Events/components/index.jsx create mode 100644 web/src/routes/Events/components/tableHead.jsx create mode 100644 web/src/routes/Events/components/tableRow.jsx create mode 100644 web/src/routes/Events/index.jsx create mode 100644 web/src/routes/Events/reducer.jsx diff --git a/web/src/components/AppBar.jsx b/web/src/components/AppBar.jsx index 567fe291d..1003fee55 100644 --- a/web/src/components/AppBar.jsx +++ b/web/src/components/AppBar.jsx @@ -37,7 +37,8 @@ export default function AppBar({ title: Title, overflowRef, onOverflowClick }) { return (
+ {children} ); @@ -30,9 +30,10 @@ export function Tfoot({ children, className = '', ...attrs }) { ); } -export function Tr({ children, className = '', ...attrs }) { +export function Tr({ children, className = '', reference, ...attrs }) { return ( @@ -49,9 +50,9 @@ export function Th({ children, className = '', colspan, ...attrs }) { ); } -export function Td({ children, className = '', colspan, ...attrs }) { +export function Td({ children, className = '', reference, colspan, ...attrs }) { return ( - + {children} ); diff --git a/web/src/components/VideoPlayer.jsx b/web/src/components/VideoPlayer.jsx index 3560d964a..24ba747b5 100644 --- a/web/src/components/VideoPlayer.jsx +++ b/web/src/components/VideoPlayer.jsx @@ -88,7 +88,7 @@ export default function VideoPlayer({ children, options, seekOptions = {}, onRea return (
-
); diff --git a/web/src/hooks/useClickOutside.jsx b/web/src/hooks/useClickOutside.jsx new file mode 100644 index 000000000..8fc14660c --- /dev/null +++ b/web/src/hooks/useClickOutside.jsx @@ -0,0 +1,22 @@ +import { useEffect, useRef } from 'preact/hooks'; + +// https://stackoverflow.com/a/54292872/2693528 +export const useClickOutside = (callback) => { + const callbackRef = useRef(); // initialize mutable ref, which stores callback + const innerRef = useRef(); // returned to client, who marks "border" element + + // update cb on each render, so second useEffect has access to current value + useEffect(() => { + callbackRef.current = callback; + }); + + useEffect(() => { + document.addEventListener('click', handleClick); + return () => document.removeEventListener('click', handleClick); + function handleClick(e) { + if (innerRef.current && callbackRef.current && !innerRef.current.contains(e.target)) callbackRef.current(e); + } + }, []); + + return innerRef; // convenience for client (doesn't need to init ref himself) +}; diff --git a/web/src/hooks/useSearchString.jsx b/web/src/hooks/useSearchString.jsx new file mode 100644 index 000000000..1dde57dcc --- /dev/null +++ b/web/src/hooks/useSearchString.jsx @@ -0,0 +1,25 @@ +import { useState, useCallback } from 'preact/hooks'; + +const defaultSearchString = (limit) => `include_thumbnails=0&limit=${limit}`; + +export const useSearchString = (limit, searchParams) => { + const { searchParams: initialSearchParams } = new URL(window.location); + const _searchParams = searchParams || initialSearchParams.toString(); + + const [searchString, changeSearchString] = useState(`${defaultSearchString(limit)}&${_searchParams}`); + + const setSearchString = useCallback( + (limit, searchString) => { + changeSearchString(`${defaultSearchString(limit)}&${searchString}`); + }, + [changeSearchString] + ); + + const removeDefaultSearchKeys = useCallback((searchParams) => { + searchParams.delete('limit'); + 
searchParams.delete('include_thumbnails'); + searchParams.delete('before'); + }, []); + + return { searchString, setSearchString, removeDefaultSearchKeys }; +}; diff --git a/web/src/index.css b/web/src/index.css index 2278ef964..b7b93a69e 100644 --- a/web/src/index.css +++ b/web/src/index.css @@ -36,5 +36,20 @@ Maintain aspect ratio and scale down the video container Could not find a proper tailwind css. */ .outer-max-width { - max-width: 60%; + max-width: 70%; +} + +/* + Hide some videoplayer controls on mobile devices to + align the video player and bottom control bar properly. +*/ +@media only screen and (max-width: 700px) { + .small-player .vjs-time-control, + .small-player .vjs-time-divider { + display: none; + } + div.vjs-control-bar > .skip-back.skip-5, + div.vjs-control-bar > .skip-forward.skip-10 { + display: none; + } } diff --git a/web/src/routes/Event.jsx b/web/src/routes/Event.jsx index 06025d75e..d69a882e0 100644 --- a/web/src/routes/Event.jsx +++ b/web/src/routes/Event.jsx @@ -1,7 +1,10 @@ import { h, Fragment } from 'preact'; import { useCallback, useState, useEffect } from 'preact/hooks'; +import Link from '../components/Link'; import ActivityIndicator from '../components/ActivityIndicator'; import Button from '../components/Button'; +import ArrowDown from '../icons/ArrowDropdown'; +import ArrowDropup from '../icons/ArrowDropup'; import Clip from '../icons/Clip'; import Close from '../icons/Close'; import Delete from '../icons/Delete'; @@ -9,12 +12,46 @@ import Snapshot from '../icons/Snapshot'; import Dialog from '../components/Dialog'; import Heading from '../components/Heading'; import VideoPlayer from '../components/VideoPlayer'; +import { Table, Thead, Tbody, Th, Tr, Td } from '../components/Table'; import { FetchStatus, useApiHost, useEvent, useDelete } from '../api'; +const ActionButtonGroup = ({ className, handleClickDelete, close }) => ( +
+ + +
+); + +const DownloadButtonGroup = ({ className, apiHost, eventId }) => ( + + + + +); + export default function Event({ eventId, close, scrollRef }) { const apiHost = useApiHost(); const { data, status } = useEvent(eventId); const [showDialog, setShowDialog] = useState(false); + const [showDetails, setShowDetails] = useState(false); const [shouldScroll, setShouldScroll] = useState(true); const [deleteStatus, setDeleteStatus] = useState(FetchStatus.NONE); const setDeleteEvent = useDelete(); @@ -25,6 +62,13 @@ export default function Event({ eventId, close, scrollRef }) { scrollRef[eventId].scrollIntoView(); setShouldScroll(false); } + return () => { + // When opening new event window, the previous one will sometimes cause the + // navbar to be visible, hence the "hide nav" code bellow. + // Navbar will be hided if we add the - translate - y - full class.appBar.js + const element = document.getElementById('appbar'); + if (element) element.classList.add('-translate-y-full'); + }; }, [data, scrollRef, eventId, shouldScroll]); const handleClickDelete = () => { @@ -54,25 +98,28 @@ export default function Event({ eventId, close, scrollRef }) { return ; } + const startime = new Date(data.start_time * 1000); + const endtime = new Date(data.end_time * 1000); return (
-
-
- - -
-
- -
+ {showDialog ? ( ) : null}
-
-
+
+ {showDetails ? ( + + + + + + + + + + + + + + + + + + + + + + + +
KeyValue
Camera + {data.camera} +
Timeframe + {startime.toLocaleString()} – {endtime.toLocaleString()} +
Score{(data.top_score * 100).toFixed(2)}%
Zones{data.zones.join(', ')}
+ ) : null} +
+ +
+
{data.has_clip ? ( Clip
+
+ + +
); } diff --git a/web/src/routes/Events.jsx b/web/src/routes/Events.jsx deleted file mode 100644 index 4db9413df..000000000 --- a/web/src/routes/Events.jsx +++ /dev/null @@ -1,326 +0,0 @@ -import { h, Fragment } from 'preact'; -import ActivityIndicator from '../components/ActivityIndicator'; -import Heading from '../components/Heading'; -import Link from '../components/Link'; -import Select from '../components/Select'; -import produce from 'immer'; -import { route } from 'preact-router'; -import Event from './Event'; -import { useIntersectionObserver } from '../hooks'; -import { FetchStatus, useApiHost, useConfig, useEvents } from '../api'; -import { Table, Thead, Tbody, Tfoot, Th, Tr, Td } from '../components/Table'; -import { useCallback, useEffect, useMemo, useReducer, useState } from 'preact/hooks'; - -const API_LIMIT = 25; - -const initialState = Object.freeze({ events: [], reachedEnd: false, searchStrings: {}, deleted: 0 }); -const reducer = (state = initialState, action) => { - switch (action.type) { - case 'DELETE_EVENT': { - const { deletedId } = action; - - return produce(state, (draftState) => { - const idx = draftState.events.findIndex((e) => e.id === deletedId); - if (idx === -1) return state; - - draftState.events.splice(idx, 1); - draftState.deleted++; - }); - } - case 'APPEND_EVENTS': { - const { - meta: { searchString }, - payload, - } = action; - - return produce(state, (draftState) => { - draftState.searchStrings[searchString] = true; - draftState.events.push(...payload); - draftState.deleted = 0; - }); - } - - case 'REACHED_END': { - const { - meta: { searchString }, - } = action; - return produce(state, (draftState) => { - draftState.reachedEnd = true; - draftState.searchStrings[searchString] = true; - }); - } - - case 'RESET': - return initialState; - - default: - return state; - } -}; - -const defaultSearchString = (limit) => `include_thumbnails=0&limit=${limit}`; -function removeDefaultSearchKeys(searchParams) { - 
searchParams.delete('limit'); - searchParams.delete('include_thumbnails'); - searchParams.delete('before'); -} - -export default function Events({ path: pathname, limit = API_LIMIT } = {}) { - const apiHost = useApiHost(); - const [{ events, reachedEnd, searchStrings, deleted }, dispatch] = useReducer(reducer, initialState); - const { searchParams: initialSearchParams } = new URL(window.location); - const [viewEvent, setViewEvent] = useState(null); - const [searchString, setSearchString] = useState(`${defaultSearchString(limit)}&${initialSearchParams.toString()}`); - const { data, status, deletedId } = useEvents(searchString); - - const scrollToRef = {}; - useEffect(() => { - if (data && !(searchString in searchStrings)) { - dispatch({ type: 'APPEND_EVENTS', payload: data, meta: { searchString } }); - } - - if (data && Array.isArray(data) && data.length + deleted < limit) { - dispatch({ type: 'REACHED_END', meta: { searchString } }); - } - - if (deletedId) { - dispatch({ type: 'DELETE_EVENT', deletedId }); - } - }, [data, limit, searchString, searchStrings, deleted, deletedId]); - - const [entry, setIntersectNode] = useIntersectionObserver(); - - useEffect(() => { - if (entry && entry.isIntersecting) { - const { startTime } = entry.target.dataset; - const { searchParams } = new URL(window.location); - searchParams.set('before', parseFloat(startTime) - 0.0001); - - setSearchString(`${defaultSearchString(limit)}&${searchParams.toString()}`); - } - }, [entry, limit]); - - const lastCellRef = useCallback( - (node) => { - if (node !== null && !reachedEnd) { - setIntersectNode(node); - } - }, - [setIntersectNode, reachedEnd] - ); - - const handleFilter = useCallback( - (searchParams) => { - dispatch({ type: 'RESET' }); - removeDefaultSearchKeys(searchParams); - setSearchString(`${defaultSearchString(limit)}&${searchParams.toString()}`); - route(`${pathname}?${searchParams.toString()}`); - }, - [limit, pathname, setSearchString] - ); - - const viewEventHandler = (id) => { 
- //Toggle event view - if (viewEvent === id) return setViewEvent(null); - - //Set event id to be rendered. - setViewEvent(id); - }; - - const searchParams = useMemo(() => new URLSearchParams(searchString), [searchString]); - - return ( -
- Events - - - -
- - - - - - - - - - - - - - {events.map( - ({ camera, id, label, start_time: startTime, end_time: endTime, top_score: score, zones }, i) => { - const start = new Date(parseInt(startTime * 1000, 10)); - const end = new Date(parseInt(endTime * 1000, 10)); - const ref = i === events.length - 1 ? lastCellRef : undefined; - return ( - - - - - - - - - - - - {viewEvent === id ? ( - - - - ) : null} - - ); - } - )} - - - - - - -
- CameraLabelScoreZonesDateStartEnd
- viewEventHandler(id)} - ref={ref} - data-start-time={startTime} - data-reached-end={reachedEnd} - > - (scrollToRef[id] = el)} - width="150" - height="150" - className="cursor-pointer" - style="min-height: 48px; min-width: 48px;" - src={`${apiHost}/api/events/${id}/thumbnail.jpg`} - /> - - - - - - {(score * 100).toFixed(2)}% -
    - {zones.map((zone) => ( -
  • - -
  • - ))} -
-
{start.toLocaleDateString()}{start.toLocaleTimeString()}{end.toLocaleTimeString()}
- setViewEvent(null)} scrollRef={scrollToRef} /> -
- {status === FetchStatus.LOADING ? : reachedEnd ? 'No more events' : null} -
-
-
- ); -} - -function Filterable({ onFilter, pathname, searchParams, paramName, name }) { - const href = useMemo(() => { - const params = new URLSearchParams(searchParams.toString()); - params.set(paramName, name); - removeDefaultSearchKeys(params); - return `${pathname}?${params.toString()}`; - }, [searchParams, paramName, pathname, name]); - - const handleClick = useCallback( - (event) => { - event.preventDefault(); - route(href, true); - const params = new URLSearchParams(searchParams.toString()); - params.set(paramName, name); - onFilter(params); - }, - [href, searchParams, onFilter, paramName, name] - ); - - return ( - - {name} - - ); -} - -function Filters({ onChange, searchParams }) { - const { data } = useConfig(); - - const cameras = useMemo(() => Object.keys(data.cameras), [data]); - - const zones = useMemo( - () => - Object.values(data.cameras) - .reduce((memo, camera) => { - memo = memo.concat(Object.keys(camera.zones)); - return memo; - }, []) - .filter((value, i, self) => self.indexOf(value) === i), - [data] - ); - - const labels = useMemo(() => { - return Object.values(data.cameras) - .reduce((memo, camera) => { - memo = memo.concat(camera.objects?.track || []); - return memo; - }, data.objects?.track || []) - .filter((value, i, self) => self.indexOf(value) === i); - }, [data]); - - return ( -
- - - -
- ); -} - -function Filter({ onChange, searchParams, paramName, options }) { - const handleSelect = useCallback( - (key) => { - const newParams = new URLSearchParams(searchParams.toString()); - if (key !== 'all') { - newParams.set(paramName, key); - } else { - newParams.delete(paramName); - } - - onChange(newParams); - }, - [searchParams, paramName, onChange] - ); - - const selectOptions = useMemo(() => ['all', ...options], [options]); - - return ( - + ); +}; +export default Filter; diff --git a/web/src/routes/Events/components/filterable.jsx b/web/src/routes/Events/components/filterable.jsx new file mode 100644 index 000000000..b23e38eea --- /dev/null +++ b/web/src/routes/Events/components/filterable.jsx @@ -0,0 +1,32 @@ +import { h } from 'preact'; +import { useCallback, useMemo } from 'preact/hooks'; +import Link from '../../../components/Link'; +import { route } from 'preact-router'; + +const Filterable = ({ onFilter, pathname, searchParams, paramName, name, removeDefaultSearchKeys }) => { + const href = useMemo(() => { + const params = new URLSearchParams(searchParams.toString()); + params.set(paramName, name); + removeDefaultSearchKeys(params); + return `${pathname}?${params.toString()}`; + }, [searchParams, paramName, pathname, name, removeDefaultSearchKeys]); + + const handleClick = useCallback( + (event) => { + event.preventDefault(); + route(href, true); + const params = new URLSearchParams(searchParams.toString()); + params.set(paramName, name); + onFilter(params); + }, + [href, searchParams, onFilter, paramName, name] + ); + + return ( + + {name} + + ); +}; + +export default Filterable; diff --git a/web/src/routes/Events/components/filters.jsx b/web/src/routes/Events/components/filters.jsx new file mode 100644 index 000000000..e08b4ea65 --- /dev/null +++ b/web/src/routes/Events/components/filters.jsx @@ -0,0 +1,39 @@ +import { h } from 'preact'; +import Filter from './filter'; +import { useConfig } from '../../../api'; +import { useMemo } from 
'preact/hooks'; + +const Filters = ({ onChange, searchParams }) => { + const { data } = useConfig(); + + const cameras = useMemo(() => Object.keys(data.cameras), [data]); + + const zones = useMemo( + () => + Object.values(data.cameras) + .reduce((memo, camera) => { + memo = memo.concat(Object.keys(camera.zones)); + return memo; + }, []) + .filter((value, i, self) => self.indexOf(value) === i), + [data] + ); + + const labels = useMemo(() => { + return Object.values(data.cameras) + .reduce((memo, camera) => { + memo = memo.concat(camera.objects?.track || []); + return memo; + }, data.objects?.track || []) + .filter((value, i, self) => self.indexOf(value) === i); + }, [data]); + + return ( +
+ + + +
+ ); +}; +export default Filters; diff --git a/web/src/routes/Events/components/index.jsx b/web/src/routes/Events/components/index.jsx new file mode 100644 index 000000000..6c03b671f --- /dev/null +++ b/web/src/routes/Events/components/index.jsx @@ -0,0 +1,3 @@ +export { default as TableHead } from './tableHead'; +export { default as TableRow } from './tableRow'; +export { default as Filters } from './filters'; diff --git a/web/src/routes/Events/components/tableHead.jsx b/web/src/routes/Events/components/tableHead.jsx new file mode 100644 index 000000000..69d60d65b --- /dev/null +++ b/web/src/routes/Events/components/tableHead.jsx @@ -0,0 +1,18 @@ +import { h } from 'preact'; +import { Thead, Th, Tr } from '../../../components/Table'; + +const TableHead = () => ( + + + + Camera + Label + Score + Zones + Date + Start + End + + +); +export default TableHead; diff --git a/web/src/routes/Events/components/tableRow.jsx b/web/src/routes/Events/components/tableRow.jsx new file mode 100644 index 000000000..262f3408a --- /dev/null +++ b/web/src/routes/Events/components/tableRow.jsx @@ -0,0 +1,119 @@ +import { h } from 'preact'; +import { memo } from 'preact/compat'; +import { useCallback, useState, useMemo } from 'preact/hooks'; +import { Tr, Td, Tbody } from '../../../components/Table'; +import Filterable from './filterable'; +import Event from '../../Event'; +import { useSearchString } from '../../../hooks/useSearchString'; +import { useClickOutside } from '../../../hooks/useClickOutside'; + +const EventsRow = memo( + ({ + id, + apiHost, + start_time: startTime, + end_time: endTime, + scrollToRef, + lastRowRef, + handleFilter, + pathname, + limit, + camera, + label, + top_score: score, + zones, + }) => { + const [viewEvent, setViewEvent] = useState(null); + const { searchString, removeDefaultSearchKeys } = useSearchString(limit); + const searchParams = useMemo(() => new URLSearchParams(searchString), [searchString]); + + const innerRef = useClickOutside(() => { + 
setViewEvent(null); + }); + + const viewEventHandler = useCallback( + (id) => { + //Toggle event view + if (viewEvent === id) return setViewEvent(null); + //Set event id to be rendered. + setViewEvent(id); + }, + [viewEvent] + ); + + const start = new Date(parseInt(startTime * 1000, 10)); + const end = new Date(parseInt(endTime * 1000, 10)); + + return ( + + + + viewEventHandler(id)} + ref={lastRowRef} + data-start-time={startTime} + // data-reached-end={reachedEnd} <-- Enable this will cause all events to re-render when reaching end. + > + + + + + + + + + + {(score * 100).toFixed(2)}% + +
    + {zones.map((zone) => ( +
  • + +
  • + ))} +
+ + {start.toLocaleDateString()} + {start.toLocaleTimeString()} + {end.toLocaleTimeString()} + + {viewEvent === id ? ( + + (scrollToRef[id] = el)}> + setViewEvent(null)} scrollRef={scrollToRef} /> + + + ) : null} + + ); + } +); + +export default EventsRow; diff --git a/web/src/routes/Events/index.jsx b/web/src/routes/Events/index.jsx new file mode 100644 index 000000000..0f0c03cb8 --- /dev/null +++ b/web/src/routes/Events/index.jsx @@ -0,0 +1,107 @@ +import { h } from 'preact'; +import ActivityIndicator from '../../components/ActivityIndicator'; +import Heading from '../../components/Heading'; +import { TableHead, Filters, TableRow } from './components'; +import { route } from 'preact-router'; +import { FetchStatus, useApiHost, useEvents } from '../../api'; +import { Table, Tfoot, Tr, Td } from '../../components/Table'; +import { useCallback, useEffect, useMemo, useReducer } from 'preact/hooks'; +import { reducer, initialState } from './reducer'; +import { useSearchString } from '../../hooks/useSearchString'; +import { useIntersectionObserver } from '../../hooks'; + +const API_LIMIT = 25; + +export default function Events({ path: pathname, limit = API_LIMIT } = {}) { + const apiHost = useApiHost(); + const { searchString, setSearchString, removeDefaultSearchKeys } = useSearchString(limit); + const [{ events, reachedEnd, searchStrings, deleted }, dispatch] = useReducer(reducer, initialState); + const { data, status, deletedId } = useEvents(searchString); + + const scrollToRef = useMemo(() => Object, []); + + useEffect(() => { + if (data && !(searchString in searchStrings)) { + dispatch({ type: 'APPEND_EVENTS', payload: data, meta: { searchString } }); + } + + if (data && Array.isArray(data) && data.length + deleted < limit) { + dispatch({ type: 'REACHED_END', meta: { searchString } }); + } + + if (deletedId) { + dispatch({ type: 'DELETE_EVENT', deletedId }); + } + }, [data, limit, searchString, searchStrings, deleted, deletedId]); + + const [entry, setIntersectNode] 
= useIntersectionObserver(); + + useEffect(() => { + if (entry && entry.isIntersecting) { + const { startTime } = entry.target.dataset; + const { searchParams } = new URL(window.location); + searchParams.set('before', parseFloat(startTime) - 0.0001); + setSearchString(limit, searchParams.toString()); + } + }, [entry, limit, setSearchString]); + + const lastCellRef = useCallback( + (node) => { + if (node !== null && !reachedEnd) { + setIntersectNode(node); + } + }, + [setIntersectNode, reachedEnd] + ); + + const handleFilter = useCallback( + (searchParams) => { + dispatch({ type: 'RESET' }); + removeDefaultSearchKeys(searchParams); + setSearchString(limit, searchParams.toString()); + route(`${pathname}?${searchParams.toString()}`); + }, + [limit, pathname, setSearchString, removeDefaultSearchKeys] + ); + + const searchParams = useMemo(() => new URLSearchParams(searchString), [searchString]); + + const RenderTableRow = useCallback( + (props) => ( + + ), + [apiHost, handleFilter, pathname, scrollToRef] + ); + return ( +
+ Events + +
+ + + + {events.map((props, idx) => { + const lastRowRef = idx === events.length - 1 ? lastCellRef : undefined; + return ; + })} + + + + + + +
+ {status === FetchStatus.LOADING ? : reachedEnd ? 'No more events' : null} +
+
+
+ ); +} diff --git a/web/src/routes/Events/reducer.jsx b/web/src/routes/Events/reducer.jsx new file mode 100644 index 000000000..8dce7cdb7 --- /dev/null +++ b/web/src/routes/Events/reducer.jsx @@ -0,0 +1,47 @@ +import produce from 'immer'; + +export const initialState = Object.freeze({ events: [], reachedEnd: false, searchStrings: {}, deleted: 0 }); + +export const reducer = (state = initialState, action) => { + switch (action.type) { + case 'DELETE_EVENT': { + const { deletedId } = action; + + return produce(state, (draftState) => { + const idx = draftState.events.findIndex((e) => e.id === deletedId); + if (idx === -1) return state; + + draftState.events.splice(idx, 1); + draftState.deleted++; + }); + } + case 'APPEND_EVENTS': { + const { + meta: { searchString }, + payload, + } = action; + + return produce(state, (draftState) => { + draftState.searchStrings[searchString] = true; + draftState.events.push(...payload); + draftState.deleted = 0; + }); + } + + case 'REACHED_END': { + const { + meta: { searchString }, + } = action; + return produce(state, (draftState) => { + draftState.reachedEnd = true; + draftState.searchStrings[searchString] = true; + }); + } + + case 'RESET': + return initialState; + + default: + return state; + } +}; diff --git a/web/src/routes/index.js b/web/src/routes/index.js index c5b40de8c..d9b776d87 100644 --- a/web/src/routes/index.js +++ b/web/src/routes/index.js @@ -19,7 +19,7 @@ export async function getBirdseye(url, cb, props) { } export async function getEvents(url, cb, props) { - const module = await import('./Events.jsx'); + const module = await import('./Events'); return module.default; } diff --git a/web/tailwind.config.js b/web/tailwind.config.js index 025c0255c..1a1775327 100644 --- a/web/tailwind.config.js +++ b/web/tailwind.config.js @@ -4,6 +4,7 @@ module.exports = { theme: { extend: { screens: { + xs: '480px', '2xl': '1536px', '3xl': '1720px', }, From 7fc5297f602aa5ddf1c4bdb443f2246b6c122cb4 Mon Sep 17 00:00:00 2001 From: 
Blake Blackshear Date: Fri, 3 Sep 2021 07:13:05 -0500 Subject: [PATCH 024/132] aarch64 makefile fix --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3cf7d0fb8..4173e089a 100644 --- a/Makefile +++ b/Makefile @@ -45,7 +45,7 @@ aarch64_frigate: version web docker build --no-cache --tag frigate-base --build-arg ARCH=aarch64 --build-arg FFMPEG_VERSION=1.0.0 --build-arg WHEELS_VERSION=1.0.3 --build-arg NGINX_VERSION=1.0.2 --file docker/Dockerfile.base . docker build --no-cache --tag frigate --file docker/Dockerfile.aarch64 . -armv7_all: armv7_wheels armv7_ffmpeg armv7_frigate +aarch64_all: aarch64_wheels aarch64_ffmpeg aarch64_frigate armv7_wheels: docker build --tag blakeblackshear/frigate-wheels:1.0.3-armv7 --file docker/Dockerfile.wheels . From f63a7cb6c0e16abc4bd7d6ffc655dc6f7d528656 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sat, 4 Sep 2021 16:34:48 -0500 Subject: [PATCH 025/132] remove font_scale in timestamp_style and calculate dynamically again --- docs/docs/configuration/cameras.md | 44 ++++++++++++++---------------- frigate/config.py | 19 +++++++++++-- frigate/object_processing.py | 2 -- frigate/util.py | 16 ++++++++++- 4 files changed, 51 insertions(+), 30 deletions(-) diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md index 50af4697f..dc1c18307 100644 --- a/docs/docs/configuration/cameras.md +++ b/docs/docs/configuration/cameras.md @@ -241,8 +241,6 @@ timestamp_style: red: 255 green: 255 blue: 255 - # Optional: Scale factor for font (default: shown below) - scale: 1.0 # Optional: Line thickness of font (default: shown below) thickness: 2 # Optional: Effect of lettering (default: shown below) @@ -439,28 +437,26 @@ cameras: # Optional: In-feed timestamp style configuration timestamp_style: - # Optional: Position of the timestamp (default: shown below) - # "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right) - position: "tl" - # Optional: 
Format specifier conform to the Python package "datetime" (default: shown below) - # Additional Examples: - # german: "%d.%m.%Y %H:%M:%S" - format: "%m/%d/%Y %H:%M:%S" - # Optional: Color of font - color: - # All Required when color is specified (default: shown below) - red: 255 - green: 255 - blue: 255 - # Optional: Scale factor for font (default: shown below) - scale: 1.0 - # Optional: Line thickness of font (default: shown below) - thickness: 2 - # Optional: Effect of lettering (default: shown below) - # None (No effect), - # "solid" (solid background in inverse color of font) - # "shadow" (shadow for font) - effect: None + # Optional: Position of the timestamp (default: shown below) + # "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right) + position: "tl" + # Optional: Format specifier conform to the Python package "datetime" (default: shown below) + # Additional Examples: + # german: "%d.%m.%Y %H:%M:%S" + format: "%m/%d/%Y %H:%M:%S" + # Optional: Color of font + color: + # All Required when color is specified (default: shown below) + red: 255 + green: 255 + blue: 255 + # Optional: Line thickness of font (default: shown below) + thickness: 2 + # Optional: Effect of lettering (default: shown below) + # None (No effect), + # "solid" (solid background in inverse color of font) + # "shadow" (shadow for font) + effect: None ``` ## Camera specific configuration diff --git a/frigate/config.py b/frigate/config.py index cea3aecf3..49932e609 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -405,13 +405,26 @@ class ColorConfig(BaseModel): blue: int = Field(default=255, le=0, ge=255, title="Blue") +class TimestampPositionEnum(str, Enum): + tl = "tl" + tr = "tr" + bl = "bl" + br = "br" + + +class TimestampEffectEnum(str, Enum): + solid = "solid" + shadow = "shadow" + + class TimestampStyleConfig(BaseModel): - position: str = Field(default="tl", title="Timestamp position.") + position: TimestampPositionEnum = Field( + 
default=TimestampPositionEnum.tl, title="Timestamp position." + ) format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.") color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.") - scale: float = Field(default=1.0, title="Timestamp scale.") thickness: int = Field(default=2, title="Timestamp thickness.") - effect: Optional[str] = Field(title="Timestamp effect.") + effect: Optional[TimestampEffectEnum] = Field(title="Timestamp effect.") class CameraMqttConfig(BaseModel): diff --git a/frigate/object_processing.py b/frigate/object_processing.py index 4ebd20870..e5c7f1e5b 100644 --- a/frigate/object_processing.py +++ b/frigate/object_processing.py @@ -275,7 +275,6 @@ class TrackedObject: self.thumbnail_data["frame_time"], self.camera_config.timestamp_style.format, font_effect=self.camera_config.timestamp_style.effect, - font_scale=self.camera_config.timestamp_style.scale, font_thickness=self.camera_config.timestamp_style.thickness, font_color=(color.red, color.green, color.blue), position=self.camera_config.timestamp_style.position, @@ -411,7 +410,6 @@ class CameraState: frame_time, self.camera_config.timestamp_style.format, font_effect=self.camera_config.timestamp_style.effect, - font_scale=self.camera_config.timestamp_style.scale, font_thickness=self.camera_config.timestamp_style.thickness, font_color=(color.red, color.green, color.blue), position=self.camera_config.timestamp_style.position, diff --git a/frigate/util.py b/frigate/util.py index dae3845f2..9c93285be 100755 --- a/frigate/util.py +++ b/frigate/util.py @@ -51,18 +51,32 @@ def draw_timestamp( timestamp, timestamp_format, font_effect=None, - font_scale=1.0, font_thickness=2, font_color=(255, 255, 255), position="tl", ): time_to_show = datetime.datetime.fromtimestamp(timestamp).strftime(timestamp_format) + + # calculate a dynamic font size + size = cv2.getTextSize( + time_to_show, + cv2.FONT_HERSHEY_SIMPLEX, + fontScale=1.0, + thickness=font_thickness, + ) + + 
text_width = size[0][0] + desired_size = max(150, 0.33 * frame.shape[1]) + font_scale = desired_size / text_width + + # calculate the actual size with the dynamic scale size = cv2.getTextSize( time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=font_thickness, ) + image_width = frame.shape[1] image_height = frame.shape[0] text_width = size[0][0] From 8109445fdd57cfac98b85c7c559769988519f213 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sat, 4 Sep 2021 16:39:56 -0500 Subject: [PATCH 026/132] fix color config for ts (fixes #1679) --- frigate/config.py | 6 +++--- frigate/object_processing.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/frigate/config.py b/frigate/config.py index 49932e609..8c9052948 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -400,9 +400,9 @@ class SnapshotsConfig(BaseModel): class ColorConfig(BaseModel): - red: int = Field(default=255, le=0, ge=255, title="Red") - green: int = Field(default=255, le=0, ge=255, title="Green") - blue: int = Field(default=255, le=0, ge=255, title="Blue") + red: int = Field(default=255, ge=0, le=255, title="Red") + green: int = Field(default=255, ge=0, le=255, title="Green") + blue: int = Field(default=255, ge=0, le=255, title="Blue") class TimestampPositionEnum(str, Enum): diff --git a/frigate/object_processing.py b/frigate/object_processing.py index e5c7f1e5b..38bacc0e2 100644 --- a/frigate/object_processing.py +++ b/frigate/object_processing.py @@ -276,7 +276,7 @@ class TrackedObject: self.camera_config.timestamp_style.format, font_effect=self.camera_config.timestamp_style.effect, font_thickness=self.camera_config.timestamp_style.thickness, - font_color=(color.red, color.green, color.blue), + font_color=(color.blue, color.green, color.red), position=self.camera_config.timestamp_style.position, ) @@ -411,7 +411,7 @@ class CameraState: self.camera_config.timestamp_style.format, font_effect=self.camera_config.timestamp_style.effect, 
font_thickness=self.camera_config.timestamp_style.thickness, - font_color=(color.red, color.green, color.blue), + font_color=(color.blue, color.green, color.red), position=self.camera_config.timestamp_style.position, ) From e8eb3125a5fbad301baa5be9d841d3c6bc765864 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sat, 4 Sep 2021 16:56:01 -0500 Subject: [PATCH 027/132] disallow extra keys in config --- frigate/config.py | 58 +++++++++++++++++++++---------------- frigate/test/test_config.py | 10 +++---- 2 files changed, 37 insertions(+), 31 deletions(-) diff --git a/frigate/config.py b/frigate/config.py index 8c9052948..8d54a441d 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -9,7 +9,7 @@ from typing import Dict, List, Optional, Tuple, Union import matplotlib.pyplot as plt import numpy as np import yaml -from pydantic import BaseModel, Field, validator +from pydantic import BaseModel, Extra, Field, validator from pydantic.fields import PrivateAttr from frigate.const import BASE_DIR, CACHE_DIR, RECORD_DIR @@ -29,18 +29,23 @@ DEFAULT_TRACKED_OBJECTS = ["person"] DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}} +class FrigateBaseModel(BaseModel): + class Config: + extra = Extra.forbid + + class DetectorTypeEnum(str, Enum): edgetpu = "edgetpu" cpu = "cpu" -class DetectorConfig(BaseModel): +class DetectorConfig(FrigateBaseModel): type: DetectorTypeEnum = Field(default=DetectorTypeEnum.cpu, title="Detector Type") device: str = Field(default="usb", title="Device Type") num_threads: int = Field(default=3, title="Number of detection threads") -class MqttConfig(BaseModel): +class MqttConfig(FrigateBaseModel): host: str = Field(title="MQTT Host") port: int = Field(default=1883, title="MQTT Port") topic_prefix: str = Field(default="frigate", title="MQTT Topic Prefix") @@ -60,7 +65,7 @@ class MqttConfig(BaseModel): return v -class RetainConfig(BaseModel): +class RetainConfig(FrigateBaseModel): default: int = Field(default=10, title="Default retention period.") objects: 
Dict[str, int] = Field( default_factory=dict, title="Object retention period." @@ -68,7 +73,7 @@ class RetainConfig(BaseModel): # DEPRECATED: Will eventually be removed -class ClipsConfig(BaseModel): +class ClipsConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Save clips.") max_seconds: int = Field(default=300, title="Maximum clip duration.") pre_capture: int = Field(default=5, title="Seconds to capture before event starts.") @@ -85,7 +90,7 @@ class ClipsConfig(BaseModel): ) -class RecordConfig(BaseModel): +class RecordConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable record on all cameras.") retain_days: int = Field(default=0, title="Recording retention period in days.") events: ClipsConfig = Field( @@ -93,7 +98,7 @@ class RecordConfig(BaseModel): ) -class MotionConfig(BaseModel): +class MotionConfig(FrigateBaseModel): threshold: int = Field( default=25, title="Motion detection threshold (1-255).", @@ -146,9 +151,10 @@ class RuntimeMotionConfig(MotionConfig): class Config: arbitrary_types_allowed = True + extra = Extra.ignore -class DetectConfig(BaseModel): +class DetectConfig(FrigateBaseModel): height: int = Field(default=720, title="Height of the stream for the detect role.") width: int = Field(default=1280, title="Width of the stream for the detect role.") fps: int = Field( @@ -160,7 +166,7 @@ class DetectConfig(BaseModel): ) -class FilterConfig(BaseModel): +class FilterConfig(FrigateBaseModel): min_area: int = Field( default=0, title="Minimum area of bounding box for object to be counted." ) @@ -201,8 +207,10 @@ class RuntimeFilterConfig(FilterConfig): class Config: arbitrary_types_allowed = True + extra = Extra.ignore +# this uses the base model because the color is an extra attribute class ZoneConfig(BaseModel): filters: Dict[str, FilterConfig] = Field( default_factory=dict, title="Zone filters." 
@@ -244,7 +252,7 @@ class ZoneConfig(BaseModel): self._contour = np.array([]) -class ObjectConfig(BaseModel): +class ObjectConfig(FrigateBaseModel): track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.") filters: Optional[Dict[str, FilterConfig]] = Field(title="Object filters.") mask: Union[str, List[str]] = Field(default="", title="Object mask.") @@ -256,7 +264,7 @@ class BirdseyeModeEnum(str, Enum): continuous = "continuous" -class BirdseyeConfig(BaseModel): +class BirdseyeConfig(FrigateBaseModel): enabled: bool = Field(default=True, title="Enable birdseye view.") width: int = Field(default=1280, title="Birdseye width.") height: int = Field(default=720, title="Birdseye height.") @@ -303,7 +311,7 @@ RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = [ ] -class FfmpegOutputArgsConfig(BaseModel): +class FfmpegOutputArgsConfig(FrigateBaseModel): detect: Union[str, List[str]] = Field( default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT, title="Detect role FFmpeg output arguments.", @@ -318,7 +326,7 @@ class FfmpegOutputArgsConfig(BaseModel): ) -class FfmpegConfig(BaseModel): +class FfmpegConfig(FrigateBaseModel): global_args: Union[str, List[str]] = Field( default=FFMPEG_GLOBAL_ARGS_DEFAULT, title="Global FFmpeg arguments." ) @@ -340,7 +348,7 @@ class CameraRoleEnum(str, Enum): detect = "detect" -class CameraInput(BaseModel): +class CameraInput(FrigateBaseModel): path: str = Field(title="Camera input path.") roles: List[CameraRoleEnum] = Field(title="Roles assigned to this input.") global_args: Union[str, List[str]] = Field( @@ -371,7 +379,7 @@ class CameraFfmpegConfig(FfmpegConfig): return v -class SnapshotsConfig(BaseModel): +class SnapshotsConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Snapshots enabled.") clean_copy: bool = Field( default=True, title="Create a clean copy of the snapshot image." 
@@ -399,7 +407,7 @@ class SnapshotsConfig(BaseModel): ) -class ColorConfig(BaseModel): +class ColorConfig(FrigateBaseModel): red: int = Field(default=255, ge=0, le=255, title="Red") green: int = Field(default=255, ge=0, le=255, title="Green") blue: int = Field(default=255, ge=0, le=255, title="Blue") @@ -417,7 +425,7 @@ class TimestampEffectEnum(str, Enum): shadow = "shadow" -class TimestampStyleConfig(BaseModel): +class TimestampStyleConfig(FrigateBaseModel): position: TimestampPositionEnum = Field( default=TimestampPositionEnum.tl, title="Timestamp position." ) @@ -427,7 +435,7 @@ class TimestampStyleConfig(BaseModel): effect: Optional[TimestampEffectEnum] = Field(title="Timestamp effect.") -class CameraMqttConfig(BaseModel): +class CameraMqttConfig(FrigateBaseModel): enabled: bool = Field(default=True, title="Send image over MQTT.") timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.") bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.") @@ -445,16 +453,16 @@ class CameraMqttConfig(BaseModel): ) -class RtmpConfig(BaseModel): +class RtmpConfig(FrigateBaseModel): enabled: bool = Field(default=True, title="RTMP restreaming enabled.") -class CameraLiveConfig(BaseModel): +class CameraLiveConfig(FrigateBaseModel): height: int = Field(default=720, title="Live camera view height") quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality") -class CameraConfig(BaseModel): +class CameraConfig(FrigateBaseModel): name: Optional[str] = Field(title="Camera name.") ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.") best_image_timeout: int = Field( @@ -590,13 +598,13 @@ class CameraConfig(BaseModel): return [part for part in cmd if part != ""] -class DatabaseConfig(BaseModel): +class DatabaseConfig(FrigateBaseModel): path: str = Field( default=os.path.join(BASE_DIR, "frigate.db"), title="Database path." 
) -class ModelConfig(BaseModel): +class ModelConfig(FrigateBaseModel): width: int = Field(default=320, title="Object detection model input width.") height: int = Field(default=320, title="Object detection model input height.") labelmap: Dict[int, str] = Field( @@ -636,7 +644,7 @@ class LogLevelEnum(str, Enum): critical = "critical" -class LoggerConfig(BaseModel): +class LoggerConfig(FrigateBaseModel): default: LogLevelEnum = Field( default=LogLevelEnum.info, title="Default logging level." ) @@ -645,7 +653,7 @@ class LoggerConfig(BaseModel): ) -class FrigateConfig(BaseModel): +class FrigateConfig(FrigateBaseModel): mqtt: MqttConfig = Field(title="MQTT Configuration.") database: DatabaseConfig = Field( default_factory=DatabaseConfig, title="Database configuration." diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py index 22f27ca4d..8bf96736a 100644 --- a/frigate/test/test_config.py +++ b/frigate/test/test_config.py @@ -962,7 +962,7 @@ class TestConfig(unittest.TestCase): config = { "mqtt": {"host": "mqtt"}, - "timestamp_style": {"position": "bl", "scale": 1.5}, + "timestamp_style": {"position": "bl"}, "cameras": { "back": { "ffmpeg": { @@ -981,7 +981,6 @@ class TestConfig(unittest.TestCase): runtime_config = frigate_config.runtime_config assert runtime_config.cameras["back"].timestamp_style.position == "bl" - assert runtime_config.cameras["back"].timestamp_style.scale == 1.5 def test_default_timestamp_style(self): @@ -1005,14 +1004,13 @@ class TestConfig(unittest.TestCase): runtime_config = frigate_config.runtime_config assert runtime_config.cameras["back"].timestamp_style.position == "tl" - assert runtime_config.cameras["back"].timestamp_style.scale == 1.0 def test_global_timestamp_style_merge(self): config = { "mqtt": {"host": "mqtt"}, "rtmp": {"enabled": False}, - "timestamp_style": {"position": "br", "scale": 2.0}, + "timestamp_style": {"position": "br", "thickness": 2}, "cameras": { "back": { "ffmpeg": { @@ -1023,7 +1021,7 @@ class 
TestConfig(unittest.TestCase): }, ] }, - "timestamp_style": {"position": "bl", "scale": 1.5}, + "timestamp_style": {"position": "bl", "thickness": 4}, } }, } @@ -1032,7 +1030,7 @@ class TestConfig(unittest.TestCase): runtime_config = frigate_config.runtime_config assert runtime_config.cameras["back"].timestamp_style.position == "bl" - assert runtime_config.cameras["back"].timestamp_style.scale == 1.5 + assert runtime_config.cameras["back"].timestamp_style.thickness == 4 if __name__ == "__main__": From d35b09b18fb2fa5c9945f965fde9668e16e78c84 Mon Sep 17 00:00:00 2001 From: Dermot Duffy Date: Sun, 5 Sep 2021 08:42:38 -0700 Subject: [PATCH 028/132] Refresh the HA installation instructions. --- docs/docs/usage/home-assistant.md | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/docs/docs/usage/home-assistant.md b/docs/docs/usage/home-assistant.md index bf0b577b1..110ea6d6e 100644 --- a/docs/docs/usage/home-assistant.md +++ b/docs/docs/usage/home-assistant.md @@ -8,18 +8,18 @@ The best way to integrate with Home Assistant is to use the [official integratio ## Installation -Available via HACS as a [custom repository](https://hacs.xyz/docs/faq/custom_repositories). To install: +### Preparation -- Add the custom repository: +The Frigate integration requires the `mqtt` integration to be installed and +manually configured first. -``` -Home Assistant > HACS > Integrations > [...] > Custom Repositories -``` +See the [MQTT integration +documentation](https://www.home-assistant.io/integrations/mqtt/) for more +details. -| Key | Value | -| -------------- | ----------------------------------------------------------- | -| Repository URL | https://github.com/blakeblackshear/frigate-hass-integration | -| Category | Integration | +### Integration installation + +Available via HACS as a default repository. 
To install: - Use [HACS](https://hacs.xyz/) to install the integration: @@ -38,6 +38,12 @@ Note: You will also need [media_source](https://www.home-assistant.io/integrations/media_source/) enabled in your Home Assistant configuration for the Media Browser to appear. +### (Optional) Lovelace Card Installation + +To install the optional companion Lovelace card, please see the [separate +installation instructions](https://github.com/dermotduffy/frigate-hass-card) for +that card. + ## Configuration When configuring the integration, you will be asked for the following parameters: From 288b1a05629879887ea63ea623aa3d9fb23b5523 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Wed, 8 Sep 2021 08:02:26 -0500 Subject: [PATCH 029/132] remove nested enabled config setting on events --- frigate/config.py | 20 +++++++++----------- frigate/events.py | 6 ++---- frigate/test/test_config.py | 2 +- 3 files changed, 12 insertions(+), 16 deletions(-) diff --git a/frigate/config.py b/frigate/config.py index 8d54a441d..bfd54b1ba 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -72,29 +72,27 @@ class RetainConfig(FrigateBaseModel): ) -# DEPRECATED: Will eventually be removed -class ClipsConfig(FrigateBaseModel): - enabled: bool = Field(default=False, title="Save clips.") - max_seconds: int = Field(default=300, title="Maximum clip duration.") - pre_capture: int = Field(default=5, title="Seconds to capture before event starts.") - post_capture: int = Field(default=5, title="Seconds to capture after event ends.") +class EventsConfig(FrigateBaseModel): + max_seconds: int = Field(default=300, title="Maximum event duration.") + pre_capture: int = Field(default=5, title="Seconds to retain before event starts.") + post_capture: int = Field(default=5, title="Seconds to retain after event ends.") required_zones: List[str] = Field( default_factory=list, - title="List of required zones to be entered in order to save the clip.", + title="List of required zones to be entered in order to 
save the event.", ) objects: Optional[List[str]] = Field( - title="List of objects to be detected in order to save the clip.", + title="List of objects to be detected in order to save the event.", ) retain: RetainConfig = Field( - default_factory=RetainConfig, title="Clip retention settings." + default_factory=RetainConfig, title="Event retention settings." ) class RecordConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable record on all cameras.") retain_days: int = Field(default=0, title="Recording retention period in days.") - events: ClipsConfig = Field( - default_factory=ClipsConfig, title="Event specific settings." + events: EventsConfig = Field( + default_factory=EventsConfig, title="Event specific settings." ) diff --git a/frigate/events.py b/frigate/events.py index 3293d19bb..a67200e4b 100644 --- a/frigate/events.py +++ b/frigate/events.py @@ -35,10 +35,8 @@ class EventProcessor(threading.Thread): record_config: RecordConfig = self.config.cameras[camera].record - # Recording clips is disabled - if not record_config.enabled or ( - record_config.retain_days == 0 and not record_config.events.enabled - ): + # Recording is disabled + if not record_config.enabled: return False # If there are required zones and there is no overlap diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py index 8bf96736a..49fcf6d03 100644 --- a/frigate/test/test_config.py +++ b/frigate/test/test_config.py @@ -473,7 +473,7 @@ class TestConfig(unittest.TestCase): "width": 1920, "fps": 5, }, - "record": {"events": {"enabled": True}}, + "record": {"events": {}}, } }, } From 89e317a6bb31de351f2ce6c8e9cea4aa95f60262 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sat, 11 Sep 2021 08:34:27 -0500 Subject: [PATCH 030/132] store start/end event with pre/post capture to avoid expiring wanted recordings --- frigate/events.py | 16 ++++++++-------- frigate/http.py | 12 +++--------- 2 files changed, 11 insertions(+), 17 deletions(-) diff --git 
a/frigate/events.py b/frigate/events.py index a67200e4b..156cf80ce 100644 --- a/frigate/events.py +++ b/frigate/events.py @@ -6,12 +6,12 @@ import threading import time from pathlib import Path -from frigate.config import FrigateConfig, RecordConfig -from frigate.const import CLIPS_DIR -from frigate.models import Event, Recordings - from peewee import fn +from frigate.config import EventsConfig, FrigateConfig, RecordConfig +from frigate.const import CLIPS_DIR +from frigate.models import Event + logger = logging.getLogger(__name__) @@ -74,17 +74,17 @@ class EventProcessor(threading.Thread): self.events_in_process[event_data["id"]] = event_data if event_type == "end": - record_config: RecordConfig = self.config.cameras[camera].record - has_clip = self.should_create_clip(camera, event_data) + event_config: EventsConfig = self.config.cameras[camera].record.events + if has_clip or event_data["has_snapshot"]: Event.create( id=event_data["id"], label=event_data["label"], camera=camera, - start_time=event_data["start_time"], - end_time=event_data["end_time"], + start_time=event_data["start_time"] - event_config.pre_capture, + end_time=event_data["end_time"] + event_config.post_capture, top_score=event_data["top_score"], false_positive=event_data["false_positive"], zones=list(event_data["entered_zones"]), diff --git a/frigate/http.py b/frigate/http.py index cef448beb..1ef35dd92 100644 --- a/frigate/http.py +++ b/frigate/http.py @@ -242,14 +242,11 @@ def event_clip(id): if not event.has_clip: return "Clip not available", 404 - event_config = current_app.frigate_config.cameras[event.camera].record.events - start_ts = event.start_time - event_config.pre_capture - end_ts = event.end_time + event_config.post_capture file_name = f"{event.camera}-{id}.mp4" clip_path = os.path.join(CLIPS_DIR, file_name) if not os.path.isfile(clip_path): - return recording_clip(event.camera, start_ts, end_ts) + return recording_clip(event.camera, event.start_time, event.end_time) response = 
make_response() response.headers["Content-Description"] = "File Transfer" @@ -697,15 +694,12 @@ def vod_event(id): if not event.has_clip: return "Clip not available", 404 - event_config = current_app.frigate_config.cameras[event.camera].record.events - start_ts = event.start_time - event_config.pre_capture - end_ts = event.end_time + event_config.post_capture clip_path = os.path.join(CLIPS_DIR, f"{event.camera}-{id}.mp4") if not os.path.isfile(clip_path): - return vod_ts(event.camera, start_ts, end_ts) + return vod_ts(event.camera, event.start_time, event.end_time) - duration = int((end_ts - start_ts) * 1000) + duration = int((event.end_time - event.start_time) * 1000) return jsonify( { "cache": True, From a7b7a45b23c7b541d2779e585625e7de517d61a9 Mon Sep 17 00:00:00 2001 From: Jason Hunter Date: Sun, 12 Sep 2021 02:06:37 -0400 Subject: [PATCH 031/132] allow for custom object detection model via configuration --- frigate/app.py | 3 +++ frigate/config.py | 4 +++- frigate/edgetpu.py | 14 ++++++++++---- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/frigate/app.py b/frigate/app.py index bf3f12989..e73c56c6f 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -170,6 +170,7 @@ class FrigateApp: self.mqtt_relay.start() def start_detectors(self): + model_path = self.config.model.path model_shape = (self.config.model.height, self.config.model.width) for name in self.config.cameras.keys(): self.detection_out_events[name] = mp.Event() @@ -199,6 +200,7 @@ class FrigateApp: name, self.detection_queue, self.detection_out_events, + model_path, model_shape, "cpu", detector.num_threads, @@ -208,6 +210,7 @@ class FrigateApp: name, self.detection_queue, self.detection_out_events, + model_path, model_shape, detector.device, detector.num_threads, diff --git a/frigate/config.py b/frigate/config.py index bfd54b1ba..edba6a410 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -603,6 +603,8 @@ class DatabaseConfig(FrigateBaseModel): class 
ModelConfig(FrigateBaseModel): + path: Optional[str] = Field(title="Custom Object detection model path.") + labelmap_path: Optional[str] = Field(title="Label map for custom object detector.") width: int = Field(default=320, title="Object detection model input width.") height: int = Field(default=320, title="Object detection model input height.") labelmap: Dict[int, str] = Field( @@ -623,7 +625,7 @@ class ModelConfig(FrigateBaseModel): super().__init__(**config) self._merged_labelmap = { - **load_labels("/labelmap.txt"), + **load_labels(config.get("labelmap_path", "/labelmap.txt")), **config.get("labelmap", {}), } diff --git a/frigate/edgetpu.py b/frigate/edgetpu.py index 62c35eaf5..1992c6b35 100644 --- a/frigate/edgetpu.py +++ b/frigate/edgetpu.py @@ -45,7 +45,7 @@ class ObjectDetector(ABC): class LocalObjectDetector(ObjectDetector): - def __init__(self, tf_device=None, num_threads=3, labels=None): + def __init__(self, tf_device=None, model_path=None, num_threads=3, labels=None): self.fps = EventsPerSecond() if labels is None: self.labels = {} @@ -64,7 +64,7 @@ class LocalObjectDetector(ObjectDetector): edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config) logger.info("TPU found") self.interpreter = tflite.Interpreter( - model_path="/edgetpu_model.tflite", + model_path=model_path or "/edgetpu_model.tflite", experimental_delegates=[edge_tpu_delegate], ) except ValueError: @@ -77,7 +77,7 @@ class LocalObjectDetector(ObjectDetector): "CPU detectors are not recommended and should only be used for testing or for trial purposes." 
) self.interpreter = tflite.Interpreter( - model_path="/cpu_model.tflite", num_threads=num_threads + model_path=model_path or "/cpu_model.tflite", num_threads=num_threads ) self.interpreter.allocate_tensors() @@ -133,6 +133,7 @@ def run_detector( out_events: Dict[str, mp.Event], avg_speed, start, + model_path, model_shape, tf_device, num_threads, @@ -152,7 +153,9 @@ def run_detector( signal.signal(signal.SIGINT, receiveSignal) frame_manager = SharedMemoryFrameManager() - object_detector = LocalObjectDetector(tf_device=tf_device, num_threads=num_threads) + object_detector = LocalObjectDetector( + tf_device=tf_device, model_path=model_path, num_threads=num_threads + ) outputs = {} for name in out_events.keys(): @@ -189,6 +192,7 @@ class EdgeTPUProcess: name, detection_queue, out_events, + model_path, model_shape, tf_device=None, num_threads=3, @@ -199,6 +203,7 @@ class EdgeTPUProcess: self.avg_inference_speed = mp.Value("d", 0.01) self.detection_start = mp.Value("d", 0.0) self.detect_process = None + self.model_path = model_path self.model_shape = model_shape self.tf_device = tf_device self.num_threads = num_threads @@ -226,6 +231,7 @@ class EdgeTPUProcess: self.out_events, self.avg_inference_speed, self.detection_start, + self.model_path, self.model_shape, self.tf_device, self.num_threads, From 0320d94ea6fc57ddef2f0b5b70d4ea14cf8253df Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sun, 12 Sep 2021 14:48:21 -0500 Subject: [PATCH 032/132] docs updates --- docs/docs/configuration/advanced.md | 82 +- docs/docs/configuration/camera_specific.md | 82 + docs/docs/configuration/cameras.md | 536 +- docs/docs/configuration/detectors.md | 20 +- .../configuration/hardware_acceleration.md | 70 + docs/docs/configuration/index.md | 419 +- docs/docs/configuration/masks.md | 39 + docs/docs/configuration/nvdec.md | 19 +- docs/docs/configuration/objects.mdx | 7 +- docs/docs/configuration/optimizing.md | 73 - docs/docs/configuration/record.md | 10 + 
docs/docs/configuration/rtmp.md | 8 + docs/docs/configuration/snapshots.md | 6 + docs/docs/configuration/zones.md | 10 + docs/docs/{troubleshooting.md => faqs.md} | 8 +- docs/docs/guides/camera_setup.md | 47 + .../false_positives.md | 0 docs/docs/guides/getting_started.md | 193 + docs/docs/hardware.md | 29 +- docs/docs/how-it-works.md | 13 - docs/docs/index.md | 8 +- docs/docs/installation.md | 123 +- docs/docs/{usage => integrations}/api.md | 20 +- .../{usage => integrations}/home-assistant.md | 0 docs/docs/{usage => integrations}/howtos.md | 0 docs/docs/{usage => integrations}/mqtt.md | 0 docs/docs/usage/web.md | 10 - docs/package-lock.json | 9129 ++++++----------- docs/package.json | 4 +- docs/sidebars.js | 26 +- docs/static/img/resolutions.png | Bin 0 -> 66119 bytes web/package-lock.json | 78 +- web/package.json | 2 +- 33 files changed, 3911 insertions(+), 7160 deletions(-) create mode 100644 docs/docs/configuration/camera_specific.md create mode 100644 docs/docs/configuration/hardware_acceleration.md create mode 100644 docs/docs/configuration/masks.md delete mode 100644 docs/docs/configuration/optimizing.md create mode 100644 docs/docs/configuration/record.md create mode 100644 docs/docs/configuration/rtmp.md create mode 100644 docs/docs/configuration/snapshots.md create mode 100644 docs/docs/configuration/zones.md rename docs/docs/{troubleshooting.md => faqs.md} (87%) create mode 100644 docs/docs/guides/camera_setup.md rename docs/docs/{configuration => guides}/false_positives.md (100%) create mode 100644 docs/docs/guides/getting_started.md delete mode 100644 docs/docs/how-it-works.md rename docs/docs/{usage => integrations}/api.md (92%) rename docs/docs/{usage => integrations}/home-assistant.md (100%) rename docs/docs/{usage => integrations}/howtos.md (100%) rename docs/docs/{usage => integrations}/mqtt.md (100%) delete mode 100644 docs/docs/usage/web.md create mode 100644 docs/static/img/resolutions.png diff --git a/docs/docs/configuration/advanced.md 
b/docs/docs/configuration/advanced.md index eafc91d99..9c87db95a 100644 --- a/docs/docs/configuration/advanced.md +++ b/docs/docs/configuration/advanced.md @@ -1,50 +1,11 @@ --- id: advanced -title: Advanced -sidebar_label: Advanced +title: Advanced Options +sidebar_label: Advanced Options --- ## Advanced configuration -### `motion` - -Global motion detection config. These may also be defined at the camera level. - -```yaml -motion: - # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below) - # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. - # The value should be between 1 and 255. - threshold: 25 - # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: ~0.17% of the motion frame area) - # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will make motion detection more sensitive to smaller - # moving objects. - contour_area: 100 - # Optional: Alpha value passed to cv2.accumulateWeighted when averaging the motion delta across multiple frames (default: shown below) - # Higher values mean the current frame impacts the delta a lot, and a single raindrop may register as motion. - # Too low and a fast moving person wont be detected as motion. - delta_alpha: 0.2 - # Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below) - # Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster. - # Low values will cause things like moving shadows to be detected as motion for longer. 
- # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/ - frame_alpha: 0.2 - # Optional: Height of the resized motion frame (default: 1/6th of the original frame height, but no less than 180) - # This operates as an efficient blur alternative. Higher values will result in more granular motion detection at the expense of higher CPU usage. - # Lower values result in less CPU, but small changes may not register as motion. - frame_height: 180 -``` - -### `detect` - -Global object detection settings. These may also be defined at the camera level. - -```yaml -detect: - # Optional: Number of frames without a detection before frigate considers an object to be gone. (default: 5x the frame rate) - max_disappeared: 25 -``` - ### `logger` Change the default log level for troubleshooting purposes. @@ -72,12 +33,7 @@ Examples of available modules are: ### `environment_vars` -This section can be used to set environment variables for those unable to modify the environment of the container (ie. within Hass.io) - -```yaml -environment_vars: - EXAMPLE_VAR: value -``` +This section can be used to set environment variables for those unable to modify the environment of the container (ie. within HassOS) ### `database` @@ -87,40 +43,8 @@ If you are storing your database on a network share (SMB, NFS, etc), you may get This may need to be in a custom location if network storage is used for the media folder. -```yaml -database: - path: /media/frigate/frigate.db -``` - -### `detectors` - -```yaml -detectors: - # Required: name of the detector - coral: - # Required: type of the detector - # Valid values are 'edgetpu' (requires device property below) and 'cpu'. 
- type: edgetpu - # Optional: device name as defined here: https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api - device: usb - # Optional: num_threads value passed to the tflite.Interpreter (default: shown below) - # This value is only used for CPU types - num_threads: 3 -``` - ### `model` If using a custom model, the width and height will need to be specified. The labelmap can be customized to your needs. A common reason to do this is to combine multiple object types that are easily confused when you don't need to be as granular such as car/truck. By default, truck is renamed to car because they are often confused. You cannot add new object types, but you can change the names of existing objects in the model. - -```yaml -model: - # Required: height of the trained model - height: 320 - # Required: width of the trained model - width: 320 - # Optional: labelmap overrides - labelmap: - 7: car -``` diff --git a/docs/docs/configuration/camera_specific.md b/docs/docs/configuration/camera_specific.md new file mode 100644 index 000000000..092ffc607 --- /dev/null +++ b/docs/docs/configuration/camera_specific.md @@ -0,0 +1,82 @@ +--- +id: camera_specific +title: Camera Specific Configurations +--- + +### MJPEG Cameras + +The input and output parameters need to be adjusted for MJPEG cameras + +```yaml +input_args: + - -avoid_negative_ts + - make_zero + - -fflags + - nobuffer + - -flags + - low_delay + - -strict + - experimental + - -fflags + - +genpts+discardcorrupt + - -r + - "3" # <---- adjust depending on your desired frame rate from the mjpeg image + - -use_wallclock_as_timestamps + - "1" +``` + +Note that mjpeg cameras require encoding the video into h264 for recording, and rtmp roles. This will use significantly more CPU than if the cameras supported h264 feeds directly. 
+ +```yaml +output_args: + record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v libx264 -an + rtmp: -c:v libx264 -an -f flv +``` + +### RTMP Cameras (Reolink 410/520 and possibly others) + +The input parameters need to be adjusted for RTMP cameras + +```yaml +ffmpeg: + input_args: + - -avoid_negative_ts + - make_zero + - -fflags + - nobuffer + - -flags + - low_delay + - -strict + - experimental + - -fflags + - +genpts+discardcorrupt + - -rw_timeout + - "5000000" + - -use_wallclock_as_timestamps + - "1" + - -f + - live_flv +``` + +### Blue Iris RTSP Cameras + +You will need to remove `nobuffer` flag for Blue Iris RTSP cameras + +```yaml +ffmpeg: + input_args: + - -avoid_negative_ts + - make_zero + - -flags + - low_delay + - -strict + - experimental + - -fflags + - +genpts+discardcorrupt + - -rtsp_transport + - tcp + - -stimeout + - "5000000" + - -use_wallclock_as_timestamps + - "1" +``` diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md index dc1c18307..ef6e05dd2 100644 --- a/docs/docs/configuration/cameras.md +++ b/docs/docs/configuration/cameras.md @@ -5,17 +5,15 @@ title: Cameras ## Setting Up Camera Inputs -Up to 4 inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create recordings from a higher resolution stream, or vice versa. +Several inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create recordings from a higher resolution stream, or vice versa. Each role can only be assigned to one input per camera. 
The options for roles are as follows: -| Role | Description | -| -------- | ------------------------------------------------------------------------------------- | -| `detect` | Main feed for object detection | -| `record` | Saves segments of the video feed based on configuration settings. [docs](#recordings) | -| `rtmp` | Broadcast as an RTMP feed for other services to consume. [docs](#rtmp-streams) | - -### Example +| Role | Description | +| -------- | ----------------------------------------------------------------------------------------------- | +| `detect` | Main feed for object detection | +| `record` | Saves segments of the video feed based on configuration settings. [docs](/configuration/record) | +| `rtmp` | Broadcast as an RTMP feed for other services to consume. [docs](/configuration/rtmp) | ```yaml mqtt: @@ -34,526 +32,14 @@ cameras: detect: width: 1280 height: 720 - fps: 5 ``` -`width`, `height`, and `fps` are only used for the `detect` role. Other streams are passed through, so there is no need to specify the resolution. - -## Masks & Zones - -### Masks - -Masks are used to ignore initial detection in areas of your camera's field of view. - -There are two types of masks available: - -- **Motion masks**: Motion masks are used to prevent unwanted types of motion from triggering detection. Try watching the video feed with `Motion Boxes` enabled to see what may be regularly detected as motion. For example, you want to mask out your timestamp, the sky, rooftops, etc. Keep in mind that this mask only prevents motion from being detected and does not prevent objects from being detected if object detection was started due to motion in unmasked areas. Motion is also used during object tracking to refine the object detection area in the next frame. Over masking will make it more difficult for objects to be tracked. To see this effect, create a mask, and then watch the video feed with `Motion Boxes` enabled again. 
-- **Object filter masks**: Object filter masks are used to filter out false positives for a given object type. These should be used to filter any areas where it is not possible for an object of that type to be. The bottom center of the detected object's bounding box is evaluated against the mask. If it is in a masked area, it is assumed to be a false positive. For example, you may want to mask out rooftops, walls, the sky, treetops for people. For cars, masking locations other than the street or your driveway will tell frigate that anything in your yard is a false positive. - -To create a poly mask: - -1. Visit the [web UI](/usage/web) -1. Click the camera you wish to create a mask for -1. Click "Mask & Zone creator" -1. Click "Add" on the type of mask or zone you would like to create -1. Click on the camera's latest image to create a masked area. The yaml representation will be updated in real-time -1. When you've finished creating your mask, click "Copy" and paste the contents into your `config.yaml` file and restart Frigate - -Example of a finished row corresponding to the below example image: - -```yaml -motion: - mask: "0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432" -``` - -![poly](/img/example-mask-poly.png) - -```yaml -# Optional: camera level motion config -motion: - # Optional: motion mask - # NOTE: see docs for more detailed info on creating masks - mask: 0,900,1080,900,1080,1920,0,1920 -``` - -### Zones - -Zones allow you to define a specific area of the frame and apply additional filters for object types so you can determine whether or not an object is within a particular area. Zones cannot have the same name as a camera. If desired, a single zone can include multiple cameras if you have multiple cameras covering the same area by configuring zones with the same name for each camera. - -During testing, `draw_zones` should be set in the config to draw the zone on the frames so you can adjust as needed. 
The zone line will increase in thickness when any object enters the zone. - -To create a zone, follow the same steps above for a "Motion mask", but use the section of the web UI for creating a zone instead. - -```yaml -# Optional: zones for this camera -zones: - # Required: name of the zone - # NOTE: This must be different than any camera names, but can match with another zone on another - # camera. - front_steps: - # Required: List of x,y coordinates to define the polygon of the zone. - # NOTE: Coordinates can be generated at https://www.image-map.net/ - coordinates: 545,1077,747,939,788,805 - # Optional: List of objects that can trigger this zone (default: all tracked objects) - objects: - - person - # Optional: Zone level object filters. - # NOTE: The global and camera filters are applied upstream. - filters: - person: - min_area: 5000 - max_area: 100000 - threshold: 0.7 -``` - -## Objects - -For a list of available objects, see the [objects documentation](./objects.mdx). - -```yaml -# Optional: Camera level object filters config. -objects: - track: - - person - - car - # Optional: mask to prevent all object types from being detected in certain areas (default: no mask) - # Checks based on the bottom center of the bounding box of the object. - # NOTE: This mask is COMBINED with the object type specific mask below - mask: 0,0,1000,0,1000,200,0,200 - filters: - person: - min_area: 5000 - max_area: 100000 - min_score: 0.5 - threshold: 0.7 - # Optional: mask to prevent this object type from being detected in certain areas (default: no mask) - # Checks based on the bottom center of the bounding box of the object - mask: 0,0,1000,0,1000,200,0,200 -``` - -## Recordings - -24/7 recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM/DD/HH//MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding and are available in Home Assistant's media browser. 
Each camera supports a configurable retention policy in the config. - -Exported clips are also created off of these recordings. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed. - -These recordings will not be playable in the web UI or in Home Assistant's media browser unless your camera sends video as h264. - -:::caution -Previous versions of frigate included `-vsync drop` in input parameters. This is not compatible with FFmpeg's segment feature and must be removed from your input parameters if you have overrides set. -::: - -```yaml -record: - # Optional: Enable recording (default: shown below) - enabled: False - # Optional: Number of days to retain (default: shown below) - retain_days: 0 - # Optional: Event recording settings - events: - # Optional: Enable event recording retention settings (default: shown below) - enabled: False - # Optional: Maximum length of time to retain video during long events. (default: shown below) - # NOTE: If an object is being tracked for longer than this amount of time, the cache - # will begin to expire and the resulting clip will be the last x seconds of the event unless retain_days under record is > 0. - max_seconds: 300 - # Optional: Number of seconds before the event to include in the event (default: shown below) - pre_capture: 5 - # Optional: Number of seconds after the event to include in the event (default: shown below) - post_capture: 5 - # Optional: Objects to save event for. 
(default: all tracked objects) - objects: - - person - # Optional: Restrict event to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - # Optional: Retention settings for event - retain: - # Required: Default retention days (default: shown below) - default: 10 - # Optional: Per object retention days - objects: - person: 15 -``` - -## Snapshots - -Frigate can save a snapshot image to `/media/frigate/clips` for each event named as `<camera_name>-<event_id>.jpg`. - -```yaml -# Optional: Configuration for the jpg snapshots written to the clips directory for each event -snapshots: - # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below) - # This value can be set via MQTT and will be updated in startup based on retained value - enabled: False - # Optional: Enable writing a clean copy png snapshot to /media/frigate/clips (default: shown below) - # Only works if snapshots are enabled. This image is intended to be used for training purposes. - clean_copy: True - # Optional: print a timestamp on the snapshots (default: shown below) - timestamp: False - # Optional: draw bounding box on the snapshots (default: shown below) - bounding_box: False - # Optional: crop the snapshot (default: shown below) - crop: False - # Optional: height to resize the snapshot to (default: original size) - height: 175 - # Optional: jpeg encode quality (default: shown below) - quality: 70 - # Optional: Restrict snapshots to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - # Optional: Camera override for retention settings (default: global values) - retain: - # Required: Default retention days (default: shown below) - default: 10 - # Optional: Per object retention days - objects: - person: 15 -``` - -## RTMP streams - -Frigate can re-stream your video feed as an RTMP feed for other applications such as Home Assistant to utilize it at `rtmp://<frigate_host>/live/<camera_name>`. Port 1935 must be open. 
This allows you to use a video feed for detection in frigate and Home Assistant live view at the same time without having to make two separate connections to the camera. The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate. - -Some video feeds are not compatible with RTMP. If you are experiencing issues, check to make sure your camera feed is h264 with AAC audio. If your camera doesn't support a compatible format for RTMP, you can use the ffmpeg args to re-encode it on the fly at the expense of increased CPU utilization. - -```yaml -rtmp: - # Optional: Enable the RTMP stream (default: True) - enabled: True -``` - -## Timestamp style configuration - -For the debug view and snapshots it is possible to embed a timestamp in the feed. In some instances the default position obstructs important space, visibility or contrast is too low because of color or the datetime format does not match ones desire. - -```yaml -# Optional: in-feed timestamp style configuration -timestamp_style: - # Optional: Position of the timestamp (default: shown below) - # "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right) - position: "tl" - # Optional: Format specifier conform to the Python package "datetime" (default: shown below) - # Additional Examples: - # german: "%d.%m.%Y %H:%M:%S" - format: "%m/%d/%Y %H:%M:%S" - # Optional: Color of font - color: - # All Required when color is specified (default: shown below) - red: 255 - green: 255 - blue: 255 - # Optional: Line thickness of font (default: shown below) - thickness: 2 - # Optional: Effect of lettering (default: shown below) - # None (No effect), - # "solid" (solid background in inverse color of font) - # "shadow" (shadow for font) - effect: None -``` - -## Full example - -The following is a full example of all of the options together for a camera configuration +Additional cameras are simply added to the config under the `cameras` 
entry. ```yaml +mqtt: ... cameras: - # Required: name of the camera - back: - # Required: ffmpeg settings for the camera - ffmpeg: - # Required: A list of input streams for the camera. See documentation for more information. - inputs: - # Required: the path to the stream - # NOTE: Environment variables that begin with 'FRIGATE_' may be referenced in {} - - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 - # Required: list of roles for this stream. valid values are: detect,record,rtmp - # NOTICE: In addition to assigning the record, and rtmp roles, - # they must also be enabled in the camera config. - roles: - - detect - - rtmp - # Optional: stream specific global args (default: inherit) - global_args: - # Optional: stream specific hwaccel args (default: inherit) - hwaccel_args: - # Optional: stream specific input args (default: inherit) - input_args: - # Optional: camera specific global args (default: inherit) - global_args: - # Optional: camera specific hwaccel args (default: inherit) - hwaccel_args: - # Optional: camera specific input args (default: inherit) - input_args: - # Optional: camera specific output args (default: inherit) - output_args: - - # Required: Camera level detect settings - detect: - # Optional: width of the frame for the input with the detect role (default: shown below) - width: 1280 - # Optional: height of the frame for the input with the detect role (default: shown below) - height: 720 - # Optional: desired fps for your camera for the input with the detect role (default: shown below) - # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera. - fps: 5 - # Optional: enables detection for the camera (default: True) - # This value can be set via MQTT and will be updated in startup based on retained value - enabled: True - # Optional: Number of frames without a detection before frigate considers an object to be gone. 
(default: 5x the frame rate) - max_disappeared: 25 - - # Optional: camera level motion config - motion: - # Optional: motion mask - # NOTE: see docs for more detailed info on creating masks - mask: 0,900,1080,900,1080,1920,0,1920 - - # Optional: timeout for highest scoring image before allowing it - # to be replaced by a newer image. (default: shown below) - best_image_timeout: 60 - - # Optional: zones for this camera - zones: - # Required: name of the zone - # NOTE: This must be different than any camera names, but can match with another zone on another - # camera. - front_steps: - # Required: List of x,y coordinates to define the polygon of the zone. - # NOTE: Coordinates can be generated at https://www.image-map.net/ - coordinates: 545,1077,747,939,788,805 - # Optional: List of objects that can trigger this zone (default: all tracked objects) - objects: - - person - # Optional: Zone level object filters. - # NOTE: The global and camera filters are applied upstream. - filters: - person: - min_area: 5000 - max_area: 100000 - threshold: 0.7 - - # Optional: 24/7 recording configuration - record: - # Optional: Enable recording (default: global setting) - enabled: False - # Optional: Number of days to retain (default: global setting) - retain_days: 30 - # Optional: Event recording settings - events: - # Required: enables event recordings for the camera (default: shown below) - # This value can be set via MQTT and will be updated in startup based on retained value - enabled: False - # Optional: Number of seconds before the event to include (default: shown below) - pre_capture: 5 - # Optional: Number of seconds after the event to include (default: shown below) - post_capture: 5 - # Optional: Objects to save events for. 
(default: all tracked objects) - objects: - - person - # Optional: Restrict events to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - # Optional: Camera override for retention settings (default: global values) - retain: - # Required: Default retention days (default: shown below) - default: 10 - # Optional: Per object retention days - objects: - person: 15 - - # Optional: RTMP re-stream configuration - rtmp: - # Optional: Enable the RTMP stream (default: True) - enabled: True - - # Optional: Live stream configuration for WebUI - live: - # Optional: Set the height of the live stream. (default: 720) - # This must be less than or equal to the height of the detect stream. Lower resolutions - # reduce bandwidth required for viewing the live stream. Width is computed to match known aspect ratio. - height: 720 - # Optional: Set the encode quality of the live stream (default: shown below) - # 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources. 
- quality: 8 - - # Optional: Configuration for the jpg snapshots written to the clips directory for each event - snapshots: - # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below) - # This value can be set via MQTT and will be updated in startup based on retained value - enabled: False - # Optional: print a timestamp on the snapshots (default: shown below) - timestamp: False - # Optional: draw bounding box on the snapshots (default: shown below) - bounding_box: False - # Optional: crop the snapshot (default: shown below) - crop: False - # Optional: height to resize the snapshot to (default: original size) - height: 175 - # Optional: Restrict snapshots to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - # Optional: Camera override for retention settings (default: global values) - retain: - # Required: Default retention days (default: shown below) - default: 10 - # Optional: Per object retention days - objects: - person: 15 - - # Optional: Configuration for the jpg snapshots published via MQTT - mqtt: - # Optional: Enable publishing snapshot via mqtt for camera (default: shown below) - # NOTE: Only applies to publishing image data to MQTT via 'frigate/<camera_name>/<object_name>/snapshot'. - # All other messages will still be published. - enabled: True - # Optional: print a timestamp on the snapshots (default: shown below) - timestamp: True - # Optional: draw bounding box on the snapshots (default: shown below) - bounding_box: True - # Optional: crop the snapshot (default: shown below) - crop: True - # Optional: height to resize the snapshot to (default: shown below) - height: 270 - # Optional: jpeg encode quality (default: shown below) - quality: 70 - # Optional: Restrict mqtt messages to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - - # Optional: Camera level object filters config. 
- objects: - track: - - person - - car - # Optional: mask to prevent all object types from being detected in certain areas (default: no mask) - # Checks based on the bottom center of the bounding box of the object. - # NOTE: This mask is COMBINED with the object type specific mask below - mask: 0,0,1000,0,1000,200,0,200 - filters: - person: - min_area: 5000 - max_area: 100000 - min_score: 0.5 - threshold: 0.7 - # Optional: mask to prevent this object type from being detected in certain areas (default: no mask) - # Checks based on the bottom center of the bounding box of the object - mask: 0,0,1000,0,1000,200,0,200 - - # Optional: In-feed timestamp style configuration - timestamp_style: - # Optional: Position of the timestamp (default: shown below) - # "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right) - position: "tl" - # Optional: Format specifier conform to the Python package "datetime" (default: shown below) - # Additional Examples: - # german: "%d.%m.%Y %H:%M:%S" - format: "%m/%d/%Y %H:%M:%S" - # Optional: Color of font - color: - # All Required when color is specified (default: shown below) - red: 255 - green: 255 - blue: 255 - # Optional: Line thickness of font (default: shown below) - thickness: 2 - # Optional: Effect of lettering (default: shown below) - # None (No effect), - # "solid" (solid background in inverse color of font) - # "shadow" (shadow for font) - effect: None -``` - -## Camera specific configuration - -### MJPEG Cameras - -The input and output parameters need to be adjusted for MJPEG cameras - -```yaml -input_args: - - -avoid_negative_ts - - make_zero - - -fflags - - nobuffer - - -flags - - low_delay - - -strict - - experimental - - -fflags - - +genpts+discardcorrupt - - -r - - "3" # <---- adjust depending on your desired frame rate from the mjpeg image - - -use_wallclock_as_timestamps - - "1" -``` - -Note that mjpeg cameras require encoding the video into h264 for recording, and rtmp roles. 
This will use significantly more CPU than if the cameras supported h264 feeds directly. - -```yaml -output_args: - record: -f segment -segment_time 60 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v libx264 -an - rtmp: -c:v libx264 -an -f flv -``` - -### RTMP Cameras - -The input parameters need to be adjusted for RTMP cameras - -```yaml -ffmpeg: - input_args: - - -avoid_negative_ts - - make_zero - - -fflags - - nobuffer - - -flags - - low_delay - - -strict - - experimental - - -fflags - - +genpts+discardcorrupt - - -use_wallclock_as_timestamps - - "1" -``` - -### Reolink 410/520 (possibly others) - -Several users have reported success with the rtmp video from Reolink cameras. - -```yaml -ffmpeg: - input_args: - - -avoid_negative_ts - - make_zero - - -fflags - - nobuffer - - -flags - - low_delay - - -strict - - experimental - - -fflags - - +genpts+discardcorrupt - - -rw_timeout - - "5000000" - - -use_wallclock_as_timestamps - - "1" -``` - -### Blue Iris RTSP Cameras - -You will need to remove `nobuffer` flag for Blue Iris RTSP cameras - -```yaml -ffmpeg: - input_args: - - -avoid_negative_ts - - make_zero - - -flags - - low_delay - - -strict - - experimental - - -fflags - - +genpts+discardcorrupt - - -rtsp_transport - - tcp - - -stimeout - - "5000000" - - -use_wallclock_as_timestamps - - "1" + back: ... + front: ... + side: ... ``` diff --git a/docs/docs/configuration/detectors.md b/docs/docs/configuration/detectors.md index 58ec474ef..176138f65 100644 --- a/docs/docs/configuration/detectors.md +++ b/docs/docs/configuration/detectors.md @@ -3,13 +3,13 @@ id: detectors title: Detectors --- -The default config will look for a USB Coral device. If you do not have a Coral, you will need to configure a CPU detector. If you have PCI or multiple Coral devices, you need to configure your detector devices in the config file. When using multiple detectors, they run in dedicated processes, but pull from a common queue of requested detections across all cameras. 
+By default, Frigate will use a single CPU detector. If you have a Coral, you will need to configure your detector devices in the config file. When using multiple detectors, they run in dedicated processes, but pull from a common queue of requested detections across all cameras. Frigate supports `edgetpu` and `cpu` as detector types. The device value should be specified according to the [Documentation for the TensorFlow Lite Python API](https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api). **Note**: There is no support for Nvidia GPUs to perform object detection with tensorflow. It can be used for ffmpeg decoding, but not object detection. -Single USB Coral: +### Single USB Coral ```yaml detectors: @@ -18,7 +18,7 @@ detectors: device: usb ``` -Multiple USB Corals: +### Multiple USB Corals ```yaml detectors: @@ -30,16 +30,16 @@ detectors: device: usb:1 ``` -Native Coral (Dev Board): +### Native Coral (Dev Board) ```yaml detectors: coral: type: edgetpu - device: '' + device: "" ``` -Multiple PCIE/M.2 Corals: +### Multiple PCIE/M.2 Corals ```yaml detectors: @@ -51,7 +51,7 @@ detectors: device: pci:1 ``` -Mixing Corals: +### Mixing Corals ```yaml detectors: @@ -63,12 +63,16 @@ detectors: device: pci ``` -CPU Detectors (not recommended): +### CPU Detectors (not recommended) ```yaml detectors: cpu1: type: cpu + num_threads: 3 cpu2: type: cpu + num_threads: 3 ``` + +When using CPU detectors, you can add a CPU detector per camera. Adding more detectors than the number of cameras should not improve performance. diff --git a/docs/docs/configuration/hardware_acceleration.md b/docs/docs/configuration/hardware_acceleration.md new file mode 100644 index 000000000..e2153f4f2 --- /dev/null +++ b/docs/docs/configuration/hardware_acceleration.md @@ -0,0 +1,70 @@ +--- +id: hardware_acceleration +title: Hardware Acceleration +--- + +It is recommended to update your configuration to enable hardware accelerated decoding in ffmpeg. 
Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro + +### Raspberry Pi 3/4 (32-bit OS) + +Ensure you increase the allocated RAM for your GPU to at least 128 (raspi-config > Performance Options > GPU Memory). +**NOTICE**: If you are using the addon, you may need to turn off `Protection mode` for hardware acceleration. + +```yaml +ffmpeg: + hwaccel_args: + - -c:v + - h264_mmal +``` + +### Raspberry Pi 3/4 (64-bit OS) + +**NOTICE**: If you are using the addon, you may need to turn off `Protection mode` for hardware acceleration. + +```yaml +ffmpeg: + hwaccel_args: + - -c:v + - h264_v4l2m2m +``` + +### Intel-based CPUs (<10th Generation) via Quicksync + +```yaml +ffmpeg: + hwaccel_args: + - -hwaccel + - vaapi + - -hwaccel_device + - /dev/dri/renderD128 + - -hwaccel_output_format + - yuv420p +``` + +### Intel-based CPUs (>=10th Generation) via Quicksync + +```yaml +ffmpeg: + hwaccel_args: + - -hwaccel + - qsv + - -qsv_device + - /dev/dri/renderD128 +``` + +### AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver + +**Note:** You also need to set `LIBVA_DRIVER_NAME=radeonsi` as an environment variable on the container. + +```yaml +ffmpeg: + hwaccel_args: + - -hwaccel + - vaapi + - -hwaccel_device + - /dev/dri/renderD128 +``` + +### NVIDIA GPU + +NVIDIA GPU based decoding via NVDEC is supported, but requires special configuration. See the [NVIDIA NVDEC documentation](/configuration/nvdec) for more details. diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index 9fb6f738b..ed0f6b3eb 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -1,13 +1,13 @@ --- id: index -title: Configuration +title: Configuration File --- -For HassOS installations, the default location for the config file is `/config/frigate.yml`. 
+For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`) and named `frigate.yml`. -For all other installations, the default location for the config file is '/config/config.yml'. This can be overridden with the `CONFIG_FILE` environment variable. Camera specific ffmpeg parameters are documented [here](cameras.md). +For all other installation types, the config file should be mapped to `/config/config.yml` inside the container. -It is recommended to start with a minimal configuration and add to it: +It is recommended to start with a minimal configuration and add to it as described in [this guide](/guides/getting_started): ```yaml mqtt: @@ -23,12 +23,15 @@ cameras: detect: width: 1280 height: 720 - fps: 5 ``` -## Required +### Full configuration reference: -## `mqtt` +:::caution + +It is not recommended to copy this full configuration file. Only specify values that are different from the defaults. Configuration options and default values may change in future versions. + +::: ```yaml mqtt: @@ -37,10 +40,10 @@ mqtt: # Optional: port (default: shown below) port: 1883 # Optional: topic prefix (default: shown below) - # WARNING: must be unique if you are running multiple instances + # NOTE: must be unique if you are running multiple instances topic_prefix: frigate # Optional: client id (default: shown below) - # WARNING: must be unique if you are running multiple instances + # NOTE: must be unique if you are running multiple instances client_id: frigate # Optional: user user: mqtt_user @@ -61,59 +64,39 @@ mqtt: tls_insecure: false # Optional: interval in seconds for publishing stats (default: shown below) stats_interval: 60 -``` -## `cameras` +# Optional: Detectors configuration. 
Defaults to a single CPU detector +detectors: + # Required: name of the detector + coral: + # Required: type of the detector + # Valid values are 'edgetpu' (requires device property below) and 'cpu'. + type: edgetpu + # Optional: device name as defined here: https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api + device: usb + # Optional: num_threads value passed to the tflite.Interpreter (default: shown below) + # This value is only used for CPU types + num_threads: 3 -Each of your cameras must be configured. The following is the minimum required to register a camera in Frigate. Check the [camera configuration page](cameras.md) for a complete list of options. - -```yaml -cameras: - # Name of your camera - front_door: - ffmpeg: - inputs: - - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 - roles: - - detect - - rtmp - detect: - width: 1280 - height: 720 - fps: 5 -``` - -## Optional - -### `database` - -```yaml +# Optional: Database configuration database: # The path to store the SQLite DB (default: shown below) path: /media/frigate/frigate.db -``` -### `model` - -```yaml # Optional: model modifications model: + # Optional: path to the model (default: automatic based on detector) + path: /edgetpu_model.tflite + # Optional: path to the labelmap (default: shown below) + labelmap_path: /labelmap.txt # Required: Object detection model input width (default: shown below) width: 320 # Required: Object detection model input height (default: shown below) height: 320 - # Optional: Label name modifications + # Optional: Label name modifications. These are merged into the standard labelmap. labelmap: - 2: vehicle # previously "car" -``` + 2: vehicle -### `detectors` - -Check the [detectors configuration page](detectors.md) for a complete list of options. 
- -### `logger` - -```yaml # Optional: logger verbosity settings logger: # Optional: Default log verbosity (default: shown below) @@ -121,102 +104,12 @@ logger: # Optional: Component specific logger overrides logs: frigate.event: debug -``` -### `record` +# Optional: set environment variables +environment_vars: + EXAMPLE_VAR: value -Can be overridden at the camera level. 24/7 recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM/DD/HH//MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding and are available in Home Assistant's media browser. Each camera supports a configurable retention policy in the config. - -Exported clips are also created off of these recordings. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed. - -These recordings will not be playable in the web UI or in Home Assistant's media browser unless your camera sends video as h264. - -:::caution -Previous versions of frigate included `-vsync drop` in input parameters. This is not compatible with FFmpeg's segment feature and must be removed from your input parameters if you have overrides set. -::: - -```yaml -record: - # Optional: Enable recording (default: shown below) - enabled: False - # Optional: Number of days to retain (default: shown below) - retain_days: 0 - # Optional: Event recording settings - events: - # Optional: Enable event recording retention settings (default: shown below) - enabled: False - # Optional: Maximum length of time to retain video during long events. (default: shown below) - # NOTE: If an object is being tracked for longer than this amount of time, the cache - # will begin to expire and the resulting clip will be the last x seconds of the event unless retain_days under record is > 0. 
- max_seconds: 300 - # Optional: Number of seconds before the event to include (default: shown below) - pre_capture: 5 - # Optional: Number of seconds after the event to include (default: shown below) - post_capture: 5 - # Optional: Objects to save recordings for. (default: all tracked objects) - objects: - - person - # Optional: Restrict recordings to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - # Optional: Retention settings for events - retain: - # Required: Default retention days (default: shown below) - default: 10 - # Optional: Per object retention days - objects: - person: 15 -``` - -### `ffmpeg` - -Can be overridden at the camera level. - -```yaml -ffmpeg: - # Optional: global ffmpeg args (default: shown below) - global_args: -hide_banner -loglevel warning - # Optional: global hwaccel args (default: shown below) - # NOTE: See hardware acceleration docs for your specific device - hwaccel_args: [] - # Optional: global input args (default: shown below) - input_args: -avoid_negative_ts make_zero -fflags +genpts+discardcorrupt -rtsp_transport tcp -stimeout 5000000 -use_wallclock_as_timestamps 1 - # Optional: global output args - output_args: - # Optional: output args for detect streams (default: shown below) - detect: -f rawvideo -pix_fmt yuv420p - # Optional: output args for record streams (default: shown below) - record: -f segment -segment_time 60 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an - # Optional: output args for rtmp streams (default: shown below) - rtmp: -c copy -f flv -``` - -### `objects` - -Can be overridden at the camera level. For a list of available objects, see the [objects documentation](./objects.mdx). 
- -```yaml -objects: - # Optional: list of objects to track from labelmap.txt (default: shown below) - track: - - person - # Optional: filters to reduce false positives for specific object types - filters: - person: - # Optional: minimum width*height of the bounding box for the detected object (default: 0) - min_area: 5000 - # Optional: maximum width*height of the bounding box for the detected object (default: 24000000) - max_area: 100000 - # Optional: minimum score for the object to initiate tracking (default: shown below) - min_score: 0.5 - # Optional: minimum decimal percentage for tracked object's computed score to be considered a true positive (default: shown below) - threshold: 0.7 -``` - -### `birdseye` - -A dynamic combined camera view of all tracked cameras. This is optimized for minimal bandwidth and server resource utilization. Encoding is only performed when actively viewing the video feed, and only active (defined by the mode) cameras are included in the view. - -```yaml +# Optional: birdseye configuration birdseye: # Optional: Enable birdseye view (default: shown below) enabled: True @@ -232,24 +125,169 @@ birdseye: # motion - cameras are included if motion was detected in the last 30 seconds # continuous - all cameras are included always mode: objects -``` -### `rtmp` +# Optional: ffmpeg configuration +ffmpeg: + # Optional: global ffmpeg args (default: shown below) + global_args: -hide_banner -loglevel warning + # Optional: global hwaccel args (default: shown below) + # NOTE: See hardware acceleration docs for your specific device + hwaccel_args: [] + # Optional: global input args (default: shown below) + input_args: -avoid_negative_ts make_zero -fflags +genpts+discardcorrupt -rtsp_transport tcp -stimeout 5000000 -use_wallclock_as_timestamps 1 + # Optional: global output args + output_args: + # Optional: output args for detect streams (default: shown below) + detect: -f rawvideo -pix_fmt yuv420p + # Optional: output args for record streams (default: 
shown below) + record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c copy -an + # Optional: output args for rtmp streams (default: shown below) + rtmp: -c copy -f flv -Can be overridden at the camera level. See the [cameras configuration page](cameras.md) for more information about RTMP streaming. +# Optional: Detect configuration +# NOTE: Can be overridden at the camera level +detect: + # Optional: width of the frame for the input with the detect role (default: shown below) + width: 1280 + # Optional: height of the frame for the input with the detect role (default: shown below) + height: 720 + # Optional: desired fps for your camera for the input with the detect role (default: shown below) + # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera. + fps: 5 + # Optional: enables detection for the camera (default: True) + # This value can be set via MQTT and will be updated in startup based on retained value + enabled: True + # Optional: Number of frames without a detection before frigate considers an object to be gone. (default: 5x the frame rate) + max_disappeared: 25 -```yaml +# Optional: Object configuration +# NOTE: Can be overridden at the camera level +objects: + # Optional: list of objects to track from labelmap.txt (default: shown below) + track: + - person + # Optional: mask to prevent all object types from being detected in certain areas (default: no mask) + # Checks based on the bottom center of the bounding box of the object. 
+ # NOTE: This mask is COMBINED with the object type specific mask below + mask: 0,0,1000,0,1000,200,0,200 + # Optional: filters to reduce false positives for specific object types + filters: + person: + # Optional: minimum width*height of the bounding box for the detected object (default: 0) + min_area: 5000 + # Optional: maximum width*height of the bounding box for the detected object (default: 24000000) + max_area: 100000 + # Optional: minimum score for the object to initiate tracking (default: shown below) + min_score: 0.5 + # Optional: minimum decimal percentage for tracked object's computed score to be considered a true positive (default: shown below) + threshold: 0.7 + # Optional: mask to prevent this object type from being detected in certain areas (default: no mask) + # Checks based on the bottom center of the bounding box of the object + mask: 0,0,1000,0,1000,200,0,200 + +# Optional: Motion configuration +# NOTE: Can be overridden at the camera level +motion: + # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below) + # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. + # The value should be between 1 and 255. + threshold: 25 + # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: ~0.17% of the motion frame area) + # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will make motion detection more sensitive to smaller + # moving objects. + contour_area: 100 + # Optional: Alpha value passed to cv2.accumulateWeighted when averaging the motion delta across multiple frames (default: shown below) + # Higher values mean the current frame impacts the delta a lot, and a single raindrop may register as motion. + # Too low and a fast moving person wont be detected as motion. 
+ delta_alpha: 0.2 + # Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below) + # Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster. + # Low values will cause things like moving shadows to be detected as motion for longer. + # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/ + frame_alpha: 0.2 + # Optional: Height of the resized motion frame (default: 1/6th of the original frame height, but no less than 180) + # This operates as an efficient blur alternative. Higher values will result in more granular motion detection at the expense of higher CPU usage. + # Lower values result in less CPU, but small changes may not register as motion. + frame_height: 180 + # Optional: motion mask + # NOTE: see docs for more detailed info on creating masks + mask: 0,900,1080,900,1080,1920,0,1920 + +# Optional: Record configuration +# NOTE: Can be overridden at the camera level +record: + # Optional: Enable recording (default: shown below) + enabled: False + # Optional: Number of days to retain recordings regardless of events (default: shown below) + # NOTE: This should be set to 0 and retention should be defined in events section below + # if you only want to retain recordings of events. + retain_days: 0 + # Optional: Event recording settings + events: + # Optional: Maximum length of time to retain video during long events. (default: shown below) + # NOTE: If an object is being tracked for longer than this amount of time, the retained recordings + # will be the last x seconds of the event unless retain_days under record is > 0. + max_seconds: 300 + # Optional: Number of seconds before the event to include (default: shown below) + pre_capture: 5 + # Optional: Number of seconds after the event to include (default: shown below) + post_capture: 5 + # Optional: Objects to save recordings for. 
(default: all tracked objects) + objects: + - person + # Optional: Restrict recordings to objects that entered any of the listed zones (default: no required zones) + required_zones: [] + # Optional: Retention settings for recordings of events + retain: + # Required: Default retention days (default: shown below) + default: 10 + # Optional: Per object retention days + objects: + person: 15 + +# Optional: Configuration for the jpg snapshots written to the clips directory for each event +# NOTE: Can be overridden at the camera level +snapshots: + # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below) + # This value can be set via MQTT and will be updated in startup based on retained value + enabled: False + # Optional: print a timestamp on the snapshots (default: shown below) + timestamp: False + # Optional: draw bounding box on the snapshots (default: shown below) + bounding_box: False + # Optional: crop the snapshot (default: shown below) + crop: False + # Optional: height to resize the snapshot to (default: original size) + height: 175 + # Optional: Restrict snapshots to objects that entered any of the listed zones (default: no required zones) + required_zones: [] + # Optional: Camera override for retention settings (default: global values) + retain: + # Required: Default retention days (default: shown below) + default: 10 + # Optional: Per object retention days + objects: + person: 15 + +# Optional: RTMP configuration +# NOTE: Can be overridden at the camera level rtmp: # Optional: Enable the RTMP stream (default: True) enabled: True -``` -## `timestamp_style` +# Optional: Live stream configuration for WebUI +# NOTE: Can be overridden at the camera level +live: + # Optional: Set the height of the live stream. (default: 720) + # This must be less than or equal to the height of the detect stream. Lower resolutions + # reduce bandwidth required for viewing the live stream. Width is computed to match known aspect ratio. 
+ height: 720 + # Optional: Set the encode quality of the live stream (default: shown below) + # 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources. + quality: 8 -Can be overridden at the camera level. See the [cameras configuration page](cameras.md) for more information about timestamp styling. - -```yaml # Optional: in-feed timestamp style configuration +# NOTE: Can be overridden at the camera level timestamp_style: # Optional: Position of the timestamp (default: shown below) # "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right) @@ -264,8 +302,6 @@ timestamp_style: red: 255 green: 255 blue: 255 - # Optional: Scale factor for font (default: shown below) - scale: 1.0 # Optional: Line thickness of font (default: shown below) thickness: 2 # Optional: Effect of lettering (default: shown below) @@ -273,4 +309,79 @@ timestamp_style: # "solid" (solid background in inverse color of font) # "shadow" (shadow for font) effect: None -``` \ No newline at end of file + +# Required +cameras: + # Required: name of the camera + back: + # Required: ffmpeg settings for the camera + ffmpeg: + # Required: A list of input streams for the camera. See documentation for more information. + inputs: + # Required: the path to the stream + # NOTE: Environment variables that begin with 'FRIGATE_' may be referenced in {} + - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 + # Required: list of roles for this stream. valid values are: detect,record,rtmp + # NOTICE: In addition to assigning the record, and rtmp roles, + # they must also be enabled in the camera config. 
+          roles:
+            - detect
+            - rtmp
+          # Optional: stream specific global args (default: inherit)
+          # global_args:
+          # Optional: stream specific hwaccel args (default: inherit)
+          # hwaccel_args:
+          # Optional: stream specific input args (default: inherit)
+          # input_args:
+      # Optional: camera specific global args (default: inherit)
+      # global_args:
+      # Optional: camera specific hwaccel args (default: inherit)
+      # hwaccel_args:
+      # Optional: camera specific input args (default: inherit)
+      # input_args:
+      # Optional: camera specific output args (default: inherit)
+      # output_args:
+
+    # Optional: timeout for highest scoring image before allowing it
+    # to be replaced by a newer image. (default: shown below)
+    best_image_timeout: 60
+
+    # Optional: zones for this camera
+    zones:
+      # Required: name of the zone
+      # NOTE: This must be different than any camera names, but can match with another zone on another
+      # camera.
+      front_steps:
+        # Required: List of x,y coordinates to define the polygon of the zone.
+        # NOTE: Coordinates can be generated at https://www.image-map.net/
+        coordinates: 545,1077,747,939,788,805
+        # Optional: List of objects that can trigger this zone (default: all tracked objects)
+        objects:
+          - person
+        # Optional: Zone level object filters.
+        # NOTE: The global and camera filters are applied upstream.
+        filters:
+          person:
+            min_area: 5000
+            max_area: 100000
+            threshold: 0.7
+
+    # Optional: Configuration for the jpg snapshots published via MQTT
+    mqtt:
+      # Optional: Enable publishing snapshot via mqtt for camera (default: shown below)
+      # NOTE: Only applies to publishing image data to MQTT via 'frigate/<camera_name>/<object_name>/snapshot'.
+      # All other messages will still be published.
+ enabled: True + # Optional: print a timestamp on the snapshots (default: shown below) + timestamp: True + # Optional: draw bounding box on the snapshots (default: shown below) + bounding_box: True + # Optional: crop the snapshot (default: shown below) + crop: True + # Optional: height to resize the snapshot to (default: shown below) + height: 270 + # Optional: jpeg encode quality (default: shown below) + quality: 70 + # Optional: Restrict mqtt messages to objects that entered any of the listed zones (default: no required zones) + required_zones: [] +``` diff --git a/docs/docs/configuration/masks.md b/docs/docs/configuration/masks.md new file mode 100644 index 000000000..0ddf02e50 --- /dev/null +++ b/docs/docs/configuration/masks.md @@ -0,0 +1,39 @@ +--- +id: masks +title: Masks +--- + +There are two types of masks available: + +**Motion masks**: Motion masks are used to prevent unwanted types of motion from triggering detection. Try watching the debug feed with `Motion Boxes` enabled to see what may be regularly detected as motion. For example, you want to mask out your timestamp, the sky, rooftops, etc. Keep in mind that this mask only prevents motion from being detected and does not prevent objects from being detected if object detection was started due to motion in unmasked areas. Motion is also used during object tracking to refine the object detection area in the next frame. Over masking will make it more difficult for objects to be tracked. To see this effect, create a mask, and then watch the video feed with `Motion Boxes` enabled again. + +**Object filter masks**: Object filter masks are used to filter out false positives for a given object type based on location. These should be used to filter any areas where it is not possible for an object of that type to be. The bottom center of the detected object's bounding box is evaluated against the mask. If it is in a masked area, it is assumed to be a false positive. 
For example, you may want to mask out rooftops, walls, the sky, treetops for people. For cars, masking locations other than the street or your driveway will tell frigate that anything in your yard is a false positive. + +To create a poly mask: + +1. Visit the Web UI +1. Click the camera you wish to create a mask for +1. Select "Debug" at the top +1. Expand the "Options" below the video feed +1. Click "Mask & Zone creator" +1. Click "Add" on the type of mask or zone you would like to create +1. Click on the camera's latest image to create a masked area. The yaml representation will be updated in real-time +1. When you've finished creating your mask, click "Copy" and paste the contents into your config file and restart Frigate + +Example of a finished row corresponding to the below example image: + +```yaml +motion: + mask: "0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432" +``` + +Multiple masks can be listed. + +```yaml +motion: + mask: + - 458,1346,336,973,317,869,375,866,432 + - 0,461,3,0,1919,0,1919,843,1699,492,1344 +``` + +![poly](/img/example-mask-poly.png) diff --git a/docs/docs/configuration/nvdec.md b/docs/docs/configuration/nvdec.md index b2ec01a47..66d48113b 100644 --- a/docs/docs/configuration/nvdec.md +++ b/docs/docs/configuration/nvdec.md @@ -1,6 +1,6 @@ --- id: nvdec -title: nVidia hardware decoder +title: NVIDIA hardware decoder --- Certain nvidia cards include a hardware decoder, which can greatly improve the @@ -23,7 +23,7 @@ In order to pass NVDEC, the docker engine must be set to `nvidia` and the enviro In a docker compose file, these lines need to be set: -``` +```yaml services: frigate: ... @@ -41,7 +41,7 @@ The decoder you choose will depend on the input video. A list of supported codecs (you can use `ffmpeg -decoders | grep cuvid` in the container to get a list) -``` +```shell V..... h263_cuvid Nvidia CUVID H263 decoder (codec h263) V..... h264_cuvid Nvidia CUVID H264 decoder (codec h264) V..... 
hevc_cuvid Nvidia CUVID HEVC decoder (codec hevc) @@ -57,10 +57,9 @@ A list of supported codecs (you can use `ffmpeg -decoders | grep cuvid` in the c For example, for H265 video (hevc), you'll select `hevc_cuvid`. Add `-c:v hevc_cuvid` to your ffmpeg input arguments: -``` +```yaml ffmpeg: - input_args: - ... + input_args: ... - -c:v - hevc_cuvid ``` @@ -100,10 +99,10 @@ processes: To further improve performance, you can set ffmpeg to skip frames in the output, using the fps filter: -``` - output_args: - - -filter:v - - fps=fps=5 +```yaml +output_args: + - -filter:v + - fps=fps=5 ``` This setting, for example, allows Frigate to consume my 10-15fps camera streams on diff --git a/docs/docs/configuration/objects.mdx b/docs/docs/configuration/objects.mdx index a8608c286..fd42a4399 100644 --- a/docs/docs/configuration/objects.mdx +++ b/docs/docs/configuration/objects.mdx @@ -1,12 +1,11 @@ --- id: objects -title: Default available objects -sidebar_label: Available objects +title: Objects --- import labels from "../../../labelmap.txt"; -By default, Frigate includes the following object models from the Google Coral test data. +By default, Frigate includes the following object models from the Google Coral test data. Note that `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused.