Merge pull request #2 from blakeblackshear/release-0.9.0

Release 0.9.0
commit 6d02150b00 by ElMoribond on 2021-06-20 17:30:27 +02:00, committed by GitHub
11 changed files with 153 additions and 34 deletions

View File

@@ -14,7 +14,8 @@ RUN groupadd --gid $USER_GID $USERNAME \
     && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \
     && chmod 0440 /etc/sudoers.d/$USERNAME
-RUN apt-get install -y git curl vim htop
+RUN apt-get update \
+    && apt-get install -y git curl vim htop
 RUN pip3 install pylint black

View File

@@ -170,6 +170,9 @@ snapshots:
   # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below)
   # This value can be set via MQTT and will be updated in startup based on retained value
   enabled: False
+  # Optional: Enable writing a clean copy png snapshot to /media/frigate/clips (default: shown below)
+  # Only works if snapshots are enabled. This image is intended to be used for training purposes.
+  clean_copy: True
   # Optional: print a timestamp on the snapshots (default: shown below)
   timestamp: False
   # Optional: draw bounding box on the snapshots (default: shown below)

View File

@@ -37,6 +37,7 @@ Message published for each changed event. The first message is published when th
     "id": "1607123955.475377-mxklsc",
     "camera": "front_door",
     "frame_time": 1607123961.837752,
+    "snapshot_time": 1607123961.837752,
     "label": "person",
     "top_score": 0.958984375,
     "false_positive": false,
@@ -54,6 +55,7 @@ Message published for each changed event. The first message is published when th
     "id": "1607123955.475377-mxklsc",
     "camera": "front_door",
     "frame_time": 1607123962.082975,
+    "snapshot_time": 1607123961.837752,
     "label": "person",
     "top_score": 0.958984375,
     "false_positive": false,

View File

@@ -180,14 +180,23 @@ class FrigateApp:
         model_shape = (self.config.model.height, self.config.model.width)
         for name in self.config.cameras.keys():
             self.detection_out_events[name] = mp.Event()
-            shm_in = mp.shared_memory.SharedMemory(
-                name=name,
-                create=True,
-                size=self.config.model.height*self.config.model.width * 3,
-            )
-            shm_out = mp.shared_memory.SharedMemory(
-                name=f"out-{name}", create=True, size=20 * 6 * 4
-            )
+            try:
+                shm_in = mp.shared_memory.SharedMemory(
+                    name=name,
+                    create=True,
+                    size=self.config.model.height*self.config.model.width * 3,
+                )
+            except FileExistsError:
+                shm_in = mp.shared_memory.SharedMemory(name=name)
+            try:
+                shm_out = mp.shared_memory.SharedMemory(
+                    name=f"out-{name}", create=True, size=20 * 6 * 4
+                )
+            except FileExistsError:
+                shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")
             self.detection_shms.append(shm_in)
             self.detection_shms.append(shm_out)
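
The shared-memory setup above now uses a create-or-attach pattern, so segments left behind by an unclean shutdown no longer crash startup with FileExistsError. A standalone sketch of the same pattern (segment name and size are illustrative; requires Python 3.8+):

from multiprocessing import shared_memory


def create_or_attach(name: str, size: int) -> shared_memory.SharedMemory:
    try:
        # First run: create a fresh segment of the requested size.
        return shared_memory.SharedMemory(name=name, create=True, size=size)
    except FileExistsError:
        # A segment with this name already exists (e.g. after an unclean
        # shutdown): attach to it instead. Its original size is kept.
        return shared_memory.SharedMemory(name=name)


shm = create_or_attach("front_door", 1920 * 1080 * 3)
shm.buf[0] = 255  # read/write through the shared buffer
shm.close()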

View File

@@ -634,6 +634,7 @@ CAMERAS_SCHEMA = vol.Schema(
         },
         vol.Optional("snapshots", default={}): {
             vol.Optional("enabled", default=False): bool,
+            vol.Optional("clean_copy", default=True): bool,
             vol.Optional("timestamp", default=False): bool,
             vol.Optional("bounding_box", default=False): bool,
             vol.Optional("crop", default=False): bool,
@@ -665,6 +666,7 @@ CAMERAS_SCHEMA = vol.Schema(
 @dataclasses.dataclass
 class CameraSnapshotsConfig:
     enabled: bool
+    clean_copy: bool
     timestamp: bool
     bounding_box: bool
     crop: bool
@@ -676,6 +678,7 @@ class CameraSnapshotsConfig:
     def build(cls, config, global_config) -> CameraSnapshotsConfig:
         return CameraSnapshotsConfig(
             enabled=config["enabled"],
+            clean_copy=config["clean_copy"],
             timestamp=config["timestamp"],
             bounding_box=config["bounding_box"],
             crop=config["crop"],
@@ -689,6 +692,7 @@ class CameraSnapshotsConfig:
     def to_dict(self) -> Dict[str, Any]:
         return {
             "enabled": self.enabled,
+            "clean_copy": self.clean_copy,
             "timestamp": self.timestamp,
             "bounding_box": self.bounding_box,
             "crop": self.crop,

View File

@@ -281,9 +281,9 @@ class EventCleanup(threading.Thread):
         self.stop_event = stop_event
         self.camera_keys = list(self.config.cameras.keys())

-    def expire(self, media):
+    def expire(self, media_type):
         ## Expire events from unlisted cameras based on the global config
-        if media == "clips":
+        if media_type == "clips":
             retain_config = self.config.clips.retain
             file_extension = "mp4"
             update_params = {"has_clip": False}
@@ -314,8 +314,16 @@ class EventCleanup(threading.Thread):
         # delete the media from disk
         for event in expired_events:
             media_name = f"{event.camera}-{event.id}"
-            media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
-            media.unlink(missing_ok=True)
+            media_path = Path(
+                f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
+            )
+            media_path.unlink(missing_ok=True)
+            if file_extension == "jpg":
+                media_path = Path(
+                    f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
+                )
+                media_path.unlink(missing_ok=True)
+
         # update the clips attribute for the db entry
         update_query = Event.update(update_params).where(
             Event.camera.not_in(self.camera_keys),
@@ -326,7 +334,7 @@ class EventCleanup(threading.Thread):
         ## Expire events from cameras based on the camera config
         for name, camera in self.config.cameras.items():
-            if media == "clips":
+            if media_type == "clips":
                 retain_config = camera.clips.retain
             else:
                 retain_config = camera.snapshots.retain
@@ -351,10 +359,15 @@ class EventCleanup(threading.Thread):
             # delete the grabbed clips from disk
             for event in expired_events:
                 media_name = f"{event.camera}-{event.id}"
-                media = Path(
+                media_path = Path(
                     f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
                 )
-                media.unlink(missing_ok=True)
+                media_path.unlink(missing_ok=True)
+                if file_extension == "jpg":
+                    media_path = Path(
+                        f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
+                    )
+                    media_path.unlink(missing_ok=True)
             # update the clips attribute for the db entry
             update_query = Event.update(update_params).where(
                 Event.camera == name,
@@ -385,11 +398,11 @@ class EventCleanup(threading.Thread):
             logger.debug(f"Removing duplicate: {event.id}")
             media_name = f"{event.camera}-{event.id}"
             if event.has_snapshot:
-                media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
-                media.unlink(missing_ok=True)
+                media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
+                media_path.unlink(missing_ok=True)
             if event.has_clip:
-                media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
-                media.unlink(missing_ok=True)
+                media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
+                media_path.unlink(missing_ok=True)

             (
                 Event.delete()
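
Beyond the media -> media_type rename, cleanup now also removes the *-clean.png companion whenever an expired jpg snapshot is deleted. A standalone sketch of that deletion step (CLIPS_DIR and the <camera>-<event id> naming follow the code above; Path.unlink(missing_ok=True) requires Python 3.8+):

import os
from pathlib import Path

CLIPS_DIR = "/media/frigate/clips"  # illustrative path


def delete_snapshot_files(camera: str, event_id: str) -> None:
    media_name = f"{camera}-{event_id}"
    # The annotated jpg snapshot written for the event.
    Path(os.path.join(CLIPS_DIR, f"{media_name}.jpg")).unlink(missing_ok=True)
    # The clean copy written alongside it when clean_copy is enabled.
    Path(os.path.join(CLIPS_DIR, f"{media_name}-clean.png")).unlink(missing_ok=True)


delete_snapshot_files("front_door", "1607123955.475377-mxklsc")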

View File

@@ -153,10 +153,16 @@ class TrackedObject:
         return significant_update

     def to_dict(self, include_thumbnail: bool = False):
+        snapshot_time = (
+            self.thumbnail_data["frame_time"]
+            if not self.thumbnail_data is None
+            else 0.0
+        )
         event = {
             "id": self.obj_data["id"],
             "camera": self.camera,
             "frame_time": self.obj_data["frame_time"],
+            "snapshot_time": snapshot_time,
             "label": self.obj_data["label"],
             "top_score": self.top_score,
             "false_positive": self.false_positive,
@@ -192,6 +198,27 @@ class TrackedObject:
             ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8))
             return jpg.tobytes()
+
+    def get_clean_png(self):
+        if self.thumbnail_data is None:
+            return None
+
+        try:
+            best_frame = cv2.cvtColor(
+                self.frame_cache[self.thumbnail_data["frame_time"]],
+                cv2.COLOR_YUV2BGR_I420,
+            )
+        except KeyError:
+            logger.warning(
+                f"Unable to create clean png because frame {self.thumbnail_data['frame_time']} is not in the cache"
+            )
+            return None
+
+        ret, png = cv2.imencode(".png", best_frame)
+        if ret:
+            return png.tobytes()
+        else:
+            return None

     def get_jpg_bytes(
         self, timestamp=False, bounding_box=False, crop=False, height=None
     ):
@@ -615,6 +642,23 @@ class TrackedObjectProcessor(threading.Thread):
                        ) as j:
                            j.write(jpg_bytes)
                        event_data["has_snapshot"] = True
+
+                    # write clean snapshot if enabled
+                    if snapshot_config.clean_copy:
+                        png_bytes = obj.get_clean_png()
+                        if png_bytes is None:
+                            logger.warning(
+                                f"Unable to save clean snapshot for {obj.obj_data['id']}."
+                            )
+                        else:
+                            with open(
+                                os.path.join(
+                                    CLIPS_DIR,
+                                    f"{camera}-{obj.obj_data['id']}-clean.png",
+                                ),
+                                "wb",
+                            ) as p:
+                                p.write(png_bytes)
             self.event_queue.put(("end", camera, event_data))

         def snapshot(camera, obj: TrackedObject, current_frame_time):
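
get_clean_png re-encodes the cached frame to PNG with no boxes or timestamp, and the processor writes it next to the jpg as <camera>-<event id>-clean.png. A standalone sketch of the encode-and-write step with OpenCV (the frame here is synthetic; Frigate first converts its cached I420 frame to BGR with cv2.cvtColor(..., cv2.COLOR_YUV2BGR_I420)):

import os

import cv2
import numpy as np

CLIPS_DIR = "/media/frigate/clips"  # illustrative path

# Stand-in for a decoded BGR frame from the frame cache.
frame_bgr = np.zeros((480, 640, 3), dtype=np.uint8)

ret, png = cv2.imencode(".png", frame_bgr)
if ret:
    out_path = os.path.join(CLIPS_DIR, "front_door-1607123955.475377-mxklsc-clean.png")
    with open(out_path, "wb") as f:
        f.write(png.tobytes())
else:
    print("PNG encoding failed")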

View File

@@ -5,14 +5,13 @@ import JSMpeg from '@cycjimmy/jsmpeg-player';
 export default function JSMpegPlayer({ camera }) {
   const playerRef = useRef();
-  const canvasRef = useRef();
   const url = `${baseUrl.replace(/^http/, 'ws')}/live/${camera}`

   useEffect(() => {
     const video = new JSMpeg.VideoElement(
       playerRef.current,
       url,
-      {canvas: canvasRef.current},
+      {},
       {protocols: [], audio: false}
     );
@@ -22,8 +21,6 @@ export default function JSMpegPlayer({ camera }) {
   }, [url]);

   return (
-    <div ref={playerRef} className="jsmpeg">
-      <canvas ref={canvasRef} className="relative w-full" />
-    </div>
+    <div ref={playerRef} class="jsmpeg" />
   );
 }

View File

@@ -25,3 +25,7 @@
     transform: rotate(360deg);
   }
 }
+
+.jsmpeg canvas {
+  position: static !important;
+}

View File

@@ -1,5 +1,6 @@
-import { h } from 'preact';
+import { h, Fragment } from 'preact';
 import AutoUpdatingCameraImage from '../components/AutoUpdatingCameraImage';
+import JSMpegPlayer from '../components/JSMpegPlayer';
 import Button from '../components/Button';
 import Card from '../components/Card';
 import Heading from '../components/Heading';
@@ -16,6 +17,7 @@ export default function Camera({ camera }) {
   const { data: config } = useConfig();
   const apiHost = useApiHost();
   const [showSettings, setShowSettings] = useState(false);
+  const [viewMode, setViewMode] = useState('live');

   const cameraConfig = config?.cameras[camera];
   const [options, setOptions] = usePersistence(`${camera}-feed`, emptyObject);
@@ -79,9 +81,19 @@ export default function Camera({ camera }) {
     </div>
   ) : null;

-  return (
-    <div className="space-y-4">
-      <Heading size="2xl">{camera}</Heading>
+  let player;
+  if (viewMode === 'live') {
+    player = (
+      <Fragment>
+        <div>
+          <JSMpegPlayer camera={camera} />
+        </div>
+      </Fragment>
+    );
+  }
+  else if (viewMode === 'debug') {
+    player = (
+      <Fragment>
       <div>
         <AutoUpdatingCameraImage camera={camera} searchParams={searchParams} />
       </div>
@@ -93,6 +105,25 @@ export default function Camera({ camera }) {
         <span>{showSettings ? 'Hide' : 'Show'} Options</span>
       </Button>
       {showSettings ? <Card header="Options" elevated={false} content={optionContent} /> : null}
+      </Fragment>
+    );
+  }
+
+  return (
+    <div className="space-y-4">
+      <Heading size="2xl">{camera}</Heading>
+      <div>
+        <nav className="flex justify-end">
+          <button onClick={() => setViewMode('live')} className={viewMode === 'live' ? 'text-gray-600 py-0 px-4 block hover:text-gray-500 focus:outline-none border-b-2 font-medium border-gray-500' : 'text-gray-600 py-0 px-4 block hover:text-gray-500'}>
+            Live
+          </button>
+          <button onClick={() => setViewMode('debug')} className={viewMode === 'debug' ? 'text-gray-600 py-0 px-4 block hover:text-gray-500 focus:outline-none border-b-2 font-medium border-gray-500' : 'text-gray-600 py-0 px-4 block hover:text-gray-500'}>
+            Debug
+          </button>
+        </nav>
+      </div>
+      {player}
       <div className="space-y-4">
         <Heading size="sm">Tracked objects</Heading>

View File

@@ -3,6 +3,7 @@ import * as AutoUpdatingCameraImage from '../../components/AutoUpdatingCameraIma
 import * as Api from '../../api';
 import * as Context from '../../context';
 import Camera from '../Camera';
+import * as JSMpegPlayer from '../../components/JSMpegPlayer';
 import { fireEvent, render, screen } from '@testing-library/preact';

 describe('Camera Route', () => {
@@ -18,6 +19,9 @@ describe('Camera Route', () => {
     jest.spyOn(AutoUpdatingCameraImage, 'default').mockImplementation(({ searchParams }) => {
       return <div data-testid="mock-image">{searchParams.toString()}</div>;
     });
+    jest.spyOn(JSMpegPlayer, 'default').mockImplementation(() => {
+      return <div data-testid="mock-jsmpeg" />;
+    });
   });

   test('reads camera feed options from persistence', async () => {
@@ -32,7 +36,10 @@
       },
       mockSetOptions,
     ]);
     render(<Camera camera="front" />);
+
+    fireEvent.click(screen.queryByText('Debug'));
+
     fireEvent.click(screen.queryByText('Show Options'));
     expect(screen.queryByTestId('mock-image')).toHaveTextContent(
       'bbox=1&timestamp=0&zones=1&mask=0&motion=1&regions=0'
@@ -41,17 +48,21 @@
   test('updates camera feed options to persistence', async () => {
     mockUsePersistence
+      .mockReturnValueOnce([{}, mockSetOptions])
       .mockReturnValueOnce([{}, mockSetOptions])
       .mockReturnValueOnce([{ bbox: true }, mockSetOptions])
       .mockReturnValueOnce([{ bbox: true, timestamp: true }, mockSetOptions]);

     render(<Camera camera="front" />);

+    fireEvent.click(screen.queryByText('Debug'));
+
     fireEvent.click(screen.queryByText('Show Options'));
     fireEvent.change(screen.queryByTestId('bbox-input'), { target: { checked: true } });
     fireEvent.change(screen.queryByTestId('timestamp-input'), { target: { checked: true } });
     fireEvent.click(screen.queryByText('Hide Options'));

+    expect(mockUsePersistence).toHaveBeenCalledTimes(4);
+    expect(mockSetOptions).toHaveBeenCalledTimes(2);
     expect(mockSetOptions).toHaveBeenCalledWith({ bbox: true, timestamp: true });
     expect(screen.queryByTestId('mock-image')).toHaveTextContent('bbox=1&timestamp=1');
   });