Merge pull request #2 from blakeblackshear/release-0.9.0

Release 0.9.0
ElMoribond 2021-06-20 17:30:27 +02:00 committed by GitHub
commit 6d02150b00
11 changed files with 153 additions and 34 deletions

View File

@@ -14,7 +14,8 @@ RUN groupadd --gid $USER_GID $USERNAME \
    && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \
    && chmod 0440 /etc/sudoers.d/$USERNAME
RUN apt-get install -y git curl vim htop
RUN apt-get update \
    && apt-get install -y git curl vim htop
RUN pip3 install pylint black

View File

@@ -170,6 +170,9 @@ snapshots:
  # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below)
  # This value can be set via MQTT and will be updated at startup based on the retained value
  enabled: False
  # Optional: Enable writing a clean copy png snapshot to /media/frigate/clips (default: shown below)
  # Only works if snapshots are enabled. This image is intended to be used for training purposes.
  clean_copy: True
  # Optional: print a timestamp on the snapshots (default: shown below)
  timestamp: False
  # Optional: draw bounding box on the snapshots (default: shown below)

View File

@@ -37,6 +37,7 @@ Message published for each changed event. The first message is published when th
    "id": "1607123955.475377-mxklsc",
    "camera": "front_door",
    "frame_time": 1607123961.837752,
    "snapshot_time": 1607123961.837752,
    "label": "person",
    "top_score": 0.958984375,
    "false_positive": false,
@@ -54,6 +55,7 @@ Message published for each changed event. The first message is published when th
    "id": "1607123955.475377-mxklsc",
    "camera": "front_door",
    "frame_time": 1607123962.082975,
    "snapshot_time": 1607123961.837752,
    "label": "person",
    "top_score": 0.958984375,
    "false_positive": false,

View File

@@ -180,14 +180,23 @@ class FrigateApp:
        model_shape = (self.config.model.height, self.config.model.width)
        for name in self.config.cameras.keys():
            self.detection_out_events[name] = mp.Event()
            try:
                shm_in = mp.shared_memory.SharedMemory(
                    name=name,
                    create=True,
                    size=self.config.model.height * self.config.model.width * 3,
                )
            except FileExistsError:
                shm_in = mp.shared_memory.SharedMemory(name=name)
            try:
                shm_out = mp.shared_memory.SharedMemory(
                    name=f"out-{name}", create=True, size=20 * 6 * 4
                )
            except FileExistsError:
                shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")
            self.detection_shms.append(shm_in)
            self.detection_shms.append(shm_out)
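
The hunk above makes shared-memory setup tolerant of leftover segments: creation is attempted first, and an existing segment with the same name is re-attached instead of raising FileExistsError. A standalone sketch of that create-or-attach pattern, where the segment name and size are illustrative rather than Frigate's values:

# Illustrative sketch of the create-or-attach pattern used above; the name
# and size are examples, not values taken from Frigate.
from multiprocessing import shared_memory

def create_or_attach(name: str, size: int) -> shared_memory.SharedMemory:
    try:
        # Normal path: allocate a fresh segment.
        return shared_memory.SharedMemory(name=name, create=True, size=size)
    except FileExistsError:
        # A segment with this name survived a previous run: attach to it.
        return shared_memory.SharedMemory(name=name)

if __name__ == "__main__":
    shm = create_or_attach("example-frame", 640 * 480 * 3)
    print(shm.name, shm.size)
    shm.close()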

View File

@@ -634,6 +634,7 @@ CAMERAS_SCHEMA = vol.Schema(
            },
            vol.Optional("snapshots", default={}): {
                vol.Optional("enabled", default=False): bool,
                vol.Optional("clean_copy", default=True): bool,
                vol.Optional("timestamp", default=False): bool,
                vol.Optional("bounding_box", default=False): bool,
                vol.Optional("crop", default=False): bool,
@@ -665,6 +666,7 @@ CAMERAS_SCHEMA = vol.Schema(
@dataclasses.dataclass
class CameraSnapshotsConfig:
    enabled: bool
    clean_copy: bool
    timestamp: bool
    bounding_box: bool
    crop: bool
@@ -676,6 +678,7 @@ class CameraSnapshotsConfig:
    def build(cls, config, global_config) -> CameraSnapshotsConfig:
        return CameraSnapshotsConfig(
            enabled=config["enabled"],
            clean_copy=config["clean_copy"],
            timestamp=config["timestamp"],
            bounding_box=config["bounding_box"],
            crop=config["crop"],
@@ -689,6 +692,7 @@ class CameraSnapshotsConfig:
    def to_dict(self) -> Dict[str, Any]:
        return {
            "enabled": self.enabled,
            "clean_copy": self.clean_copy,
            "timestamp": self.timestamp,
            "bounding_box": self.bounding_box,
            "crop": self.crop,

View File

@@ -281,9 +281,9 @@ class EventCleanup(threading.Thread):
        self.stop_event = stop_event
        self.camera_keys = list(self.config.cameras.keys())
    def expire(self, media):
    def expire(self, media_type):
        ## Expire events from unlisted cameras based on the global config
        if media == "clips":
        if media_type == "clips":
            retain_config = self.config.clips.retain
            file_extension = "mp4"
            update_params = {"has_clip": False}
@@ -314,8 +314,16 @@ class EventCleanup(threading.Thread):
            # delete the media from disk
            for event in expired_events:
                media_name = f"{event.camera}-{event.id}"
                media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
                media.unlink(missing_ok=True)
                media_path = Path(
                    f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
                )
                media_path.unlink(missing_ok=True)
                if file_extension == "jpg":
                    media_path = Path(
                        f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
                    )
                    media_path.unlink(missing_ok=True)
            # update the clips attribute for the db entry
            update_query = Event.update(update_params).where(
                Event.camera.not_in(self.camera_keys),
@@ -326,7 +334,7 @@ class EventCleanup(threading.Thread):
        ## Expire events from cameras based on the camera config
        for name, camera in self.config.cameras.items():
            if media == "clips":
            if media_type == "clips":
                retain_config = camera.clips.retain
            else:
                retain_config = camera.snapshots.retain
@@ -351,10 +359,15 @@ class EventCleanup(threading.Thread):
                # delete the grabbed clips from disk
                for event in expired_events:
                    media_name = f"{event.camera}-{event.id}"
                    media = Path(
                    media_path = Path(
                        f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
                    )
                    media.unlink(missing_ok=True)
                    media_path.unlink(missing_ok=True)
                    if file_extension == "jpg":
                        media_path = Path(
                            f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
                        )
                        media_path.unlink(missing_ok=True)
                # update the clips attribute for the db entry
                update_query = Event.update(update_params).where(
                    Event.camera == name,
@@ -385,11 +398,11 @@ class EventCleanup(threading.Thread):
            logger.debug(f"Removing duplicate: {event.id}")
            media_name = f"{event.camera}-{event.id}"
            if event.has_snapshot:
                media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
                media.unlink(missing_ok=True)
                media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
                media_path.unlink(missing_ok=True)
            if event.has_clip:
                media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
                media.unlink(missing_ok=True)
                media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
                media_path.unlink(missing_ok=True)
        (
            Event.delete()

View File

@@ -153,10 +153,16 @@ class TrackedObject:
        return significant_update
    def to_dict(self, include_thumbnail: bool = False):
        snapshot_time = (
            self.thumbnail_data["frame_time"]
            if not self.thumbnail_data is None
            else 0.0
        )
        event = {
            "id": self.obj_data["id"],
            "camera": self.camera,
            "frame_time": self.obj_data["frame_time"],
            "snapshot_time": snapshot_time,
            "label": self.obj_data["label"],
            "top_score": self.top_score,
            "false_positive": self.false_positive,
@@ -192,6 +198,27 @@ class TrackedObject:
            ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8))
            return jpg.tobytes()
    def get_clean_png(self):
        if self.thumbnail_data is None:
            return None
        try:
            best_frame = cv2.cvtColor(
                self.frame_cache[self.thumbnail_data["frame_time"]],
                cv2.COLOR_YUV2BGR_I420,
            )
        except KeyError:
            logger.warning(
                f"Unable to create clean png because frame {self.thumbnail_data['frame_time']} is not in the cache"
            )
            return None
        ret, png = cv2.imencode(".png", best_frame)
        if ret:
            return png.tobytes()
        else:
            return None
    def get_jpg_bytes(
        self, timestamp=False, bounding_box=False, crop=False, height=None
    ):
@@ -615,6 +642,23 @@ class TrackedObjectProcessor(threading.Thread):
                        ) as j:
                            j.write(jpg_bytes)
                        event_data["has_snapshot"] = True
                    # write clean snapshot if enabled
                    if snapshot_config.clean_copy:
                        png_bytes = obj.get_clean_png()
                        if png_bytes is None:
                            logger.warning(
                                f"Unable to save clean snapshot for {obj.obj_data['id']}."
                            )
                        else:
                            with open(
                                os.path.join(
                                    CLIPS_DIR,
                                    f"{camera}-{obj.obj_data['id']}-clean.png",
                                ),
                                "wb",
                            ) as p:
                                p.write(png_bytes)
            self.event_queue.put(("end", camera, event_data))
        def snapshot(camera, obj: TrackedObject, current_frame_time):

View File

@@ -5,14 +5,13 @@ import JSMpeg from '@cycjimmy/jsmpeg-player';
export default function JSMpegPlayer({ camera }) {
  const playerRef = useRef();
  const canvasRef = useRef();
  const url = `${baseUrl.replace(/^http/, 'ws')}/live/${camera}`
  useEffect(() => {
    const video = new JSMpeg.VideoElement(
      playerRef.current,
      url,
      {canvas: canvasRef.current},
      {},
      {protocols: [], audio: false}
    );
@@ -22,8 +21,6 @@ export default function JSMpegPlayer({ camera }) {
  }, [url]);
  return (
    <div ref={playerRef} className="jsmpeg">
      <canvas ref={canvasRef} className="relative w-full" />
    </div>
    <div ref={playerRef} class="jsmpeg" />
  );
}

View File

@@ -25,3 +25,7 @@
    transform: rotate(360deg);
  }
}

.jsmpeg canvas {
  position: static !important;
}

View File

@@ -1,5 +1,6 @@
import { h } from 'preact';
import { h, Fragment } from 'preact';
import AutoUpdatingCameraImage from '../components/AutoUpdatingCameraImage';
import JSMpegPlayer from '../components/JSMpegPlayer';
import Button from '../components/Button';
import Card from '../components/Card';
import Heading from '../components/Heading';
@@ -16,6 +17,7 @@ export default function Camera({ camera }) {
  const { data: config } = useConfig();
  const apiHost = useApiHost();
  const [showSettings, setShowSettings] = useState(false);
  const [viewMode, setViewMode] = useState('live');
  const cameraConfig = config?.cameras[camera];
  const [options, setOptions] = usePersistence(`${camera}-feed`, emptyObject);
@@ -79,9 +81,19 @@ export default function Camera({ camera }) {
    </div>
  ) : null;
  return (
    <div className="space-y-4">
      <Heading size="2xl">{camera}</Heading>
  let player;
  if (viewMode === 'live') {
    player = (
      <Fragment>
        <div>
          <JSMpegPlayer camera={camera} />
        </div>
      </Fragment>
    );
  }
  else if (viewMode === 'debug') {
    player = (
      <Fragment>
        <div>
          <AutoUpdatingCameraImage camera={camera} searchParams={searchParams} />
        </div>
@@ -93,6 +105,25 @@
          <span>{showSettings ? 'Hide' : 'Show'} Options</span>
        </Button>
        {showSettings ? <Card header="Options" elevated={false} content={optionContent} /> : null}
      </Fragment>
    );
  }
  return (
    <div className="space-y-4">
      <Heading size="2xl">{camera}</Heading>
      <div>
        <nav className="flex justify-end">
          <button onClick={() => setViewMode('live')} className={viewMode === 'live' ? 'text-gray-600 py-0 px-4 block hover:text-gray-500 focus:outline-none border-b-2 font-medium border-gray-500' : 'text-gray-600 py-0 px-4 block hover:text-gray-500'}>
            Live
          </button>
          <button onClick={() => setViewMode('debug')} className={viewMode === 'debug' ? 'text-gray-600 py-0 px-4 block hover:text-gray-500 focus:outline-none border-b-2 font-medium border-gray-500' : 'text-gray-600 py-0 px-4 block hover:text-gray-500'}>
            Debug
          </button>
        </nav>
      </div>
      {player}
      <div className="space-y-4">
        <Heading size="sm">Tracked objects</Heading>

View File

@@ -3,6 +3,7 @@ import * as AutoUpdatingCameraImage from '../../components/AutoUpdatingCameraIma
import * as Api from '../../api';
import * as Context from '../../context';
import Camera from '../Camera';
import * as JSMpegPlayer from '../../components/JSMpegPlayer';
import { fireEvent, render, screen } from '@testing-library/preact';
describe('Camera Route', () => {
@@ -18,6 +19,9 @@ describe('Camera Route', () => {
    jest.spyOn(AutoUpdatingCameraImage, 'default').mockImplementation(({ searchParams }) => {
      return <div data-testid="mock-image">{searchParams.toString()}</div>;
    });
    jest.spyOn(JSMpegPlayer, 'default').mockImplementation(() => {
      return <div data-testid="mock-jsmpeg" />;
    });
  });
  test('reads camera feed options from persistence', async () => {
@@ -32,7 +36,10 @@
      },
      mockSetOptions,
    ]);
    render(<Camera camera="front" />);
    fireEvent.click(screen.queryByText('Debug'));
    fireEvent.click(screen.queryByText('Show Options'));
    expect(screen.queryByTestId('mock-image')).toHaveTextContent(
      'bbox=1&timestamp=0&zones=1&mask=0&motion=1&regions=0'
@@ -41,17 +48,21 @@
  test('updates camera feed options to persistence', async () => {
    mockUsePersistence
      .mockReturnValueOnce([{}, mockSetOptions])
      .mockReturnValueOnce([{}, mockSetOptions])
      .mockReturnValueOnce([{ bbox: true }, mockSetOptions])
      .mockReturnValueOnce([{ bbox: true, timestamp: true }, mockSetOptions]);
    render(<Camera camera="front" />);
    fireEvent.click(screen.queryByText('Debug'));
    fireEvent.click(screen.queryByText('Show Options'));
    fireEvent.change(screen.queryByTestId('bbox-input'), { target: { checked: true } });
    fireEvent.change(screen.queryByTestId('timestamp-input'), { target: { checked: true } });
    fireEvent.click(screen.queryByText('Hide Options'));
    expect(mockUsePersistence).toHaveBeenCalledTimes(4);
    expect(mockSetOptions).toHaveBeenCalledTimes(2);
    expect(mockSetOptions).toHaveBeenCalledWith({ bbox: true, timestamp: true });
    expect(screen.queryByTestId('mock-image')).toHaveTextContent('bbox=1&timestamp=1');
  });