Refactor import statements in frigate/events.py and frigate/record.py to use the faster_fifo library's Queue instead of the multiprocessing Queue

This commit is contained in:
Sergey Krashevich 2023-07-02 03:43:23 +03:00
parent 6b0a68d0dd
commit 44a2453f12
No known key found for this signature in database
GPG Key ID: 625171324E7D3856
3 changed files with 6 additions and 5 deletions

View File

@ -13,7 +13,7 @@ from frigate.const import CLIPS_DIR
from frigate.models import Event
from frigate.types import CameraMetricsTypes
from multiprocessing.queues import Queue
from faster_fifo import Queue
from multiprocessing.synchronize import Event as MpEvent
from typing import Dict

View File

@ -10,6 +10,7 @@ import subprocess as sp
import threading
from collections import defaultdict
from pathlib import Path
import faster_fifo as ff
import psutil
from peewee import JOIN, DoesNotExist
@ -42,7 +43,7 @@ def remove_empty_directories(directory):
class RecordingMaintainer(threading.Thread):
def __init__(
self, config: FrigateConfig, recordings_info_queue: mp.Queue, stop_event
self, config: FrigateConfig, recordings_info_queue: ff.Queue, stop_event
):
threading.Thread.__init__(self)
self.name = "recording_maint"
@ -111,7 +112,6 @@ class RecordingMaintainer(threading.Thread):
grouped_recordings[camera] = grouped_recordings[camera][-keep_count:]
for camera, recordings in grouped_recordings.items():
# clear out all the recording info for old frames
while (
len(self.recordings_info[camera]) > 0

View File

@ -7,6 +7,7 @@ import subprocess as sp
import json
import re
import signal
import time
import traceback
import urllib.parse
import yaml
@ -14,7 +15,7 @@ import yaml
from abc import ABC, abstractmethod
from collections import Counter
from collections.abc import Mapping
from multiprocessing import shared_memory
from multiprocessing import shared_memory, RawValue
from queue import Empty, Full
from typing import Any, AnyStr, Optional, Tuple
@ -1081,7 +1082,7 @@ class LimitedQueue(FFQueue):
):
super().__init__(max_size_bytes=max_size_bytes, loads=loads, dumps=dumps)
self.maxsize = maxsize
self.size = multiprocessing.RawValue(
self.size = RawValue(
ctypes.c_int, 0
) # Add a counter for the number of items in the queue