Adjust docs

parent 96a4707164
commit 223023ed42

@@ -15,13 +15,13 @@ To use Generative AI, you must define a single provider at the global level of y

```yaml
genai:
  enabled: True
  provider: gemini
  api_key: "{FRIGATE_GEMINI_API_KEY}"
  model: gemini-1.5-flash

cameras:
  front_camera:
    objects:
      genai:
        enabled: True # <- enable GenAI for your front camera
        use_snapshot: True
```

@@ -30,6 +30,7 @@ cameras:

```yaml
        required_zones:
          - steps
  indoor_camera:
    objects:
      genai:
        enabled: False # <- disable GenAI for your indoor camera
```
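
The `api_key` above relies on Frigate's environment variable substitution: any variable whose name starts with `FRIGATE_` can be referenced in the config as `{FRIGATE_...}`. A minimal Docker Compose sketch of supplying it (the image tag and key value are placeholders, not part of the original doc):

```yaml
# docker-compose.yml (sketch): the FRIGATE_-prefixed variable is substituted
# into the config wherever {FRIGATE_GEMINI_API_KEY} appears.
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable
    environment:
      - FRIGATE_GEMINI_API_KEY=your-gemini-api-key
```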

@@ -68,7 +69,6 @@ You should have at least 8 GB of RAM available (or VRAM if running on GPU) to ru

```yaml
genai:
  enabled: True
  provider: ollama
  base_url: http://localhost:11434
  model: llava:7b
```
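
If Ollama is not already running somewhere reachable, one way to provide the `base_url` above is the official `ollama/ollama` image. A minimal sketch (container name, tag, and volume path are assumptions, not from the doc); note that a containerized Frigate may need the Docker host's address rather than `localhost`:

```yaml
# Sketch: Ollama serving on port 11434 with persisted model storage
services:
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    ports:
      - "11434:11434"
    volumes:
      - ./ollama:/root/.ollama # keep downloaded models across restarts
```

The model referenced in the config still has to be pulled once, e.g. `docker exec -it ollama ollama pull llava:7b`.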

@@ -95,7 +95,6 @@ To start using Gemini, you must first get an API key from [Google AI Studio](htt

```yaml
genai:
  enabled: True
  provider: gemini
  api_key: "{FRIGATE_GEMINI_API_KEY}"
  model: gemini-1.5-flash
```

@@ -117,7 +116,6 @@ To start using OpenAI, you must first [create an API key](https://platform.opena

```yaml
genai:
  enabled: True
  provider: openai
  api_key: "{FRIGATE_OPENAI_API_KEY}"
  model: gpt-4o
```

@@ -145,7 +143,6 @@ To start using Azure OpenAI, you must first [create a resource](https://learn.mi

```yaml
genai:
  enabled: True
  provider: azure_openai
  base_url: https://example-endpoint.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview
  api_key: "{FRIGATE_OPENAI_API_KEY}"
```
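
The Azure `base_url` packs the resource endpoint, deployment, operation, and API version into a single URL. As an annotated breakdown of the example above (angle-bracket names are placeholders):

```yaml
# Anatomy of the azure_openai base_url shown above:
#   https://<resource-name>.openai.azure.com    -> Azure OpenAI resource endpoint
#   /openai/deployments/<deployment-name>       -> deployment to call (gpt-4o above)
#   /chat/completions                           -> chat completions operation
#   ?api-version=<version>                      -> REST API version (2023-03-15-preview above)
```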

@@ -188,22 +185,25 @@ You are also able to define custom prompts in your configuration.

```yaml
genai:
  enabled: True
  provider: ollama
  base_url: http://localhost:11434
  model: llava

objects:
  prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
  object_prompts:
    person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
    car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
```
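
To make the substitution concrete: `{label}` and `{camera}` are filled in with the tracked object's label and the camera name, and a label with its own `object_prompts` entry uses that entry instead of the generic `prompt`. A hypothetical example (the "dog" label and camera name are illustrative, not from the doc):

```yaml
# Illustrative only: a tracked "dog" on a camera named front_camera has no
# object_prompts entry above, so the generic prompt is used with the
# placeholders substituted:
#   "Analyze the dog in these images from the front_camera security camera.
#    Focus on the actions, behavior, and potential intent of the dog, rather
#    than just describing its appearance."
```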

Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.

```yaml
cameras:
  front_door:
    objects:
      genai:
        enabled: True
        use_snapshot: True
        prompt: "Analyze the {label} in these images from the {camera} security camera at the front door. Focus on the actions and potential intent of the {label}."
        object_prompts:
```