homesec-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- homesec/__init__.py +20 -0
- homesec/app.py +393 -0
- homesec/cli.py +159 -0
- homesec/config/__init__.py +18 -0
- homesec/config/loader.py +109 -0
- homesec/config/validation.py +82 -0
- homesec/errors.py +71 -0
- homesec/health/__init__.py +5 -0
- homesec/health/server.py +226 -0
- homesec/interfaces.py +249 -0
- homesec/logging_setup.py +176 -0
- homesec/maintenance/__init__.py +1 -0
- homesec/maintenance/cleanup_clips.py +632 -0
- homesec/models/__init__.py +79 -0
- homesec/models/alert.py +32 -0
- homesec/models/clip.py +71 -0
- homesec/models/config.py +362 -0
- homesec/models/events.py +184 -0
- homesec/models/filter.py +62 -0
- homesec/models/source.py +77 -0
- homesec/models/storage.py +12 -0
- homesec/models/vlm.py +99 -0
- homesec/pipeline/__init__.py +6 -0
- homesec/pipeline/alert_policy.py +5 -0
- homesec/pipeline/core.py +639 -0
- homesec/plugins/__init__.py +62 -0
- homesec/plugins/alert_policies/__init__.py +80 -0
- homesec/plugins/alert_policies/default.py +111 -0
- homesec/plugins/alert_policies/noop.py +60 -0
- homesec/plugins/analyzers/__init__.py +126 -0
- homesec/plugins/analyzers/openai.py +446 -0
- homesec/plugins/filters/__init__.py +124 -0
- homesec/plugins/filters/yolo.py +317 -0
- homesec/plugins/notifiers/__init__.py +80 -0
- homesec/plugins/notifiers/mqtt.py +189 -0
- homesec/plugins/notifiers/multiplex.py +106 -0
- homesec/plugins/notifiers/sendgrid_email.py +228 -0
- homesec/plugins/storage/__init__.py +116 -0
- homesec/plugins/storage/dropbox.py +272 -0
- homesec/plugins/storage/local.py +108 -0
- homesec/plugins/utils.py +63 -0
- homesec/py.typed +0 -0
- homesec/repository/__init__.py +5 -0
- homesec/repository/clip_repository.py +552 -0
- homesec/sources/__init__.py +17 -0
- homesec/sources/base.py +224 -0
- homesec/sources/ftp.py +209 -0
- homesec/sources/local_folder.py +238 -0
- homesec/sources/rtsp.py +1251 -0
- homesec/state/__init__.py +10 -0
- homesec/state/postgres.py +501 -0
- homesec/storage_paths.py +46 -0
- homesec/telemetry/__init__.py +0 -0
- homesec/telemetry/db/__init__.py +1 -0
- homesec/telemetry/db/log_table.py +16 -0
- homesec/telemetry/db_log_handler.py +246 -0
- homesec/telemetry/postgres_settings.py +42 -0
- homesec-0.1.0.dist-info/METADATA +446 -0
- homesec-0.1.0.dist-info/RECORD +62 -0
- homesec-0.1.0.dist-info/WHEEL +4 -0
- homesec-0.1.0.dist-info/entry_points.txt +2 -0
- homesec-0.1.0.dist-info/licenses/LICENSE +201 -0
homesec/pipeline/core.py
ADDED
@@ -0,0 +1,639 @@
"""ClipPipeline orchestrator - core processing logic."""

from __future__ import annotations

import asyncio
import logging
import time
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from datetime import datetime
from typing import TYPE_CHECKING, TypeVar

from homesec.errors import FilterError, NotifyError, UploadError, VLMError
from homesec.models.alert import Alert, AlertDecision
from homesec.models.clip import Clip
from homesec.models.config import Config
from homesec.models.filter import FilterResult
from homesec.models.vlm import AnalysisResult
from homesec.repository import ClipRepository
from homesec.plugins.notifiers.multiplex import NotifierEntry
from homesec.storage_paths import build_clip_path

if TYPE_CHECKING:
    from homesec.interfaces import (
        AlertPolicy,
        Notifier,
        ObjectFilter,
        StorageBackend,
        VLMAnalyzer,
    )

logger = logging.getLogger(__name__)

TResult = TypeVar("TResult")


@dataclass(frozen=True)
class UploadOutcome:
    storage_uri: str
    view_url: str | None


class ClipPipeline:
    """Orchestrates clip processing through all pipeline stages.

    Implements error-as-value pattern: stage methods return Result | Error
    instead of raising. This enables partial failures (e.g., upload fails
    but filter+notify still run).
    """

    def __init__(
        self,
        config: Config,
        storage: StorageBackend,
        repository: ClipRepository,
        filter_plugin: ObjectFilter,
        vlm_plugin: VLMAnalyzer,
        notifier: Notifier,
        alert_policy: "AlertPolicy",
        notifier_entries: list[NotifierEntry] | None = None,
    ) -> None:
        """Initialize pipeline with all dependencies."""
        self._config = config
        self._storage = storage
        self._repository = repository
        self._filter = filter_plugin
        self._vlm = vlm_plugin
        self._notifier = notifier
        self._notifier_entries = self._resolve_notifier_entries(
            notifier, notifier_entries
        )
        self._alert_policy = alert_policy

        # Track in-flight processing
        self._tasks: set[asyncio.Task[None]] = set()

        # Concurrency limits
        self._sem_global = asyncio.Semaphore(config.concurrency.max_clips_in_flight)
        self._sem_upload = asyncio.Semaphore(config.concurrency.upload_workers)
        self._sem_filter = asyncio.Semaphore(config.concurrency.filter_workers)
        self._sem_vlm = asyncio.Semaphore(config.concurrency.vlm_workers)

        # Event loop for thread-safe callback handling
        self._loop: asyncio.AbstractEventLoop | None = None

    @staticmethod
    def _resolve_notifier_entries(
        notifier: Notifier,
        notifier_entries: list[NotifierEntry] | None,
    ) -> list[NotifierEntry]:
        if notifier_entries:
            return list(notifier_entries)
        name = getattr(notifier, "name", type(notifier).__name__)
        return [NotifierEntry(name=name, notifier=notifier)]

    def set_event_loop(self, loop: asyncio.AbstractEventLoop) -> None:
        """Set event loop for thread-safe callback handling.

        Must be called before registering with ClipSource if source
        runs in a different thread.
        """
        self._loop = loop

    def _create_task(self, loop: asyncio.AbstractEventLoop, clip: Clip) -> None:
        """Create and track a processing task in the given loop."""
        task = loop.create_task(self._process_clip(clip))
        self._tasks.add(task)
        task.add_done_callback(self._tasks.discard)
        task.add_done_callback(self._log_task_exception)

    def _log_task_exception(self, task: asyncio.Task[None]) -> None:
        """Log unexpected task exceptions."""
        try:
            exc = task.exception()
        except asyncio.CancelledError:
            return
        if exc is not None:
            logger.error("Clip processing failed: %s", exc, exc_info=exc)

    def on_new_clip(self, clip: Clip) -> None:
        """Callback for ClipSource when new clip is ready.

        Thread-safe: can be called from any thread. Uses stored event loop
        if available, otherwise tries to get current loop.
        """
        # Try to get current running loop (works if called from async context)
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            loop = None

        if loop is not None:
            self._create_task(loop, clip)
            return

        # Use stored loop for thread-safe scheduling
        if self._loop is not None:
            self._loop.call_soon_threadsafe(self._create_task, self._loop, clip)
            return

        logger.error(
            "Cannot process clip %s: no event loop available. "
            "Call set_event_loop() before registering with ClipSource.",
            clip.clip_id,
        )

    async def _process_clip(self, clip: Clip) -> None:
        """Process a single clip through all stages.

        Flow:
        1. Parallel: upload + filter
        2. Conditional: VLM (if filter detects trigger classes)
        3. Alert decision
        4. Conditional: Notify (if alert decision is True)
        """
        async with self._sem_global:
            logger.info("Processing clip: %s", clip.clip_id)

            # Initialize state + record clip arrival
            await self._repository.initialize_clip(clip)

            storage_uri: str | None = None
            view_url: str | None = None
            upload_failed = False

            # Stage 1 & 2: Upload and Filter in parallel
            upload_task = asyncio.create_task(self._upload_stage(clip))
            filter_task = asyncio.create_task(self._filter_stage(clip))

            filter_result = await filter_task

            # Handle filter result (critical - cannot proceed without it)
            match filter_result:
                case FilterError() as filter_err:
                    logger.error(
                        "Filter failed for %s: %s",
                        clip.clip_id,
                        filter_err.cause,
                        exc_info=filter_err.cause,
                    )
                    upload_result = await upload_task
                    await self._apply_upload_result(clip, upload_result)
                    return
                case FilterResult() as filter_res:
                    pass
                case _:
                    raise TypeError(
                        f"Unexpected filter result type: {type(filter_result).__name__}"
                    )
            logger.info(
                "Filter complete for %s: detected %s",
                clip.clip_id,
                filter_res.detected_classes,
            )

            # Stage 3: VLM (conditional)
            analysis_result: AnalysisResult | None = None
            vlm_failed = False
            if self._should_run_vlm(clip.camera_name, filter_res):
                vlm_result = await self._vlm_stage(clip, filter_res)
                match vlm_result:
                    case VLMError() as vlm_err:
                        logger.warning(
                            "VLM failed for %s (continuing): %s",
                            clip.clip_id,
                            vlm_err.cause,
                        )
                        vlm_failed = True
                    case AnalysisResult() as analysis_result:
                        logger.info(
                            "VLM complete for %s: risk=%s, activity=%s",
                            clip.clip_id,
                            analysis_result.risk_level,
                            analysis_result.activity_type,
                        )
                    case _:
                        raise TypeError(
                            f"Unexpected VLM result type: {type(vlm_result).__name__}"
                        )
            else:
                await self._repository.record_vlm_skipped(
                    clip.clip_id,
                    reason="no_trigger_classes",
                )
                logger.info("VLM skipped for %s: no trigger classes", clip.clip_id)

            # Await upload after filter/VLM to maximize overlap
            upload_result = await upload_task

            # Handle upload result (non-critical - can proceed without URL)
            storage_uri, view_url, upload_failed = await self._apply_upload_result(
                clip, upload_result
            )

            # Stage 4: Alert decision
            alert_decision = self._alert_policy.make_decision(
                clip.camera_name, filter_res, analysis_result
            )
            logger.info(
                "Alert decision for %s: notify=%s, reason=%s",
                clip.clip_id,
                alert_decision.notify,
                alert_decision.notify_reason,
            )
            await self._repository.record_alert_decision(
                clip.clip_id,
                alert_decision,
                detected_classes=filter_res.detected_classes,
                vlm_risk=analysis_result.risk_level if analysis_result else None,
            )

            # Stage 5: Notify (conditional)
            if alert_decision.notify:
                notify_result = await self._notify_stage(
                    clip,
                    alert_decision,
                    analysis_result,
                    storage_uri,
                    view_url,
                    upload_failed,
                    vlm_failed,
                )
                match notify_result:
                    case NotifyError() as notify_err:
                        logger.error(
                            "Notify failed for %s: %s",
                            clip.clip_id,
                            notify_err.cause,
                            exc_info=notify_err.cause,
                        )
                    case None:
                        logger.info("Notification sent for %s", clip.clip_id)
                    case _:
                        raise TypeError(
                            f"Unexpected notify result type: {type(notify_result).__name__}"
                        )

            await self._repository.mark_done(clip.clip_id)
            logger.info("Clip processing complete: %s", clip.clip_id)

    async def _run_stage_with_retries(
        self,
        *,
        stage: str,
        clip_id: str,
        op: Callable[[], Awaitable[TResult]],
        on_attempt_start: Callable[[int], Awaitable[None]] | None = None,
        on_attempt_success: Callable[[TResult, int, int], Awaitable[None]] | None = None,
        on_attempt_failure: Callable[[Exception, int, bool, int], Awaitable[None]]
        | None = None,
    ) -> TResult:
        """Run stage with retry logic and event emission."""
        max_attempts = max(1, int(self._config.retry.max_attempts))
        backoff_s = max(0.0, float(self._config.retry.backoff_s))
        attempts = 1

        while True:
            if on_attempt_start is not None:
                await on_attempt_start(attempts)
            started = time.monotonic()
            try:
                result = await op()
            except Exception as exc:
                duration_ms = int((time.monotonic() - started) * 1000)
                will_retry = attempts < max_attempts
                if on_attempt_failure is not None:
                    await on_attempt_failure(exc, attempts, will_retry, duration_ms)
                if attempts >= max_attempts:
                    raise
                logger.warning(
                    "Stage %s failed for %s (attempt %d/%d): %s",
                    stage,
                    clip_id,
                    attempts,
                    max_attempts,
                    exc,
                    exc_info=True,
                )
                delay = backoff_s * (2 ** (attempts - 1))
                if delay > 0:
                    await asyncio.sleep(delay)
                attempts += 1
            else:
                duration_ms = int((time.monotonic() - started) * 1000)
                if on_attempt_success is not None:
                    await on_attempt_success(result, attempts, duration_ms)
                return result

    async def _upload_stage(self, clip: Clip) -> UploadOutcome | UploadError:
        """Upload clip to storage. Returns UploadOutcome or UploadError."""
        dest_path = build_clip_path(clip, self._config.storage.paths)

        async def attempt() -> UploadOutcome:
            async with self._sem_upload:
                storage_result = await self._storage.put_file(
                    clip.local_path,
                    dest_path,
                )
                return UploadOutcome(
                    storage_uri=storage_result.storage_uri,
                    view_url=storage_result.view_url,
                )

        async def on_attempt_start(attempt_num: int) -> None:
            await self._repository.record_upload_started(
                clip.clip_id,
                dest_key=dest_path,
                attempt=attempt_num,
            )

        async def on_attempt_success(
            result: UploadOutcome, attempt_num: int, duration_ms: int
        ) -> None:
            await self._repository.record_upload_completed(
                clip.clip_id,
                result.storage_uri,
                result.view_url,
                duration_ms,
                attempt=attempt_num,
            )

        async def on_attempt_failure(
            exc: Exception, attempt_num: int, will_retry: bool, _duration_ms: int
        ) -> None:
            await self._repository.record_upload_failed(
                clip.clip_id,
                error_message=self._format_error_message(exc),
                error_type=self._format_error_type(exc),
                attempt=attempt_num,
                will_retry=will_retry,
            )

        try:
            return await self._run_stage_with_retries(
                stage="upload",
                clip_id=clip.clip_id,
                op=attempt,
                on_attempt_start=on_attempt_start,
                on_attempt_success=on_attempt_success,
                on_attempt_failure=on_attempt_failure,
            )
        except Exception as e:
            return UploadError(clip.clip_id, storage_uri=None, cause=e)

    async def _filter_stage(self, clip: Clip) -> FilterResult | FilterError:
        """Run object detection filter. Returns FilterResult or FilterError."""

        async def attempt() -> FilterResult:
            async with self._sem_filter:
                return await self._filter.detect(clip.local_path)

        async def on_attempt_start(attempt_num: int) -> None:
            await self._repository.record_filter_started(clip.clip_id, attempt=attempt_num)

        async def on_attempt_success(
            result: FilterResult, attempt_num: int, duration_ms: int
        ) -> None:
            await self._repository.record_filter_completed(
                clip.clip_id,
                result,
                duration_ms,
                attempt=attempt_num,
            )

        async def on_attempt_failure(
            exc: Exception, attempt_num: int, will_retry: bool, _duration_ms: int
        ) -> None:
            await self._repository.record_filter_failed(
                clip.clip_id,
                error_message=self._format_error_message(exc),
                error_type=self._format_error_type(exc),
                attempt=attempt_num,
                will_retry=will_retry,
            )

        try:
            return await self._run_stage_with_retries(
                stage="filter",
                clip_id=clip.clip_id,
                op=attempt,
                on_attempt_start=on_attempt_start,
                on_attempt_success=on_attempt_success,
                on_attempt_failure=on_attempt_failure,
            )
        except Exception as e:
            return FilterError(clip.clip_id, plugin_name=self._config.filter.plugin, cause=e)

    async def _vlm_stage(
        self, clip: Clip, filter_result: FilterResult
    ) -> AnalysisResult | VLMError:
        """Run VLM analysis. Returns AnalysisResult or VLMError."""

        async def attempt() -> AnalysisResult:
            async with self._sem_vlm:
                return await self._vlm.analyze(
                    clip.local_path, filter_result, self._config.vlm
                )

        async def on_attempt_start(attempt_num: int) -> None:
            await self._repository.record_vlm_started(clip.clip_id, attempt=attempt_num)

        async def on_attempt_success(
            result: AnalysisResult, attempt_num: int, duration_ms: int
        ) -> None:
            await self._repository.record_vlm_completed(
                clip.clip_id,
                result,
                prompt_tokens=result.prompt_tokens,
                completion_tokens=result.completion_tokens,
                duration_ms=duration_ms,
                attempt=attempt_num,
            )

        async def on_attempt_failure(
            exc: Exception, attempt_num: int, will_retry: bool, _duration_ms: int
        ) -> None:
            await self._repository.record_vlm_failed(
                clip.clip_id,
                error_message=self._format_error_message(exc),
                error_type=self._format_error_type(exc),
                attempt=attempt_num,
                will_retry=will_retry,
            )

        try:
            return await self._run_stage_with_retries(
                stage="vlm",
                clip_id=clip.clip_id,
                op=attempt,
                on_attempt_start=on_attempt_start,
                on_attempt_success=on_attempt_success,
                on_attempt_failure=on_attempt_failure,
            )
        except Exception as e:
            return VLMError(clip.clip_id, plugin_name=self._config.vlm.backend, cause=e)

    async def _notify_stage(
        self,
        clip: Clip,
        decision: AlertDecision,
        analysis_result: AnalysisResult | None,
        storage_uri: str | None,
        view_url: str | None,
        upload_failed: bool,
        vlm_failed: bool,
    ) -> None | NotifyError:
        """Send notification. Returns None on success or NotifyError."""
        alert = Alert(
            clip_id=clip.clip_id,
            camera_name=clip.camera_name,
            storage_uri=storage_uri,
            view_url=view_url,
            risk_level=analysis_result.risk_level if analysis_result else None,
            activity_type=analysis_result.activity_type if analysis_result else None,
            notify_reason=decision.notify_reason,
            summary=analysis_result.summary if analysis_result else None,
            analysis=analysis_result.analysis if analysis_result else None,
            ts=datetime.now(),
            dedupe_key=clip.clip_id,
            upload_failed=upload_failed,
            vlm_failed=vlm_failed,
        )

        tasks = [
            self._notify_with_entry(entry, alert)
            for entry in self._notifier_entries
        ]
        results = await asyncio.gather(*tasks)

        errors: list[NotifyError] = []
        for result in results:
            match result:
                case NotifyError() as err:
                    errors.append(err)
                case None:
                    continue
                case _:
                    raise TypeError(
                        f"Unexpected notify result type: {type(result).__name__}"
                    )

        if errors:
            return errors[0]
        return None

    async def _notify_with_entry(
        self,
        entry: NotifierEntry,
        alert: Alert,
    ) -> None | NotifyError:
        notifier_name = entry.name

        async def on_attempt_success(
            _result: object, attempt_num: int, _duration_ms: int
        ) -> None:
            await self._repository.record_notification_sent(
                alert.clip_id,
                notifier_name=notifier_name,
                dedupe_key=alert.dedupe_key,
                attempt=attempt_num,
            )

        async def on_attempt_failure(
            exc: Exception, attempt_num: int, will_retry: bool, _duration_ms: int
        ) -> None:
            await self._repository.record_notification_failed(
                alert.clip_id,
                notifier_name=notifier_name,
                error_message=self._format_error_message(exc),
                error_type=self._format_error_type(exc),
                attempt=attempt_num,
                will_retry=will_retry,
            )

        try:
            await self._run_stage_with_retries(
                stage=f"notify:{notifier_name}",
                clip_id=alert.clip_id,
                op=lambda: entry.notifier.send(alert),
                on_attempt_success=on_attempt_success,
                on_attempt_failure=on_attempt_failure,
            )
            return None
        except Exception as exc:
            return NotifyError(alert.clip_id, notifier_name=notifier_name, cause=exc)

    @staticmethod
    def _format_error_message(exc: Exception) -> str:
        if isinstance(exc, (UploadError, FilterError, VLMError, NotifyError)):
            if exc.cause is not None:
                return str(exc.cause)
        return str(exc)

    @staticmethod
    def _format_error_type(exc: Exception) -> str:
        if isinstance(exc, (UploadError, FilterError, VLMError, NotifyError)):
            if exc.cause is not None:
                return type(exc.cause).__name__
        return type(exc).__name__

    def _should_run_vlm(self, camera_name: str, filter_result: FilterResult) -> bool:
        """Check if VLM should run based on detected classes and config."""
        if self._config.alert_policy.backend == "default":
            alert_config = self._config.get_default_alert_policy(camera_name)
            # If notify_on_motion enabled, always run VLM for richer context
            if alert_config.notify_on_motion:
                return True

        # Otherwise check if detected classes intersect trigger classes
        detected = set(filter_result.detected_classes)
        trigger = set(self._config.vlm.trigger_classes)
        return bool(detected & trigger)

    async def _apply_upload_result(
        self,
        clip: Clip,
        upload_result: UploadOutcome | UploadError,
    ) -> tuple[str | None, str | None, bool]:
        """Return upload metadata for downstream stages."""
        match upload_result:
            case UploadError() as upload_err:
                logger.warning(
                    "Upload failed for %s (continuing): %s",
                    clip.clip_id,
                    upload_err.cause,
                )
                return None, None, True
            case UploadOutcome() as outcome:
                storage_uri = outcome.storage_uri
                view_url = outcome.view_url
            case _:
                raise TypeError(
                    f"Unexpected upload result type: {type(upload_result).__name__}"
                )
        logger.info("Upload complete for %s: %s", clip.clip_id, storage_uri)
        return storage_uri, view_url, False

    async def shutdown(self, timeout: float = 30.0) -> None:
        """Graceful shutdown of pipeline.

        Waits for in-flight tasks to complete. The app owns plugin shutdown.
        """
        logger.info("Shutting down pipeline...")

        # Wait for in-flight tasks
        if self._tasks:
            logger.info("Waiting for %d in-flight clips...", len(self._tasks))
            try:
                await asyncio.wait_for(
                    asyncio.gather(*self._tasks, return_exceptions=True),
                    timeout=timeout,
                )
            except asyncio.TimeoutError:
                logger.warning("Timeout waiting for tasks, cancelling...")
                for task in self._tasks:
                    task.cancel()

        logger.info("Pipeline shutdown complete")
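Reading note (illustrative commentary, not part of core.py): each stage returns either its result or a typed error value (UploadOutcome | UploadError, FilterResult | FilterError, and so on), and _process_clip pattern-matches on the value to decide whether to abort, degrade, or continue. Retries live in _run_stage_with_retries with exponential backoff, delay = backoff_s * 2 ** (attempt - 1); for example, backoff_s=2.0 with max_attempts=4 yields waits of 2 s, 4 s, and 8 s before the final exception propagates and the stage wrapper turns it into an error value. The sketch below shows the intended thread-safe hand-off from a source thread; it assumes an already-constructed pipeline and Clip objects (the real wiring lives in homesec/app.py, and the exact Clip constructor is not shown in this hunk).

import asyncio
import threading

from homesec.models.clip import Clip
from homesec.pipeline.core import ClipPipeline


async def run(pipeline: ClipPipeline, clips: list[Clip]) -> None:
    # Hand the running loop to the pipeline *before* any source thread starts
    # delivering clips; on_new_clip() needs it for call_soon_threadsafe().
    pipeline.set_event_loop(asyncio.get_running_loop())

    def source_thread() -> None:
        # Simulates a ClipSource running off the event loop; on_new_clip() is
        # documented as thread-safe and schedules _process_clip() on the loop.
        for clip in clips:
            pipeline.on_new_clip(clip)

    worker = threading.Thread(target=source_thread, daemon=True)
    worker.start()
    worker.join()

    # Yield once so the callbacks queued via call_soon_threadsafe() run and the
    # processing tasks exist before we wait on them.
    await asyncio.sleep(0)

    # Wait for in-flight clips, cancelling anything still running after 30 s.
    await pipeline.shutdown(timeout=30.0)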
homesec/plugins/__init__.py
ADDED
@@ -0,0 +1,62 @@
"""Unified plugin discovery for all plugin types."""

import importlib
import logging
import pkgutil

from homesec.plugins.utils import iter_entry_points

logger = logging.getLogger(__name__)


def discover_all_plugins() -> None:
    """Discover and register all plugins (built-in and external).

    Built-in plugins are discovered by importing all modules in plugin
    type packages. External plugins are discovered via entry points.

    All plugins use decorators for registration, so importing modules
    triggers registration automatically.
    """
    # 1. Discover built-in plugins by importing all modules
    plugin_types = ["filters", "analyzers", "storage", "notifiers", "alert_policies"]

    for plugin_type in plugin_types:
        try:
            package = importlib.import_module(f"homesec.plugins.{plugin_type}")
            for _, module_name, _ in pkgutil.iter_modules(package.__path__):
                if module_name.startswith("_"):
                    continue  # Skip private modules
                try:
                    importlib.import_module(f"homesec.plugins.{plugin_type}.{module_name}")
                except Exception as exc:
                    logger.error(
                        "Failed to import built-in plugin module %s.%s: %s",
                        plugin_type,
                        module_name,
                        exc,
                        exc_info=True,
                    )
        except Exception as exc:
            logger.error(
                "Failed to discover built-in plugins for %s: %s",
                plugin_type,
                exc,
                exc_info=True,
            )

    # 2. Discover external plugins via entry points
    for point in iter_entry_points("homesec.plugins"):
        try:
            importlib.import_module(point.module)
        except Exception as exc:
            logger.error(
                "Failed to load external plugin %s from %s: %s",
                point.name,
                point.module,
                exc,
                exc_info=True,
            )


__all__ = ["discover_all_plugins"]
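Reading note (illustrative commentary, not part of the package): discover_all_plugins() is purely import-driven. Built-in plugins are found by walking each homesec.plugins.<type> package with pkgutil and importing every public module; external plugins are found by importing whatever modules are advertised under the "homesec.plugins" entry-point group. Registration happens as a side effect of those imports. Below is a sketch of an external notifier module such an entry point could expose; the notifier shape is inferred from how core.py uses it (a name attribute and an async send(alert) method), and the registration step is left as a comment because the per-type registration decorators are not shown in this diff.

# my_homesec_plugin/notifier.py (hypothetical external plugin module)
import logging

logger = logging.getLogger(__name__)


class LogOnlyNotifier:
    """Toy notifier that only logs alerts."""

    # ClipPipeline reads this via getattr(notifier, "name", ...) when it builds
    # its NotifierEntry list.
    name = "log_only"

    async def send(self, alert) -> None:
        # Matches the entry.notifier.send(alert) call in ClipPipeline._notify_with_entry;
        # clip_id, camera_name, and summary are fields set on Alert in core.py.
        logger.info(
            "ALERT %s from %s: %s",
            alert.clip_id,
            alert.camera_name,
            alert.summary,
        )


# Hypothetical: in a real plugin this class would be registered at import time
# using whatever decorator homesec.plugins.notifiers exposes, and the module
# would be listed under the "homesec.plugins" entry-point group in the plugin
# package's metadata so the loop above imports it.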