aiohomematic 2026.1.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiohomematic/__init__.py +110 -0
- aiohomematic/_log_context_protocol.py +29 -0
- aiohomematic/api.py +410 -0
- aiohomematic/async_support.py +250 -0
- aiohomematic/backend_detection.py +462 -0
- aiohomematic/central/__init__.py +103 -0
- aiohomematic/central/async_rpc_server.py +760 -0
- aiohomematic/central/central_unit.py +1152 -0
- aiohomematic/central/config.py +463 -0
- aiohomematic/central/config_builder.py +772 -0
- aiohomematic/central/connection_state.py +160 -0
- aiohomematic/central/coordinators/__init__.py +38 -0
- aiohomematic/central/coordinators/cache.py +414 -0
- aiohomematic/central/coordinators/client.py +480 -0
- aiohomematic/central/coordinators/connection_recovery.py +1141 -0
- aiohomematic/central/coordinators/device.py +1166 -0
- aiohomematic/central/coordinators/event.py +514 -0
- aiohomematic/central/coordinators/hub.py +532 -0
- aiohomematic/central/decorators.py +184 -0
- aiohomematic/central/device_registry.py +229 -0
- aiohomematic/central/events/__init__.py +104 -0
- aiohomematic/central/events/bus.py +1392 -0
- aiohomematic/central/events/integration.py +424 -0
- aiohomematic/central/events/types.py +194 -0
- aiohomematic/central/health.py +762 -0
- aiohomematic/central/rpc_server.py +353 -0
- aiohomematic/central/scheduler.py +794 -0
- aiohomematic/central/state_machine.py +391 -0
- aiohomematic/client/__init__.py +203 -0
- aiohomematic/client/_rpc_errors.py +187 -0
- aiohomematic/client/backends/__init__.py +48 -0
- aiohomematic/client/backends/base.py +335 -0
- aiohomematic/client/backends/capabilities.py +138 -0
- aiohomematic/client/backends/ccu.py +487 -0
- aiohomematic/client/backends/factory.py +116 -0
- aiohomematic/client/backends/homegear.py +294 -0
- aiohomematic/client/backends/json_ccu.py +252 -0
- aiohomematic/client/backends/protocol.py +316 -0
- aiohomematic/client/ccu.py +1857 -0
- aiohomematic/client/circuit_breaker.py +459 -0
- aiohomematic/client/config.py +64 -0
- aiohomematic/client/handlers/__init__.py +40 -0
- aiohomematic/client/handlers/backup.py +157 -0
- aiohomematic/client/handlers/base.py +79 -0
- aiohomematic/client/handlers/device_ops.py +1085 -0
- aiohomematic/client/handlers/firmware.py +144 -0
- aiohomematic/client/handlers/link_mgmt.py +199 -0
- aiohomematic/client/handlers/metadata.py +436 -0
- aiohomematic/client/handlers/programs.py +144 -0
- aiohomematic/client/handlers/sysvars.py +100 -0
- aiohomematic/client/interface_client.py +1304 -0
- aiohomematic/client/json_rpc.py +2068 -0
- aiohomematic/client/request_coalescer.py +282 -0
- aiohomematic/client/rpc_proxy.py +629 -0
- aiohomematic/client/state_machine.py +324 -0
- aiohomematic/const.py +2207 -0
- aiohomematic/context.py +275 -0
- aiohomematic/converter.py +270 -0
- aiohomematic/decorators.py +390 -0
- aiohomematic/exceptions.py +185 -0
- aiohomematic/hmcli.py +997 -0
- aiohomematic/i18n.py +193 -0
- aiohomematic/interfaces/__init__.py +407 -0
- aiohomematic/interfaces/central.py +1067 -0
- aiohomematic/interfaces/client.py +1096 -0
- aiohomematic/interfaces/coordinators.py +63 -0
- aiohomematic/interfaces/model.py +1921 -0
- aiohomematic/interfaces/operations.py +217 -0
- aiohomematic/logging_context.py +134 -0
- aiohomematic/metrics/__init__.py +125 -0
- aiohomematic/metrics/_protocols.py +140 -0
- aiohomematic/metrics/aggregator.py +534 -0
- aiohomematic/metrics/dataclasses.py +489 -0
- aiohomematic/metrics/emitter.py +292 -0
- aiohomematic/metrics/events.py +183 -0
- aiohomematic/metrics/keys.py +300 -0
- aiohomematic/metrics/observer.py +563 -0
- aiohomematic/metrics/stats.py +172 -0
- aiohomematic/model/__init__.py +189 -0
- aiohomematic/model/availability.py +65 -0
- aiohomematic/model/calculated/__init__.py +89 -0
- aiohomematic/model/calculated/climate.py +276 -0
- aiohomematic/model/calculated/data_point.py +315 -0
- aiohomematic/model/calculated/field.py +147 -0
- aiohomematic/model/calculated/operating_voltage_level.py +286 -0
- aiohomematic/model/calculated/support.py +232 -0
- aiohomematic/model/custom/__init__.py +214 -0
- aiohomematic/model/custom/capabilities/__init__.py +67 -0
- aiohomematic/model/custom/capabilities/climate.py +41 -0
- aiohomematic/model/custom/capabilities/light.py +87 -0
- aiohomematic/model/custom/capabilities/lock.py +44 -0
- aiohomematic/model/custom/capabilities/siren.py +63 -0
- aiohomematic/model/custom/climate.py +1130 -0
- aiohomematic/model/custom/cover.py +722 -0
- aiohomematic/model/custom/data_point.py +360 -0
- aiohomematic/model/custom/definition.py +300 -0
- aiohomematic/model/custom/field.py +89 -0
- aiohomematic/model/custom/light.py +1174 -0
- aiohomematic/model/custom/lock.py +322 -0
- aiohomematic/model/custom/mixins.py +445 -0
- aiohomematic/model/custom/profile.py +945 -0
- aiohomematic/model/custom/registry.py +251 -0
- aiohomematic/model/custom/siren.py +462 -0
- aiohomematic/model/custom/switch.py +195 -0
- aiohomematic/model/custom/text_display.py +289 -0
- aiohomematic/model/custom/valve.py +78 -0
- aiohomematic/model/data_point.py +1416 -0
- aiohomematic/model/device.py +1840 -0
- aiohomematic/model/event.py +216 -0
- aiohomematic/model/generic/__init__.py +327 -0
- aiohomematic/model/generic/action.py +40 -0
- aiohomematic/model/generic/action_select.py +62 -0
- aiohomematic/model/generic/binary_sensor.py +30 -0
- aiohomematic/model/generic/button.py +31 -0
- aiohomematic/model/generic/data_point.py +177 -0
- aiohomematic/model/generic/dummy.py +150 -0
- aiohomematic/model/generic/number.py +76 -0
- aiohomematic/model/generic/select.py +56 -0
- aiohomematic/model/generic/sensor.py +76 -0
- aiohomematic/model/generic/switch.py +54 -0
- aiohomematic/model/generic/text.py +33 -0
- aiohomematic/model/hub/__init__.py +100 -0
- aiohomematic/model/hub/binary_sensor.py +24 -0
- aiohomematic/model/hub/button.py +28 -0
- aiohomematic/model/hub/connectivity.py +190 -0
- aiohomematic/model/hub/data_point.py +342 -0
- aiohomematic/model/hub/hub.py +864 -0
- aiohomematic/model/hub/inbox.py +135 -0
- aiohomematic/model/hub/install_mode.py +393 -0
- aiohomematic/model/hub/metrics.py +208 -0
- aiohomematic/model/hub/number.py +42 -0
- aiohomematic/model/hub/select.py +52 -0
- aiohomematic/model/hub/sensor.py +37 -0
- aiohomematic/model/hub/switch.py +43 -0
- aiohomematic/model/hub/text.py +30 -0
- aiohomematic/model/hub/update.py +221 -0
- aiohomematic/model/support.py +592 -0
- aiohomematic/model/update.py +140 -0
- aiohomematic/model/week_profile.py +1827 -0
- aiohomematic/property_decorators.py +719 -0
- aiohomematic/py.typed +0 -0
- aiohomematic/rega_scripts/accept_device_in_inbox.fn +51 -0
- aiohomematic/rega_scripts/create_backup_start.fn +28 -0
- aiohomematic/rega_scripts/create_backup_status.fn +89 -0
- aiohomematic/rega_scripts/fetch_all_device_data.fn +97 -0
- aiohomematic/rega_scripts/get_backend_info.fn +25 -0
- aiohomematic/rega_scripts/get_inbox_devices.fn +61 -0
- aiohomematic/rega_scripts/get_program_descriptions.fn +31 -0
- aiohomematic/rega_scripts/get_serial.fn +44 -0
- aiohomematic/rega_scripts/get_service_messages.fn +83 -0
- aiohomematic/rega_scripts/get_system_update_info.fn +39 -0
- aiohomematic/rega_scripts/get_system_variable_descriptions.fn +31 -0
- aiohomematic/rega_scripts/set_program_state.fn +17 -0
- aiohomematic/rega_scripts/set_system_variable.fn +19 -0
- aiohomematic/rega_scripts/trigger_firmware_update.fn +67 -0
- aiohomematic/schemas.py +256 -0
- aiohomematic/store/__init__.py +55 -0
- aiohomematic/store/dynamic/__init__.py +43 -0
- aiohomematic/store/dynamic/command.py +250 -0
- aiohomematic/store/dynamic/data.py +175 -0
- aiohomematic/store/dynamic/details.py +187 -0
- aiohomematic/store/dynamic/ping_pong.py +416 -0
- aiohomematic/store/persistent/__init__.py +71 -0
- aiohomematic/store/persistent/base.py +285 -0
- aiohomematic/store/persistent/device.py +233 -0
- aiohomematic/store/persistent/incident.py +380 -0
- aiohomematic/store/persistent/paramset.py +241 -0
- aiohomematic/store/persistent/session.py +556 -0
- aiohomematic/store/serialization.py +150 -0
- aiohomematic/store/storage.py +689 -0
- aiohomematic/store/types.py +526 -0
- aiohomematic/store/visibility/__init__.py +40 -0
- aiohomematic/store/visibility/parser.py +141 -0
- aiohomematic/store/visibility/registry.py +722 -0
- aiohomematic/store/visibility/rules.py +307 -0
- aiohomematic/strings.json +237 -0
- aiohomematic/support.py +706 -0
- aiohomematic/tracing.py +236 -0
- aiohomematic/translations/de.json +237 -0
- aiohomematic/translations/en.json +237 -0
- aiohomematic/type_aliases.py +51 -0
- aiohomematic/validator.py +128 -0
- aiohomematic-2026.1.29.dist-info/METADATA +296 -0
- aiohomematic-2026.1.29.dist-info/RECORD +188 -0
- aiohomematic-2026.1.29.dist-info/WHEEL +5 -0
- aiohomematic-2026.1.29.dist-info/entry_points.txt +2 -0
- aiohomematic-2026.1.29.dist-info/licenses/LICENSE +21 -0
- aiohomematic-2026.1.29.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1392 @@
|
|
|
1
|
+
# SPDX-License-Identifier: MIT
|
|
2
|
+
# Copyright (c) 2021-2026
|
|
3
|
+
"""
|
|
4
|
+
Event Bus for decoupled event handling in aiohomematic.
|
|
5
|
+
|
|
6
|
+
Overview
|
|
7
|
+
--------
|
|
8
|
+
This module provides a type-safe, async-first event bus that replaces the various
|
|
9
|
+
callback dictionaries scattered throughout CentralUnit. It supports:
|
|
10
|
+
|
|
11
|
+
- Type-safe event subscription and publishing
|
|
12
|
+
- Async and sync callback handlers
|
|
13
|
+
- Automatic error isolation (one handler failure doesn't affect others)
|
|
14
|
+
- Unsubscription via returned callable
|
|
15
|
+
- Event filtering and debugging
|
|
16
|
+
- Handler priority levels (CRITICAL, HIGH, NORMAL, LOW)
|
|
17
|
+
- Batch event publishing for performance optimization
|
|
18
|
+
|
|
19
|
+
Design Philosophy
|
|
20
|
+
-----------------
|
|
21
|
+
Instead of multiple callback dictionaries with different signatures, we use:
|
|
22
|
+
1. A base Event class with concrete event types (dataclasses)
|
|
23
|
+
2. Generic subscription by event type
|
|
24
|
+
3. Async-first design with sync compatibility
|
|
25
|
+
4. Clear separation of concerns
|
|
26
|
+
|
|
27
|
+
Metrics Architecture Note
|
|
28
|
+
-------------------------
|
|
29
|
+
Most components in aiohomematic emit metrics via EventBus (event-driven pattern).
|
|
30
|
+
However, HandlerStats is an intentional exception that uses inline tracking.
|
|
31
|
+
|
|
32
|
+
This is because EventBus is **meta-infrastructure**: it cannot use itself to
|
|
33
|
+
observe its own handler execution without causing infinite recursion. This is
|
|
34
|
+
analogous to how logging frameworks cannot log their own internal errors.
|
|
35
|
+
|
|
36
|
+
Access handler stats directly via ``event_bus.get_handler_stats()``.
|
|
37
|
+
|
|
38
|
+
Public API
|
|
39
|
+
----------
|
|
40
|
+
- EventBus: Main event bus class for subscription and publishing
|
|
41
|
+
- EventBatch: Context manager for batch event publishing
|
|
42
|
+
- EventPriority: Enum for handler priority levels
|
|
43
|
+
- HandlerStats: Statistics for event handler execution tracking
|
|
44
|
+
- Event: Base class for all events
|
|
45
|
+
- Various event types: DataPointValueReceivedEvent, DeviceStateChangedEvent, etc.
|
|
46
|
+
|
|
47
|
+
Example Usage
|
|
48
|
+
-------------
|
|
49
|
+
from aiohomematic.async_support import Looper
|
|
50
|
+
from aiohomematic.central.events import (
|
|
51
|
+
EventBus,
|
|
52
|
+
EventBatch,
|
|
53
|
+
EventPriority,
|
|
54
|
+
DataPointValueReceivedEvent,
|
|
55
|
+
)
|
|
56
|
+
from aiohomematic.const import DataPointKey, ParamsetKey
|
|
57
|
+
|
|
58
|
+
looper = Looper()
|
|
59
|
+
bus = EventBus(task_scheduler=looper)
|
|
60
|
+
|
|
61
|
+
# Subscribe with default priority (note: event is keyword-only)
|
|
62
|
+
async def on_data_point_updated(*, event: DataPointValueReceivedEvent) -> None:
|
|
63
|
+
print(f"DataPoint {event.dpk} updated to {event.value}")
|
|
64
|
+
|
|
65
|
+
unsubscribe = bus.subscribe(
|
|
66
|
+
event_type=DataPointValueReceivedEvent,
|
|
67
|
+
event_key=None,
|
|
68
|
+
handler=on_data_point_updated,
|
|
69
|
+
)
|
|
70
|
+
|
|
71
|
+
# Subscribe with high priority (called before normal handlers)
|
|
72
|
+
unsubscribe_high = bus.subscribe(
|
|
73
|
+
event_type=DataPointValueReceivedEvent,
|
|
74
|
+
event_key=None,
|
|
75
|
+
handler=on_data_point_updated,
|
|
76
|
+
priority=EventPriority.HIGH,
|
|
77
|
+
)
|
|
78
|
+
|
|
79
|
+
# Publish single event
|
|
80
|
+
await bus.publish(event=DataPointValueReceivedEvent(
|
|
81
|
+
timestamp=datetime.now(),
|
|
82
|
+
dpk=DataPointKey(
|
|
83
|
+
interface_id="BidCos-RF",
|
|
84
|
+
channel_address="VCU0000001:1",
|
|
85
|
+
paramset_key=ParamsetKey.VALUES,
|
|
86
|
+
parameter="STATE",
|
|
87
|
+
),
|
|
88
|
+
value=True,
|
|
89
|
+
received_at=datetime.now(),
|
|
90
|
+
))
|
|
91
|
+
|
|
92
|
+
# Batch publish multiple events (more efficient)
|
|
93
|
+
async with EventBatch(bus=bus) as batch:
|
|
94
|
+
batch.add(event=DeviceStateChangedEvent(timestamp=now, device_address="VCU001"))
|
|
95
|
+
batch.add(event=DeviceStateChangedEvent(timestamp=now, device_address="VCU002"))
|
|
96
|
+
# Events are published when context exits
|
|
97
|
+
|
|
98
|
+
# Unsubscribe when done
|
|
99
|
+
unsubscribe()
|
|
100
|
+
|
|
101
|
+
"""
|
|
102
|
+
|
|
103
|
+
from __future__ import annotations
|
|
104
|
+
|
|
105
|
+
import asyncio
|
|
106
|
+
from collections import defaultdict
|
|
107
|
+
from collections.abc import Coroutine, Sequence
|
|
108
|
+
from dataclasses import dataclass
|
|
109
|
+
from datetime import datetime
|
|
110
|
+
import logging
|
|
111
|
+
import time
|
|
112
|
+
import types
|
|
113
|
+
from typing import TYPE_CHECKING, Any, Final, Protocol, TypeVar
|
|
114
|
+
|
|
115
|
+
from aiohomematic.central.events.types import Event, EventPriority
|
|
116
|
+
from aiohomematic.const import (
|
|
117
|
+
CacheInvalidationReason,
|
|
118
|
+
CacheType,
|
|
119
|
+
ConnectionStage,
|
|
120
|
+
DataPointKey,
|
|
121
|
+
DataRefreshType,
|
|
122
|
+
FailureReason,
|
|
123
|
+
ParamsetKey,
|
|
124
|
+
ProgramTrigger,
|
|
125
|
+
RecoveryStage,
|
|
126
|
+
)
|
|
127
|
+
from aiohomematic.property_decorators import DelegatedProperty
|
|
128
|
+
from aiohomematic.type_aliases import UnsubscribeCallback
|
|
129
|
+
|
|
130
|
+
if TYPE_CHECKING:
|
|
131
|
+
from typing import Self
|
|
132
|
+
|
|
133
|
+
from aiohomematic.interfaces import TaskSchedulerProtocol
|
|
134
|
+
|
|
135
|
+
_LOGGER = logging.getLogger(__name__)
|
|
136
|
+
|
|
137
|
+
# Type variables for generic event handling
|
|
138
|
+
T_Event = TypeVar("T_Event", bound="Event")
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
# Event handler protocols - handlers receive the event as a keyword-only argument.
class SyncEventHandlerProtocol(Protocol):
    """
    Protocol for synchronous event handlers with a keyword-only event parameter.

    Structural (duck-typed) interface: any plain callable that accepts a single
    keyword-only ``event`` argument and returns ``None`` conforms.
    """

    def __call__(self, *, event: Any) -> None:
        """Handle the given event synchronously; return nothing."""
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
class AsyncEventHandlerProtocol(Protocol):
    """
    Protocol for asynchronous event handlers with a keyword-only event parameter.

    Structural (duck-typed) interface: any coroutine function that accepts a
    single keyword-only ``event`` argument conforms.
    """

    def __call__(self, *, event: Any) -> Coroutine[Any, Any, None]:
        """Handle the given event asynchronously; returns an awaitable coroutine."""
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
# Union accepted at subscription time: either a plain callable or a coroutine function.
EventHandler = SyncEventHandlerProtocol | AsyncEventHandlerProtocol
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
@dataclass(slots=True)
class _PrioritizedHandler:
    """
    Internal wrapper pairing a subscribed handler with its priority.

    Handlers are ordered by ``priority`` first; ``order`` preserves the
    subscription sequence so sorting stays stable within the same priority.
    """

    handler: EventHandler  # The subscribed sync or async callable.
    priority: EventPriority  # Priority bucket (CRITICAL/HIGH/NORMAL/LOW).
    order: int  # Insertion order for stable sorting within same priority
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
@dataclass(slots=True)
|
|
169
|
+
class HandlerStats:
|
|
170
|
+
"""
|
|
171
|
+
Statistics for event handler execution tracking.
|
|
172
|
+
|
|
173
|
+
Architectural Note
|
|
174
|
+
------------------
|
|
175
|
+
HandlerStats uses **inline tracking** rather than event-driven metrics.
|
|
176
|
+
This is an intentional design decision to avoid infinite recursion:
|
|
177
|
+
|
|
178
|
+
If we emitted metric events for handler execution, the MetricsObserver
|
|
179
|
+
handler would itself trigger a metric event, creating an endless loop::
|
|
180
|
+
|
|
181
|
+
EventBus executes handler
|
|
182
|
+
→ emit LatencyMetricEvent
|
|
183
|
+
→ EventBus executes MetricsObserver handler
|
|
184
|
+
→ emit LatencyMetricEvent
|
|
185
|
+
→ ... infinite recursion
|
|
186
|
+
|
|
187
|
+
This is a fundamental constraint of meta-observability: the EventBus
|
|
188
|
+
cannot use itself for self-observation. Similar constraints exist in:
|
|
189
|
+
|
|
190
|
+
- Logging frameworks (cannot log their own internal errors)
|
|
191
|
+
- Garbage collectors (cannot garbage-collect themselves)
|
|
192
|
+
- Debuggers (cannot debug themselves)
|
|
193
|
+
|
|
194
|
+
Access handler stats directly via ``event_bus.get_handler_stats()``.
|
|
195
|
+
"""
|
|
196
|
+
|
|
197
|
+
total_executions: int = 0
|
|
198
|
+
"""Total number of handler executions."""
|
|
199
|
+
|
|
200
|
+
total_errors: int = 0
|
|
201
|
+
"""Total number of handler errors."""
|
|
202
|
+
|
|
203
|
+
total_duration_ms: float = 0.0
|
|
204
|
+
"""Total handler execution time in milliseconds."""
|
|
205
|
+
|
|
206
|
+
max_duration_ms: float = 0.0
|
|
207
|
+
"""Maximum handler execution time in milliseconds."""
|
|
208
|
+
|
|
209
|
+
@property
|
|
210
|
+
def avg_duration_ms(self) -> float:
|
|
211
|
+
"""Return average handler duration in milliseconds."""
|
|
212
|
+
if self.total_executions == 0:
|
|
213
|
+
return 0.0
|
|
214
|
+
return self.total_duration_ms / self.total_executions
|
|
215
|
+
|
|
216
|
+
def reset(self) -> None:
|
|
217
|
+
"""Reset handler statistics."""
|
|
218
|
+
self.total_executions = 0
|
|
219
|
+
self.total_errors = 0
|
|
220
|
+
self.total_duration_ms = 0.0
|
|
221
|
+
self.max_duration_ms = 0.0
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
@dataclass(frozen=True, slots=True)
class DataPointValueReceivedEvent(Event):
    """
    Fired when a data point value is updated from the backend.

    Key is the DataPointKey.

    The dpk (DataPointKey) contains:
    - interface_id: Interface identifier (e.g., "BidCos-RF")
    - channel_address: Full channel address (e.g., "VCU0000001:1")
    - paramset_key: Paramset type (e.g., ParamsetKey.VALUES)
    - parameter: Parameter name (e.g., "STATE")
    """

    dpk: DataPointKey  # Fully qualified identifier of the updated data point.
    value: Any  # New raw value as delivered by the backend.
    received_at: datetime  # Timestamp when the update was received.

    @property
    def key(self) -> Any:
        """Key identifier for this event (the DataPointKey)."""
        return self.dpk
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
@dataclass(frozen=True, slots=True)
class DataPointStatusReceivedEvent(Event):
    """
    Fired when a STATUS parameter value is updated from the backend.

    Key is the DataPointKey of the MAIN parameter (not the STATUS parameter).

    This event is routed to the main parameter's data point to update
    its status attribute. For example, a LEVEL_STATUS event is routed
    to the LEVEL data point.
    """

    dpk: DataPointKey  # Key of the MAIN parameter the status belongs to.
    status_value: int | str  # Raw status value from the backend.
    received_at: datetime  # Timestamp when the status update was received.

    @property
    def key(self) -> Any:
        """Key identifier for this event (the main parameter's DataPointKey)."""
        return self.dpk
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
@dataclass(frozen=True, slots=True)
class RpcParameterReceivedEvent(Event):
    """
    Raw parameter update event from the backend (re-published from RPC callbacks).

    Key is a DataPointKey built from the event's own fields with
    paramset_key fixed to ParamsetKey.VALUES.
    """

    interface_id: str  # Interface the callback arrived on.
    channel_address: str  # Full channel address (e.g., "VCU0000001:1").
    parameter: str  # Parameter name (e.g., "STATE").
    value: Any  # Raw value as delivered by the RPC callback.

    @property
    def key(self) -> Any:
        """Key identifier for this event: a VALUES-paramset DataPointKey."""
        # The key is constructed on access; frozen+slots dataclasses cannot
        # cache it in __post_init__ without object.__setattr__ tricks.
        return DataPointKey(
            interface_id=self.interface_id,
            channel_address=self.channel_address,
            paramset_key=ParamsetKey.VALUES,
            parameter=self.parameter,
        )
|
|
297
|
+
|
|
298
|
+
|
|
299
|
+
@dataclass(frozen=True, slots=True)
class SysvarStateChangedEvent(Event):
    """
    System variable state has changed.

    Key is the state path.
    """

    state_path: str  # Backend path identifying the system variable.
    value: Any  # New value of the system variable.
    received_at: datetime  # Timestamp when the change was received.

    @property
    def key(self) -> Any:
        """Key identifier for this event (the state path)."""
        return self.state_path
|
|
315
|
+
|
|
316
|
+
|
|
317
|
+
@dataclass(frozen=True, slots=True)
class DeviceStateChangedEvent(Event):
    """
    Device state has changed.

    Key is device_address.
    """

    device_address: str  # Address of the device whose state changed.

    @property
    def key(self) -> Any:
        """Key identifier for this event (the device address)."""
        return self.device_address
|
|
331
|
+
|
|
332
|
+
|
|
333
|
+
@dataclass(frozen=True, slots=True)
class FirmwareStateChangedEvent(Event):
    """
    Device firmware state has changed.

    Key is device_address.
    """

    device_address: str  # Address of the device whose firmware state changed.

    @property
    def key(self) -> Any:
        """Key identifier for this event (the device address)."""
        return self.device_address
|
|
347
|
+
|
|
348
|
+
|
|
349
|
+
@dataclass(frozen=True, slots=True)
class LinkPeerChangedEvent(Event):
    """
    Channel link peer addresses have changed.

    Key is channel_address.
    """

    channel_address: str  # Address of the channel whose link peers changed.

    @property
    def key(self) -> Any:
        """Key identifier for this event (the channel address)."""
        return self.channel_address
|
|
363
|
+
|
|
364
|
+
|
|
365
|
+
@dataclass(frozen=True, slots=True)
class DataPointStateChangedEvent(Event):
    """
    Data point value updated callback event.

    Key is unique_id.

    This event is fired when a data point's value changes and external
    consumers (like Home Assistant data points) need to be notified.
    Unlike DataPointValueReceivedEvent which handles internal backend updates,
    this event is for external integration points.

    The old_value and new_value fields allow consumers to track what changed
    without having to maintain their own previous state. These may be None
    if the values are unknown (e.g., during initial load or for non-value updates).
    """

    unique_id: str  # Unique identifier of the data point.
    custom_id: str  # Consumer-supplied identifier attached at subscription.
    old_value: Any = None  # Previous value; None when unknown.
    new_value: Any = None  # Current value; None when unknown.

    @property
    def key(self) -> Any:
        """Key identifier for this event (the data point's unique_id)."""
        return self.unique_id
|
|
391
|
+
|
|
392
|
+
|
|
393
|
+
@dataclass(frozen=True, slots=True)
class DeviceRemovedEvent(Event):
    """
    A device or a single data point has been removed from the system.

    Key is device_address (device removal) or unique_id (data point removal).

    Device removal (``device_address`` is set):
    - Enables decoupled cache invalidation via EventBus subscription
    - Caches subscribe and react independently instead of direct calls

    Data point removal (only ``unique_id`` is set):
    - Signals that a data point entity should be cleaned up
    """

    unique_id: str  # Unique identifier of the device or data point.
    device_address: str | None = None  # Removed device's address; None for data point removal.
    interface_id: str | None = None  # Interface the device belonged to; None for data point removal.
    channel_addresses: tuple[str, ...] = ()  # All channel addresses that were part of the device.

    @property
    def key(self) -> Any:
        """Key identifier for this event."""
        # A falsy device_address means this is a data point removal,
        # so fall back to the unique_id.
        return self.device_address or self.unique_id
|
|
424
|
+
|
|
425
|
+
|
|
426
|
+
# =============================================================================
|
|
427
|
+
# Connection Health Events (Phase 1)
|
|
428
|
+
# =============================================================================
|
|
429
|
+
|
|
430
|
+
|
|
431
|
+
@dataclass(frozen=True, slots=True)
class ConnectionStageChangedEvent(Event):
    """
    Connection reconnection stage progression.

    Key is interface_id.

    Emitted during staged reconnection when connection is lost and recovered.
    Tracks progression through TCP check, RPC check, warmup, and establishment.
    """

    interface_id: str  # Interface undergoing reconnection.
    stage: ConnectionStage  # Stage just entered.
    previous_stage: ConnectionStage  # Stage just left.
    duration_in_previous_stage_ms: float  # Time spent in the previous stage.

    @property
    def key(self) -> Any:
        """Key identifier for this event (the interface id)."""
        return self.interface_id

    @property
    def stage_name(self) -> str:
        """Return human-readable name of the current stage."""
        return self.stage.display_name
|
|
456
|
+
|
|
457
|
+
|
|
458
|
+
@dataclass(frozen=True, slots=True)
class ConnectionHealthChangedEvent(Event):
    """
    Connection health status update.

    Key is interface_id.

    Emitted when connection health status changes for an interface.
    """

    interface_id: str  # Interface the health status applies to.
    is_healthy: bool  # True when the connection is currently considered healthy.
    failure_reason: FailureReason | None  # Cause of unhealthiness; None when healthy.
    consecutive_failures: int  # Number of back-to-back failures observed.
    last_successful_contact: datetime | None  # Last successful backend contact; None if never.

    @property
    def key(self) -> Any:
        """Key identifier for this event (the interface id)."""
        return self.interface_id
|
|
478
|
+
|
|
479
|
+
|
|
480
|
+
# =============================================================================
|
|
481
|
+
# Cache Events (Phase 2)
|
|
482
|
+
# =============================================================================
|
|
483
|
+
|
|
484
|
+
|
|
485
|
+
@dataclass(frozen=True, slots=True)
class CacheInvalidatedEvent(Event):
    """
    Cache invalidation notification.

    Key is scope (device_address, interface_id, or None for full cache).

    Emitted when cache entries are invalidated or cleared.
    """

    cache_type: CacheType  # Which cache was invalidated.
    reason: CacheInvalidationReason  # Why the invalidation happened.
    scope: str | None  # device_address / interface_id, or None for the full cache.
    entries_affected: int  # Number of cache entries removed or invalidated.

    @property
    def key(self) -> Any:
        """Key identifier for this event (the invalidation scope)."""
        return self.scope
|
|
504
|
+
|
|
505
|
+
|
|
506
|
+
# =============================================================================
|
|
507
|
+
# Circuit Breaker Events (Phase 3)
|
|
508
|
+
# - CircuitBreakerStateChangedEvent (from types.py)
|
|
509
|
+
# - CircuitBreakerTrippedEvent (from types.py)
|
|
510
|
+
# =============================================================================
|
|
511
|
+
|
|
512
|
+
|
|
513
|
+
# =============================================================================
|
|
514
|
+
# State Machine Events (Phase 4)
|
|
515
|
+
# - ClientStateChangedEvent (from types.py)
|
|
516
|
+
# - CentralStateChangedEvent (from types.py)
|
|
517
|
+
# =============================================================================
|
|
518
|
+
|
|
519
|
+
|
|
520
|
+
# =============================================================================
|
|
521
|
+
# Data Refresh Events (Phase 5)
|
|
522
|
+
# =============================================================================
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
@dataclass(frozen=True, slots=True)
class DataRefreshTriggeredEvent(Event):
    """
    Data refresh operation triggered.

    Key is interface_id (or None for hub-level refreshes).

    Emitted when a data refresh operation starts.
    """

    refresh_type: DataRefreshType  # Kind of refresh being performed.
    interface_id: str | None  # Target interface; None for hub-level refreshes.
    scheduled: bool  # True when triggered by the scheduler rather than on demand.

    @property
    def key(self) -> Any:
        """Key identifier for this event (interface id, or None for hub-level)."""
        return self.interface_id
|
|
543
|
+
|
|
544
|
+
|
|
545
|
+
@dataclass(frozen=True, slots=True)
class DataRefreshCompletedEvent(Event):
    """
    Data refresh operation completed.

    Key is interface_id (or None for hub-level refreshes).

    Emitted when a data refresh operation completes (success or failure).
    """

    refresh_type: DataRefreshType  # Kind of refresh that was performed.
    interface_id: str | None  # Target interface; None for hub-level refreshes.
    success: bool  # True when the refresh completed without error.
    duration_ms: float  # Total refresh duration in milliseconds.
    items_refreshed: int  # Number of items refreshed by the operation.
    error_message: str | None  # Failure description; None on success.

    @property
    def key(self) -> Any:
        """Key identifier for this event (interface id, or None for hub-level)."""
        return self.interface_id
|
|
566
|
+
|
|
567
|
+
|
|
568
|
+
# =============================================================================
|
|
569
|
+
# Program/Sysvar Events (Phase 6)
|
|
570
|
+
# =============================================================================
|
|
571
|
+
|
|
572
|
+
|
|
573
|
+
@dataclass(frozen=True, slots=True)
class ProgramExecutedEvent(Event):
    """
    Backend program was executed.

    Key is program_id.

    Emitted when a Homematic program is executed.
    """

    program_id: str  # Backend identifier of the program.
    program_name: str  # Human-readable program name.
    triggered_by: ProgramTrigger  # What caused the program execution.
    success: bool  # True when the program executed successfully.

    @property
    def key(self) -> Any:
        """Key identifier for this event (the program id)."""
        return self.program_id
|
|
592
|
+
|
|
593
|
+
|
|
594
|
+
# =============================================================================
|
|
595
|
+
# Request Coalescer Events (Phase 7)
|
|
596
|
+
# =============================================================================
|
|
597
|
+
|
|
598
|
+
|
|
599
|
+
@dataclass(frozen=True, slots=True)
class RequestCoalescedEvent(Event):
    """
    Multiple requests were coalesced into one.

    Key is interface_id.

    Emitted when duplicate requests are merged to reduce backend load.
    """

    request_key: str  # Identity under which the duplicate requests were merged.
    coalesced_count: int  # How many requests were merged into one.
    interface_id: str  # Interface the coalesced request targets.

    @property
    def key(self) -> Any:
        """Key identifier for this event (the interface id)."""
        return self.interface_id
|
|
617
|
+
|
|
618
|
+
|
|
619
|
+
# =============================================================================
|
|
620
|
+
# Health Record Events (Phase 8)
|
|
621
|
+
# - HealthRecordedEvent (from types.py)
|
|
622
|
+
# =============================================================================
|
|
623
|
+
|
|
624
|
+
|
|
625
|
+
# =============================================================================
|
|
626
|
+
# Connection Recovery Events (Phase 9)
|
|
627
|
+
# =============================================================================
|
|
628
|
+
|
|
629
|
+
|
|
630
|
+
@dataclass(frozen=True, slots=True)
class ConnectionLostEvent(Event):
    """
    Connection loss detected for an interface.

    Key is interface_id.

    Emitted when the BackgroundScheduler detects a connection loss,
    triggering the ConnectionRecoveryCoordinator to start recovery.
    """

    interface_id: str  # Interface whose connection was lost
    reason: str  # Human-readable description of why the loss was detected
    detected_at: datetime  # When the loss was detected

    @property
    def key(self) -> Any:
        """Key identifier for this event."""
        return self.interface_id
|
|
649
|
+
|
|
650
|
+
|
|
651
|
+
@dataclass(frozen=True, slots=True)
class RecoveryStageChangedEvent(Event):
    """
    Recovery stage transition.

    Key is interface_id.

    Emitted when the ConnectionRecoveryCoordinator transitions between
    recovery stages. Enables fine-grained observability of the recovery process.
    """

    interface_id: str  # Interface being recovered
    old_stage: RecoveryStage  # Stage the coordinator left
    new_stage: RecoveryStage  # Stage the coordinator entered
    duration_in_old_stage_ms: float  # Time spent in the previous stage
    attempt_number: int  # Current recovery attempt count

    @property
    def key(self) -> Any:
        """Key identifier for this event."""
        return self.interface_id
|
|
672
|
+
|
|
673
|
+
|
|
674
|
+
@dataclass(frozen=True, slots=True)
class RecoveryAttemptedEvent(Event):
    """
    Recovery attempt completed.

    Key is interface_id.

    Emitted after each recovery attempt, regardless of success or failure.
    """

    interface_id: str  # Interface the attempt targeted
    attempt_number: int  # 1-based index of this attempt
    max_attempts: int  # Configured attempt limit
    stage_reached: RecoveryStage  # Furthest stage reached during this attempt
    success: bool  # Whether this attempt succeeded
    error_message: str | None  # Failure description, None on success

    @property
    def key(self) -> Any:
        """Key identifier for this event."""
        return self.interface_id
|
|
695
|
+
|
|
696
|
+
|
|
697
|
+
@dataclass(frozen=True, slots=True)
class RecoveryCompletedEvent(Event):
    """
    Recovery completed successfully.

    Key is interface_id (or central_name for batch recovery).

    Emitted when recovery succeeds for an interface or all interfaces.
    """

    interface_id: str | None
    """Interface ID (None for batch recovery of multiple interfaces)."""

    central_name: str
    """Name of the central unit."""

    total_attempts: int  # Attempts made before recovery succeeded
    total_duration_ms: float  # Total duration of the recovery process
    stages_completed: tuple[RecoveryStage, ...]  # Stages that were completed
    interfaces_recovered: tuple[str, ...] | None = None
    """List of recovered interfaces (for batch recovery)."""

    @property
    def key(self) -> Any:
        """Key identifier for this event."""
        # Batch recovery carries no interface_id; fall back to the central name.
        return self.interface_id or self.central_name
|
|
723
|
+
|
|
724
|
+
|
|
725
|
+
@dataclass(frozen=True, slots=True)
class RecoveryFailedEvent(Event):
    """
    Recovery failed after max retries.

    Key is interface_id (or central_name for batch recovery).

    Emitted when recovery fails for an interface or all interfaces,
    indicating transition to FAILED state with heartbeat retry.
    """

    interface_id: str | None
    """Interface ID (None for batch failure of multiple interfaces)."""

    central_name: str
    """Name of the central unit."""

    total_attempts: int  # Attempts made before giving up
    total_duration_ms: float  # Total duration of the failed recovery
    last_stage_reached: RecoveryStage  # Furthest stage reached before failure
    failure_reason: FailureReason  # Classified cause of the failure
    requires_manual_intervention: bool  # True if automatic retry will not help
    failed_interfaces: tuple[str, ...] | None = None
    """List of failed interfaces (for batch recovery)."""

    @property
    def key(self) -> Any:
        """Key identifier for this event."""
        # Batch failure carries no interface_id; fall back to the central name.
        return self.interface_id or self.central_name
|
|
754
|
+
|
|
755
|
+
|
|
756
|
+
@dataclass(frozen=True, slots=True)
class HeartbeatTimerFiredEvent(Event):
    """
    Heartbeat timer fired in FAILED state.

    Key is central_name.

    Emitted by the heartbeat timer when the system is in FAILED state,
    triggering a retry attempt for failed interfaces.
    """

    central_name: str  # Central unit whose heartbeat fired
    interface_ids: tuple[str, ...]  # Interfaces to retry

    @property
    def key(self) -> Any:
        """Key identifier for this event."""
        return self.central_name
|
|
774
|
+
|
|
775
|
+
|
|
776
|
+
class EventBus:
    """
    Async-first, type-safe event bus for decoupled communication.

    Features
    --------
    - Type-safe subscriptions (subscribe by event class)
    - Async and sync handler support
    - Automatic error isolation per handler
    - Subscription management with unsubscribe callbacks
    - Optional event logging for debugging

    Thread Safety
    -------------
    This EventBus is designed for single-threaded asyncio use.
    All subscriptions and publishes should happen in the same event loop.
    """

    def __init__(
        self,
        *,
        enable_event_logging: bool = False,
        task_scheduler: TaskSchedulerProtocol,
    ) -> None:
        """
        Initialize the event bus.

        Args:
        ----
            enable_event_logging: If True, log all published events (debug only)
            task_scheduler: Task scheduler for proper task lifecycle management.

        """
        # Two-level registry: event_type -> event_key -> handlers.
        # An inner key of None acts as the wildcard bucket (see publish()).
        self._subscriptions: Final[dict[type[Event], dict[Any, list[_PrioritizedHandler]]]] = defaultdict(
            lambda: defaultdict(list)
        )
        self._enable_event_logging: Final = enable_event_logging
        # Per-type publish counters, used only for debug statistics.
        self._event_count: Final[dict[type[Event], int]] = defaultdict(int)
        self._handler_order_counter: int = 0  # For stable sorting within same priority
        self._task_scheduler: Final = task_scheduler
        # Handler execution statistics for metrics
        self._handler_stats: Final = HandlerStats()

    def clear_event_stats(self) -> None:
        """Clear event statistics counters to free memory."""
        self._event_count.clear()
        self._handler_stats.reset()
        _LOGGER.debug("CLEAR_EVENT_STATS: Cleared all event statistics")

    def clear_external_subscriptions(self) -> int:
        """
        Clear subscriptions for event types that are not cleaned up elsewhere.

        This includes:
        - External subscriptions made via public APIs (subscribe_to_device_removed(),
          subscribe_to_firmware_updated(), subscribe_to_device_updated(), etc.)
        - Internal subscriptions that are created dynamically and not tracked

        This method provides a fallback cleanup during central shutdown.

        Returns
        -------
        Total number of subscriptions cleared

        """
        # Hard-coded list of event types whose subscribers are managed externally.
        external_event_types: tuple[type[Event], ...] = (
            DataPointStateChangedEvent,
            DeviceRemovedEvent,
            DeviceStateChangedEvent,
            FirmwareStateChangedEvent,
            LinkPeerChangedEvent,
        )
        total_cleared = 0
        for event_type in external_event_types:
            # Count before clearing so the total reflects what was removed.
            if (count := self.get_subscription_count(event_type=event_type)) > 0:
                self.clear_subscriptions(event_type=event_type)
                total_cleared += count
        if total_cleared > 0:
            _LOGGER.debug(
                "CLEAR_EXTERNAL_SUBSCRIPTIONS: Cleared %d external subscription(s)",
                total_cleared,
            )
        return total_cleared

    def clear_subscriptions(self, *, event_type: type[Event] | None = None) -> None:
        """
        Clear subscriptions for a specific event type or all types.

        Args:
        ----
            event_type: The event type to clear, or None to clear all

        """
        if event_type is None:
            # Full reset: drop every subscription and the publish counters.
            self._subscriptions.clear()
            self._event_count.clear()
            _LOGGER.debug("CLEAR_SUBSCRIPTION: Cleared all event subscriptions and statistics")
        else:
            self._subscriptions[event_type].clear()
            _LOGGER.debug("CLEAR_SUBSCRIPTION: Cleared subscriptions for %s", event_type.__name__)

    def clear_subscriptions_by_key(self, *, event_key: Any) -> int:
        """
        Clear all subscriptions for a specific event key across all event types.

        This is used to clean up subscriptions when a device or data point is removed,
        preventing memory leaks from orphaned handlers.

        Args:
        ----
            event_key: The key to clear subscriptions for (e.g., unique_id, dpk, channel_address)

        Returns:
        -------
        Number of handlers removed

        """
        total_removed = 0
        for event_type, keys_handlers in self._subscriptions.items():
            # Membership test first to avoid defaultdict creating empty buckets.
            if event_key in keys_handlers and (count := len(keys_handlers[event_key])) > 0:
                total_removed += count
                keys_handlers[event_key].clear()
                _LOGGER.debug(
                    "CLEAR_SUBSCRIPTION_BY_KEY: Cleared %d subscription(s) for key=%s, event_type=%s",
                    count,
                    event_key,
                    event_type.__name__,
                )
        return total_removed

    def get_event_stats(self) -> dict[str, int]:
        """
        Get statistics about published events (for debugging).

        Returns
        -------
        Dictionary mapping event type names to publish counts

        """
        return {event_type.__name__: count for event_type, count in self._event_count.items()}

    def get_handler_stats(self) -> HandlerStats:
        """Return handler execution statistics for metrics."""
        return self._handler_stats

    def get_subscription_count(self, *, event_type: type[Event]) -> int:
        """
        Get the number of active subscriptions for an event type.

        Counts all handlers across all event_keys for the given event_type.

        Args:
        ----
            event_type: The event class to query

        Returns:
        -------
        Number of active subscribers

        """
        # .get() avoids creating an empty defaultdict entry for unknown types.
        return sum(len(handlers) for handlers in self._subscriptions.get(event_type, {}).values())

    def get_total_subscription_count(self) -> int:
        """Return the total number of active subscriptions across all event types."""
        return sum(
            len(handlers) for event_handlers in self._subscriptions.values() for handlers in event_handlers.values()
        )

    def log_leaked_subscriptions(self) -> int:
        """
        Log any remaining subscriptions for debugging memory leaks.

        Call this before clearing subscriptions to identify potential leaks.

        Returns
        -------
        Total number of leaked subscriptions found

        """
        total_leaked = 0
        for event_type, keys_handlers in self._subscriptions.items():
            for key, handlers in keys_handlers.items():
                if handlers:
                    count = len(handlers)
                    total_leaked += count
                    _LOGGER.warning(  # i18n-log: ignore
                        "LEAKED_SUBSCRIPTION: %s (key=%s, count=%d)",
                        event_type.__name__,
                        key,
                        count,
                    )
        if total_leaked > 0:
            _LOGGER.warning("LEAKED_SUBSCRIPTION: Total leaked subscriptions: %d", total_leaked)  # i18n-log: ignore
        return total_leaked

    async def publish(self, *, event: Event) -> None:
        """
        Publish an event to all subscribed handlers.

        Handler lookup strategy (dual-key fallback):
        1. First try: Look up handlers by specific event.key
           (e.g., unique_id for DataPointValueReceivedEvent)
        2. Fallback: Look up handlers subscribed with key=None
           (wildcard subscribers that receive all events of this type)

        This allows both targeted subscriptions (only events for specific
        data point) and global subscriptions (all events of a type).

        Priority-based ordering:
        Handlers are sorted by priority (CRITICAL > HIGH > NORMAL > LOW).
        Within the same priority, handlers are called in subscription order.

        Concurrent execution:
        All matching handlers are called concurrently via asyncio.gather().
        return_exceptions=True ensures one failing handler doesn't prevent
        others from receiving the event. Errors are logged in _safe_call_handler.

        Args:
        ----
            event: The event instance to publish

        """
        event_type = type(event)

        # Dual-key lookup: specific key first, then wildcard (None) fallback.
        # The `or` chain short-circuits: if specific key has handlers, use them;
        # otherwise fall back to None-key handlers; otherwise empty list.
        # NOTE(review): when key-specific handlers exist, wildcard handlers are
        # intentionally NOT invoked for that event.
        if not (
            prioritized_handlers := (
                self._subscriptions.get(event_type, {}).get(event.key)
                or self._subscriptions.get(event_type, {}).get(None)
                or []
            )
        ):
            if self._enable_event_logging:
                if isinstance(event, RpcParameterReceivedEvent):
                    _LOGGER.debug(
                        "PUBLISH: No subscribers for %s: %s [%s]",
                        event_type.__name__,
                        event.parameter,
                        event.channel_address,
                    )
                else:
                    _LOGGER.debug("PUBLISH: No subscribers for %s", event_type.__name__)

            return

        # Track event statistics for debugging
        self._event_count[event_type] += 1

        if self._enable_event_logging:
            _LOGGER.debug(
                "PUBLISH: Publishing %s to %d handler(s) [count: %d]",
                event_type.__name__,
                len(prioritized_handlers),
                self._event_count[event_type],
            )

        # Sort handlers by priority (descending) then by insertion order (ascending).
        # Higher priority values execute first; same priority uses FIFO order.
        sorted_handlers = sorted(
            prioritized_handlers,
            key=lambda ph: (-ph.priority, ph.order),
        )

        # Concurrent handler invocation with error isolation.
        # Each handler runs independently; failures don't affect siblings.
        tasks = [self._safe_call_handler(handler=ph.handler, event=event) for ph in sorted_handlers]
        await asyncio.gather(*tasks, return_exceptions=True)

    async def publish_batch(self, *, events: Sequence[Event]) -> None:
        """
        Publish multiple events efficiently.

        This method optimizes handler lookup by grouping events by type and key,
        reducing redundant lookups when publishing many events of the same type.

        Events are still processed individually per handler, but the overhead of
        looking up handlers is reduced. This is particularly beneficial during
        device discovery or bulk updates.

        Priority ordering is maintained: handlers are sorted by priority
        (CRITICAL > HIGH > NORMAL > LOW) before invocation.

        Args:
        ----
            events: Sequence of events to publish

        Example:
        -------
            events = [
                DeviceStateChangedEvent(timestamp=now, device_address="VCU001"),
                DeviceStateChangedEvent(timestamp=now, device_address="VCU002"),
                DeviceStateChangedEvent(timestamp=now, device_address="VCU003"),
            ]
            await bus.publish_batch(events=events)

        """
        if not events:
            return

        # Group events by (event_type, event_key) for efficient handler lookup
        grouped: dict[tuple[type[Event], Any], list[Event]] = defaultdict(list)
        for event in events:
            grouped[(type(event), event.key)].append(event)

        if self._enable_event_logging:
            _LOGGER.debug(
                "PUBLISH_BATCH: Processing %d events in %d groups",
                len(events),
                len(grouped),
            )

        all_tasks: list[Coroutine[Any, Any, None]] = []

        for (event_type, event_key), grouped_events in grouped.items():
            # Look up handlers once per group (same dual-key fallback as publish()).
            prioritized_handlers = (
                self._subscriptions.get(event_type, {}).get(event_key)
                or self._subscriptions.get(event_type, {}).get(None)
                or []
            )

            if not prioritized_handlers:
                continue

            # Track event statistics
            self._event_count[event_type] += len(grouped_events)

            # Sort handlers by priority
            sorted_handlers = sorted(
                prioritized_handlers,
                key=lambda ph: (-ph.priority, ph.order),
            )

            # Create tasks for all event-handler combinations
            all_tasks.extend(
                self._safe_call_handler(handler=ph.handler, event=event)
                for event in grouped_events
                for ph in sorted_handlers
            )

        if all_tasks:
            # Error isolation: one failing handler doesn't affect the rest.
            await asyncio.gather(*all_tasks, return_exceptions=True)

    def publish_sync(self, *, event: Event) -> None:
        """
        Schedule an event for publishing from synchronous code.

        This method schedules the event to be published asynchronously via the
        running event loop. Use this when you need to publish events from
        synchronous callbacks or methods that cannot be made async.

        The TaskScheduler is used for proper task lifecycle management
        (tracking, shutdown, exception logging).

        Note: The event will be published asynchronously after this method returns.
        There is no guarantee about when handlers will be invoked.

        Args:
        ----
            event: The event instance to publish

        """
        # Use TaskScheduler for proper lifecycle management
        # Pass a factory (lambda) instead of a coroutine to defer creation
        # until inside the event loop - avoids "was never awaited" warnings
        self._task_scheduler.create_task(
            target=lambda: self.publish(event=event),
            name=f"event_bus_publish_{type(event).__name__}",
        )

    def subscribe(
        self,
        *,
        event_type: type[T_Event],
        event_key: Any,
        handler: EventHandler,
        priority: EventPriority = EventPriority.NORMAL,
    ) -> UnsubscribeCallback:
        """
        Subscribe to events of a specific type.

        Args:
        ----
            event_type: The event class to listen for
            event_key: The key for unique identification (None subscribes to all
                events of this type — the wildcard bucket)
            handler: Async or sync callback with signature (*, event: EventType) -> None
            priority: Handler priority (default: NORMAL). Higher priority handlers
                are called before lower priority handlers.

        Returns:
        -------
        A callable that unsubscribes this handler when called

        Example:
        -------
            async def on_update(*, event: DataPointValueReceivedEvent) -> None:
                print(f"Updated: {event.dpk}")

            # Subscribe with default priority (wildcard key)
            unsubscribe = bus.subscribe(
                event_type=DataPointValueReceivedEvent,
                event_key=None,
                handler=on_update,
            )

            # Subscribe with high priority
            unsubscribe = bus.subscribe(
                event_type=DataPointValueReceivedEvent,
                event_key=None,
                handler=on_update,
                priority=EventPriority.HIGH,
            )
            # Later...
            unsubscribe()

        """
        # Create prioritized handler wrapper
        generic_handler = handler
        prioritized_handler = _PrioritizedHandler(
            handler=generic_handler,
            priority=priority,
            order=self._handler_order_counter,
        )
        # Monotonic counter gives FIFO tiebreak within equal priority.
        self._handler_order_counter += 1
        self._subscriptions[event_type][event_key].append(prioritized_handler)

        _LOGGER.debug(
            "SUBSCRIBE: Subscribed to %s with priority %s (total subscribers: %d)",
            event_type.__name__,
            priority.name,
            len(self._subscriptions[event_type][event_key]),
        )

        def unsubscribe() -> None:
            """Remove this specific handler from subscriptions."""
            # Membership check makes multiple unsubscribe() calls safe no-ops.
            if prioritized_handler in self._subscriptions[event_type][event_key]:
                self._subscriptions[event_type][event_key].remove(prioritized_handler)
                _LOGGER.debug(
                    "SUBSCRIBE: Unsubscribed from %s (remaining: %d)",
                    event_type.__name__,
                    len(self._subscriptions[event_type][event_key]),
                )

        return unsubscribe

    async def _safe_call_handler(self, *, handler: EventHandler, event: Event) -> None:
        """
        Safely invoke a handler, catching and logging exceptions.

        Polymorphic handler detection:
        Handlers can be either sync or async functions. We use a try-then-await
        pattern to support both:
        1. Call the handler (works for both sync and async)
        2. Check if the result is a coroutine (indicates async handler)
        3. If coroutine, await it; if not, the call already completed

        This is more efficient than checking asyncio.iscoroutinefunction() upfront
        because some handlers may be wrapped/decorated in ways that obscure their
        async nature.

        Error isolation:
        Exceptions are caught and logged but not re-raised. This ensures one
        buggy handler doesn't prevent other handlers from receiving events.

        Duration tracking:
        Handler execution time is measured and recorded in _handler_stats
        for metrics aggregation.

        Args:
        ----
            handler: The callback to invoke (sync or async)
            event: The event to pass to the handler

        """
        start_time = time.perf_counter()
        had_error = False
        try:
            # Invoke handler with keyword-only event parameter
            result = handler(event=event)
            # If async, the result is a coroutine that needs to be awaited
            if asyncio.iscoroutine(result):
                await result
        except Exception:
            had_error = True
            # Log but don't re-raise - isolate handler errors
            _LOGGER.exception(  # i18n-log: ignore
                "_SAFE_CALL_HANDLER: Error in event handler %s for event %s",
                handler.__name__ if hasattr(handler, "__name__") else handler,
                type(event).__name__,
            )
        finally:
            # Record handler statistics
            duration_ms = (time.perf_counter() - start_time) * 1000
            self._handler_stats.total_executions += 1
            self._handler_stats.total_duration_ms += duration_ms
            self._handler_stats.max_duration_ms = max(self._handler_stats.max_duration_ms, duration_ms)
            if had_error:
                self._handler_stats.total_errors += 1
|
|
1271
|
+
|
|
1272
|
+
|
|
1273
|
+
class EventBatch:
    """
    Context manager that collects events and publishes them as one batch.

    Instead of publishing each event individually, an EventBatch accumulates
    events and hands them to ``EventBus.publish_batch`` in a single call when
    the async context exits (or when ``flush()`` is invoked manually). This
    reduces per-event overhead when several events belong together.

    Features
    --------
    - Async context manager support
    - Automatic flush on context exit
    - Manual flush capability
    - Event count tracking

    Example Usage
    -------------
    async with EventBatch(bus=event_bus) as batch:
        batch.add(DeviceStateChangedEvent(timestamp=now, device_address="VCU001"))
        batch.add(DeviceStateChangedEvent(timestamp=now, device_address="VCU002"))
        # Events are published when the context exits

    # Or with manual flush:
    batch = EventBatch(bus=event_bus)
    batch.add(event1)
    batch.add(event2)
    await batch.flush()

    Thread Safety
    -------------
    Intended for single-threaded asyncio use within one context; do not share
    an instance between tasks.
    """

    def __init__(self, *, bus: EventBus) -> None:
        """
        Initialize the event batch.

        Args:
        ----
            bus: The EventBus to publish events to

        """
        self._bus: Final = bus
        # Pending events; emptied on flush.
        self._events: list[Event] = []
        # Once True, the batch is sealed: no adds, flush becomes a no-op.
        self._flushed: bool = False

    async def __aenter__(self) -> Self:
        """Enter the async context."""
        return self

    async def __aexit__(  # kwonly: disable
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: types.TracebackType | None,
    ) -> None:
        """Exit the async context and flush events."""
        await self.flush()

    is_flushed: Final = DelegatedProperty[bool](path="_flushed")

    @property
    def event_count(self) -> int:
        """Return the number of events in the batch."""
        return len(self._events)

    def add(self, *, event: Event) -> None:
        """
        Add a single event to the batch.

        Args:
        ----
            event: The event to add

        Raises:
        ------
            RuntimeError: If the batch has already been flushed

        """
        if self._flushed:
            raise RuntimeError("Cannot add events to a flushed batch")  # noqa: TRY003 # i18n-exc: ignore
        self._events.append(event)

    def add_all(self, *, events: Sequence[Event]) -> None:
        """
        Add several events to the batch at once.

        Args:
        ----
            events: Sequence of events to add

        Raises:
        ------
            RuntimeError: If the batch has already been flushed

        """
        if self._flushed:
            raise RuntimeError("Cannot add events to a flushed batch")  # noqa: TRY003 # i18n-exc: ignore
        self._events.extend(events)

    async def flush(self) -> int:
        """
        Publish every collected event, seal the batch, and report the count.

        Returns
        -------
        Number of events that were published

        """
        # A sealed batch publishes nothing more.
        if self._flushed:
            return 0

        published = len(self._events)
        if published:
            await self._bus.publish_batch(events=self._events)
            self._events.clear()

        self._flushed = True
        return published
|