aiohomematic-2026.1.29-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (188)
  1. aiohomematic/__init__.py +110 -0
  2. aiohomematic/_log_context_protocol.py +29 -0
  3. aiohomematic/api.py +410 -0
  4. aiohomematic/async_support.py +250 -0
  5. aiohomematic/backend_detection.py +462 -0
  6. aiohomematic/central/__init__.py +103 -0
  7. aiohomematic/central/async_rpc_server.py +760 -0
  8. aiohomematic/central/central_unit.py +1152 -0
  9. aiohomematic/central/config.py +463 -0
  10. aiohomematic/central/config_builder.py +772 -0
  11. aiohomematic/central/connection_state.py +160 -0
  12. aiohomematic/central/coordinators/__init__.py +38 -0
  13. aiohomematic/central/coordinators/cache.py +414 -0
  14. aiohomematic/central/coordinators/client.py +480 -0
  15. aiohomematic/central/coordinators/connection_recovery.py +1141 -0
  16. aiohomematic/central/coordinators/device.py +1166 -0
  17. aiohomematic/central/coordinators/event.py +514 -0
  18. aiohomematic/central/coordinators/hub.py +532 -0
  19. aiohomematic/central/decorators.py +184 -0
  20. aiohomematic/central/device_registry.py +229 -0
  21. aiohomematic/central/events/__init__.py +104 -0
  22. aiohomematic/central/events/bus.py +1392 -0
  23. aiohomematic/central/events/integration.py +424 -0
  24. aiohomematic/central/events/types.py +194 -0
  25. aiohomematic/central/health.py +762 -0
  26. aiohomematic/central/rpc_server.py +353 -0
  27. aiohomematic/central/scheduler.py +794 -0
  28. aiohomematic/central/state_machine.py +391 -0
  29. aiohomematic/client/__init__.py +203 -0
  30. aiohomematic/client/_rpc_errors.py +187 -0
  31. aiohomematic/client/backends/__init__.py +48 -0
  32. aiohomematic/client/backends/base.py +335 -0
  33. aiohomematic/client/backends/capabilities.py +138 -0
  34. aiohomematic/client/backends/ccu.py +487 -0
  35. aiohomematic/client/backends/factory.py +116 -0
  36. aiohomematic/client/backends/homegear.py +294 -0
  37. aiohomematic/client/backends/json_ccu.py +252 -0
  38. aiohomematic/client/backends/protocol.py +316 -0
  39. aiohomematic/client/ccu.py +1857 -0
  40. aiohomematic/client/circuit_breaker.py +459 -0
  41. aiohomematic/client/config.py +64 -0
  42. aiohomematic/client/handlers/__init__.py +40 -0
  43. aiohomematic/client/handlers/backup.py +157 -0
  44. aiohomematic/client/handlers/base.py +79 -0
  45. aiohomematic/client/handlers/device_ops.py +1085 -0
  46. aiohomematic/client/handlers/firmware.py +144 -0
  47. aiohomematic/client/handlers/link_mgmt.py +199 -0
  48. aiohomematic/client/handlers/metadata.py +436 -0
  49. aiohomematic/client/handlers/programs.py +144 -0
  50. aiohomematic/client/handlers/sysvars.py +100 -0
  51. aiohomematic/client/interface_client.py +1304 -0
  52. aiohomematic/client/json_rpc.py +2068 -0
  53. aiohomematic/client/request_coalescer.py +282 -0
  54. aiohomematic/client/rpc_proxy.py +629 -0
  55. aiohomematic/client/state_machine.py +324 -0
  56. aiohomematic/const.py +2207 -0
  57. aiohomematic/context.py +275 -0
  58. aiohomematic/converter.py +270 -0
  59. aiohomematic/decorators.py +390 -0
  60. aiohomematic/exceptions.py +185 -0
  61. aiohomematic/hmcli.py +997 -0
  62. aiohomematic/i18n.py +193 -0
  63. aiohomematic/interfaces/__init__.py +407 -0
  64. aiohomematic/interfaces/central.py +1067 -0
  65. aiohomematic/interfaces/client.py +1096 -0
  66. aiohomematic/interfaces/coordinators.py +63 -0
  67. aiohomematic/interfaces/model.py +1921 -0
  68. aiohomematic/interfaces/operations.py +217 -0
  69. aiohomematic/logging_context.py +134 -0
  70. aiohomematic/metrics/__init__.py +125 -0
  71. aiohomematic/metrics/_protocols.py +140 -0
  72. aiohomematic/metrics/aggregator.py +534 -0
  73. aiohomematic/metrics/dataclasses.py +489 -0
  74. aiohomematic/metrics/emitter.py +292 -0
  75. aiohomematic/metrics/events.py +183 -0
  76. aiohomematic/metrics/keys.py +300 -0
  77. aiohomematic/metrics/observer.py +563 -0
  78. aiohomematic/metrics/stats.py +172 -0
  79. aiohomematic/model/__init__.py +189 -0
  80. aiohomematic/model/availability.py +65 -0
  81. aiohomematic/model/calculated/__init__.py +89 -0
  82. aiohomematic/model/calculated/climate.py +276 -0
  83. aiohomematic/model/calculated/data_point.py +315 -0
  84. aiohomematic/model/calculated/field.py +147 -0
  85. aiohomematic/model/calculated/operating_voltage_level.py +286 -0
  86. aiohomematic/model/calculated/support.py +232 -0
  87. aiohomematic/model/custom/__init__.py +214 -0
  88. aiohomematic/model/custom/capabilities/__init__.py +67 -0
  89. aiohomematic/model/custom/capabilities/climate.py +41 -0
  90. aiohomematic/model/custom/capabilities/light.py +87 -0
  91. aiohomematic/model/custom/capabilities/lock.py +44 -0
  92. aiohomematic/model/custom/capabilities/siren.py +63 -0
  93. aiohomematic/model/custom/climate.py +1130 -0
  94. aiohomematic/model/custom/cover.py +722 -0
  95. aiohomematic/model/custom/data_point.py +360 -0
  96. aiohomematic/model/custom/definition.py +300 -0
  97. aiohomematic/model/custom/field.py +89 -0
  98. aiohomematic/model/custom/light.py +1174 -0
  99. aiohomematic/model/custom/lock.py +322 -0
  100. aiohomematic/model/custom/mixins.py +445 -0
  101. aiohomematic/model/custom/profile.py +945 -0
  102. aiohomematic/model/custom/registry.py +251 -0
  103. aiohomematic/model/custom/siren.py +462 -0
  104. aiohomematic/model/custom/switch.py +195 -0
  105. aiohomematic/model/custom/text_display.py +289 -0
  106. aiohomematic/model/custom/valve.py +78 -0
  107. aiohomematic/model/data_point.py +1416 -0
  108. aiohomematic/model/device.py +1840 -0
  109. aiohomematic/model/event.py +216 -0
  110. aiohomematic/model/generic/__init__.py +327 -0
  111. aiohomematic/model/generic/action.py +40 -0
  112. aiohomematic/model/generic/action_select.py +62 -0
  113. aiohomematic/model/generic/binary_sensor.py +30 -0
  114. aiohomematic/model/generic/button.py +31 -0
  115. aiohomematic/model/generic/data_point.py +177 -0
  116. aiohomematic/model/generic/dummy.py +150 -0
  117. aiohomematic/model/generic/number.py +76 -0
  118. aiohomematic/model/generic/select.py +56 -0
  119. aiohomematic/model/generic/sensor.py +76 -0
  120. aiohomematic/model/generic/switch.py +54 -0
  121. aiohomematic/model/generic/text.py +33 -0
  122. aiohomematic/model/hub/__init__.py +100 -0
  123. aiohomematic/model/hub/binary_sensor.py +24 -0
  124. aiohomematic/model/hub/button.py +28 -0
  125. aiohomematic/model/hub/connectivity.py +190 -0
  126. aiohomematic/model/hub/data_point.py +342 -0
  127. aiohomematic/model/hub/hub.py +864 -0
  128. aiohomematic/model/hub/inbox.py +135 -0
  129. aiohomematic/model/hub/install_mode.py +393 -0
  130. aiohomematic/model/hub/metrics.py +208 -0
  131. aiohomematic/model/hub/number.py +42 -0
  132. aiohomematic/model/hub/select.py +52 -0
  133. aiohomematic/model/hub/sensor.py +37 -0
  134. aiohomematic/model/hub/switch.py +43 -0
  135. aiohomematic/model/hub/text.py +30 -0
  136. aiohomematic/model/hub/update.py +221 -0
  137. aiohomematic/model/support.py +592 -0
  138. aiohomematic/model/update.py +140 -0
  139. aiohomematic/model/week_profile.py +1827 -0
  140. aiohomematic/property_decorators.py +719 -0
  141. aiohomematic/py.typed +0 -0
  142. aiohomematic/rega_scripts/accept_device_in_inbox.fn +51 -0
  143. aiohomematic/rega_scripts/create_backup_start.fn +28 -0
  144. aiohomematic/rega_scripts/create_backup_status.fn +89 -0
  145. aiohomematic/rega_scripts/fetch_all_device_data.fn +97 -0
  146. aiohomematic/rega_scripts/get_backend_info.fn +25 -0
  147. aiohomematic/rega_scripts/get_inbox_devices.fn +61 -0
  148. aiohomematic/rega_scripts/get_program_descriptions.fn +31 -0
  149. aiohomematic/rega_scripts/get_serial.fn +44 -0
  150. aiohomematic/rega_scripts/get_service_messages.fn +83 -0
  151. aiohomematic/rega_scripts/get_system_update_info.fn +39 -0
  152. aiohomematic/rega_scripts/get_system_variable_descriptions.fn +31 -0
  153. aiohomematic/rega_scripts/set_program_state.fn +17 -0
  154. aiohomematic/rega_scripts/set_system_variable.fn +19 -0
  155. aiohomematic/rega_scripts/trigger_firmware_update.fn +67 -0
  156. aiohomematic/schemas.py +256 -0
  157. aiohomematic/store/__init__.py +55 -0
  158. aiohomematic/store/dynamic/__init__.py +43 -0
  159. aiohomematic/store/dynamic/command.py +250 -0
  160. aiohomematic/store/dynamic/data.py +175 -0
  161. aiohomematic/store/dynamic/details.py +187 -0
  162. aiohomematic/store/dynamic/ping_pong.py +416 -0
  163. aiohomematic/store/persistent/__init__.py +71 -0
  164. aiohomematic/store/persistent/base.py +285 -0
  165. aiohomematic/store/persistent/device.py +233 -0
  166. aiohomematic/store/persistent/incident.py +380 -0
  167. aiohomematic/store/persistent/paramset.py +241 -0
  168. aiohomematic/store/persistent/session.py +556 -0
  169. aiohomematic/store/serialization.py +150 -0
  170. aiohomematic/store/storage.py +689 -0
  171. aiohomematic/store/types.py +526 -0
  172. aiohomematic/store/visibility/__init__.py +40 -0
  173. aiohomematic/store/visibility/parser.py +141 -0
  174. aiohomematic/store/visibility/registry.py +722 -0
  175. aiohomematic/store/visibility/rules.py +307 -0
  176. aiohomematic/strings.json +237 -0
  177. aiohomematic/support.py +706 -0
  178. aiohomematic/tracing.py +236 -0
  179. aiohomematic/translations/de.json +237 -0
  180. aiohomematic/translations/en.json +237 -0
  181. aiohomematic/type_aliases.py +51 -0
  182. aiohomematic/validator.py +128 -0
  183. aiohomematic-2026.1.29.dist-info/METADATA +296 -0
  184. aiohomematic-2026.1.29.dist-info/RECORD +188 -0
  185. aiohomematic-2026.1.29.dist-info/WHEEL +5 -0
  186. aiohomematic-2026.1.29.dist-info/entry_points.txt +2 -0
  187. aiohomematic-2026.1.29.dist-info/licenses/LICENSE +21 -0
  188. aiohomematic-2026.1.29.dist-info/top_level.txt +1 -0
aiohomematic/store/dynamic/ping_pong.py
@@ -0,0 +1,416 @@
+# SPDX-License-Identifier: MIT
+# Copyright (c) 2021-2026
+"""
+Ping/pong tracker for connection health monitoring.
+
+This module provides PingPongTracker which tracks ping/pong timestamps to detect
+connection health issues and publishes interface events on mismatch thresholds.
+"""
+
+from __future__ import annotations
+
+import asyncio
+from datetime import datetime
+import logging
+import time
+from typing import TYPE_CHECKING, Final
+
+from aiohomematic import i18n
+from aiohomematic.central.events import IntegrationIssue, SystemStatusChangedEvent
+from aiohomematic.const import (
+    PING_PONG_CACHE_MAX_SIZE,
+    PING_PONG_MISMATCH_COUNT,
+    PING_PONG_MISMATCH_COUNT_TTL,
+    IntegrationIssueSeverity,
+    IntegrationIssueType,
+    PingPongMismatchType,
+)
+from aiohomematic.interfaces import CentralInfoProtocol, EventBusProviderProtocol, IncidentRecorderProtocol
+from aiohomematic.metrics import MetricKeys, emit_latency
+from aiohomematic.property_decorators import DelegatedProperty
+from aiohomematic.store.types import IncidentSeverity, IncidentType, PingPongJournal, PongTracker
+
+if TYPE_CHECKING:
+    from aiohomematic.central import CentralConnectionState
+
+_LOGGER: Final = logging.getLogger(__name__)
+
+
+class PingPongTracker:
+    """Tracker for ping/pong events to monitor connection health."""
+
+    __slots__ = (
+        "_allowed_delta",
+        "_central_info",
+        "_connection_state",
+        "_event_bus_provider",
+        "_incident_recorder",
+        "_interface_id",
+        "_journal",
+        "_pending",
+        "_retry_at",
+        "_ttl",
+        "_unknown",
+    )
+
+    def __init__(
+        self,
+        *,
+        event_bus_provider: EventBusProviderProtocol,
+        central_info: CentralInfoProtocol,
+        interface_id: str,
+        connection_state: CentralConnectionState | None = None,
+        incident_recorder: IncidentRecorderProtocol | None = None,
+        allowed_delta: int = PING_PONG_MISMATCH_COUNT,
+        ttl: int = PING_PONG_MISMATCH_COUNT_TTL,
+    ):
+        """Initialize the cache with ttl."""
+        assert ttl > 0
+        self._event_bus_provider: Final = event_bus_provider
+        self._central_info: Final = central_info
+        self._interface_id: Final = interface_id
+        self._connection_state: Final = connection_state
+        self._incident_recorder: Final = incident_recorder
+        self._allowed_delta: Final = allowed_delta
+        self._ttl: Final = ttl
+        self._pending: Final = PongTracker(tokens=set(), seen_at={})
+        self._unknown: Final = PongTracker(tokens=set(), seen_at={})
+        self._retry_at: Final[set[str]] = set()
+        self._journal: Final = PingPongJournal()
+
+    allowed_delta: Final = DelegatedProperty[int](path="_allowed_delta")
+
+    @property
+    def has_connection_issue(self) -> bool:
+        """Return True if there is a known connection issue for this interface."""
+        if self._connection_state is None:
+            return False
+        return self._connection_state.has_rpc_proxy_issue(interface_id=self._interface_id)
+
+    @property
+    def journal(self) -> PingPongJournal:
+        """Return the diagnostic journal for this tracker."""
+        return self._journal
+
+    @property
+    def size(self) -> int:
+        """Return total size of pending and unknown pong sets."""
+        return len(self._pending) + len(self._unknown)
+
+    def clear(self) -> None:
+        """Clear the cache and journal."""
+        self._pending.clear()
+        self._unknown.clear()
+        self._journal.clear()
+
+    def handle_received_pong(self, *, pong_token: str) -> None:
+        """Handle received pong token."""
+        if self._pending.contains(token=pong_token):
+            # Calculate round-trip latency and emit metric event
+            rtt_ms: float | None = None
+            if (send_time := self._pending.seen_at.get(pong_token)) is not None:
+                rtt_ms = (time.monotonic() - send_time) * 1000
+                emit_latency(
+                    event_bus=self._event_bus_provider.event_bus,
+                    key=MetricKeys.ping_pong_rtt(interface_id=self._interface_id),
+                    duration_ms=rtt_ms,
+                )
+            # Record successful PONG in journal
+            self._journal.record_pong_received(token=pong_token, rtt_ms=rtt_ms or 0.0)
+            self._pending.remove(token=pong_token)
+            self._cleanup_tracker(tracker=self._pending, tracker_name="pending")
+            count = len(self._pending)
+            self._check_and_publish_pong_event(mismatch_type=PingPongMismatchType.PENDING)
+            _LOGGER.debug(
+                "PING PONG CACHE: Reduce pending PING count: %s - %i for token: %s",
+                self._interface_id,
+                count,
+                pong_token,
+            )
+        else:
+            # Record unknown PONG in journal
+            self._journal.record_pong_unknown(token=pong_token)
+            # Track unknown pong with monotonic insertion time for TTL expiry.
+            self._unknown.add(token=pong_token, timestamp=time.monotonic())
+            self._cleanup_tracker(tracker=self._unknown, tracker_name="unknown")
+            count = len(self._unknown)
+            self._check_and_publish_pong_event(mismatch_type=PingPongMismatchType.UNKNOWN)
+            _LOGGER.debug(
+                "PING PONG CACHE: Increase unknown PONG count: %s - %i for token: %s",
+                self._interface_id,
+                count,
+                pong_token,
+            )
+            # Schedule a single retry after 15s to try reconciling this PONG with a possible late PING.
+            self._schedule_unknown_pong_retry(token=pong_token, delay=15.0)
+
+    def handle_send_ping(self, *, ping_token: str) -> None:
+        """Handle send ping token by tracking it as pending and publishing events."""
+        # Skip tracking if connection is known to be down - prevents false alarm
+        # mismatch events during CCU restart when PINGs cannot be received.
+        if self.has_connection_issue:
+            _LOGGER.debug(
+                "PING PONG CACHE: Skip tracking PING (connection issue): %s - token: %s",
+                self._interface_id,
+                ping_token,
+            )
+            return
+        # Record PING in journal
+        self._journal.record_ping_sent(token=ping_token)
+        self._pending.add(token=ping_token, timestamp=time.monotonic())
+        self._cleanup_tracker(tracker=self._pending, tracker_name="pending")
+        # Throttle event emission to every second ping to avoid spamming callbacks,
+        # but always publish when crossing the high threshold.
+        count = len(self._pending)
+        if (count > self._allowed_delta) or (count % 2 == 0):
+            self._check_and_publish_pong_event(mismatch_type=PingPongMismatchType.PENDING)
+        _LOGGER.debug(
+            "PING PONG CACHE: Increase pending PING count: %s - %i for token: %s",
+            self._interface_id,
+            count,
+            ping_token,
+        )
+
+    def _check_and_publish_pong_event(self, *, mismatch_type: PingPongMismatchType) -> None:
+        """Publish an event about the pong status."""
+
+        def _publish_event(mismatch_count: int) -> None:
+            """Publish event."""
+            acceptable = mismatch_count <= self._allowed_delta
+            issue = IntegrationIssue(
+                issue_type=IntegrationIssueType.PING_PONG_MISMATCH,
+                severity=IntegrationIssueSeverity.WARNING if acceptable else IntegrationIssueSeverity.ERROR,
+                interface_id=self._interface_id,
+                mismatch_type=mismatch_type,
+                mismatch_count=mismatch_count,
+            )
+            self._event_bus_provider.event_bus.publish_sync(
+                event=SystemStatusChangedEvent(
+                    timestamp=datetime.now(),
+                    issues=(issue,),
+                )
+            )
+            _LOGGER.debug(
+                "PING PONG CACHE: Emitting event %s for %s with mismatch_count: %i with %i acceptable",
+                mismatch_type,
+                self._interface_id,
+                mismatch_count,
+                self._allowed_delta,
+            )
+
+        if mismatch_type == PingPongMismatchType.PENDING:
+            self._cleanup_tracker(tracker=self._pending, tracker_name="pending")
+            if (count := len(self._pending)) > self._allowed_delta:
+                # Publish event to inform subscribers about high pending pong count.
+                _publish_event(mismatch_count=count)
+                if self._pending.logged is False:
+                    _LOGGER.warning(
+                        i18n.tr(
+                            key="log.store.dynamic.pending_pong_mismatch",
+                            interface_id=self._interface_id,
+                        )
+                    )
+                    # Record incident for persistent diagnostics
+                    self._record_incident_async(
+                        incident_type=IncidentType.PING_PONG_MISMATCH_HIGH,
+                        severity=IncidentSeverity.ERROR,
+                        message=f"Pending PONG count ({count}) exceeded threshold ({self._allowed_delta})",
+                        context={"pending_count": count, "threshold": self._allowed_delta},
+                    )
+                self._pending.logged = True
+            # In low state:
+            # - If we previously logged a high state, publish a reset event (mismatch=0) exactly once.
+            # - Otherwise, throttle emission to every second ping (even counts > 0) to avoid spamming.
+            elif self._pending.logged:
+                _publish_event(mismatch_count=0)
+                self._pending.logged = False
+            elif count > 0 and count % 2 == 0:
+                _publish_event(mismatch_count=count)
+        elif mismatch_type == PingPongMismatchType.UNKNOWN:
+            self._cleanup_tracker(tracker=self._unknown, tracker_name="unknown")
+            count = len(self._unknown)
+            if len(self._unknown) > self._allowed_delta:
+                # Publish event to inform subscribers about high unknown pong count.
+                _publish_event(mismatch_count=count)
+                if self._unknown.logged is False:
+                    _LOGGER.warning(
+                        i18n.tr(
+                            key="log.store.dynamic.unknown_pong_mismatch",
+                            interface_id=self._interface_id,
+                        )
+                    )
+                    # Record incident for persistent diagnostics
+                    self._record_incident_async(
+                        incident_type=IncidentType.PING_PONG_UNKNOWN_HIGH,
+                        severity=IncidentSeverity.WARNING,
+                        message=f"Unknown PONG count ({count}) exceeded threshold ({self._allowed_delta})",
+                        context={"unknown_count": count, "threshold": self._allowed_delta},
+                    )
+                self._unknown.logged = True
+            elif self._unknown.logged:
+                # Publish reset event when dropping below threshold after being in high state.
+                _publish_event(mismatch_count=0)
+                self._unknown.logged = False
+
+    def _cleanup_tracker(self, *, tracker: PongTracker, tracker_name: str) -> None:
+        """Clean up expired entries and enforce size limit for a tracker."""
+        now = time.monotonic()
+
+        # Remove expired entries
+        expired_tokens = [
+            token
+            for token in list(tracker.tokens)
+            if (seen_at := tracker.seen_at.get(token)) is not None and (now - seen_at) > self._ttl
+        ]
+        for token in expired_tokens:
+            tracker.remove(token=token)
+            # Record expired PINGs in journal (pending tracker only - these are unanswered PINGs)
+            if tracker_name == "pending":
+                self._journal.record_pong_expired(token=token)
+            _LOGGER.debug(
+                "PING PONG CACHE: Removing expired %s PONG: %s - %i for ts: %s",
+                tracker_name,
+                self._interface_id,
+                len(tracker),
+                token,
+            )
+
+        # Enforce size limit by removing oldest entries
+        if len(tracker) > PING_PONG_CACHE_MAX_SIZE:
+            sorted_entries = sorted(
+                tracker.seen_at.items(),
+                key=lambda item: item[1],
+            )
+            remove_count = len(tracker) - PING_PONG_CACHE_MAX_SIZE
+            for token, _ in sorted_entries[:remove_count]:
+                tracker.remove(token=token)
+            _LOGGER.debug(
+                "PING PONG CACHE: Evicted %d oldest %s entries on %s (limit: %d)",
+                remove_count,
+                tracker_name,
+                self._interface_id,
+                PING_PONG_CACHE_MAX_SIZE,
+            )
+
+    def _record_incident_async(
+        self,
+        *,
+        incident_type: IncidentType,
+        severity: IncidentSeverity,
+        message: str,
+        context: dict[str, int],
+    ) -> None:
+        """
+        Schedule async incident recording via the looper.
+
+        This method fires and forgets the incident recording since we don't want
+        to block the sync caller. If no incident_recorder or looper is available,
+        the incident is silently skipped.
+        """
+        if (incident_recorder := self._incident_recorder) is None:
+            return
+
+        if (looper := getattr(self._central_info, "looper", None)) is None:
+            _LOGGER.debug(
+                "PING PONG CACHE: Skip incident recording for %s on %s (no looper)",
+                incident_type.value,
+                self._interface_id,
+            )
+            return
+
+        async def _record() -> None:
+            try:
+                await incident_recorder.record_incident(
+                    incident_type=incident_type,
+                    severity=severity,
+                    message=message,
+                    interface_id=self._interface_id,
+                    context=context,
+                    journal=self._journal,
+                )
+            except Exception as err:  # pragma: no cover
+                _LOGGER.debug(
+                    "PING PONG CACHE: Failed to record incident %s on %s: %s",
+                    incident_type.value,
+                    self._interface_id,
+                    err,
+                )
+
+        looper.create_task(target=_record, name=f"ppc_incident_{self._interface_id}_{incident_type.value}")
+
+    async def _retry_reconcile_pong(self, *, token: str) -> None:
+        """Attempt to reconcile a previously-unknown PONG with a late pending PING."""
+        # Always allow another schedule after the retry completes
+        try:
+            # Cleanup any expired entries first to avoid outdated counts
+            self._cleanup_tracker(tracker=self._pending, tracker_name="pending")
+            self._cleanup_tracker(tracker=self._unknown, tracker_name="unknown")
+
+            if self._pending.contains(token=token):
+                # Remove from pending
+                self._pending.remove(token=token)
+
+                # If still marked unknown, clear it
+                unknown_before = len(self._unknown)
+                if self._unknown.contains(token=token):
+                    self._unknown.remove(token=token)
+
+                # Re-publish events to reflect new counts (respecting existing throttling)
+                self._check_and_publish_pong_event(mismatch_type=PingPongMismatchType.PENDING)
+                if len(self._unknown) != unknown_before:
+                    self._check_and_publish_pong_event(mismatch_type=PingPongMismatchType.UNKNOWN)
+
+                _LOGGER.debug(
+                    "PING PONG CACHE: Retry reconciled PONG on %s for token: %s (pending now: %i, unknown now: %i)",
+                    self._interface_id,
+                    token,
+                    len(self._pending),
+                    len(self._unknown),
+                )
+            else:
+                _LOGGER.debug(
+                    "PING PONG CACHE: Retry found no pending PING on %s for token: %s (unknown: %s)",
+                    self._interface_id,
+                    token,
+                    self._unknown.contains(token=token),
+                )
+        finally:
+            self._retry_at.discard(token)

+    def _schedule_unknown_pong_retry(self, *, token: str, delay: float) -> None:
+        """
+        Schedule a one-shot retry to reconcile an unknown PONG after delay seconds.
+
+        If no looper is available on the central (e.g. in unit tests), skip scheduling.
+        """
+        # Coalesce multiple schedules for the same token
+        if token in self._retry_at:
+            return
+        self._retry_at.add(token)
+
+        if (looper := getattr(self._central_info, "looper", None)) is None:
+            # In testing contexts without a looper, we cannot schedule — leave to TTL expiry.
+            _LOGGER.debug(
+                "PING PONG CACHE: Skip scheduling retry for token %s on %s (no looper)",
+                token,
+                self._interface_id,
+            )
+            # Allow a future attempt to schedule if environment changes
+            self._retry_at.discard(token)
+            return
+
+        async def _retry() -> None:
+            try:
+                await asyncio.sleep(delay)
+                await self._retry_reconcile_pong(token=token)
+            except Exception as err:  # pragma: no cover
+                _LOGGER.debug(
+                    "PING PONG CACHE: Retry task error for token %s on %s: %s",
+                    token,
+                    self._interface_id,
+                    err,
+                )
+            # Ensure token can be rescheduled if needed
+            self._retry_at.discard(token)
+
+        looper.create_task(target=_retry, name=f"ppc_retry_{self._interface_id}_{token}")
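
For orientation, here is a minimal usage sketch of the PingPongTracker added above. The stub provider objects are hypothetical stand-ins for what the central unit normally supplies via EventBusProviderProtocol and CentralInfoProtocol; only constructor arguments and methods visible in this diff are used, and the matched-PONG path (which additionally emits an RTT metric) is left out.

# Illustrative sketch only -- not part of the package. The stub objects below
# are hypothetical; in aiohomematic they are provided by the central unit.
from aiohomematic.store.dynamic.ping_pong import PingPongTracker


class _StubEventBus:
    """Hypothetical event bus; only called once mismatch thresholds or throttling rules trigger an event."""

    def publish_sync(self, *, event) -> None:
        print("status event:", event)


class _StubEventBusProvider:
    """Hypothetical provider exposing the event_bus attribute the tracker reads."""

    event_bus = _StubEventBus()


class _StubCentralInfo:
    """Hypothetical central info without a 'looper', so retries and incident recording are skipped."""


tracker = PingPongTracker(
    event_bus_provider=_StubEventBusProvider(),
    central_info=_StubCentralInfo(),
    interface_id="BidCos-RF",
)

tracker.handle_send_ping(ping_token="ping-001")      # tracked as a pending PING
tracker.handle_received_pong(pong_token="pong-xyz")  # no matching PING -> tracked as unknown
print(tracker.size)  # 2: one pending PING plus one unknown PONG; both expire after the TTL
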
aiohomematic/store/persistent/__init__.py
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: MIT
+# Copyright (c) 2021-2026
+"""
+Persistent store used to persist Homematic metadata between runs.
+
+This package provides on-disk registries that complement the short-lived, in-memory
+stores from aiohomematic.store.dynamic. The goal is to minimize expensive data
+retrieval from the backend by storing stable metadata such as device and
+paramset descriptions in JSON files inside a dedicated cache directory.
+
+Package structure
+-----------------
+- base: BasePersistentFile abstract base class
+- device: DeviceDescriptionRegistry for device/channel metadata
+- incident: IncidentStore for diagnostic incident snapshots
+- paramset: ParamsetDescriptionRegistry for parameter descriptions
+- session: SessionRecorder for RPC call/response recording
+
+Key behaviors
+-------------
+- Saves only if caches are enabled and content has changed (hash comparison)
+- Uses orjson for fast binary writes and json for reads
+- Save/load/clear operations are synchronized via a semaphore
+
+Public API
+----------
+- DeviceDescriptionRegistry: Device and channel description storage
+- IncidentStore: Persistent diagnostic incident storage
+- ParamsetDescriptionRegistry: Paramset description storage
+- SessionRecorder: RPC session recording for testing
+- cleanup_files: Clean up cache files for a central unit
+"""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+from typing import Final
+
+from aiohomematic.async_support import loop_check
+from aiohomematic.const import SUB_DIRECTORY_CACHE, SUB_DIRECTORY_SESSION
+from aiohomematic.store.persistent.base import get_file_name, get_file_path
+from aiohomematic.store.persistent.device import DeviceDescriptionRegistry
+from aiohomematic.store.persistent.incident import IncidentStore
+from aiohomematic.store.persistent.paramset import ParamsetDescriptionRegistry
+from aiohomematic.store.persistent.session import SessionRecorder
+from aiohomematic.support import delete_file
+
+_LOGGER: Final = logging.getLogger(__name__)
+
+__all__ = [
+    # Registries
+    "DeviceDescriptionRegistry",
+    "IncidentStore",
+    "ParamsetDescriptionRegistry",
+    "SessionRecorder",
+    # Utilities
+    "cleanup_files",
+    "get_file_name",
+    "get_file_path",
+]
+
+
+@loop_check
+def cleanup_files(*, central_name: str, storage_directory: str) -> None:
+    """Clean up the used files."""
+    loop = asyncio.get_running_loop()
+    cache_dir = get_file_path(storage_directory=storage_directory, sub_directory=SUB_DIRECTORY_CACHE)
+    loop.run_in_executor(None, delete_file, cache_dir, f"{central_name}*.json".lower())
+    session_dir = get_file_path(storage_directory=storage_directory, sub_directory=SUB_DIRECTORY_SESSION)
+    loop.run_in_executor(None, delete_file, session_dir, f"{central_name}*.json".lower())
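
As a usage note, cleanup_files is synchronous but must be called from inside a running event loop: it obtains the loop with asyncio.get_running_loop() and hands the actual deletions to the default executor. A minimal sketch follows; the central name and storage directory are made-up example values.

# Illustrative sketch only -- example values, not part of the package.
import asyncio

from aiohomematic.store.persistent import cleanup_files


async def main() -> None:
    # Removes the lower-cased "<central_name>*.json" files from the cache and
    # session sub-directories; deletion runs in the default thread-pool executor.
    cleanup_files(central_name="demo_central", storage_directory="/tmp/aiohomematic")


asyncio.run(main())
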