aiohomematic 2025.10.2__tar.gz → 2025.10.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aiohomematic might be problematic.
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/PKG-INFO +1 -1
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/async_support.py +59 -23
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/caches/dynamic.py +27 -14
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/caches/persistent.py +12 -2
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/central/__init__.py +172 -46
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/central/xml_rpc_server.py +6 -1
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/client/__init__.py +23 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/client/json_rpc.py +10 -1
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/const.py +29 -17
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/decorators.py +33 -27
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/custom/siren.py +2 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/data_point.py +1 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/generic/data_point.py +1 -1
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/support.py +2 -2
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/property_decorators.py +40 -13
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/support.py +83 -20
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic.egg-info/PKG-INFO +1 -1
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic.egg-info/SOURCES.txt +8 -1
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic_support/client_local.py +8 -3
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/pyproject.toml +10 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_action.py +3 -2
- aiohomematic-2025.10.4/tests/test_async_support.py +171 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_binary_sensor.py +4 -2
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_button.py +5 -3
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_central.py +55 -24
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_central_pydevccu.py +1 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_climate.py +8 -4
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_cover.py +18 -9
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_device.py +6 -3
- aiohomematic-2025.10.4/tests/test_dynamic_caches.py +150 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_entity.py +10 -5
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_event.py +6 -3
- aiohomematic-2025.10.4/tests/test_json_rpc_client_integration.py +34 -0
- aiohomematic-2025.10.4/tests/test_kwonly_lint.py +29 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_light.py +14 -7
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_lock.py +4 -2
- aiohomematic-2025.10.4/tests/test_logging_support.py +108 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_number.py +8 -4
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_select.py +4 -2
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_sensor.py +6 -3
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_siren.py +4 -2
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_support.py +11 -6
- aiohomematic-2025.10.4/tests/test_support_extra.py +88 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_switch.py +7 -4
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_text.py +5 -3
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_valve.py +3 -2
- aiohomematic-2025.10.4/tests/test_xml_rpc_proxy_integration.py +33 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/LICENSE +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/README.md +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/__init__.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/caches/__init__.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/caches/visibility.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/central/decorators.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/client/_rpc_errors.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/client/xml_rpc.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/context.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/converter.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/exceptions.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/hmcli.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/__init__.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/calculated/__init__.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/calculated/climate.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/calculated/data_point.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/calculated/operating_voltage_level.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/calculated/support.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/custom/__init__.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/custom/climate.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/custom/const.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/custom/cover.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/custom/data_point.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/custom/definition.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/custom/light.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/custom/lock.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/custom/support.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/custom/switch.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/custom/valve.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/device.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/event.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/generic/__init__.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/generic/action.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/generic/binary_sensor.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/generic/button.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/generic/number.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/generic/select.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/generic/sensor.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/generic/switch.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/generic/text.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/hub/__init__.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/hub/binary_sensor.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/hub/button.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/hub/data_point.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/hub/number.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/hub/select.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/hub/sensor.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/hub/switch.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/hub/text.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/model/update.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/py.typed +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/rega_scripts/fetch_all_device_data.fn +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/rega_scripts/get_program_descriptions.fn +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/rega_scripts/get_serial.fn +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/rega_scripts/get_system_variable_descriptions.fn +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/rega_scripts/set_program_state.fn +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/rega_scripts/set_system_variable.fn +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic/validator.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic.egg-info/dependency_links.txt +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic.egg-info/requires.txt +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic.egg-info/top_level.txt +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/aiohomematic_support/__init__.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/setup.cfg +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_calculated_support.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_decorator.py +0 -0
- {aiohomematic-2025.10.2 → aiohomematic-2025.10.4}/tests/test_json_rpc.py +0 -0
PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aiohomematic
-Version: 2025.10.2
+Version: 2025.10.4
 Summary: Homematic interface for Home Assistant running on Python 3.
 Home-page: https://github.com/sukramj/aiohomematic
 Author-email: SukramJ <sukramj@icloud.com>, Daniel Perna <danielperna84@gmail.com>
```
aiohomematic/async_support.py

```diff
@@ -15,7 +15,8 @@ from typing import Any, Final, cast
 
 from aiohomematic.const import BLOCK_LOG_TIMEOUT
 from aiohomematic.exceptions import AioHomematicException
-
+import aiohomematic.support as hms
+from aiohomematic.support import extract_exc_args
 
 _LOGGER: Final = logging.getLogger(__name__)
 
@@ -46,7 +47,13 @@ class Looper:
                     _LOGGER.warning("Shutdown timeout reached; task still pending: %s", task)
                 break
 
-            await self._await_and_log_pending(pending=tasks)
+            pending_after_wait = await self._await_and_log_pending(pending=tasks, deadline=deadline)
+
+            # If deadline has been reached and tasks are still pending, log and break
+            if deadline is not None and monotonic() >= deadline and pending_after_wait:
+                for task in pending_after_wait:
+                    _LOGGER.warning("Shutdown timeout reached; task still pending: %s", task)
+                break
 
             if start_time is None:
                 # Avoid calling monotonic() until we know
@@ -63,16 +70,35 @@ class Looper:
         for task in tasks:
             _LOGGER.debug("Waiting for task: %s", task)
 
-    async def _await_and_log_pending(
-
-
-
-
-
-
-
-
+    async def _await_and_log_pending(
+        self, *, pending: Collection[asyncio.Future[Any]], deadline: float | None
+    ) -> set[asyncio.Future[Any]]:
+        """
+        Await and log tasks that take a long time, respecting an optional deadline.
+
+        Returns the set of pending tasks if the deadline has been reached (or zero timeout),
+        allowing the caller to decide about timeout logging. Returns an empty set if no tasks are pending.
+        """
+        wait_time = 0.0
+        pending_set: set[asyncio.Future[Any]] = set(pending)
+        while pending_set:
+            if deadline is None:
+                timeout = BLOCK_LOG_TIMEOUT
+            else:
+                remaining = int(max(0.0, deadline - monotonic()))
+                if (timeout := min(BLOCK_LOG_TIMEOUT, remaining)) == 0.0:
+                    # Deadline reached; return current pending to caller for warning log
+                    return pending_set
+            done, still_pending = await asyncio.wait(pending_set, timeout=timeout)
+            if not (pending_set := set(still_pending)):
+                return set()
+            wait_time += timeout
+            for task in pending_set:
                 _LOGGER.debug("Waited %s seconds for task: %s", wait_time, task)
+            # If the deadline was reached during the wait, let caller handle warning
+            if deadline is not None and monotonic() >= deadline:
+                return pending_set
+        return set()
 
     def create_task(self, *, target: Coroutine[Any, Any, Any], name: str) -> None:
         """Add task to the executor pool."""
@@ -85,7 +111,7 @@ class Looper:
             )
             return
 
-    def _async_create_task[R](self, target: Coroutine[Any, Any, R], name: str) -> asyncio.Task[R]:
+    def _async_create_task[R](self, target: Coroutine[Any, Any, R], name: str) -> asyncio.Task[R]:  # kwonly: disable
         """Create a task from within the event_loop. This method must be run in the event_loop."""
         task = self._loop.create_task(target, name=name)
         self._tasks.add(task)
```
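The two `Looper` hunks above make the shutdown wait deadline-aware: work is awaited in `BLOCK_LOG_TIMEOUT`-sized slices, long-running tasks are logged, and whatever is still pending when the deadline passes is handed back to the caller for the final warning. A minimal standalone sketch of that waiting pattern, assuming a hypothetical 60-second `BLOCK_LOG_TIMEOUT` and plain asyncio tasks rather than the library's `Looper`:

```python
import asyncio
import logging
from collections.abc import Collection
from time import monotonic
from typing import Any

_LOGGER = logging.getLogger(__name__)
BLOCK_LOG_TIMEOUT = 60.0  # assumption: stand-in for the library's constant


async def await_and_log_pending(
    *, pending: Collection[asyncio.Future[Any]], deadline: float | None
) -> set[asyncio.Future[Any]]:
    """Wait in slices, log slow tasks, and return whatever is still pending once the deadline passes."""
    waited = 0.0
    pending_set: set[asyncio.Future[Any]] = set(pending)
    while pending_set:
        if deadline is None:
            timeout = BLOCK_LOG_TIMEOUT
        else:
            remaining = max(0.0, deadline - monotonic())
            if (timeout := min(BLOCK_LOG_TIMEOUT, remaining)) == 0.0:
                return pending_set  # deadline reached; the caller decides how to warn
        _done, still_pending = await asyncio.wait(pending_set, timeout=timeout)
        if not still_pending:
            return set()
        pending_set = set(still_pending)
        waited += timeout
        for task in pending_set:
            _LOGGER.debug("Waited %s seconds for task: %s", waited, task)
        if deadline is not None and monotonic() >= deadline:
            return pending_set
    return set()


async def main() -> None:
    slow = asyncio.ensure_future(asyncio.sleep(10))
    leftover = await await_and_log_pending(pending={slow}, deadline=monotonic() + 0.2)
    for task in leftover:  # caller logs or cancels what did not finish in time
        task.cancel()


asyncio.run(main())
```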
aiohomematic/async_support.py (continued)

```diff
@@ -134,7 +160,12 @@ def cancelling(*, task: asyncio.Future[Any]) -> bool:
 
 
 def loop_check[**P, R](func: Callable[P, R]) -> Callable[P, R]:
-    """
+    """
+    Annotation to mark method that must be run within the event loop.
+
+    Always wraps the function, but only performs loop checks when debug is enabled.
+    This allows tests to monkeypatch aiohomematic.support.debug_enabled at runtime.
+    """
 
     _with_loop: set = set()
 
@@ -143,17 +174,22 @@ def loop_check[**P, R](func: Callable[P, R]) -> Callable[P, R]:
         """Wrap loop check."""
         return_value = func(*args, **kwargs)
 
-
-
-
-
-
-
-
-
-
+        # Only perform the (potentially expensive) loop check when debug is enabled.
+        if hms.debug_enabled():
+            try:
+                asyncio.get_running_loop()
+                loop_running = True
+            except Exception:
+                loop_running = False
+
+            if not loop_running and func not in _with_loop:
+                _with_loop.add(func)
+                _LOGGER.warning(
+                    "Method %s must run in the event_loop. No loop detected.",
+                    func.__name__,
+                )
 
         return return_value
 
     setattr(func, "_loop_check", True)
-    return cast(Callable[P, R], wrapper_loop_check)
+    return cast(Callable[P, R], wrapper_loop_check)
```
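`loop_check` now always wraps the target function and defers the "is there a running loop?" probe to call time, gated on the debug flag so tests can monkeypatch it. A self-contained sketch of that decorator pattern, with a local `debug_enabled()` stand-in instead of `aiohomematic.support`:

```python
import asyncio
import functools
import logging
from collections.abc import Callable

_LOGGER = logging.getLogger(__name__)
_warned: set[Callable] = set()


def debug_enabled() -> bool:
    """Stand-in for aiohomematic.support.debug_enabled(); tests could monkeypatch this."""
    return True


def loop_check[**P, R](func: Callable[P, R]) -> Callable[P, R]:
    """Warn once per function when a loop-bound callable runs without a running event loop."""

    @functools.wraps(func)
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        return_value = func(*args, **kwargs)
        if debug_enabled():  # the probe is skipped entirely unless debugging is on
            try:
                asyncio.get_running_loop()
            except RuntimeError:
                if func not in _warned:
                    _warned.add(func)
                    _LOGGER.warning("Method %s must run in the event_loop. No loop detected.", func.__name__)
        return return_value

    return wrapper


@loop_check
def fire_callback() -> str:
    return "fired"


fire_callback()  # called outside a loop -> warns exactly once while debug_enabled() is True
```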
aiohomematic/caches/dynamic.py

```diff
@@ -400,15 +400,15 @@ class PingPongCache:
 
     @property
     def low_pending_pongs(self) -> bool:
-        """Return
+        """Return True when pending pong count is at or below the allowed delta (i.e., not high)."""
         self._cleanup_pending_pongs()
-        return len(self._pending_pongs)
+        return len(self._pending_pongs) <= self._allowed_delta
 
     @property
     def low_unknown_pongs(self) -> bool:
-        """Return
+        """Return True when unknown pong count is at or below the allowed delta (i.e., not high)."""
         self._cleanup_unknown_pongs()
-        return len(self._unknown_pongs)
+        return len(self._unknown_pongs) <= self._allowed_delta
 
     @property
     def pending_pong_count(self) -> int:
@@ -430,10 +430,14 @@ class PingPongCache:
     def handle_send_ping(self, *, ping_ts: datetime) -> None:
         """Handle send ping timestamp."""
         self._pending_pongs.add(ping_ts)
-
-
-
-        )
+        # Throttle event emission to every second ping to avoid spamming callbacks,
+        # but always emit when crossing the high threshold.
+        count = self.pending_pong_count
+        if (count > self._allowed_delta) or (count % 2 == 0):
+            self._check_and_fire_pong_event(
+                event_type=InterfaceEventType.PENDING_PONG,
+                pong_mismatch_count=count,
+            )
         _LOGGER.debug(
             "PING PONG CACHE: Increase pending PING count: %s - %i for ts: %s",
             self._interface_id,
@@ -473,8 +477,8 @@ class PingPongCache:
         """Cleanup too old pending pongs."""
         dt_now = datetime.now()
         for pong_ts in list(self._pending_pongs):
-
-            if
+            # Only expire entries that are actually older than the TTL.
+            if (dt_now - pong_ts).total_seconds() > self._ttl:
                 self._pending_pongs.remove(pong_ts)
                 _LOGGER.debug(
                     "PING PONG CACHE: Removing expired pending PONG: %s - %i for ts: %s",
@@ -487,8 +491,8 @@ class PingPongCache:
         """Cleanup too old unknown pongs."""
         dt_now = datetime.now()
         for pong_ts in list(self._unknown_pongs):
-
-            if
+            # Only expire entries that are actually older than the TTL.
+            if (dt_now - pong_ts).total_seconds() > self._ttl:
                 self._unknown_pongs.remove(pong_ts)
                 _LOGGER.debug(
                     "PING PONG CACHE: Removing expired unknown PONG: %s - %i or ts: %s",
@@ -519,11 +523,20 @@ class PingPongCache:
         )
 
         if self.low_pending_pongs and event_type == InterfaceEventType.PENDING_PONG:
-
-
+            # In low state:
+            # - If we previously logged a high state, emit a reset event (mismatch=0) exactly once.
+            # - Otherwise, throttle emission to every second ping (even counts > 0) to avoid spamming.
+            if self._pending_pong_logged:
+                _fire_event(mismatch_count=0)
+                self._pending_pong_logged = False
+                return
+            if pong_mismatch_count > 0 and pong_mismatch_count % 2 == 0:
+                _fire_event(mismatch_count=pong_mismatch_count)
             return
 
         if self.low_unknown_pongs and event_type == InterfaceEventType.UNKNOWN_PONG:
+            # For unknown pongs, only reset the logged flag when we drop below the threshold.
+            # We do not emit an event here since there is no explicit expectation for a reset notification.
            self._unknown_pong_logged = False
            return
 
```
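The dynamic-cache hunks tighten three things: the `low_*` properties now compare against `_allowed_delta`, the cleanup loops expire only entries strictly older than the TTL, and `handle_send_ping` throttles event emission to every second ping unless the threshold is exceeded. A reduced standalone model of that bookkeeping (a toy class, not the library's `PingPongCache`, which additionally fires interface events and tracks unknown pongs):

```python
from datetime import datetime, timedelta


class PendingPongTracker:
    """Toy model of the pending-pong bookkeeping: a TTL'd set plus an allowed mismatch delta."""

    def __init__(self, *, allowed_delta: int = 5, ttl: int = 300) -> None:
        self._allowed_delta = allowed_delta
        self._ttl = ttl
        self._pending: set[datetime] = set()

    @property
    def low_pending(self) -> bool:
        """True while the pending count is at or below the allowed delta (mirrors low_pending_pongs)."""
        self._cleanup()
        return len(self._pending) <= self._allowed_delta

    def handle_send_ping(self, *, ping_ts: datetime) -> bool:
        """Record a ping; return True when an event would be emitted (every 2nd ping, or above the delta)."""
        self._pending.add(ping_ts)
        count = len(self._pending)
        return count > self._allowed_delta or count % 2 == 0

    def handle_received_pong(self, *, ping_ts: datetime) -> None:
        self._pending.discard(ping_ts)

    def _cleanup(self) -> None:
        """Drop entries strictly older than the TTL, as in the fixed cleanup loops."""
        now = datetime.now()
        self._pending = {ts for ts in self._pending if (now - ts).total_seconds() <= self._ttl}


tracker = PendingPongTracker(allowed_delta=2, ttl=60)
stamps = [datetime.now() + timedelta(microseconds=i) for i in range(3)]
emits = [tracker.handle_send_ping(ping_ts=ts) for ts in stamps]
assert emits == [False, True, True]   # 2nd ping (even count), 3rd ping (above delta)
assert tracker.low_pending is False   # 3 pending > allowed_delta of 2
tracker.handle_received_pong(ping_ts=stamps[0])
assert tracker.low_pending is True
```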
aiohomematic/caches/persistent.py

```diff
@@ -250,14 +250,24 @@ class DeviceDescriptionCache(BasePersistentCache):
         addr_map.pop(address, None)
         desc_map.pop(address, None)
 
-    def get_addresses(self, *, interface_id: str) -> frozenset[str]:
+    def get_addresses(self, *, interface_id: str | None = None) -> frozenset[str]:
         """Return the addresses by interface as a set."""
-
+        if interface_id:
+            return frozenset(self._addresses[interface_id])
+        return frozenset(addr for interface_id in self.get_interface_ids() for addr in self._addresses[interface_id])
 
     def get_device_descriptions(self, *, interface_id: str) -> Mapping[str, DeviceDescription]:
         """Return the devices by interface."""
         return self._device_descriptions[interface_id]
 
+    def get_interface_ids(self) -> tuple[str, ...]:
+        """Return the interface ids."""
+        return tuple(self._raw_device_descriptions.keys())
+
+    def has_device_descriptions(self, *, interface_id: str) -> bool:
+        """Return the devices by interface."""
+        return interface_id in self._device_descriptions
+
     def find_device_description(self, *, interface_id: str, device_address: str) -> DeviceDescription | None:
         """Return the device description by interface and device_address."""
         return self._device_descriptions[interface_id].get(device_address)
```
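`get_addresses` now treats `interface_id` as optional and aggregates over all interfaces when it is omitted, alongside the new `get_interface_ids` and `has_device_descriptions` helpers. The same lookup pattern over a plain dict-of-dicts, purely illustrative:

```python
from collections.abc import Mapping

# Hypothetical cache payload: interface_id -> {device_address: description}
_descriptions: dict[str, dict[str, Mapping[str, str]]] = {
    "BidCos-RF": {"VCU0000001": {"TYPE": "HM-Sec-SC"}},
    "HmIP-RF": {"001F9A498BB5D4": {"TYPE": "HmIP-SWDO"}},
}


def get_interface_ids() -> tuple[str, ...]:
    return tuple(_descriptions.keys())


def has_device_descriptions(*, interface_id: str) -> bool:
    return interface_id in _descriptions


def get_addresses(*, interface_id: str | None = None) -> frozenset[str]:
    """Addresses for one interface, or for all interfaces when no id is given."""
    if interface_id:
        return frozenset(_descriptions.get(interface_id, {}))
    return frozenset(addr for iid in get_interface_ids() for addr in _descriptions[iid])


assert get_addresses(interface_id="BidCos-RF") == frozenset({"VCU0000001"})
assert len(get_addresses()) == 2
assert has_device_descriptions(interface_id="HmIP-RF")
```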
aiohomematic/central/__init__.py

```diff
@@ -92,6 +92,7 @@ from aiohomematic.const import (
     CONNECTION_CHECKER_INTERVAL,
     DATA_POINT_EVENTS,
     DATETIME_FORMAT_MILLIS,
+    DEFAULT_DELAY_NEW_DEVICE_CREATION,
     DEFAULT_ENABLE_DEVICE_FIRMWARE_CHECK,
     DEFAULT_ENABLE_PROGRAM_SCAN,
     DEFAULT_ENABLE_SYSVAR_SCAN,
@@ -136,6 +137,7 @@ from aiohomematic.const import (
     Parameter,
     ParamsetKey,
     ProxyInitState,
+    SourceOfDeviceCreation,
     SystemInformation,
 )
 from aiohomematic.decorators import inspector
@@ -164,6 +166,7 @@ from aiohomematic.support import (
     LogContextMixin,
     PayloadMixin,
     check_config,
+    extract_device_addresses_from_device_descriptions,
     extract_exc_args,
     get_channel_no,
     get_device_address,
@@ -506,7 +509,9 @@ class CentralUnit(LogContextMixin, PayloadMixin):
         if self._config.start_direct:
             if await self._create_clients():
                 for client in self._clients.values():
-                    await self.
+                    await self._refresh_device_descriptions_and_create_missing_devices(
+                        client=client, refresh_only_existing=False
+                    )
         else:
             self._clients_started = await self._start_clients()
             if self._config.enable_server:
@@ -576,11 +581,15 @@ class CentralUnit(LogContextMixin, PayloadMixin):
     async def refresh_firmware_data(self, *, device_address: str | None = None) -> None:
         """Refresh device firmware data."""
         if device_address and (device := self.get_device(address=device_address)) is not None and device.is_updatable:
-            await self.
+            await self._refresh_device_descriptions_and_create_missing_devices(
+                client=device.client, refresh_only_existing=True, device_address=device_address
+            )
             device.refresh_firmware_data()
         else:
             for client in self._clients.values():
-                await self.
+                await self._refresh_device_descriptions_and_create_missing_devices(
+                    client=client, refresh_only_existing=True
+                )
             for device in self._devices.values():
                 if device.is_updatable:
                     device.refresh_firmware_data()
@@ -595,9 +604,12 @@ class CentralUnit(LogContextMixin, PayloadMixin):
         ]:
             await self.refresh_firmware_data(device_address=device.address)
 
-    async def
-
+    async def _refresh_device_descriptions_and_create_missing_devices(
+        self, *, client: hmcl.Client, refresh_only_existing: bool, device_address: str | None = None
+    ) -> None:
+        """Refresh device descriptions and create missing devices."""
         device_descriptions: tuple[DeviceDescription, ...] | None = None
+
         if (
             device_address
             and (device_description := await client.get_device_description(device_address=device_address)) is not None
@@ -606,10 +618,25 @@ class CentralUnit(LogContextMixin, PayloadMixin):
         else:
             device_descriptions = await client.list_devices()
 
+        if (
+            device_descriptions
+            and refresh_only_existing
+            and (
+                existing_device_descriptions := tuple(
+                    dev_desc
+                    for dev_desc in list(device_descriptions)
+                    if dev_desc["ADDRESS"]
+                    in self.device_descriptions.get_device_descriptions(interface_id=client.interface_id)
+                )
+            )
+        ):
+            device_descriptions = existing_device_descriptions
+
         if device_descriptions:
             await self._add_new_devices(
                 interface_id=client.interface_id,
                 device_descriptions=device_descriptions,
+                source=SourceOfDeviceCreation.REFRESH,
             )
 
     async def _start_clients(self) -> bool:
@@ -618,13 +645,15 @@ class CentralUnit(LogContextMixin, PayloadMixin):
             return False
         await self._load_caches()
         if new_device_addresses := self._check_for_new_device_addresses():
-            await self._create_devices(new_device_addresses=new_device_addresses)
+            await self._create_devices(new_device_addresses=new_device_addresses, source=SourceOfDeviceCreation.CACHE)
         await self._init_hub()
         await self._init_clients()
         # Proactively fetch device descriptions if none were created yet to avoid slow startup
         if not self._devices:
             for client in self._clients.values():
-                await self.
+                await self._refresh_device_descriptions_and_create_missing_devices(
+                    client=client, refresh_only_existing=False
+                )
         return True
 
     async def _stop_clients(self) -> None:
```
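The new `_refresh_device_descriptions_and_create_missing_devices` takes a `refresh_only_existing` switch: during a firmware refresh the freshly listed descriptions are narrowed to addresses the cache already knows, so a refresh cannot create devices as a side effect. A small sketch of that filtering step, with a stand-in `DeviceDescription` TypedDict that only carries the fields used here:

```python
from typing import TypedDict


class DeviceDescription(TypedDict, total=False):
    """Stand-in with only the fields this sketch needs, not the library's own definition."""

    ADDRESS: str
    PARENT: str


def filter_to_existing(
    *, device_descriptions: tuple[DeviceDescription, ...], known_addresses: frozenset[str]
) -> tuple[DeviceDescription, ...]:
    """Keep only descriptions whose ADDRESS is already cached (the refresh_only_existing=True path)."""
    return tuple(desc for desc in device_descriptions if desc["ADDRESS"] in known_addresses)


listed = (
    DeviceDescription(ADDRESS="VCU0000001", PARENT=""),
    DeviceDescription(ADDRESS="NEW0000002", PARENT=""),
)
# During a firmware refresh only the first entry survives; the unknown device is left
# to the regular new-device path instead of being created as a side effect.
assert filter_to_existing(device_descriptions=listed, known_addresses=frozenset({"VCU0000001"})) == listed[:1]
```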
aiohomematic/central/__init__.py (continued)

```diff
@@ -934,7 +963,9 @@ class CentralUnit(LogContextMixin, PayloadMixin):
             await self._data_cache.load()
         return True
 
-    async def _create_devices(
+    async def _create_devices(
+        self, *, new_device_addresses: Mapping[str, set[str]], source: SourceOfDeviceCreation
+    ) -> None:
         """Trigger creation of the objects that expose the functionality."""
         if not self._clients:
             raise AioHomematicException(
@@ -988,6 +1019,7 @@ class CentralUnit(LogContextMixin, PayloadMixin):
             system_event=BackendSystemEvent.DEVICES_CREATED,
             new_data_points=new_dps,
             new_channel_events=new_channel_events,
+            source=source,
         )
 
     async def delete_device(self, *, interface_id: str, device_address: str) -> None:
@@ -1014,15 +1046,45 @@ class CentralUnit(LogContextMixin, PayloadMixin):
         for address in addresses:
             if device := self._devices.get(address):
                 self.remove_device(device=device)
-        await self.save_caches()
+        await self.save_caches(save_device_descriptions=True, save_paramset_descriptions=True)
 
     @callback_backend_system(system_event=BackendSystemEvent.NEW_DEVICES)
     async def add_new_devices(self, *, interface_id: str, device_descriptions: tuple[DeviceDescription, ...]) -> None:
         """Add new devices to central unit."""
-
+        source = (
+            SourceOfDeviceCreation.NEW
+            if self._device_descriptions.has_device_descriptions(interface_id=interface_id)
+            else SourceOfDeviceCreation.INIT
+        )
+        await self._add_new_devices(interface_id=interface_id, device_descriptions=device_descriptions, source=source)
+
+    async def add_new_device_manually(self, *, interface_id: str, address: str) -> None:
+        """Add new devices manually triggered to central unit."""
+        if interface_id not in self._clients:
+            _LOGGER.warning(
+                "ADD_NEW_DEVICES_MANUALLY failed: Missing client for interface_id %s",
+                interface_id,
+            )
+            return
+        client = self._clients[interface_id]
+        if (device_descriptions := await client.get_all_device_description(device_address=address)) is None:
+            _LOGGER.warning(
+                "ADD_NEW_DEVICES_MANUALLY failed: No device description found for address %s on interface_id %s",
+                address,
+                interface_id,
+            )
+            return
+
+        await self._add_new_devices(
+            interface_id=interface_id,
+            device_descriptions=device_descriptions,
+            source=SourceOfDeviceCreation.MANUAL,
+        )
 
     @inspector(measure_performance=True)
-    async def _add_new_devices(
+    async def _add_new_devices(
+        self, *, interface_id: str, device_descriptions: tuple[DeviceDescription, ...], source: SourceOfDeviceCreation
+    ) -> None:
         """Add new devices to central unit."""
         if not device_descriptions:
             _LOGGER.debug(
```
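`add_new_devices` now tags each batch with a `SourceOfDeviceCreation` before delegating to `_add_new_devices`: backend callbacks count as INIT while the interface has no cached descriptions yet and as NEW afterwards, while the new `add_new_device_manually` entry point always uses MANUAL. The member names below are taken from this diff; the enum's base class and string values are assumptions:

```python
from enum import StrEnum


class SourceOfDeviceCreation(StrEnum):
    """Member names mirrored from the diff; base class and values here are assumptions."""

    CACHE = "cache"
    INIT = "init"
    MANUAL = "manual"
    NEW = "new"
    REFRESH = "refresh"


def classify_backend_callback(*, interface_already_known: bool) -> SourceOfDeviceCreation:
    """newDevices callbacks count as INIT until the interface has cached descriptions, then as NEW."""
    return SourceOfDeviceCreation.NEW if interface_already_known else SourceOfDeviceCreation.INIT


assert classify_backend_callback(interface_already_known=False) is SourceOfDeviceCreation.INIT
assert classify_backend_callback(interface_already_known=True) is SourceOfDeviceCreation.NEW
# The other members map to the remaining call sites in this diff: CACHE for devices restored
# at startup, REFRESH for description refreshes, MANUAL for add_new_device_manually.
```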
aiohomematic/central/__init__.py (continued)

```diff
@@ -1045,57 +1107,95 @@ class CentralUnit(LogContextMixin, PayloadMixin):
             return
 
         async with self._device_add_semaphore:
-
-
+            if not (
+                new_device_descriptions := self._identify_new_device_descriptions(
+                    device_descriptions=device_descriptions, interface_id=interface_id
+                )
+            ):
+                _LOGGER.debug("ADD_NEW_DEVICES: Nothing to add for interface_id %s", interface_id)
+                return
+
+            # Here we block the automatic creation of new devices, if required
+            if (
+                self._config.delay_new_device_creation
+                and source == SourceOfDeviceCreation.NEW
+                and (
+                    new_addresses := extract_device_addresses_from_device_descriptions(
+                        device_descriptions=new_device_descriptions
+                    )
+                )
+            ):
+                self.fire_backend_system_callback(
+                    system_event=BackendSystemEvent.DEVICES_DELAYED,
+                    new_addresses=new_addresses,
+                    interface_id=interface_id,
+                    source=source,
+                )
+                return
+
             client = self._clients[interface_id]
-
-
-            for dev_desc in device_descriptions:
+            save_descriptions = False
+            for dev_desc in new_device_descriptions:
                 try:
-                    address = dev_desc["ADDRESS"]
-                    # Check existence before mutating cache to ensure we detect truly new addresses.
-                    is_new_address = address not in existing_map
                     self._device_descriptions.add_device(interface_id=interface_id, device_description=dev_desc)
-
-
-                    await client.fetch_paramset_descriptions(device_description=dev_desc)
-                    save_paramset_descriptions = True
+                    await client.fetch_paramset_descriptions(device_description=dev_desc)
+                    save_descriptions = True
                 except Exception as exc:  # pragma: no cover
-
-                    save_paramset_descriptions = False
+                    save_descriptions = False
                     _LOGGER.error(
-                        "
+                        "UPDATE_CACHES_WITH_NEW_DEVICES failed: %s [%s]",
                         type(exc).__name__,
                         extract_exc_args(exc=exc),
                     )
 
             await self.save_caches(
-                save_device_descriptions=
-                save_paramset_descriptions=
+                save_device_descriptions=save_descriptions,
+                save_paramset_descriptions=save_descriptions,
             )
-            if new_device_addresses := self._check_for_new_device_addresses():
-                await self._device_details.load()
-                await self._data_cache.load()
-                await self._create_devices(new_device_addresses=new_device_addresses)
 
-
+            if new_device_addresses := self._check_for_new_device_addresses(interface_id=interface_id):
+                await self._device_details.load()
+                await self._data_cache.load(interface=client.interface)
+                await self._create_devices(new_device_addresses=new_device_addresses, source=source)
+
+    def _identify_new_device_descriptions(
+        self, *, device_descriptions: tuple[DeviceDescription, ...], interface_id: str | None = None
+    ) -> tuple[DeviceDescription, ...]:
+        """Identify devices whose ADDRESS isn't already known on any interface."""
+        known_addresses = self._device_descriptions.get_addresses(interface_id=interface_id)
+        return tuple(
+            dev_desc
+            for dev_desc in device_descriptions
+            if (dev_desc["ADDRESS"] if dev_desc["PARENT"] == "" else dev_desc["PARENT"]) not in known_addresses
+        )
+
+    def _check_for_new_device_addresses(self, *, interface_id: str | None = None) -> Mapping[str, set[str]]:
         """Check if there are new devices that need to be created."""
         new_device_addresses: dict[str, set[str]] = {}
-
-
+
+        # Cache existing device addresses once to avoid repeated mapping lookups
+        existing_addresses = set(self._devices.keys())
+
+        def _check_for_new_device_addresses_helper(*, iid: str) -> None:
+            """Check if there are new devices that need to be created."""
+            if not self._paramset_descriptions.has_interface_id(interface_id=iid):
                 _LOGGER.debug(
                     "CHECK_FOR_NEW_DEVICE_ADDRESSES: Skipping interface %s, missing paramsets",
-
+                    iid,
                 )
-
-
+                return
             # Build the set locally and assign only if non-empty to avoid add-then-delete pattern
-
-
-
-
-
-
+            # Use set difference for speed on large collections
+            addresses = set(self._device_descriptions.get_addresses(interface_id=iid))
+            # get_addresses returns an iterable (likely tuple); convert to set once for efficient diff
+            if new_set := addresses - existing_addresses:
+                new_device_addresses[iid] = new_set
+
+        if interface_id:
+            _check_for_new_device_addresses_helper(iid=interface_id)
+        else:
+            for iid in self.interface_ids:
+                _check_for_new_device_addresses_helper(iid=iid)
 
         if _LOGGER.isEnabledFor(level=DEBUG):
             count = sum(len(item) for item in new_device_addresses.values())
```
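Two parts of the hunk above are worth isolating: a description counts as new only when its device address (`PARENT`, or `ADDRESS` for the device entry itself) is unknown, and spontaneous NEW batches can be parked behind a DEVICES_DELAYED event when `delay_new_device_creation` is enabled. A compact sketch, with plain dicts standing in for `DeviceDescription` and lowercase strings standing in for the enum values:

```python
Description = dict[str, str]  # stand-in for the library's DeviceDescription TypedDict


def identify_new(*, descriptions: tuple[Description, ...], known: frozenset[str]) -> tuple[Description, ...]:
    """A description is new when its device address (PARENT, or ADDRESS for the device itself) is unknown."""
    return tuple(
        desc
        for desc in descriptions
        if (desc["ADDRESS"] if desc.get("PARENT", "") == "" else desc["PARENT"]) not in known
    )


def should_delay(*, delay_enabled: bool, source: str, new_descriptions: tuple[Description, ...]) -> bool:
    """Only spontaneous backend announcements ("new") are held back; manual/refresh/cache sources pass through."""
    return bool(delay_enabled and source == "new" and new_descriptions)


listed = (
    {"ADDRESS": "VCU0000001", "PARENT": ""},              # known device -> filtered out
    {"ADDRESS": "VCU0000001:1", "PARENT": "VCU0000001"},  # channel of a known device -> filtered out
    {"ADDRESS": "NEW0000002", "PARENT": ""},              # unknown device -> kept
    {"ADDRESS": "NEW0000002:1", "PARENT": "NEW0000002"},  # channel of an unknown device -> kept
)
fresh = identify_new(descriptions=listed, known=frozenset({"VCU0000001"}))
assert [d["ADDRESS"] for d in fresh] == ["NEW0000002", "NEW0000002:1"]
assert should_delay(delay_enabled=True, source="new", new_descriptions=fresh) is True
assert should_delay(delay_enabled=True, source="manual", new_descriptions=fresh) is False
```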
aiohomematic/central/__init__.py (continued)

```diff
@@ -1640,7 +1740,7 @@ class _Scheduler(threading.Thread):
             ),
         ]
 
-    def _backend_system_callback(self, system_event: BackendSystemEvent, **kwargs: Any) -> None:
+    def _backend_system_callback(self, *, system_event: BackendSystemEvent, **kwargs: Any) -> None:
         """Handle event of new device creation, to delay the start of the sysvar scan."""
         if system_event == BackendSystemEvent.DEVICES_CREATED:
             self._devices_created = True
@@ -1670,12 +1770,31 @@ class _Scheduler(threading.Thread):
                 _LOGGER.debug("SCHEDULER: Waiting till central %s is started", self._central.name)
                 await asyncio.sleep(SCHEDULER_NOT_STARTED_SLEEP)
                 continue
+
+            any_executed = False
             for job in self._scheduler_jobs:
                 if not self._active or not job.ready:
                     continue
                 await job.run()
                 job.schedule_next_execution()
-
+                any_executed = True
+
+            if not self._active:
+                break  # type: ignore[unreachable]
+
+            # If no job was executed this cycle, we can sleep until the next job is due
+            if not any_executed:
+                now = datetime.now()
+                try:
+                    next_due = min(job.next_run for job in self._scheduler_jobs)
+                    # Sleep until the next task should run, but cap to 1s to remain responsive
+                    delay = max(0.0, (next_due - now).total_seconds())
+                    await asyncio.sleep(min(1.0, delay))
+                except ValueError:
+                    # No jobs configured; fallback to default loop sleep
+                    await asyncio.sleep(SCHEDULER_LOOP_SLEEP)
+            else:
+                # When work was done, yield briefly to the loop
                 await asyncio.sleep(SCHEDULER_LOOP_SLEEP)
 
     async def _check_connection(self) -> None:
```
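The scheduler loop now remembers whether any job ran during a cycle; if nothing ran, it sleeps until the earliest `job.next_run` (capped at one second) instead of always polling at `SCHEDULER_LOOP_SLEEP`. A standalone sketch of that idle-delay computation, using a hypothetical `Job` stand-in and an assumed constant value:

```python
import asyncio
from dataclasses import dataclass
from datetime import datetime, timedelta

SCHEDULER_LOOP_SLEEP = 0.1  # assumption: the real constant lives in aiohomematic.const and may differ


@dataclass
class Job:
    """Hypothetical stand-in for _SchedulerJob; the diff adds a read-only next_run property."""

    next_run: datetime


def idle_delay(jobs: list[Job], *, now: datetime | None = None) -> float:
    """Sleep time for a cycle in which no job ran: until the earliest next_run, capped at 1 s."""
    now = now or datetime.now()
    try:
        next_due = min(job.next_run for job in jobs)
    except ValueError:  # no jobs configured -> fall back to the default loop sleep
        return SCHEDULER_LOOP_SLEEP
    return min(1.0, max(0.0, (next_due - now).total_seconds()))


async def main() -> None:
    jobs = [Job(next_run=datetime.now() + timedelta(seconds=5))]
    await asyncio.sleep(idle_delay(jobs))  # sleeps ~1 s here rather than busy-looping every 100 ms


asyncio.run(main())
```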
aiohomematic/central/__init__.py (continued)

```diff
@@ -1823,6 +1942,11 @@ class _SchedulerJob:
         """Return if the job can be executed."""
         return self._next_run < datetime.now()
 
+    @property
+    def next_run(self) -> datetime:
+        """Return the next scheduled run timestamp."""
+        return self._next_run
+
     async def run(self) -> None:
         """Run the task."""
         await self._task()
@@ -1848,6 +1972,7 @@ class CentralConfig:
         callback_host: str | None = None,
         callback_port: int | None = None,
         default_callback_port: int = PORT_ANY,
+        delay_new_device_creation: bool = DEFAULT_DELAY_NEW_DEVICE_CREATION,
         enable_device_firmware_check: bool = DEFAULT_ENABLE_DEVICE_FIRMWARE_CHECK,
         enable_program_scan: bool = DEFAULT_ENABLE_PROGRAM_SCAN,
         enable_sysvar_scan: bool = DEFAULT_ENABLE_SYSVAR_SCAN,
@@ -1876,6 +2001,7 @@ class CentralConfig:
         self.central_id: Final = central_id
         self.client_session: Final = client_session
         self.default_callback_port: Final = default_callback_port
+        self.delay_new_device_creation: Final = delay_new_device_creation
         self.enable_device_firmware_check: Final = enable_device_firmware_check
         self.enable_program_scan: Final = enable_program_scan
         self.enable_sysvar_scan: Final = enable_sysvar_scan
```
aiohomematic/central/xml_rpc_server.py

```diff
@@ -27,6 +27,9 @@ _LOGGER: Final = logging.getLogger(__name__)
 class RPCFunctions:
     """The XML-RPC functions the backend will expect."""
 
+    # Disable kw-only linter for this class since XML-RPC signatures are positional by protocol
+    __kwonly_check__ = False
+
     def __init__(self, *, xml_rpc_server: XmlRpcServer) -> None:
         """Init RPCFunctions."""
         self._xml_rpc_server: Final = xml_rpc_server
@@ -161,6 +164,8 @@ class AioHomematicXMLRPCServer(SimpleXMLRPCServer):
     system_listMethods(self, interface_id: str.
     """
 
+    __kwonly_check__ = False
+
     def system_listMethods(self, interface_id: str | None = None, /) -> list[str]:
         """Return a list of the methods supported by the server."""
         return SimpleXMLRPCServer.system_listMethods(self)
@@ -198,7 +203,7 @@ class XmlRpcServer(threading.Thread):
         self._simple_xml_rpc_server.register_instance(RPCFunctions(xml_rpc_server=self), allow_dotted_names=True)
         self._centrals: Final[dict[str, hmcu.CentralUnit]] = {}
 
-    def __new__(cls, ip_addr: str, port: int) -> XmlRpcServer:  # noqa: PYI034
+    def __new__(cls, ip_addr: str, port: int) -> XmlRpcServer:  # noqa: PYI034 # kwonly: disable
         """Create new XmlRPC server."""
         if (xml_rpc := cls._instances.get((ip_addr, port))) is None:
             _LOGGER.debug("Creating XmlRpc server")
```