ramses-rf 0.22.40-py3-none-any.whl → 0.51.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. ramses_cli/__init__.py +18 -0
  2. ramses_cli/client.py +597 -0
  3. ramses_cli/debug.py +20 -0
  4. ramses_cli/discovery.py +405 -0
  5. ramses_cli/utils/cat_slow.py +17 -0
  6. ramses_cli/utils/convert.py +60 -0
  7. ramses_rf/__init__.py +31 -10
  8. ramses_rf/binding_fsm.py +787 -0
  9. ramses_rf/const.py +124 -105
  10. ramses_rf/database.py +297 -0
  11. ramses_rf/device/__init__.py +69 -39
  12. ramses_rf/device/base.py +187 -376
  13. ramses_rf/device/heat.py +540 -552
  14. ramses_rf/device/hvac.py +279 -171
  15. ramses_rf/dispatcher.py +153 -177
  16. ramses_rf/entity_base.py +478 -361
  17. ramses_rf/exceptions.py +82 -0
  18. ramses_rf/gateway.py +377 -513
  19. ramses_rf/helpers.py +57 -19
  20. ramses_rf/py.typed +0 -0
  21. ramses_rf/schemas.py +148 -194
  22. ramses_rf/system/__init__.py +16 -23
  23. ramses_rf/system/faultlog.py +363 -0
  24. ramses_rf/system/heat.py +295 -302
  25. ramses_rf/system/schedule.py +312 -198
  26. ramses_rf/system/zones.py +318 -238
  27. ramses_rf/version.py +2 -8
  28. ramses_rf-0.51.2.dist-info/METADATA +72 -0
  29. ramses_rf-0.51.2.dist-info/RECORD +55 -0
  30. {ramses_rf-0.22.40.dist-info → ramses_rf-0.51.2.dist-info}/WHEEL +1 -2
  31. ramses_rf-0.51.2.dist-info/entry_points.txt +2 -0
  32. {ramses_rf-0.22.40.dist-info → ramses_rf-0.51.2.dist-info/licenses}/LICENSE +1 -1
  33. ramses_tx/__init__.py +160 -0
  34. {ramses_rf/protocol → ramses_tx}/address.py +65 -59
  35. ramses_tx/command.py +1454 -0
  36. ramses_tx/const.py +903 -0
  37. ramses_tx/exceptions.py +92 -0
  38. {ramses_rf/protocol → ramses_tx}/fingerprints.py +56 -15
  39. {ramses_rf/protocol → ramses_tx}/frame.py +132 -131
  40. ramses_tx/gateway.py +338 -0
  41. ramses_tx/helpers.py +883 -0
  42. {ramses_rf/protocol → ramses_tx}/logger.py +67 -53
  43. {ramses_rf/protocol → ramses_tx}/message.py +155 -191
  44. ramses_tx/opentherm.py +1260 -0
  45. ramses_tx/packet.py +210 -0
  46. {ramses_rf/protocol → ramses_tx}/parsers.py +1266 -1003
  47. ramses_tx/protocol.py +801 -0
  48. ramses_tx/protocol_fsm.py +672 -0
  49. ramses_tx/py.typed +0 -0
  50. {ramses_rf/protocol → ramses_tx}/ramses.py +262 -185
  51. {ramses_rf/protocol → ramses_tx}/schemas.py +150 -133
  52. ramses_tx/transport.py +1471 -0
  53. ramses_tx/typed_dicts.py +492 -0
  54. ramses_tx/typing.py +181 -0
  55. ramses_tx/version.py +4 -0
  56. ramses_rf/discovery.py +0 -398
  57. ramses_rf/protocol/__init__.py +0 -59
  58. ramses_rf/protocol/backports.py +0 -42
  59. ramses_rf/protocol/command.py +0 -1576
  60. ramses_rf/protocol/const.py +0 -697
  61. ramses_rf/protocol/exceptions.py +0 -111
  62. ramses_rf/protocol/helpers.py +0 -390
  63. ramses_rf/protocol/opentherm.py +0 -1170
  64. ramses_rf/protocol/packet.py +0 -235
  65. ramses_rf/protocol/protocol.py +0 -613
  66. ramses_rf/protocol/transport.py +0 -1011
  67. ramses_rf/protocol/version.py +0 -10
  68. ramses_rf/system/hvac.py +0 -82
  69. ramses_rf-0.22.40.dist-info/METADATA +0 -64
  70. ramses_rf-0.22.40.dist-info/RECORD +0 -42
  71. ramses_rf-0.22.40.dist-info/top_level.txt +0 -1
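The renames in the list above ({ramses_rf/protocol → ramses_tx}) show the old ramses_rf.protocol sub-package being split out into a new top-level ramses_tx package, with ramses_cli added alongside it. A minimal sketch of what that implies for downstream imports, assuming names such as Command and Message are re-exported from the ramses_tx package root (as the new gateway.py below imports them):

```python
# 0.22.40: protocol-layer names were imported from the ramses_rf.protocol sub-package
# (the old gateway.py below removes lines such as "from .protocol import (... Command, ...)")
# from ramses_rf.protocol import Command

# 0.51.2: the equivalent names now come from the new top-level ramses_tx package,
# mirroring the import block in the new ramses_rf/gateway.py shown below
from ramses_tx import Command, Message
```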
ramses_rf/gateway.py CHANGED
@@ -1,328 +1,85 @@
  #!/usr/bin/env python3
- # -*- coding: utf-8 -*-
- #
- """RAMSES RF - a RAMSES-II protocol decoder & analyser.

- The serial to RF gateway (HGI80, not RFG100).
- """
+ # TODO:
+ # - sort out gwy.config...
+ # - sort out reduced processing
+
+
+ """RAMSES RF -the gateway (i.e. HGI80 / evofw3, not RFG100)."""
+
  from __future__ import annotations

  import asyncio
  import logging
- import os
- import signal
- from concurrent import futures
- from datetime import datetime as dt
- from threading import Lock
  from types import SimpleNamespace
- from typing import Callable, Optional, TextIO
-
- from ramses_rf.device import DeviceHeat, DeviceHvac, Fakeable
- from ramses_rf.protocol.frame import _CodeT, _DeviceIdT, _PayloadT, _VerbT
- from ramses_rf.protocol.protocol import _MessageProtocolT, _MessageTransportT
- from ramses_rf.protocol.transport import _PacketProtocolT, _PacketTransportT
-
- from .const import DONT_CREATE_MESSAGES, SZ_DEVICE_ID, SZ_DEVICES, __dev_mode__
- from .device import Device, device_factory
- from .dispatcher import Message, process_msg
- from .helpers import schedule_task, shrink
- from .protocol import (
- SZ_POLLER_TASK,
+ from typing import TYPE_CHECKING, Any
+
+ from ramses_tx import (
  Address,
  Command,
- create_msg_stack,
- create_pkt_stack,
+ Engine,
+ Message,
+ Packet,
+ Priority,
+ extract_known_hgi_id,
  is_valid_dev_id,
- set_logger_timesource,
+ protocol_factory,
  set_pkt_logging_config,
+ transport_factory,
+ )
+ from ramses_tx.const import (
+ DEFAULT_GAP_DURATION,
+ DEFAULT_MAX_RETRIES,
+ DEFAULT_NUM_REPEATS,
+ DEFAULT_SEND_TIMEOUT,
+ DEFAULT_WAIT_FOR_REPLY,
+ SZ_ACTIVE_HGI,
+ )
+ from ramses_tx.schemas import (
+ SCH_ENGINE_CONFIG,
+ SZ_BLOCK_LIST,
+ SZ_ENFORCE_KNOWN_LIST,
+ SZ_KNOWN_LIST,
+ PktLogConfigT,
+ PortConfigT,
  )
- from .protocol.address import HGI_DEV_ADDR, NON_DEV_ADDR, NUL_DEV_ADDR
- from .protocol.schemas import SZ_PACKET_LOG, SZ_PORT_CONFIG, SZ_PORT_NAME
+ from ramses_tx.transport import SZ_READER_TASK
+
+ from .const import DONT_CREATE_MESSAGES, SZ_DEVICES
+ from .database import MessageIndex
+ from .device import DeviceHeat, DeviceHvac, Fakeable, HgiGateway, device_factory
+ from .dispatcher import detect_array_fragment, process_msg
  from .schemas import (
- SCH_GLOBAL_CONFIG,
+ SCH_GATEWAY_CONFIG,
+ SCH_GLOBAL_SCHEMAS,
  SCH_TRAITS,
  SZ_ALIAS,
- SZ_BLOCK_LIST,
  SZ_CLASS,
  SZ_CONFIG,
- SZ_ENFORCE_KNOWN_LIST,
+ SZ_DISABLE_DISCOVERY,
+ SZ_ENABLE_EAVESDROP,
  SZ_FAKED,
- SZ_KNOWN_LIST,
  SZ_MAIN_TCS,
  SZ_ORPHANS,
- load_config,
  load_schema,
  )
- from .system import System
+ from .system import Evohome

- # skipcq: PY-W2000
- from .protocol import ( # noqa: F401, isort: skip, pylint: disable=unused-import
+ from .const import ( # noqa: F401, isort: skip, pylint: disable=unused-import
  I_,
  RP,
  RQ,
  W_,
- F9,
- FA,
- FC,
- FF,
  Code,
  )

+ if TYPE_CHECKING:
+ from ramses_tx import DeviceIdT, DeviceListT, RamsesTransportT

- DEV_MODE = __dev_mode__ and False
+ from .device import Device
+ from .entity_base import Parent

  _LOGGER = logging.getLogger(__name__)
- if DEV_MODE:
- _LOGGER.setLevel(logging.DEBUG)
-
-
- class Engine:
- """The engine class."""
-
- _create_msg_stack: Callable = create_msg_stack
- _create_pkt_stack: Callable = create_pkt_stack
-
- def __init__(
- self,
- port_name: None | str,
- input_file: None | TextIO = None,
- port_config: None | dict = None,
- loop: None | asyncio.AbstractEventLoop = None,
- ) -> None:
-
- self.ser_name = port_name
- self._input_file = input_file
- self._port_config = port_config or {}
- self._loop = loop or asyncio.get_running_loop()
-
- self._include: dict[_DeviceIdT, dict] = {} # aka known_list, and ?allow_list
- self._exclude: dict[_DeviceIdT, dict] = {} # aka block_list
- self._unwanted: list[_DeviceIdT] = [
- NON_DEV_ADDR.id,
- NUL_DEV_ADDR.id,
- "01:000001",
- ]
-
- self.config = SimpleNamespace() # **SCH_CONFIG_GATEWAY({}))
-
- self.msg_protocol: _MessageProtocolT = None # type: ignore[assignment]
- self.msg_transport: _MessageTransportT = None # type: ignore[assignment]
- self.pkt_protocol: _PacketProtocolT = None # type: ignore[assignment]
- self.pkt_transport: _PacketTransportT = None # type: ignore[assignment]
-
- self._engine_lock = Lock()
- self._engine_state: None | tuple[None | Callable, tuple] = None
-
- def __str__(self) -> str:
- if self.pkt_protocol and self.pkt_protocol._hgi80[SZ_DEVICE_ID]:
- return f"{self.pkt_protocol._hgi80[SZ_DEVICE_ID]} ({self.ser_name})"
- return f"{HGI_DEV_ADDR.id} ({self.ser_name})"
-
- @property
- def hgi(self) -> None | Device:
- """Return the active HGI80-compatible gateway device, if known."""
- if self.pkt_protocol and self.pkt_protocol._hgi80[SZ_DEVICE_ID]:
- return self.device_by_id.get(self.pkt_protocol._hgi80[SZ_DEVICE_ID])
- return None
-
- def _setup_event_handlers(self) -> None: # HACK: for dev/test only
- def handle_exception(loop, context):
- """Handle exceptions on any platform."""
- _LOGGER.error("handle_exception(): Caught: %s", context["message"])
-
- exc = context.get("exception")
- if exc:
- raise exc
-
- if DEV_MODE:
- _LOGGER.debug("_setup_event_handlers(): Creating exception handler...")
- self._loop.set_exception_handler(handle_exception)
-
- def _dt_now(self):
- # return dt.now()
- return self.pkt_protocol._dt_now() if self.pkt_protocol else dt.now()
-
- def create_client(
- self,
- msg_handler: Callable[[Message, Optional[Message]], None],
- # msg_filter: Callable[[Message], bool] | None = None,
- ) -> tuple[_MessageProtocolT, _MessageTransportT]:
- """Create a client protocol for the RAMSES-II message transport."""
-
- # The optional filter will return True if the message is to be handled.
- # """ # TODO
- # if msg_filter is not None and not is_callback(msg_filter):
- # raise TypeError(f"Msg filter {msg_filter} is not a callback")
- return self._create_msg_stack(msg_handler)
-
- async def start(self) -> None:
- self._start()
-
- def _start(self) -> None:
- """Initiate ad-hoc sending, and (polled) receiving."""
-
- (_LOGGER.warning if DEV_MODE else _LOGGER.debug)("ENGINE: Starting poller...")
-
- pkt_receiver = (
- self.msg_transport.get_extra_info(self.msg_transport.READER)
- if self.msg_transport
- else None
- )
-
- if self.ser_name:
- source = {SZ_PORT_NAME: self.ser_name, SZ_PORT_CONFIG: self._port_config}
- else:
- source = {SZ_PACKET_LOG: self._input_file}
-
- self.pkt_protocol, self.pkt_transport = self._create_pkt_stack(
- pkt_receiver, **source
- ) # TODO: may raise SerialException
-
- if self.ser_name: # and self.msg_transport:
- self.msg_transport._set_dispatcher(self.pkt_protocol.send_data)
- else: # if self._input_file:
- set_logger_timesource(self.pkt_protocol._dt_now)
- _LOGGER.warning("Datetimes maintained as most recent packet log timestamp")
-
- async def stop(self) -> None:
- self._stop()
-
- if (task := self.pkt_source) and not task.done():
- try:
- await task
- except asyncio.CancelledError:
- pass
-
- def _stop(self) -> None:
- """Cancel all outstanding tasks."""
-
- if self.msg_transport:
- self.msg_transport.close() # ? .abort()
-
- if self.pkt_transport:
- self.pkt_transport.close() # ? .abort()
-
- def _pause(self, *args) -> None:
- """Pause the (unpaused) engine or raise a RuntimeError."""
-
- (_LOGGER.info if DEV_MODE else _LOGGER.debug)("ENGINE: Pausing engine...")
-
- if not self._engine_lock.acquire(blocking=False):
- raise RuntimeError("Unable to pause engine, failed to acquire lock")
-
- if self._engine_state is not None:
- self._engine_lock.release()
- raise RuntimeError("Unable to pause engine, it is already paused")
-
- self._engine_state, callback = (None, tuple()), None
- self._engine_lock.release()
-
- if self.pkt_protocol:
- self.pkt_protocol.pause_writing()
- self.pkt_protocol._callback, callback = None, self.pkt_protocol._callback
-
- self._engine_state = (callback, args)
-
- def _resume(self) -> tuple: # FIXME: not atomic
- """Resume the (paused) engine or raise a RuntimeError."""
-
- (_LOGGER.info if DEV_MODE else _LOGGER.debug)("ENGINE: Resuming engine...")
-
- # if not self.ser_name:
- # raise RuntimeError("Unable to resume engine, no serial port configured")
-
- if not self._engine_lock.acquire(timeout=0.1):
- raise RuntimeError("Unable to resume engine, failed to acquire lock")
-
- if self._engine_state is None:
- self._engine_lock.release()
- raise RuntimeError("Unable to resume engine, it was not paused")
-
- callback: None | Callable
- args: tuple
- callback, args = self._engine_state
-
- self._engine_lock.release()
-
- if self.pkt_protocol:
- self.pkt_protocol._callback = callback # self.msg_transport._pkt_receiver
- self.pkt_protocol.resume_writing()
-
- self._engine_state = None
-
- return args
-
- @property
- def pkt_source(self) -> None | asyncio.Task:
- if t := self.msg_transport:
- return t.get_extra_info(t.WRITER)
- return None
-
- @staticmethod
- def create_cmd(
- verb: _VerbT, device_id: _DeviceIdT, code: _CodeT, payload: _PayloadT, **kwargs
- ) -> Command:
- """Make a command addressed to device_id."""
- return Command.from_attrs(verb, device_id, code, payload, **kwargs)
-
- def send_cmd(self, cmd: Command, callback: Callable = None, **kwargs): # FIXME
- """Send a command with the option to return any response message via callback.
-
- Response packets, if any (an RP/I will follow an RQ/W), and have the same code.
- This routine is thread safe.
- """
-
- if not self.msg_protocol:
- raise RuntimeError("there is no message protocol")
-
- # self._loop.call_soon_threadsafe(
- # self.msg_protocol.send_data(cmd, callback=callback, **kwargs)
- # )
- coro = self.msg_protocol.send_data(cmd, callback=callback, **kwargs)
- fut: futures.Future = asyncio.run_coroutine_threadsafe(coro, self._loop)
- # fut: asyncio.Future = asyncio.wrap_future(fut)
- return fut
-
- async def async_send_cmd(self, cmd: Command, **kwargs) -> None | Message: # FIXME
- """Send a command with the option to not wait for a response message.
-
- Response packets, if any, follow an RQ/W (as an RP/I), and have the same code.
- This routine is thread safe.
- """
-
- # def callback(fut):
- # print(fut.result())
-
- fut = self.send_cmd(cmd, _make_awaitable=True, **kwargs)
- # fut.add_done_callback(callback)
-
- while True:
- try:
- result = fut.result(timeout=0)
-
- # except futures.CancelledError: # fut ?was cancelled by a higher layer
- # break
-
- except futures.TimeoutError: # fut/cmd has not yet completed
- pass # should be a pass
-
- except TimeoutError as exc: # raised by send_cmd()
- raise TimeoutError(f"cmd ({cmd.tx_header}) timed out: {exc}")
-
- # except RuntimeError as exc: # raised by send_cmd()
- # _LOGGER.error(f"cmd ({cmd.tx_header}) raised an exception: {exc!r}")
- # if self.msg_transport.is_closing:
- # pass
-
- except Exception as exc:
- _LOGGER.error(f"cmd ({cmd.tx_header}) raised an exception: {exc!r}")
- raise exc
-
- else:
- _LOGGER.debug(f"cmd ({cmd.tx_header}) returned: {result!r})")
- return result
-
- await asyncio.sleep(0.001)


  class Gateway(Engine):
@@ -330,81 +87,74 @@ class Gateway(Engine):

  def __init__(
  self,
- port_name: None | str,
- debug_mode: None | bool = None,
- input_file: None | TextIO = None,
- loop: None | asyncio.AbstractEventLoop = None,
- port_config: None | dict = None,
- **kwargs,
+ port_name: str | None,
+ input_file: str | None = None,
+ port_config: PortConfigT | None = None,
+ packet_log: PktLogConfigT | None = None,
+ block_list: DeviceListT | None = None,
+ known_list: DeviceListT | None = None,
+ loop: asyncio.AbstractEventLoop | None = None,
+ **kwargs: Any,
  ) -> None:
+ if kwargs.pop("debug_mode", None):
+ _LOGGER.setLevel(logging.DEBUG)

- if debug_mode:
- _LOGGER.setLevel(logging.DEBUG) # should be INFO?
- _LOGGER.debug("Starting RAMSES RF, **config = %s", kwargs)
+ kwargs = {k: v for k, v in kwargs.items() if k[:1] != "_"} # anachronism
+ config: dict[str, Any] = kwargs.pop(SZ_CONFIG, {})

  super().__init__(
- port_name, input_file=input_file, port_config=port_config, loop=loop
+ port_name,
+ input_file=input_file,
+ port_config=port_config,
+ packet_log=packet_log,
+ block_list=block_list,
+ known_list=known_list,
+ loop=loop,
+ **SCH_ENGINE_CONFIG(config),
  )

- self._tasks: list = [] # TODO: used by discovery, move lower?
- self._schema: dict[str, dict] = {}
+ if self._disable_sending:
+ config[SZ_DISABLE_DISCOVERY] = True
+ if config.get(SZ_ENABLE_EAVESDROP):
+ _LOGGER.warning(
+ f"{SZ_ENABLE_EAVESDROP}=True: this is strongly discouraged"
+ " for routine use (there be dragons here)"
+ )

- (self.config, self._schema, self._include, self._exclude) = load_config(
- self.ser_name,
- self._input_file,
- **SCH_GLOBAL_CONFIG({k: v for k, v in kwargs.items() if k[:1] != "_"}),
- )
- set_pkt_logging_config(
- cc_console=self.config.reduce_processing >= DONT_CREATE_MESSAGES,
- **self.config.packet_log or {},
- )
+ self.config = SimpleNamespace(**SCH_GATEWAY_CONFIG(config))
+ self._schema: dict[str, Any] = SCH_GLOBAL_SCHEMAS(kwargs)

- if self.config.reduce_processing < DONT_CREATE_MESSAGES:
- self.msg_protocol, self.msg_transport = self.create_client(process_msg)
+ self._tcs: Evohome | None = None

- # if self.config.reduce_processing > 0:
- self._tcs: None | System = None # type: ignore[assignment]
  self.devices: list[Device] = []
- self.device_by_id: dict[str, Device] = {}
-
- self._setup_event_handlers()
+ self.device_by_id: dict[DeviceIdT, Device] = {}

- load_schema(self, **self._schema)
+ self._zzz: MessageIndex | None = None # MessageIndex()

  def __repr__(self) -> str:
- return super().__str__()
-
- def __str__(self) -> str:
- return (self.hgi or HGI_DEV_ADDR).id
-
- def _setup_event_handlers(self) -> None: # HACK: for dev/test only
- async def handle_sig_posix(sig):
- """Handle signals on posix platform."""
- _LOGGER.debug("Received a signal (%s), processing...", sig.name)
-
- if sig == signal.SIGUSR1:
- _LOGGER.info("Schema: \r\n%s", {self.tcs.id: self.tcs.schema})
- _LOGGER.info("Params: \r\n%s", {self.tcs.id: self.tcs.params})
- _LOGGER.info("Status: \r\n%s", {self.tcs.id: self.tcs.status})
-
- elif sig == signal.SIGUSR2:
- _LOGGER.info("Status: \r\n%s", {self.tcs.id: self.tcs.status})
+ if not self.ser_name:
+ return f"Gateway(input_file={self._input_file})"
+ return f"Gateway(port_name={self.ser_name}, port_config={self._port_config})"

- super()._setup_event_handlers()
+ @property
+ def hgi(self) -> HgiGateway | None:
+ """Return the active HGI80-compatible gateway device, if known."""
+ if not self._transport:
+ return None
+ if device_id := self._transport.get_extra_info(SZ_ACTIVE_HGI):
+ return self.device_by_id.get(device_id) # type: ignore[return-value]
+ return None

- _LOGGER.debug("_setup_event_handlers(): Creating signal handlers...")
- if os.name == "posix": # full support
- for sig in [signal.SIGUSR1, signal.SIGUSR2]:
- self._loop.add_signal_handler(
- sig, lambda sig=sig: self._loop.create_task(handle_sig_posix(sig))
- )
- elif os.name == "nt": # supported, but YMMV
- _LOGGER.warning("Be aware, YMMV with Windows...")
- else: # unsupported
- raise RuntimeError(f"Unsupported OS for this module: {os.name}")
+ async def start(
+ self,
+ /,
+ *,
+ start_discovery: bool = True,
+ cached_packets: dict[str, str] | None = None,
+ ) -> None:
+ """Start the Gateway and Initiate discovery as required."""

- async def start(self, *, start_discovery: bool = True) -> None:
- def initiate_discovery(dev_list, sys_list) -> None:
+ def initiate_discovery(dev_list: list[Device], sys_list: list[Evohome]) -> None:
  _LOGGER.debug("ENGINE: Initiating/enabling discovery...")

  # [d._start_discovery_poller() for d in devs]
@@ -418,84 +168,73 @@ class Gateway(Engine):
  if system.dhw:
  system.dhw._start_discovery_poller()

- await super().start()
-
- if not self.ser_name: # wait until have processed the entire packet log...
- await self.pkt_transport.get_extra_info(SZ_POLLER_TASK)
+ await set_pkt_logging_config( # type: ignore[arg-type]
+ cc_console=self.config.reduce_processing >= DONT_CREATE_MESSAGES,
+ **self._packet_log,
+ )

- elif start_discovery: # source of packets is a serial port
- initiate_discovery(self.devices, self.systems)
+ self.config.disable_discovery, disable_discovery = (
+ True,
+ self.config.disable_discovery,
+ )

- async def stop(self) -> None: # FIXME: a mess
- """Cancel all outstanding tasks."""
- # if self._engine_state is None:
- # self._pause()
+ load_schema(self, known_list=self._include, **self._schema) # create faked too

- # (t for t in self._tasks if not isinstance(t, asyncio.Task))
+ await super().start() # TODO: do this *after* restore cache
+ if cached_packets:
+ await self._restore_cached_packets(cached_packets)

- if [t.cancel() for t in self._tasks if not t.done()]:
- await asyncio.gather(*(t for t in self._tasks if asyncio.isfuture(t)))
+ self.config.disable_discovery = disable_discovery

- # this doesn't work...
- for t in [t for t in self._tasks if isinstance(t, futures.Future)]:
- try:
- await asyncio.wrap_future(t, loop=self._loop)
- except asyncio.CancelledError:
- pass
+ if (
+ not self._disable_sending
+ and not self.config.disable_discovery
+ and start_discovery
+ ):
+ initiate_discovery(self.devices, self.systems)

- # this doesn't work either
- # for device in self.devices:
- # await device._stop_discovery_poller()
- # for system in self.systems:
- # await system._stop_discovery_poller()
- # for zone in system.zones:
- # await zone._stop_discovery_poller()
+ async def stop(self) -> None:
+ """Stop the Gateway and tidy up."""

+ if self._zzz:
+ self._zzz.stop()
  await super().stop()

- def _pause(self, *args, clear_state: bool = False) -> None:
- """Pause the (unpaused) gateway."""
-
- super()._pause(
- self.config.disable_discovery, self.config.disable_sending, *args
- )
- self.config.disable_discovery = True
- self.config.disable_sending = True
-
- if clear_state:
- self._clear_state()
+ def _pause(self, *args: Any) -> None:
+ """Pause the (unpaused) gateway (disables sending/discovery).

- def _resume(self) -> tuple:
- """Resume the (paused) gateway."""
+ There is the option to save other objects, as *args.
+ """
+ _LOGGER.debug("Gateway: Pausing engine...")

- (
- self.config.disable_discovery,
- self.config.disable_sending,
- *args,
- ) = super()._resume()
+ self.config.disable_discovery, disc_flag = True, self.config.disable_discovery

- return args # type: ignore[return-value]
+ try:
+ super()._pause(disc_flag, *args)
+ except RuntimeError:
+ self.config.disable_discovery = disc_flag
+ raise

- def _clear_state(self) -> None:
- _LOGGER.warning("ENGINE: Clearing exisiting schema/state...")
+ def _resume(self) -> tuple[Any]:
+ """Resume the (paused) gateway (enables sending/discovery, if applicable).

- self._tcs = None
- self.devices = []
- self.device_by_id = {}
+ Will restore other objects, as *args.
+ """
+ args: tuple[Any]

- def get_state(self, include_expired: bool = False) -> tuple[dict, dict]:
+ _LOGGER.debug("Gateway: Resuming engine...")

- (_LOGGER.warning if DEV_MODE else _LOGGER.debug)("ENGINE: Getting state...")
- self._pause()
+ self.config.disable_discovery, *args = super()._resume() # type: ignore[assignment]

- result = self._get_state(include_expired=include_expired)
+ return args

- self._resume()
- (_LOGGER.warning if DEV_MODE else _LOGGER.debug)("ENGINE: Got schema/state.")
+ def get_state(
+ self, include_expired: bool = False
+ ) -> tuple[dict[str, Any], dict[str, str]]:
+ """Return the current schema & state (may include expired packets)."""

- return result
+ self._pause()

- def _get_state(self, include_expired: bool = False) -> tuple[dict, dict]:
  def wanted_msg(msg: Message, include_expired: bool = False) -> bool:
  if msg.code == Code._313F:
  return msg.verb in (I_, RP) # usu. expired, useful 4 back-back restarts
@@ -514,55 +253,102 @@ class Gateway(Engine):
  for system in self.systems:
  msgs.extend(list(system._msgs.values()))
  msgs.extend([m for z in system.zones for m in z._msgs.values()])
- # msgs.extend([m for z in system.dhw for m in z._msgs.values()])
+ # msgs.extend([m for z in system.dhw for m in z._msgs.values()]) # TODO
+
+ if self._zzz:
+ pkts = {
+ f"{repr(msg._pkt)[:26]}": f"{repr(msg._pkt)[27:]}"
+ for msg in self._zzz.all(include_expired=True)
+ if wanted_msg(msg, include_expired=include_expired)
+ }

- # BUG: assumes pkts have unique dtms: may not be true for contrived logs...
- pkts = {
- f"{repr(msg._pkt)[:26]}": f"{repr(msg._pkt)[27:]}"
- for msg in msgs
- if wanted_msg(msg, include_expired=include_expired)
- } # BUG: assumes pkts have unique dtms
+ else:
+ pkts = { # BUG: assumes pkts have unique dtms: may be untrue for contrived logs
+ f"{repr(msg._pkt)[:26]}": f"{repr(msg._pkt)[27:]}"
+ for msg in msgs
+ if wanted_msg(msg, include_expired=include_expired)
+ }
+
+ self._resume()

  return self.schema, dict(sorted(pkts.items()))

- async def set_state(self, packets, *, schema: None | dict = None) -> None:
- # TODO: add a feature to exludede expired packets?
- (_LOGGER.warning if DEV_MODE else _LOGGER.info)("ENGINE: Setting state...")
+ async def _restore_cached_packets(
+ self, packets: dict[str, str], _clear_state: bool = False
+ ) -> None:
+ """Restore cached packets (may include expired packets)."""
+
+ def clear_state() -> None:
+ _LOGGER.info("GATEWAY: Clearing existing schema/state...")

- if schema is None: # TODO: also for known_list (device traits)?
- schema = shrink(self.schema)
+ # self._schema = {}

- self._pause(clear_state=True)
+ self._tcs = None
+ self.devices = []
+ self.device_by_id = {}

- load_schema(self, **schema)
- await self._set_state(packets, schema=schema)
+ self._prev_msg = None
+ self._this_msg = None

- self._resume()
- (_LOGGER.warning if DEV_MODE else _LOGGER.info)("ENGINE: Set state.")
+ tmp_transport: RamsesTransportT # mypy hint
+
+ _LOGGER.debug("GATEWAY: Restoring a cached packet log...")
+ self._pause()
+
+ if _clear_state: # only intended for test suite use
+ clear_state()
+
+ # We do not always enforce the known_list whilst restoring a cache because
+ # if it does not contain a correctly configured HGI, a 'working' address is
+ # used (which could be different to the address in the cache) & wanted packets
+ # can be dropped unnecessarily.
+
+ enforce_include_list = bool(
+ self._enforce_known_list
+ and extract_known_hgi_id(
+ self._include, disable_warnings=True, strick_checking=True
+ )
+ )

- async def _set_state(self, packets: dict, *, schema=None) -> None:
- tmp_transport: asyncio.Transport
+ # The actual HGI address will be discovered when the actual transport was/is
+ # started up (usually before now)

- pkt_receiver = (
- self.msg_transport.get_extra_info(self.msg_transport.READER)
- if self.msg_transport
- else None
+ tmp_protocol = protocol_factory(
+ self._msg_handler,
+ disable_sending=True,
+ enforce_include_list=enforce_include_list,
+ exclude_list=self._exclude,
+ include_list=self._include,
  )
- _, tmp_transport = self._create_pkt_stack(pkt_receiver, packet_dict=packets)
- await tmp_transport.get_extra_info(SZ_POLLER_TASK)

- # self.msg_transport._clear_write_buffer() # TODO: shouldn't be needed
- self.msg_protocol._prev_msg = None # TODO: move to pause/resume?
+ tmp_transport = await transport_factory(
+ tmp_protocol,
+ packet_dict=packets,
+ )
+
+ await tmp_transport.get_extra_info(SZ_READER_TASK)
+
+ _LOGGER.debug("GATEWAY: Restored, resuming")
+ self._resume()
+
+ def _add_device(self, dev: Device) -> None: # TODO: also: _add_system()
+ """Add a device to the gateway (called by devices during instantiation)."""
+
+ if dev.id in self.device_by_id:
+ raise LookupError(f"Device already exists: {dev.id}")
+
+ self.devices.append(dev)
+ self.device_by_id[dev.id] = dev

  def get_device(
  self,
- dev_id: _DeviceIdT,
+ device_id: DeviceIdT,
  *,
- msg: None | Message = None,
- parent=None,
- child_id=None,
- is_sensor: None | bool = None,
- ) -> Device: # TODO: **schema) -> Device: # may: LookupError
+ msg: Message | None = None,
+ parent: Parent | None = None,
+ child_id: str | None = None,
+ is_sensor: bool | None = None,
+ ) -> Device: # TODO: **schema/traits) -> Device: # may: LookupError
  """Return a device, create it if required.

  First, use the traits to create/update it, then pass it any msg to handle.
@@ -572,13 +358,13 @@ class Gateway(Engine):
  If a device is created, attach it to the gateway.
  """

- def check_filter_lists(dev_id: _DeviceIdT) -> None: # may: LookupError
+ def check_filter_lists(dev_id: DeviceIdT) -> None: # may: LookupError
  """Raise an LookupError if a device_id is filtered out by a list."""

  if dev_id in self._unwanted: # TODO: shouldn't invalidate a msg
  raise LookupError(f"Can't create {dev_id}: it is unwanted or invalid")

- if self.config.enforce_known_list and (
+ if self._enforce_known_list and (
  dev_id not in self._include and dev_id != getattr(self.hgi, "id", None)
  ):
  self._unwanted.append(dev_id)
@@ -594,14 +380,32 @@ class Gateway(Engine):
  f" (if required, remove it from the {SZ_BLOCK_LIST})"
  )

- check_filter_lists(dev_id)
- traits = SCH_TRAITS(self._include.get(dev_id, {}))
+ try:
+ check_filter_lists(device_id)
+ except LookupError:
+ # have to allow for GWY not being in known_list...
+ if device_id != self._protocol.hgi_id:
+ raise # TODO: make parochial
+
+ dev = self.device_by_id.get(device_id)

- dev = self.device_by_id.get(dev_id)
  if not dev:
- dev = device_factory(self, Address(dev_id), msg=msg, **traits)
+ # voluptuous bug workaround: https://github.com/alecthomas/voluptuous/pull/524
+ _traits: dict[str, Any] = self._include.get(device_id, {}) # type: ignore[assignment]
+ _traits.pop("commands", None)
+
+ traits: dict[str, Any] = SCH_TRAITS(self._include.get(device_id, {}))
+
+ dev = device_factory(self, Address(device_id), msg=msg, **_traits)
+
+ if traits.get(SZ_FAKED):
+ if isinstance(dev, Fakeable):
+ dev._make_fake()
+ else:
+ _LOGGER.warning(f"The device is not fakeable: {dev}")

  # TODO: the exact order of the following may need refining...
+ # TODO: some will be done my devices themselves?

  # if schema: # Step 2: Only controllers have a schema...
  # dev._update_schema(**schema) # TODO: schema/traits
@@ -609,19 +413,34 @@ class Gateway(Engine):
  if parent or child_id:
  dev.set_parent(parent, child_id=child_id, is_sensor=is_sensor)

- if traits.get(SZ_FAKED):
- if isinstance(dev, Fakeable):
- dev._make_fake()
- else:
- _LOGGER.warning(f"The device is not fakable: {dev}")
-
- if msg:
- dev._handle_msg(msg)
+ # if msg:
+ # dev._handle_msg(msg)

  return dev

+ def fake_device(
+ self,
+ device_id: DeviceIdT,
+ create_device: bool = False,
+ ) -> Device:
+ """Create a faked device."""
+
+ if not is_valid_dev_id(device_id):
+ raise TypeError(f"The device id is not valid: {device_id}")
+
+ if not create_device and device_id not in self.device_by_id:
+ raise LookupError(f"The device id does not exist: {device_id}")
+ elif create_device and device_id not in self.known_list:
+ raise LookupError(f"The device id is not in the known_list: {device_id}")
+
+ if (dev := self.get_device(device_id)) and isinstance(dev, Fakeable):
+ dev._make_fake()
+ return dev
+
+ raise TypeError(f"The device is not fakable: {device_id}")
+
  @property
- def tcs(self) -> Optional[System]:
+ def tcs(self) -> Evohome | None:
  """Return the primary TCS, if any."""

  if self._tcs is None and self.systems:
@@ -629,7 +448,7 @@ class Gateway(Engine):
  return self._tcs

  @property
- def known_list(self) -> dict:
+ def known_list(self) -> DeviceListT:
  """Return the working known_list (a superset of the provided known_list).

  Unlike orphans, which are always instantiated when a schema is loaded, these
@@ -640,15 +459,15 @@ class Gateway(Engine):
  result = self._include # could be devices here, not (yet) in gwy.devices
  result.update(
  {
- d.id: {k: d.traits[k] for k in (SZ_CLASS, SZ_ALIAS, SZ_FAKED)}
+ d.id: {k: d.traits[k] for k in (SZ_CLASS, SZ_ALIAS, SZ_FAKED)} # type: ignore[misc]
  for d in self.devices
- if not self.config.enforce_known_list or d.id in self._include
+ if not self._enforce_known_list or d.id in self._include
  }
  )
  return result

  @property
- def system_by_id(self) -> dict:
+ def system_by_id(self) -> dict[DeviceIdT, Evohome]:
  return {
  d.id: d.tcs
  for d in self.devices
@@ -656,11 +475,11 @@ class Gateway(Engine):
  } # why something so simple look so messy

  @property
- def systems(self) -> list:
+ def systems(self) -> list[Evohome]:
  return list(self.system_by_id.values())

  @property
- def _config(self) -> dict:
+ def _config(self) -> dict[str, Any]:
  """Return the working configuration.

  Includes:
@@ -673,28 +492,30 @@ class Gateway(Engine):
  return {
  "_gateway_id": self.hgi.id if self.hgi else None,
  SZ_MAIN_TCS: self.tcs.id if self.tcs else None,
- SZ_CONFIG: {SZ_ENFORCE_KNOWN_LIST: self.config.enforce_known_list},
+ SZ_CONFIG: {SZ_ENFORCE_KNOWN_LIST: self._enforce_known_list},
  SZ_KNOWN_LIST: self.known_list,
  SZ_BLOCK_LIST: [{k: v} for k, v in self._exclude.items()],
- "_unwanted": sorted(self.pkt_protocol._unwanted),
- "_unwanted_alt": sorted(self._unwanted),
+ "_unwanted": sorted(self._unwanted),
  }

  @property
- def schema(self) -> dict:
+ def schema(self) -> dict[str, Any]:
  """Return the global schema.

+ This 'active' schema may exclude non-present devices from the configured schema
+ that was loaded during initialisation.
+
  Orphans are devices that 'exist' but don't yet have a place in the schema
- hierachy (if ever): therefore, they are instantiated when the schema is loaded,
+ hierarchy (if ever): therefore, they are instantiated when the schema is loaded,
  just like the other devices in the schema.
  """

- schema = {SZ_MAIN_TCS: self.tcs.ctl.id if self.tcs else None}
+ schema: dict[str, Any] = {SZ_MAIN_TCS: self.tcs.ctl.id if self.tcs else None}

  for tcs in self.systems:
  schema[tcs.ctl.id] = tcs.schema

- schema[f"{SZ_ORPHANS}_heat"] = sorted(
+ dev_list: list[DeviceIdT] = sorted(
  [
  d.id
  for d in self.devices
@@ -703,67 +524,110 @@ class Gateway(Engine):
  and d._is_present
  ]
  )
+ schema[f"{SZ_ORPHANS}_heat"] = dev_list

- schema[f"{SZ_ORPHANS}_hvac"] = sorted(
+ dev_list = sorted(
  [d.id for d in self.devices if isinstance(d, DeviceHvac) and d._is_present]
  )
+ schema[f"{SZ_ORPHANS}_hvac"] = dev_list

  return schema

  @property
- def params(self) -> dict:
+ def params(self) -> dict[str, Any]:
  return {SZ_DEVICES: {d.id: d.params for d in sorted(self.devices)}}

  @property
- def status(self) -> dict:
- return {SZ_DEVICES: {d.id: d.status for d in sorted(self.devices)}}
+ def status(self) -> dict[str, Any]:
+ tx_rate = self._transport.get_extra_info("tx_rate") if self._transport else None
+ return {
+ SZ_DEVICES: {d.id: d.status for d in sorted(self.devices)},
+ "_tx_rate": tx_rate,
+ }

- def send_cmd( # FIXME
- self, cmd: Command, callback: Callable = None, **kwargs
- ) -> futures.Future:
- """Send a command with the option to return any response via callback."""
+ def _msg_handler(self, msg: Message) -> None:
+ """A callback to handle messages from the protocol stack."""
+ # TODO: Remove this
+ # # HACK: if CLI, double-logging with client.py proc_msg() & setLevel(DEBUG)
+ # if (log_level := _LOGGER.getEffectiveLevel()) < logging.INFO:
+ # _LOGGER.info(msg)
+ # elif log_level <= logging.INFO and not (
+ # msg.verb == RQ and msg.src.type == DEV_TYPE_MAP.HGI
+ # ):
+ # _LOGGER.info(msg)

- if self.config.disable_sending:
- raise RuntimeError("sending is disabled")
+ super()._msg_handler(msg)

- fut = super().send_cmd(cmd, callback, **kwargs)
+ # TODO: ideally remove this feature...
+ assert self._this_msg # mypy check

- self._tasks = [t for t in self._tasks if not t.done()]
- self._tasks.append(fut)
- return fut
+ if self._prev_msg and detect_array_fragment(self._this_msg, self._prev_msg):
+ msg._pkt._force_has_array() # may be an array of length 1
+ msg._payload = self._prev_msg.payload + (
+ msg.payload if isinstance(msg.payload, list) else [msg.payload]
+ )

- def fake_device(
- self,
- device_id: _DeviceIdT,
- create_device: bool = False,
- start_binding: bool = False,
- ) -> Device:
- """Create a faked device, and optionally set it to binding mode.
+ process_msg(self, msg)

- Will make any neccesary changed to the device lists.
+ def send_cmd(
+ self,
+ cmd: Command,
+ /,
+ *,
+ gap_duration: float = DEFAULT_GAP_DURATION,
+ num_repeats: int = DEFAULT_NUM_REPEATS,
+ priority: Priority = Priority.DEFAULT,
+ timeout: float = DEFAULT_SEND_TIMEOUT,
+ wait_for_reply: bool | None = DEFAULT_WAIT_FOR_REPLY,
+ ) -> asyncio.Task[Packet]:
+ """Wrapper to schedule an async_send_cmd() and return the Task.
+
+ num_repeats: 0 = send once, 1 = send twice, etc.
+ gap_duration: the gap between repeats (in seconds)
+ priority: the priority of the command
  """
- # TODO: what about using the HGI

- if not is_valid_dev_id(device_id):
- raise TypeError(f"The device id is not valid: {device_id}")
-
- if create_device and device_id in self.device_by_id:
- raise LookupError(f"The device id already exists: {device_id}")
- elif not create_device and device_id not in self.device_by_id:
- raise LookupError(f"The device id does not exist: {device_id}")
+ coro = self.async_send_cmd(
+ cmd,
+ gap_duration=gap_duration,
+ num_repeats=num_repeats,
+ priority=priority,
+ timeout=timeout,
+ wait_for_reply=wait_for_reply,
+ )

- if self.config.enforce_known_list and device_id not in self._include:
- self._include[device_id] = {}
- elif device_id in self._exclude:
- del self._exclude[device_id]
+ task = self._loop.create_task(coro)
+ self.add_task(task)
+ return task

- if (dev := self.get_device(device_id)) and isinstance(dev, Fakeable):
- return dev._make_fake(bind=start_binding)
- raise TypeError(f"The device is not fakable: {device_id}")
+ async def async_send_cmd(
+ self,
+ cmd: Command,
+ /,
+ *,
+ gap_duration: float = DEFAULT_GAP_DURATION,
+ num_repeats: int = DEFAULT_NUM_REPEATS,
+ priority: Priority = Priority.DEFAULT,
+ max_retries: int = DEFAULT_MAX_RETRIES,
+ timeout: float = DEFAULT_SEND_TIMEOUT,
+ wait_for_reply: bool | None = DEFAULT_WAIT_FOR_REPLY,
+ ) -> Packet:
+ """Send a Command and return the corresponding (echo or reply) Packet.
+
+ If wait_for_reply is True (*and* the Command has a rx_header), return the
+ reply Packet. Otherwise, simply return the echo Packet.
+
+ If the expected Packet can't be returned, raise:
+ ProtocolSendFailed: tried to Tx Command, but didn't get echo/reply
+ ProtocolError: didn't attempt to Tx Command for some reason
+ """

- def add_task(self, fnc, *args, delay=None, period=None, **kwargs) -> asyncio.Task:
- """Start a task after delay seconds and then repeat it every period seconds."""
- self._tasks = [t for t in self._tasks if not t.done()]
- task = schedule_task(fnc, *args, delay=delay, period=period, **kwargs)
- self._tasks.append(task)
- return task
+ return await super().async_send_cmd(
+ cmd,
+ gap_duration=gap_duration,
+ num_repeats=num_repeats,
+ priority=priority,
+ max_retries=max_retries,
+ timeout=timeout,
+ wait_for_reply=wait_for_reply,
+ ) # may: raise ProtocolError/ProtocolSendFailed
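Net effect of the gateway.py changes: the old in-package Engine class (with its thread/Future-based send_cmd() plumbing) is gone, Gateway now builds on ramses_tx.Engine, send_cmd() schedules an asyncio Task, and async_send_cmd() returns the echo or reply Packet. A minimal usage sketch based only on the signatures visible in the hunks above; the serial port and known_list entry are illustrative placeholders, the `from ramses_rf import Gateway` re-export is assumed rather than confirmed by this diff, and Command construction is out of scope here:

```python
import asyncio

from ramses_rf import Gateway  # assumed re-export of the class defined in gateway.py
from ramses_tx import Command  # per the import block in the new gateway.py


async def main() -> None:
    # port name and known_list entry are illustrative placeholders
    gwy = Gateway("/dev/ttyUSB0", known_list={"18:000730": {}})
    await gwy.start()  # start_discovery=True, cached_packets=None by default

    cmd: Command = ...  # build a Command here (its constructors are not shown in this diff)

    # async_send_cmd() returns the echo Packet, or the reply Packet when
    # wait_for_reply is True and the Command has an rx_header
    pkt = await gwy.async_send_cmd(cmd, wait_for_reply=True)
    print(pkt)

    await gwy.stop()


asyncio.run(main())
```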