ramses-rf 0.22.2__py3-none-any.whl → 0.51.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72) hide show
  1. ramses_cli/__init__.py +18 -0
  2. ramses_cli/client.py +597 -0
  3. ramses_cli/debug.py +20 -0
  4. ramses_cli/discovery.py +405 -0
  5. ramses_cli/utils/cat_slow.py +17 -0
  6. ramses_cli/utils/convert.py +60 -0
  7. ramses_rf/__init__.py +31 -10
  8. ramses_rf/binding_fsm.py +787 -0
  9. ramses_rf/const.py +124 -105
  10. ramses_rf/database.py +297 -0
  11. ramses_rf/device/__init__.py +69 -39
  12. ramses_rf/device/base.py +187 -376
  13. ramses_rf/device/heat.py +540 -552
  14. ramses_rf/device/hvac.py +286 -171
  15. ramses_rf/dispatcher.py +153 -177
  16. ramses_rf/entity_base.py +478 -361
  17. ramses_rf/exceptions.py +82 -0
  18. ramses_rf/gateway.py +378 -514
  19. ramses_rf/helpers.py +57 -19
  20. ramses_rf/py.typed +0 -0
  21. ramses_rf/schemas.py +148 -194
  22. ramses_rf/system/__init__.py +16 -23
  23. ramses_rf/system/faultlog.py +363 -0
  24. ramses_rf/system/heat.py +295 -302
  25. ramses_rf/system/schedule.py +312 -198
  26. ramses_rf/system/zones.py +318 -238
  27. ramses_rf/version.py +2 -8
  28. ramses_rf-0.51.1.dist-info/METADATA +72 -0
  29. ramses_rf-0.51.1.dist-info/RECORD +55 -0
  30. {ramses_rf-0.22.2.dist-info → ramses_rf-0.51.1.dist-info}/WHEEL +1 -2
  31. ramses_rf-0.51.1.dist-info/entry_points.txt +2 -0
  32. {ramses_rf-0.22.2.dist-info → ramses_rf-0.51.1.dist-info/licenses}/LICENSE +1 -1
  33. ramses_tx/__init__.py +160 -0
  34. {ramses_rf/protocol → ramses_tx}/address.py +65 -59
  35. ramses_tx/command.py +1454 -0
  36. ramses_tx/const.py +903 -0
  37. ramses_tx/exceptions.py +92 -0
  38. {ramses_rf/protocol → ramses_tx}/fingerprints.py +56 -15
  39. {ramses_rf/protocol → ramses_tx}/frame.py +132 -131
  40. ramses_tx/gateway.py +338 -0
  41. ramses_tx/helpers.py +883 -0
  42. {ramses_rf/protocol → ramses_tx}/logger.py +67 -53
  43. {ramses_rf/protocol → ramses_tx}/message.py +155 -191
  44. ramses_tx/opentherm.py +1260 -0
  45. ramses_tx/packet.py +210 -0
  46. ramses_tx/parsers.py +2957 -0
  47. ramses_tx/protocol.py +801 -0
  48. ramses_tx/protocol_fsm.py +672 -0
  49. ramses_tx/py.typed +0 -0
  50. {ramses_rf/protocol → ramses_tx}/ramses.py +262 -185
  51. {ramses_rf/protocol → ramses_tx}/schemas.py +150 -133
  52. ramses_tx/transport.py +1471 -0
  53. ramses_tx/typed_dicts.py +492 -0
  54. ramses_tx/typing.py +181 -0
  55. ramses_tx/version.py +4 -0
  56. ramses_rf/discovery.py +0 -398
  57. ramses_rf/protocol/__init__.py +0 -59
  58. ramses_rf/protocol/backports.py +0 -42
  59. ramses_rf/protocol/command.py +0 -1561
  60. ramses_rf/protocol/const.py +0 -697
  61. ramses_rf/protocol/exceptions.py +0 -111
  62. ramses_rf/protocol/helpers.py +0 -390
  63. ramses_rf/protocol/opentherm.py +0 -1170
  64. ramses_rf/protocol/packet.py +0 -235
  65. ramses_rf/protocol/parsers.py +0 -2673
  66. ramses_rf/protocol/protocol.py +0 -613
  67. ramses_rf/protocol/transport.py +0 -1011
  68. ramses_rf/protocol/version.py +0 -10
  69. ramses_rf/system/hvac.py +0 -82
  70. ramses_rf-0.22.2.dist-info/METADATA +0 -64
  71. ramses_rf-0.22.2.dist-info/RECORD +0 -42
  72. ramses_rf-0.22.2.dist-info/top_level.txt +0 -1
ramses_tx/transport.py ADDED
@@ -0,0 +1,1471 @@
1
+ #!/usr/bin/env python3
2
+ """RAMSES RF - RAMSES-II compatible packet transport.
3
+
4
+ Operates at the pkt layer of: app - msg - pkt - h/w
5
+
6
+ For ser2net, use the following YAML with: ser2net -c misc/ser2net.yaml
7
+ connection: &con00
8
+ accepter: telnet(rfc2217),tcp,5001
9
+ timeout: 0
10
+ connector: serialdev,/dev/ttyUSB0,115200n81,local
11
+ options:
12
+ max-connections: 3
13
+
14
+ For socat, see:
15
+ socat -dd pty,raw,echo=0 pty,raw,echo=0
16
+ python client.py monitor /dev/pts/0
17
+ cat packet.log | cut -d ' ' -f 2- | unix2dos > /dev/pts/1
18
+
19
+ For re-flashing evofw3 via Arduino IDE on *my* atmega328p (YMMV):
20
+ - Board: atmega328p (SW UART)
21
+ - Bootloader: Old Bootloader
22
+ - Processor: atmega328p (5V, 16 MHz)
23
+ - Host: 57600 (or 115200, YMMV)
24
+ - Pinout: Nano
25
+
26
+ For re-flashing evofw3 via Arduino IDE on *my* atmega32u4 (YMMV):
27
+ - Board: atmega32u4 (HW UART)
28
+ - Processor: atmega32u4 (5V, 16 MHz)
29
+ - Pinout: Pro Micro
30
+ """
31
+
32
+ from __future__ import annotations
33
+
34
+ import asyncio
35
+ import contextlib
36
+ import fileinput
37
+ import functools
38
+ import glob
39
+ import json
40
+ import logging
41
+ import os
42
+ import re
43
+ import sys
44
+ from collections import deque
45
+ from collections.abc import Awaitable, Callable, Iterable
46
+ from datetime import datetime as dt, timedelta as td
47
+ from functools import wraps
48
+ from io import TextIOWrapper
49
+ from string import printable
50
+ from time import perf_counter
51
+ from typing import TYPE_CHECKING, Any, Final, TypeAlias
52
+ from urllib.parse import parse_qs, unquote, urlparse
53
+
54
+ from paho.mqtt import MQTTException, client as mqtt
55
+ from paho.mqtt.enums import CallbackAPIVersion
56
+ from serial import ( # type: ignore[import-untyped]
57
+ Serial,
58
+ SerialException,
59
+ serial_for_url,
60
+ )
61
+
62
+ from . import exceptions as exc
63
+ from .command import Command
64
+ from .const import (
65
+ DUTY_CYCLE_DURATION,
66
+ MAX_DUTY_CYCLE_RATE,
67
+ MAX_TRANSMIT_RATE_TOKENS,
68
+ MIN_INTER_WRITE_GAP,
69
+ SZ_ACTIVE_HGI,
70
+ SZ_IS_EVOFW3,
71
+ SZ_SIGNATURE,
72
+ )
73
+ from .helpers import dt_now
74
+ from .packet import Packet
75
+ from .schemas import (
76
+ SCH_SERIAL_PORT_CONFIG,
77
+ SZ_EVOFW_FLAG,
78
+ SZ_INBOUND,
79
+ SZ_OUTBOUND,
80
+ DeviceIdT,
81
+ PortConfigT,
82
+ )
83
+ from .typing import ExceptionT, SerPortNameT
84
+
85
+ from .const import ( # noqa: F401, isort: skip, pylint: disable=unused-import
86
+ I_,
87
+ RP,
88
+ RQ,
89
+ W_,
90
+ Code,
91
+ )
92
+
93
+ if TYPE_CHECKING:
94
+ from .protocol import RamsesProtocolT
95
+
96
+
97
_DEFAULT_TIMEOUT_PORT: Final[float] = 3  # secs; presumably serial-port timeout - TODO confirm
_DEFAULT_TIMEOUT_MQTT: Final[float] = 9  # secs; presumably MQTT broker timeout - TODO confirm

# gateway signature polling parameters (see PortTransport connection set-up)
_SIGNATURE_GAP_SECS = 0.05
_SIGNATURE_MAX_TRYS = 40  # was: 24
_SIGNATURE_MAX_SECS = 3

SZ_RAMSES_GATEWAY: Final = "RAMSES/GATEWAY"  # presumably the MQTT topic root - TODO confirm
SZ_READER_TASK: Final = "reader_task"  # key for the reader task in Transport._extra

#
# NOTE: All debug flags should be False for deployment to end-users
_DBG_DISABLE_DUTY_CYCLE_LIMIT: Final[bool] = False
_DBG_DISABLE_REGEX_WARNINGS: Final[bool] = False
_DBG_FORCE_FRAME_LOGGING: Final[bool] = False

_LOGGER = logging.getLogger(__name__)
115
+
116
+
117
+ try:
118
+ import serial_asyncio_fast as serial_asyncio # type: ignore[import-not-found, import-untyped, unused-ignore]
119
+
120
+ _LOGGER.debug("Using pyserial-asyncio-fast in place of pyserial-asyncio")
121
+ except ImportError:
122
+ import serial_asyncio # type: ignore[import-not-found, import-untyped, unused-ignore, no-redef]
123
+
124
+
125
+ # For linux, use a modified version of comports() to include /dev/serial/by-id/* links
126
+ if os.name == "nt": # sys.platform == 'win32':
127
+ from serial.tools.list_ports_windows import comports # type: ignore[import-untyped]
128
+
129
+ elif os.name != "posix": # is unsupported
130
+ raise ImportError(
131
+ f"Sorry: no implementation for your platform ('{os.name}') available"
132
+ )
133
+
134
+ elif sys.platform.lower()[:5] != "linux": # e.g. osx
135
+ from serial.tools.list_ports_posix import comports # type: ignore[import-untyped]
136
+
137
+ else: # is linux
138
+ # - see: https://github.com/pyserial/pyserial/pull/700
139
+ # - see: https://github.com/pyserial/pyserial/pull/709
140
+
141
+ from serial.tools.list_ports_linux import SysFS # type: ignore[import-untyped]
142
+
143
+ def list_links(devices: set[str]) -> list[str]:
144
+ """Search for symlinks to ports already listed in devices."""
145
+
146
+ links = []
147
+ for device in glob.glob("/dev/*") + glob.glob("/dev/serial/by-id/*"):
148
+ if os.path.islink(device) and os.path.realpath(device) in devices:
149
+ links.append(device)
150
+ return links
151
+
152
+ def comports( # type: ignore[no-any-unimported]
153
+ include_links: bool = False, _hide_subsystems: list[str] | None = None
154
+ ) -> list[SysFS]:
155
+ """Return a list of Serial objects for all known serial ports."""
156
+
157
+ if _hide_subsystems is None:
158
+ _hide_subsystems = ["platform"]
159
+
160
+ devices = set()
161
+ with open("/proc/tty/drivers") as file:
162
+ drivers = file.readlines()
163
+ for driver in drivers:
164
+ items = driver.strip().split()
165
+ if items[4] == "serial":
166
+ devices.update(glob.glob(items[1] + "*"))
167
+
168
+ if include_links:
169
+ devices.update(list_links(devices))
170
+
171
+ result: list[SysFS] = [ # type: ignore[no-any-unimported]
172
+ d for d in map(SysFS, devices) if d.subsystem not in _hide_subsystems
173
+ ]
174
+ return result
175
+
176
+
177
def is_hgi80(serial_port: SerPortNameT) -> bool | None:
    """Return True/False if the device attached to the port has the attrs of an HGI80.

    Return None if it's not possible to tell (falsy should assume is evofw3).
    Raise TransportSerialError if the port is not found at all.
    """

    # MQTT-attached gateways are treated as ramses_esp, never HGI80
    if serial_port[:7] == "mqtt://":
        return False  # ramses_esp

    # TODO: add tests for different serial ports, incl./excl/ by-id

    # See: https://github.com/pyserial/pyserial-asyncio/issues/46
    if "://" in serial_port:  # e.g. "rfc2217://localhost:5001"
        # validate the URL without opening it; device type remains unknown
        try:
            serial_for_url(serial_port, do_not_open=True)
        except (SerialException, ValueError) as err:
            raise exc.TransportSerialError(
                f"Unable to find {serial_port}: {err}"
            ) from err
        return None

    if not os.path.exists(serial_port):
        raise exc.TransportSerialError(f"Unable to find {serial_port}")

    # first, try the easy win: recognise the gateway from its by-id symlink name
    if "by-id" not in serial_port:
        pass
    elif "TUSB3410" in serial_port:  # mapped to HGI80
        return True
    elif "evofw3" in serial_port or "FT232R" in serial_port or "NANO" in serial_port:
        return False  # mapped to evofw3

    # otherwise, we can look at device attrs via comports()...
    try:
        komports = comports(include_links=True)
    except ImportError as err:
        raise exc.TransportSerialError(f"Unable to find {serial_port}: {err}") from err

    # TODO: remove get(): not monkeypatching comports() correctly for /dev/pts/...
    vid = {x.device: x.vid for x in komports}.get(serial_port)

    # this works, but we may not have all valid VIDs
    if not vid:
        pass
    elif vid == 0x10AC:  # Honeywell
        return True
    elif vid in (0x0403, 0x1B4F):  # FTDI, SparkFun
        return False

    # TODO: remove get(): not monkeypatching comports() correctly for /dev/pts/...
    product = {x.device: getattr(x, "product", None) for x in komports}.get(serial_port)

    if not product:  # is None - VM, or not member of plugdev group?
        pass
    elif "TUSB3410" in product:  # ?needed
        return True
    elif "evofw3" in product or "FT232R" in product or "NANO" in product:
        return False

    # could try sending an "!V", expect "# evofw3 0.7.1", but that needs I/O

    _LOGGER.warning(
        f"{serial_port}: the gateway type is not determinable, will assume evofw3"
        + (
            ", TIP: specify the serial port by-id (i.e. /dev/serial/by-id/usb-...)"
            if "by-id" not in serial_port
            else ""
        )
    )
    return None
248
+
249
+
250
+ def _normalise(pkt_line: str) -> str:
251
+ """Perform any (transparent) frame-level hacks, as required at (near-)RF layer.
252
+
253
+ Goals:
254
+ - ensure an evofw3 provides the same output as a HGI80 (none, presently)
255
+ - handle 'strange' packets (e.g. I|08:|0008)
256
+ """
257
+
258
+ # TODO: deprecate as only for ramses_esp <0.4.0
259
+ # ramses_esp-specific bugs, see: https://github.com/IndaloTech/ramses_esp/issues/1
260
+ pkt_line = re.sub("\r\r", "\r", pkt_line)
261
+ if pkt_line[:4] == " 000":
262
+ pkt_line = pkt_line[1:]
263
+ elif pkt_line[:2] in (I_, RQ, RP, W_):
264
+ pkt_line = ""
265
+
266
+ # pseudo-RAMSES-II packets (encrypted payload?)...
267
+ if pkt_line[10:14] in (" 08:", " 31:") and pkt_line[-16:] == "* Checksum error":
268
+ pkt_line = pkt_line[:-17] + " # Checksum error (ignored)"
269
+
270
+ # remove any "/r/n" (leading whitespeace is a problem for commands, but not packets)
271
+ return pkt_line.strip()
272
+
273
+
274
+ def _str(value: bytes) -> str:
275
+ try:
276
+ result = "".join(
277
+ c for c in value.decode("ascii", errors="strict") if c in printable
278
+ )
279
+ except UnicodeDecodeError:
280
+ _LOGGER.warning("%s < Can't decode bytestream (ignoring)", value)
281
+ return ""
282
+ return result
283
+
284
+
285
def limit_duty_cycle(
    max_duty_cycle: float, time_window: int = DUTY_CYCLE_DURATION
) -> Callable[..., Any]:
    """Limit the Tx rate to the RF duty cycle regulations (e.g. 1% per hour).

    max_duty_cycle: bandwidth available per observation window (%)
    time_window: duration of the sliding observation window (default 60 seconds)

    Implemented as a token (bit) bucket: the bucket refills at FILL_RATE and
    each transmitted frame consumes its estimated on-air size in bits.
    """

    TX_RATE_AVAIL: int = 38400  # bits per second (deemed)
    FILL_RATE: float = TX_RATE_AVAIL * max_duty_cycle  # bits per second
    BUCKET_CAPACITY: float = FILL_RATE * time_window  # max bits that can accrue

    def decorator(
        fnc: Callable[..., Awaitable[None]],
    ) -> Callable[..., Awaitable[None]]:
        # start with a full bit bucket
        bits_in_bucket: float = BUCKET_CAPACITY
        last_time_bit_added = perf_counter()

        @wraps(fnc)
        async def wrapper(
            self: PortTransport, frame: str, *args: Any, **kwargs: Any
        ) -> None:
            nonlocal bits_in_bucket
            nonlocal last_time_bit_added

            # estimated on-air size: fixed preamble/overhead + 10 bits per payload char
            rf_frame_size = 330 + len(frame[46:]) * 10

            # top-up the bit bucket
            elapsed_time = perf_counter() - last_time_bit_added
            bits_in_bucket = min(
                bits_in_bucket + elapsed_time * FILL_RATE, BUCKET_CAPACITY
            )
            last_time_bit_added = perf_counter()

            if _DBG_DISABLE_DUTY_CYCLE_LIMIT:
                bits_in_bucket = BUCKET_CAPACITY

            # if required, wait for the bit bucket to refill (not for SETs/PUTs)
            if bits_in_bucket < rf_frame_size:
                await asyncio.sleep((rf_frame_size - bits_in_bucket) / FILL_RATE)

            # consume the bits from the bit bucket (even if the send raised)
            try:
                await fnc(self, frame, *args, **kwargs)
            finally:
                bits_in_bucket -= rf_frame_size

        @wraps(fnc)
        async def null_wrapper(
            self: PortTransport, frame: str, *args: Any, **kwargs: Any
        ) -> None:
            # pass-through, used when the duty cycle limit is out of range
            await fnc(self, frame, *args, **kwargs)

        # only rate-limit for sane duty cycles (0 < fraction <= 1)
        if 0 < max_duty_cycle <= 1:
            return wrapper

        return null_wrapper

    return decorator
346
+
347
+
348
# used by @track_transmit_rate, current_transmit_rate()
_MAX_TRACKED_TRANSMITS = 99  # max samples kept in _FullTransport._transmit_times
_MAX_TRACKED_DURATION = 300  # secs: rate window used by _report_transmit_rate()


# used by @track_system_syncs, @avoid_system_syncs
_MAX_TRACKED_SYNCS = 3  # bound on the number of remembered sync cycles
_global_sync_cycles: deque[Packet] = deque(maxlen=_MAX_TRACKED_SYNCS)
356
+
357
+
358
# TODO: doesn't look right at all...
def avoid_system_syncs(fnc: Callable[..., Awaitable[None]]) -> Callable[..., Any]:
    """Take measures to avoid Tx when any controller is doing a sync cycle."""

    DURATION_PKT_GAP = 0.020  # 0.0200 for evohome, or 0.0127 for DTS92
    DURATION_LONG_PKT = 0.022  # time to tx I|2309|048 (or 30C9, or 000A)
    DURATION_SYNC_PKT = 0.010  # time to tx I|1F09|003

    SYNC_WAIT_LONG = (DURATION_PKT_GAP + DURATION_LONG_PKT) * 2
    SYNC_WAIT_SHORT = DURATION_SYNC_PKT
    SYNC_WINDOW_LOWER = td(seconds=SYNC_WAIT_SHORT * 0.8)  # could be * 0
    SYNC_WINDOW_UPPER = SYNC_WINDOW_LOWER + td(seconds=SYNC_WAIT_LONG * 1.2)  #

    @wraps(fnc)
    async def wrapper(*args: Any, **kwargs: Any) -> None:
        global _global_sync_cycles

        def is_imminent(p: Packet) -> bool:
            """Return True if a sync cycle is imminent."""
            # next sync = pkt dtm + countdown (payload[2:6] is hex, in deciseconds)
            return bool(
                SYNC_WINDOW_LOWER
                < (p.dtm + td(seconds=int(p.payload[2:6], 16) / 10) - dt_now())
                < SYNC_WINDOW_UPPER
            )

        start = perf_counter()  # TODO: remove

        # wait for the start of the sync cycle (I|1F09|003, Tx time ~0.009)
        while any(is_imminent(p) for p in _global_sync_cycles):
            await asyncio.sleep(SYNC_WAIT_SHORT)

        # wait for the remainder of sync cycle (I|2309/30C9) to complete
        if perf_counter() - start > SYNC_WAIT_SHORT:
            await asyncio.sleep(SYNC_WAIT_LONG)

        await fnc(*args, **kwargs)
        return None

    return wrapper
397
+
398
+
399
def track_system_syncs(fnc: Callable[..., None]) -> Callable[..., Any]:
    """Track/remember any new/outstanding TCS sync cycle.

    Wraps a Packet-receiving callback: whenever an I|1F09|003 (sync cycle
    start) packet arrives, remember it - replacing any expired cycles and any
    previous cycle from the same source - so writers can avoid Tx during it.
    """

    @wraps(fnc)
    def wrapper(self: PortTransport, pkt: Packet) -> None:
        global _global_sync_cycles

        def is_pending(p: Packet) -> bool:
            """Return True if a sync cycle is still pending (ignores drift)."""
            # next sync = pkt dtm + countdown (payload[2:6] is hex, in deciseconds)
            return bool(p.dtm + td(seconds=int(p.payload[2:6], 16) / 10) > dt_now())

        if pkt.code != Code._1F09 or pkt.verb != I_ or pkt._len != 3:
            fnc(self, pkt)
            return None

        # BUG FIX: the deque was rebuilt without maxlen, losing the bound and
        # requiring a manual popleft safety net; keep the bound instead, so a
        # corrupted payload can never grow the deque past _MAX_TRACKED_SYNCS
        _global_sync_cycles = deque(
            (p for p in _global_sync_cycles if p.src != pkt.src and is_pending(p)),
            maxlen=_MAX_TRACKED_SYNCS,
        )
        _global_sync_cycles.append(pkt)  # TODO: sort

        fnc(self, pkt)

    return wrapper
427
+
428
+
429
+ # ### Abstractors #####################################################################
430
+ # ### Do the bare minimum to abstract each transport from its underlying class
431
+
432
+
433
class _BaseTransport:
    """Common root of the transport MRO; co-operatively forwards init args."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # co-operative multiple inheritance: pass everything along the MRO
        super().__init__(*args, **kwargs)
436
+
437
+
438
+ class _FileTransportAbstractor:
439
+ """Do the bare minimum to abstract a transport from its underlying class."""
440
+
441
+ def __init__(
442
+ self,
443
+ pkt_source: dict[str, str] | str | TextIOWrapper,
444
+ protocol: RamsesProtocolT,
445
+ loop: asyncio.AbstractEventLoop | None = None,
446
+ ) -> None:
447
+ # per().__init__(extra=extra) # done in _BaseTransport
448
+
449
+ self._pkt_source = pkt_source
450
+
451
+ self._protocol = protocol
452
+ self._loop = loop or asyncio.get_event_loop()
453
+
454
+
455
class _PortTransportAbstractor(serial_asyncio.SerialTransport):
    """Do the bare minimum to abstract a transport from its underlying class."""

    serial: Serial  # type: ignore[no-any-unimported]  # provided by SerialTransport

    def __init__(  # type: ignore[no-any-unimported]
        self,
        serial_instance: Serial,
        protocol: RamsesProtocolT,
        loop: asyncio.AbstractEventLoop | None = None,
    ) -> None:
        # SerialTransport's parameter order is (loop, protocol, serial): reorder here
        super().__init__(loop or asyncio.get_event_loop(), protocol, serial_instance)

        # lf._serial = serial_instance  # ._serial, not .serial

        # lf._protocol = protocol
        # lf._loop = loop or asyncio.get_event_loop()
472
+
473
+
474
+ class _MqttTransportAbstractor:
475
+ """Do the bare minimum to abstract a transport from its underlying class."""
476
+
477
+ def __init__(
478
+ self,
479
+ broker_url: str,
480
+ protocol: RamsesProtocolT,
481
+ loop: asyncio.AbstractEventLoop | None = None,
482
+ ) -> None:
483
+ # per().__init__(extra=extra) # done in _BaseTransport
484
+
485
+ self._broker_url = urlparse(broker_url)
486
+
487
+ self._protocol = protocol
488
+ self._loop = loop or asyncio.get_event_loop()
489
+
490
+
491
+ # ### Base classes (common to all Transports) #########################################
492
+ # ### Code shared by all R/O, R/W transport types (File/dict, Serial, MQTT)
493
+
494
+
495
class _ReadTransport(_BaseTransport):
    """Interface for read-only transports."""

    _protocol: RamsesProtocolT = None  # type: ignore[assignment]
    _loop: asyncio.AbstractEventLoop

    _is_hgi80: bool | None = None  # NOTE: None (unknown) is as False (is_evofw3)

    # __slots__ = ('_extra',)

    def __init__(
        self, *args: Any, extra: dict[str, Any] | None = None, **kwargs: Any
    ) -> None:
        super().__init__(*args, loop=kwargs.pop("loop", None))

        self._extra: dict[str, Any] = {} if extra is None else extra

        self._evofw_flag = kwargs.pop(SZ_EVOFW_FLAG, None)  # gwy.config.evofw_flag
        # kwargs.pop("comms_params", None)  # FiXME: remove this

        self._closing: bool = False
        self._reading: bool = False

        # the last two packets seen, used by _dt_now() and _pkt_read()
        self._this_pkt: Packet | None = None
        self._prev_pkt: Packet | None = None

        for key in (SZ_ACTIVE_HGI, SZ_SIGNATURE):
            self._extra[key] = None

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self._protocol})"

    def _dt_now(self) -> dt:
        """Return a precise datetime, using last packet's dtm field."""

        try:
            return self._this_pkt.dtm  # type: ignore[union-attr]
        except AttributeError:  # no packet seen yet: return a fixed early dtm
            return dt(1970, 1, 1, 1, 0)

    @property
    def loop(self) -> asyncio.AbstractEventLoop:
        """The asyncio event loop as declared by SerialTransport."""
        return self._loop

    def get_extra_info(self, name: str, default: Any = None) -> Any:
        # SZ_IS_EVOFW3 is derived, not stored: not-HGI80 (or unknown) => evofw3
        if name == SZ_IS_EVOFW3:
            return not self._is_hgi80
        return self._extra.get(name, default)

    def is_closing(self) -> bool:
        """Return True if the transport is closing or has closed."""
        return self._closing

    def _close(self, exc: exc.RamsesException | None = None) -> None:
        """Inform the protocol that this transport has closed."""

        if self._closing:  # idempotent: only notify the protocol once
            return
        self._closing = True

        self.loop.call_soon_threadsafe(
            functools.partial(self._protocol.connection_lost, exc)  # type: ignore[arg-type]
        )

    def close(self) -> None:
        """Close the transport gracefully."""
        self._close()

    def is_reading(self) -> bool:
        """Return True if the transport is receiving."""
        return self._reading

    def pause_reading(self) -> None:
        """Pause the receiving end (no data to protocol.pkt_received())."""
        self._reading = False

    def resume_reading(self) -> None:
        """Resume the receiving end."""
        self._reading = True

    def _make_connection(self, gwy_id: DeviceIdT | None) -> None:
        """Record the active gateway id, then invoke protocol.connection_made()."""
        self._extra[SZ_ACTIVE_HGI] = gwy_id  # or HGI_DEV_ADDR.id

        self.loop.call_soon_threadsafe(  # shouldn't call this until we have HGI-ID
            functools.partial(self._protocol.connection_made, self, ramses=True)  # type: ignore[arg-type]
        )

    # NOTE: all transport should call this method when they receive data
    def _frame_read(self, dtm_str: str, frame: str) -> None:
        """Make a Packet from the Frame and process it (called by each specific Tx)."""

        if not frame.strip():  # ignore blank/whitespace-only frames
            return

        try:
            pkt = Packet.from_file(dtm_str, frame)  # is OK for when src is dict

        except ValueError as err:  # VE from dt.fromisoformat() or falsey packet
            _LOGGER.debug("%s < PacketInvalid(%s)", frame, err)
            return

        except exc.PacketInvalid as err:  # frame failed packet validation
            _LOGGER.warning("%s < PacketInvalid(%s)", frame, err)
            return

        self._pkt_read(pkt)

    # NOTE: all protocol callbacks should be invoked from here
    def _pkt_read(self, pkt: Packet) -> None:
        """Pass any valid Packets to the protocol's callback (_prev_pkt, _this_pkt)."""

        self._this_pkt, self._prev_pkt = pkt, self._this_pkt

        # if self._reading is False:  # raise, or warn & return?
        #     raise exc.TransportError("Reading has been paused")
        if self._closing is True:  # raise, or warn & return?
            raise exc.TransportError("Transport is closing or has closed")

        # TODO: can we switch to call_soon now QoS has been refactored?
        # NOTE: No need to use call_soon() here, and they may break Qos/Callbacks
        # NOTE: Thus, excepts need checking
        try:  # below could be a call_soon?
            self.loop.call_soon_threadsafe(self._protocol.pkt_received, pkt)
        except AssertionError as err:  # protect from upper layers
            _LOGGER.exception("%s < exception from msg layer: %s", pkt, err)
        except exc.ProtocolError as err:  # protect from upper layers
            _LOGGER.error("%s < exception from msg layer: %s", pkt, err)

    async def write_frame(self, frame: str, disable_tx_limits: bool = False) -> None:
        """Transmit a frame via the underlying handler (e.g. serial port, MQTT)."""
        raise exc.TransportSerialError("This transport is read only")
627
+
628
+
629
class _FullTransport(_ReadTransport):  # asyncio.Transport
    """Interface representing a bidirectional transport."""

    def __init__(
        self, *args: Any, disable_sending: bool = False, **kwargs: Any
    ) -> None:
        super().__init__(*args, **kwargs)

        self._disable_sending = disable_sending
        # recent Tx timestamps, used to report the transmit rate
        self._transmit_times: deque[dt] = deque(maxlen=_MAX_TRACKED_TRANSMITS)

    def _dt_now(self) -> dt:
        """Return a precise datetime, using the current dtm."""

        return dt_now()

    def get_extra_info(self, name: str, default: Any = None) -> Any:
        if name == "tx_rate":
            return self._report_transmit_rate()
        return super().get_extra_info(name, default=default)

    def _report_transmit_rate(self) -> float:
        """Return the transmit rate in transmits per minute (2 dp)."""

        # BUG FIX: the local was named dt_now, shadowing the imported dt_now()
        now = dt.now()
        cutoff = now - td(seconds=_MAX_TRACKED_DURATION)
        transmit_times = tuple(t for t in self._transmit_times if t > cutoff)

        if len(transmit_times) <= 1:  # can't compute a rate from <2 samples
            return float(len(transmit_times))  # was int; annotation says float

        duration: float = (transmit_times[-1] - transmit_times[0]) / td(seconds=1)
        return int(len(transmit_times) / duration * 6000) / 100  # per min, 2 dp

    def _track_transmit_rate(self) -> None:
        """Track the Tx rate as period of seconds per x transmits."""

        self._transmit_times.append(dt.now())

        _LOGGER.debug(f"Current Tx rate: {self._report_transmit_rate():.2f} pkts/min")

    # NOTE: Protocols call write_frame(), not write()
    def write(self, data: bytes) -> None:
        """Write the data to the underlying handler (deliberately unsupported)."""

        raise exc.TransportError("write() not implemented, use write_frame() instead")

    async def write_frame(self, frame: str, disable_tx_limits: bool = False) -> None:
        """Transmit a frame via the underlying handler (e.g. serial port, MQTT).

        Protocols call Transport.write_frame(), not Transport.write().
        """

        if self._disable_sending is True:
            raise exc.TransportError("Sending has been disabled")
        if self._closing is True:
            raise exc.TransportError("Transport is closing or has closed")

        self._track_transmit_rate()

        await self._write_frame(frame)

    async def _write_frame(self, frame: str) -> None:
        """Write some data bytes to the underlying transport (subclass hook)."""

        raise NotImplementedError("_write_frame() not implemented here")
701
+
702
+
703
# maps a regex pattern -> its replacement (applied by _RegHackMixin._regex_hack)
_RegexRuleT: TypeAlias = dict[str, str]
704
+
705
+
706
+ class _RegHackMixin:
707
+ def __init__(
708
+ self, *args: Any, use_regex: dict[str, _RegexRuleT] | None = None, **kwargs: Any
709
+ ) -> None:
710
+ super().__init__(*args, **kwargs)
711
+
712
+ use_regex = use_regex or {}
713
+
714
+ self._inbound_rule: _RegexRuleT = use_regex.get(SZ_INBOUND, {})
715
+ self._outbound_rule: _RegexRuleT = use_regex.get(SZ_OUTBOUND, {})
716
+
717
+ @staticmethod
718
+ def _regex_hack(pkt_line: str, regex_rules: _RegexRuleT) -> str:
719
+ if not regex_rules:
720
+ return pkt_line
721
+
722
+ result = pkt_line
723
+ for k, v in regex_rules.items():
724
+ try:
725
+ result = re.sub(k, v, result)
726
+ except re.error as err:
727
+ _LOGGER.warning(f"{pkt_line} < issue with regex ({k}, {v}): {err}")
728
+
729
+ if result != pkt_line and not _DBG_DISABLE_REGEX_WARNINGS:
730
+ _LOGGER.warning(f"{pkt_line} < Changed by use_regex to: {result}")
731
+ return result
732
+
733
+ def _frame_read(self, dtm_str: str, frame: str) -> None:
734
+ super()._frame_read(dtm_str, self._regex_hack(frame, self._inbound_rule)) # type: ignore[misc]
735
+
736
+ async def write_frame(self, frame: str, disable_tx_limits: bool = False) -> None:
737
+ await super().write_frame(self._regex_hack(frame, self._outbound_rule)) # type: ignore[misc]
738
+
739
+
740
+ # ### Transports ######################################################################
741
+ # ### Implement the transports for File/dict (R/O), Serial, MQTT
742
+
743
+
744
class FileTransport(_ReadTransport, _FileTransportAbstractor):
    """Receive packets from a read-only source such as packet log or a dict."""

    def __init__(self, *args: Any, disable_sending: bool = True, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

        if bool(disable_sending) is False:  # this transport is inherently R/O
            raise exc.TransportSourceInvalid("This Transport cannot send packets")

        self._extra[SZ_READER_TASK] = self._reader_task = self._loop.create_task(
            self._start_reader(), name="FileTransport._start_reader()"
        )

        self._make_connection(None)

    async def _start_reader(self) -> None:  # TODO
        """Run the reader, then report (via connection_lost) how it ended."""
        self._reading = True
        try:
            await self._reader()
        except Exception as err:
            self.loop.call_soon_threadsafe(
                functools.partial(self._protocol.connection_lost, err)  # type: ignore[arg-type]
            )
        else:
            self.loop.call_soon_threadsafe(
                functools.partial(self._protocol.connection_lost, None)
            )

    # NOTE: self._frame_read() invoked from here
    async def _reader(self) -> None:  # TODO
        """Loop through the packet source for Frames and process them."""

        if isinstance(self._pkt_source, dict):
            for dtm_str, pkt_line in self._pkt_source.items():  # assume dtm_str is OK
                while not self._reading:
                    await asyncio.sleep(0.001)
                self._frame_read(dtm_str, pkt_line)
                # await asyncio.sleep(0)  # NOTE: big performance penalty if delay >0

        elif isinstance(self._pkt_source, str):  # file_name, used in client parse
            # open file file_name before reading
            try:
                with fileinput.input(files=self._pkt_source, encoding="utf-8") as file:
                    for dtm_pkt_line in file:  # self._pkt_source:
                        # TODO check dtm_str is OK
                        while not self._reading:
                            await asyncio.sleep(0.001)
                        # there may be blank lines in annotated log files
                        if (dtm_pkt_line := dtm_pkt_line.strip()) and dtm_pkt_line[
                            :1
                        ] != "#":
                            self._frame_read(dtm_pkt_line[:26], dtm_pkt_line[27:])
                            # this is where the parsing magic happens!
                        # await asyncio.sleep(0)  # NOTE: big performance penalty if delay >0
            except FileNotFoundError as err:
                _LOGGER.warning(f"Correct the packet file name; {err}")

        elif isinstance(self._pkt_source, TextIOWrapper):  # used by client monitor
            for dtm_pkt_line in self._pkt_source:  # should check dtm_str is OK
                while not self._reading:
                    await asyncio.sleep(0.001)
                # can be blank lines in annotated log files
                if (dtm_pkt_line := dtm_pkt_line.strip()) and dtm_pkt_line[:1] != "#":
                    self._frame_read(dtm_pkt_line[:26], dtm_pkt_line[27:])
                await asyncio.sleep(0)  # NOTE: big performance penalty if delay >0

        else:
            # BUG FIX: was f"...{self._pkt_source:!r}" - an invalid format spec
            # that raised ValueError instead of producing the intended message
            raise exc.TransportSourceInvalid(
                f"Packet source is not dict, TextIOWrapper or str: {self._pkt_source!r}"
            )

    def _close(self, exc: exc.RamsesException | None = None) -> None:
        """Close the transport (cancel any outstanding tasks)."""

        super()._close(exc)

        if self._reader_task:
            self._reader_task.cancel()
821
+
822
class PortTransport(_RegHackMixin, _FullTransport, _PortTransportAbstractor):  # type: ignore[misc]
    """Send/receive packets async to/from evofw3/HGI80 via a serial port.

    On construction, a background task polls the port with "signature" (puzzle)
    packets; the first echo identifies the gateway device and completes the
    connection handshake (see _create_connection()).

    See: https://github.com/ghoti57/evofw3
    """

    # future resolved with the echoed signature Packet (or None if no echo)
    _init_fut: asyncio.Future[Packet | None]
    # the connect_with/sans_signature() task created in _create_connection()
    _init_task: asyncio.Task[None]

    # partial-line carry-over between serial reads (frames are \r\n-terminated)
    _recv_buffer: bytes = b""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

        # _leak_sem() releases this semaphore periodically, so acquiring it in
        # write_frame() enforces a minimum gap between writes
        self._leaker_sem = asyncio.BoundedSemaphore()
        self._leaker_task = self._loop.create_task(
            self._leak_sem(), name="PortTransport._leak_sem()"
        )

        self._is_hgi80 = is_hgi80(self.serial.name)

        # NOTE(review): _init_fut is created inside _create_connection(); if a
        # packet were read before that task first runs, _pkt_read() would hit
        # an AttributeError - presumably the event loop ordering prevents this
        self._loop.create_task(
            self._create_connection(), name="PortTransport._create_connection()"
        )

    async def _create_connection(self) -> None:
        """Invoke the Protocols's connection_made() callback after HGI80 discovery."""

        # HGI80s (and also VMs) take longer to send signature packets as they have long
        # initialisation times, so we must wait until they send OK

        # signature also serves to discover the HGI's device_id (& for pkt log, if any)

        async def connect_sans_signature() -> None:
            """Call connection_made() without sending/waiting for a signature."""

            self._init_fut.set_result(None)
            self._make_connection(gwy_id=None)

        async def connect_with_signature() -> None:
            """Poll port with signatures, call connection_made() after first echo."""

            # TODO: send a 2nd signature, but with addr0 set to learned GWY address
            # TODO: a HGI80 will silently drop this cmd, so an echo would tell us
            # TODO: that the GWY is evofw3-compatible

            sig = Command._puzzle()
            self._extra[SZ_SIGNATURE] = sig.payload  # matched in _pkt_read()

            num_sends = 0
            while num_sends < _SIGNATURE_MAX_TRYS:
                num_sends += 1

                await self._write_frame(str(sig))
                await asyncio.sleep(_SIGNATURE_GAP_SECS)

                # _pkt_read() resolves the future when the echo is seen
                if self._init_fut.done():
                    pkt = self._init_fut.result()
                    self._make_connection(gwy_id=pkt.src.id if pkt else None)
                    return

            # no echo after all tries: connect anyway, with an unknown gwy_id
            if not self._init_fut.done():
                self._init_fut.set_result(None)

            self._make_connection(gwy_id=None)
            return

        self._init_fut = asyncio.Future()
        if self._disable_sending:  # can't poll with signatures if Tx is disabled
            self._init_task = self._loop.create_task(
                connect_sans_signature(), name="PortTransport.connect_sans_signature()"
            )
        else:
            self._init_task = self._loop.create_task(
                connect_with_signature(), name="PortTransport.connect_with_signature()"
            )

        try:  # wait to get (1st) signature echo from evofw3/HGI80, if any
            await asyncio.wait_for(self._init_fut, timeout=_SIGNATURE_MAX_SECS)
        except TimeoutError as err:
            raise exc.TransportSerialError(
                f"Failed to initialise Transport within {_SIGNATURE_MAX_SECS} secs"
            ) from err

    async def _leak_sem(self) -> None:
        """Used to enforce a minimum time between calls to self.write()."""
        while True:
            await asyncio.sleep(MIN_INTER_WRITE_GAP)
            # BoundedSemaphore raises ValueError if already at its bound
            with contextlib.suppress(ValueError):
                self._leaker_sem.release()

    # NOTE: self._frame_read() invoked from here
    def _read_ready(self) -> None:
        """Make Frames from the read data and process them."""

        def bytes_read(data: bytes) -> Iterable[tuple[dt, bytes]]:
            # yield only complete \r\n-terminated lines; keep the remainder
            # buffered in self._recv_buffer for the next read
            self._recv_buffer += data
            if b"\r\n" in self._recv_buffer:
                lines = self._recv_buffer.split(b"\r\n")
                self._recv_buffer = lines[-1]
                for line in lines[:-1]:
                    yield self._dt_now(), line + b"\r\n"

        try:
            data: bytes = self.serial.read(self._max_read_size)
        except SerialException as err:
            if not self._closing:
                self._close(exc=err)  # have to use _close() to pass in exception
            return

        if not data:
            return

        for dtm, raw_line in bytes_read(data):
            if _DBG_FORCE_FRAME_LOGGING:
                _LOGGER.warning("Rx: %s", raw_line)
            elif _LOGGER.getEffectiveLevel() == logging.INFO:  # log for INFO not DEBUG
                _LOGGER.info("Rx: %s", raw_line)

            self._frame_read(
                dtm.isoformat(timespec="milliseconds"), _normalise(_str(raw_line))
            )

    @track_system_syncs
    def _pkt_read(self, pkt: Packet) -> None:
        """Process a Packet, resolving the signature echo during init, if seen."""
        # NOTE: a signature can override an existing active gateway
        if (
            not self._init_fut.done()
            and pkt.code == Code._PUZZ
            and pkt.payload == self._extra[SZ_SIGNATURE]
        ):
            self._extra[SZ_ACTIVE_HGI] = pkt.src.id  # , by_signature=True)
            self._init_fut.set_result(pkt)

        super()._pkt_read(pkt)

    @limit_duty_cycle(MAX_DUTY_CYCLE_RATE)
    @avoid_system_syncs
    async def write_frame(self, frame: str, disable_tx_limits: bool = False) -> None:
        """Transmit a frame via the underlying handler (e.g. serial port, MQTT).

        Protocols call Transport.write_frame(), not Transport.write().
        """

        await self._leaker_sem.acquire()  # MIN_INTER_WRITE_GAP
        await super().write_frame(frame)

        # NOTE: The order should be: minimum gap between writes, duty cycle limits, and
        # then the code that avoids the controller sync cycles

    async def _write_frame(self, frame: str) -> None:
        """Write some data bytes to the underlying transport."""

        data = bytes(frame, "ascii") + b"\r\n"

        if _DBG_FORCE_FRAME_LOGGING:
            _LOGGER.warning("Tx: %s", data)
        elif _LOGGER.getEffectiveLevel() == logging.INFO:  # log for INFO not DEBUG
            _LOGGER.info("Tx: %s", data)

        try:
            self._write(data)
        except SerialException as err:
            self._abort(err)  # a write failure is treated as fatal
            return

    def _write(self, data: bytes) -> None:
        """Write raw bytes directly to the serial port."""
        self.serial.write(data)

    def _abort(self, exc: ExceptionT) -> None:  # type: ignore[override]  # used by serial_asyncio.SerialTransport
        """Abort the transport, cancelling the init and leaker tasks."""
        super()._abort(exc)  # type: ignore[arg-type]

        if self._init_task:
            self._init_task.cancel()
        if self._leaker_task:
            self._leaker_task.cancel()

    def _close(self, exc: exc.RamsesException | None = None) -> None:  # type: ignore[override]
        """Close the transport (cancel any outstanding tasks)."""

        super()._close(exc)

        if self._init_task:
            self._init_task.cancel()

        if self._leaker_task:
            self._leaker_task.cancel()
class MqttTransport(_FullTransport, _MqttTransportAbstractor):
    """Send/receive packets to/from ramses_esp via MQTT.

    A paho-mqtt client runs its own network thread (loop_start()); its
    callbacks hand work back to the asyncio loop via call_soon_threadsafe()
    where needed. Outbound writes are rate-limited with a token bucket.

    See: https://github.com/IndaloTech/ramses_esp
    """

    # used in .write_frame() to rate-limit the number of writes
    _MAX_TOKENS: Final[int] = MAX_TRANSMIT_RATE_TOKENS
    _TIME_WINDOW: Final[int] = DUTY_CYCLE_DURATION
    _TOKEN_RATE: Final[float] = _MAX_TOKENS / _TIME_WINDOW

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # _LOGGER.error("__init__(%s, %s)", args, kwargs)

        super().__init__(*args, **kwargs)

        # credentials may be %-encoded within the broker URL
        self._username = unquote(self._broker_url.username or "")
        self._password = unquote(self._broker_url.password or "")

        self._topic_base = validate_topic_path(self._broker_url.path)
        self._topic_pub = ""  # set in _create_connection(), e.g. .../18:017804/tx
        self._topic_sub = ""  # set in _create_connection(), e.g. .../18:017804/rx

        # QoS may be supplied as a query param, e.g. mqtt://host/path?qos=1
        self._mqtt_qos = int(parse_qs(self._broker_url.query).get("qos", ["0"])[0])

        self._connected = False
        self._connecting = False
        self._extra[SZ_IS_EVOFW3] = True

        # Reconnection settings
        self._reconnect_interval = 5.0  # seconds
        self._max_reconnect_interval = 300.0  # 5 minutes max
        self._reconnect_backoff = 1.5
        self._current_reconnect_interval = self._reconnect_interval
        self._reconnect_task: asyncio.Task[None] | None = None

        # used in .write_frame() to rate-limit the number of writes
        self._timestamp = perf_counter()
        self._max_tokens: float = self._MAX_TOKENS * 2  # allow for the initial burst
        self._num_tokens: float = self._MAX_TOKENS * 2

        # instantiate a paho mqtt client
        self.client = mqtt.Client(
            protocol=mqtt.MQTTv5, callback_api_version=CallbackAPIVersion.VERSION2
        )
        self.client.on_connect = self._on_connect
        self.client.on_connect_fail = self._on_connect_fail
        self.client.on_disconnect = self._on_disconnect
        self.client.on_message = self._on_message
        self.client.username_pw_set(self._username, self._password)
        # connect to the mqtt server
        self._attempt_connection()

    def _attempt_connection(self) -> None:
        """Attempt to connect to the MQTT broker."""
        if self._connecting or self._connected:
            return

        self._connecting = True
        try:
            self.client.connect_async(
                self._broker_url.hostname,  # type: ignore[arg-type]
                self._broker_url.port or 1883,  # 1883 is the default MQTT port
                60,  # keepalive, seconds
            )
            self.client.loop_start()  # starts paho's network thread
        except Exception as err:
            _LOGGER.error(f"Failed to initiate MQTT connection: {err}")
            self._connecting = False
            self._schedule_reconnect()

    def _schedule_reconnect(self) -> None:
        """Schedule a reconnection attempt with exponential backoff."""
        if self._closing or self._reconnect_task:
            return  # already closing down, or a reconnect is already pending

        _LOGGER.info(
            f"Scheduling MQTT reconnect in {self._current_reconnect_interval} seconds"
        )
        self._reconnect_task = self._loop.create_task(
            self._reconnect_after_delay(), name="MqttTransport._reconnect_after_delay()"
        )

    async def _reconnect_after_delay(self) -> None:
        """Wait and then attempt to reconnect."""
        try:
            await asyncio.sleep(self._current_reconnect_interval)

            # Increase backoff for next time
            self._current_reconnect_interval = min(
                self._current_reconnect_interval * self._reconnect_backoff,
                self._max_reconnect_interval,
            )

            _LOGGER.info("Attempting MQTT reconnection...")
            self._attempt_connection()
        except asyncio.CancelledError:
            pass
        finally:
            self._reconnect_task = None

    def _on_connect(
        self,
        client: mqtt.Client,
        userdata: Any,
        flags: dict[str, Any],
        reason_code: Any,
        properties: Any | None,
    ) -> None:
        """Handle the (possibly failed) CONNACK from the broker (paho callback)."""
        # _LOGGER.error("Mqtt._on_connect(%s, %s, %s, %s)", client, userdata, flags, reason_code.getName())

        self._connecting = False

        if reason_code.is_failure:
            _LOGGER.error(f"MQTT connection failed: {reason_code.getName()}")
            self._schedule_reconnect()
            return

        _LOGGER.info(f"MQTT connected: {reason_code.getName()}")

        # Reset reconnect interval on successful connection
        self._current_reconnect_interval = self._reconnect_interval

        # Cancel any pending reconnect task
        if self._reconnect_task:
            self._reconnect_task.cancel()
            self._reconnect_task = None

        self.client.subscribe(self._topic_base)  # hope to see 'online' message

    def _on_connect_fail(
        self,
        client: mqtt.Client,
        userdata: Any,
    ) -> None:
        """Handle a failed connection attempt (paho callback)."""
        _LOGGER.error("MQTT connection failed")

        self._connecting = False
        self._connected = False

        if not self._closing:
            self._schedule_reconnect()

    def _on_disconnect(
        self,
        client: mqtt.Client,
        userdata: Any,
        reason_code: Any,
        properties: Any | None,
    ) -> None:
        """Handle a broker disconnect, reconnecting unless we are closing (paho callback)."""
        _LOGGER.warning(f"MQTT disconnected: {reason_code.getName()}")

        self._connected = False

        # NOTE(review): the two branches below are together equivalent to
        # `if not self._closing: self._schedule_reconnect()` - candidate for
        # simplification once confirmed intentional

        # Only attempt reconnection if we didn't deliberately disconnect
        if not self._closing and not reason_code.is_failure:
            # This was an unexpected disconnect, schedule reconnection
            self._schedule_reconnect()
        elif reason_code.is_failure and not self._closing:
            # Connection failed, also schedule reconnection
            self._schedule_reconnect()

    def _create_connection(self, msg: mqtt.MQTTMessage) -> None:
        """Invoke the Protocols's connection_made() callback MQTT is established."""
        # _LOGGER.error("Mqtt._create_connection(%s)", msg)

        assert msg.payload == b"online", "Coding error"

        if self._connected:
            # the device was already connected once: just resume writing
            _LOGGER.info("MQTT device came back online - resuming writing")
            self._loop.call_soon_threadsafe(self._protocol.resume_writing)
            return

        _LOGGER.info("MQTT device is online - establishing connection")
        self._connected = True

        # the last 9 chars of the topic are the gateway device_id, e.g. 18:017804
        self._extra[SZ_ACTIVE_HGI] = msg.topic[-9:]

        self._topic_pub = msg.topic + "/tx"
        self._topic_sub = msg.topic + "/rx"

        self.client.subscribe(self._topic_sub, qos=self._mqtt_qos)

        self._make_connection(gwy_id=msg.topic[-9:])  # type: ignore[arg-type]

    # NOTE: self._frame_read() invoked from here
    def _on_message(
        self, client: mqtt.Client, userdata: Any, msg: mqtt.MQTTMessage
    ) -> None:
        """Make a Frame from the MQTT message and process it."""
        # _LOGGER.error(
        #     "Mqtt._on_message(%s, %s, %s)",
        #     client,
        #     userdata,
        #     (msg.timestamp, msg.topic, msg.payload),
        # )

        if _DBG_FORCE_FRAME_LOGGING:
            _LOGGER.warning("Rx: %s", msg.payload)
        elif _LOGGER.getEffectiveLevel() == logging.INFO:  # log for INFO not DEBUG
            _LOGGER.info("Rx: %s", msg.payload)

        if msg.topic[-3:] != "/rx":  # then, e.g. 'RAMSES/GATEWAY/18:017804'
            # a status (LWT) message, not a packet: 'online' or 'offline'
            if msg.payload == b"offline" and self._topic_sub.startswith(msg.topic):
                _LOGGER.warning(
                    f"{self}: the MQTT device is offline: {self._topic_sub[:-3]}"
                )
                self._connected = False
                self._protocol.pause_writing()

            # BUG: using create task (self._loop.ct() & asyncio.ct()) causes the
            # BUG: event look to close early
            elif msg.payload == b"online":
                _LOGGER.info(
                    f"{self}: the MQTT device is online: {self._topic_sub[:-3]}"
                )
                self._create_connection(msg)

            return

        try:
            payload = json.loads(msg.payload)
        except json.JSONDecodeError:
            _LOGGER.warning("%s < Can't decode JSON (ignoring)", msg.payload)
            return

        # HACK: hotfix for converting RAMSES_ESP dtm into local/naive dtm
        dtm = dt.fromisoformat(payload["ts"])
        if dtm.tzinfo is not None:
            dtm = dtm.astimezone().replace(tzinfo=None)
        # a very stale timestamp suggests the ESP's clock was never set
        if dtm < dt.now() - td(days=90):
            _LOGGER.warning(
                f"{self}: Have you configured the SNTP settings on the ESP?"
            )
        # FIXME: convert all dt early, and convert to aware, i.e. dt.now().astimezone()

        self._frame_read(dtm.isoformat(), _normalise(payload["msg"]))

    async def write_frame(self, frame: str, disable_tx_limits: bool = False) -> None:
        """Transmit a frame via the underlying handler (e.g. serial port, MQTT).

        Writes are rate-limited to _MAX_TOKENS Packets over the last _TIME_WINDOW
        seconds, except when disable_tx_limits is True (for e.g. user commands).

        Protocols call Transport.write_frame(), not Transport.write().
        """

        # Check if we're connected before attempting to write
        if not self._connected:
            _LOGGER.debug(f"{self}: Dropping write - MQTT not connected")
            return

        # top-up the token bucket
        timestamp = perf_counter()
        elapsed, self._timestamp = timestamp - self._timestamp, timestamp
        self._num_tokens = min(
            self._num_tokens + elapsed * self._TOKEN_RATE, self._max_tokens
        )

        # if would have to sleep >= 1 second, dump the write instead
        if self._num_tokens < 1.0 - self._TOKEN_RATE and not disable_tx_limits:
            _LOGGER.warning(f"{self}: Discarding write (tokens={self._num_tokens:.2f})")
            return

        self._num_tokens -= 1.0
        if self._max_tokens > self._MAX_TOKENS:  # what is the new max number of tokens
            # ramp the bucket cap down from the initial 2x burst towards
            # _MAX_TOKENS as tokens are consumed (never below _MAX_TOKENS)
            self._max_tokens = min(self._max_tokens, self._num_tokens)
            self._max_tokens = max(self._max_tokens, self._MAX_TOKENS)

        # if in token debt, sleep until the debt is paid
        if self._num_tokens < 0.0 and not disable_tx_limits:
            delay = (0 - self._num_tokens) / self._TOKEN_RATE
            _LOGGER.debug(f"{self}: Sleeping (seconds={delay})")
            await asyncio.sleep(delay)

        await super().write_frame(frame)

    async def _write_frame(self, frame: str) -> None:
        """Write some data bytes to the underlying transport."""
        # _LOGGER.error("Mqtt._write_frame(%s)", frame)

        data = json.dumps({"msg": frame})

        if _DBG_FORCE_FRAME_LOGGING:
            _LOGGER.warning("Tx: %s", data)
        elif _LOGGER.getEffectiveLevel() == logging.INFO:  # log for INFO not DEBUG
            _LOGGER.info("Tx: %s", data)

        try:
            self._publish(data)
        except MQTTException as err:
            _LOGGER.error(f"MQTT publish failed: {err}")
            # Don't close the transport, just log the error and continue
            # The broker might come back online
            return

    def _publish(self, payload: str) -> None:
        """Publish the payload to the gateway's tx topic, handling soft failures."""
        # _LOGGER.error("Mqtt._publish(%s)", message)

        if not self._connected:
            _LOGGER.debug("Cannot publish - MQTT not connected")
            return

        info: mqtt.MQTTMessageInfo = self.client.publish(
            self._topic_pub, payload=payload, qos=self._mqtt_qos
        )

        if not info:
            _LOGGER.warning("MQTT publish returned no info")
        elif info.rc != mqtt.MQTT_ERR_SUCCESS:
            _LOGGER.warning(f"MQTT publish failed with code: {info.rc}")
            # Check if this indicates a connection issue
            if info.rc in (mqtt.MQTT_ERR_NO_CONN, mqtt.MQTT_ERR_CONN_LOST):
                self._connected = False
                if not self._closing:
                    self._schedule_reconnect()

    def _close(self, exc: exc.RamsesException | None = None) -> None:
        """Close the transport (disconnect from the broker and stop its poller)."""
        # _LOGGER.error("Mqtt._close(%s)", exc)

        super()._close(exc)

        # Cancel any pending reconnection attempts
        if self._reconnect_task:
            self._reconnect_task.cancel()
            self._reconnect_task = None

        # NOTE(review): when never/no-longer connected, loop_stop() is skipped
        # here - confirm the paho network thread cannot be left running
        if not self._connected:
            return
        self._connected = False

        try:
            self.client.unsubscribe(self._topic_sub)
            self.client.disconnect()
            self.client.loop_stop()
        except Exception as err:
            _LOGGER.debug(f"Error during MQTT cleanup: {err}")
def validate_topic_path(path: str) -> str:
    """Normalise and validate the MQTT topic path, returning the usable topic.

    An empty path defaults to the gateway base topic; a bare base topic gains
    a trailing single-level wildcard. Raises ValueError for anything that is
    not a three-level topic under the gateway base.
    """

    # The user can supply the following paths:
    # - ""
    # - "/RAMSES/GATEWAY"
    # - "/RAMSES/GATEWAY/+" (the previous two are equivalent to this one)
    # - "/RAMSES/GATEWAY/18:123456"

    # "RAMSES/GATEWAY/+"                -> online, online, ...
    # "RAMSES/GATEWAY/18:017804"        -> online
    # "RAMSES/GATEWAY/18:017804/info/+" -> ramses_esp/0.4.0
    # "RAMSES/GATEWAY/+/rx"             -> pkts from all gateways

    topic = (path or SZ_RAMSES_GATEWAY).removeprefix("/")

    if not topic.startswith(SZ_RAMSES_GATEWAY):
        raise ValueError(f"Invalid topic path: {path}")

    if topic == SZ_RAMSES_GATEWAY:  # no device_id given: subscribe to all
        topic = f"{topic}/+"

    if topic.count("/") != 2:  # must be exactly three levels
        raise ValueError(f"Invalid topic path: {path}")

    return topic
# Union of the concrete Transport implementations returned by transport_factory()
RamsesTransportT: TypeAlias = FileTransport | MqttTransport | PortTransport
async def transport_factory(
    protocol: RamsesProtocolT,
    /,
    *,
    port_name: SerPortNameT | None = None,
    port_config: PortConfigT | None = None,
    packet_log: str | None = None,
    packet_dict: dict[str, str] | None = None,
    disable_sending: bool | None = False,
    extra: dict[str, Any] | None = None,
    loop: asyncio.AbstractEventLoop | None = None,
    **kwargs: Any,  # HACK: odd/misc params
) -> RamsesTransportT:
    """Create and return a Ramses-specific async packet Transport.

    Exactly one of packet_dict, packet_log or port_name must be given:
    - packet_dict/packet_log -> FileTransport
    - port_name "mqtt..."    -> MqttTransport
    - any other port_name    -> PortTransport (serial)

    Raises exc.TransportSourceInvalid for a bad source, and
    exc.TransportSerialError if the connection is not made in time.
    """

    # kwargs are specific to a transport. The above transports have:
    # evofw3_flag, use_regex

    def get_serial_instance(  # type: ignore[no-any-unimported]
        ser_name: SerPortNameT, ser_config: PortConfigT | None
    ) -> Serial:
        """Return a Serial instance for the given port name and config.

        May: raise TransportSourceInvalid("Unable to open serial port...")
        """
        # For example:
        # - python client.py monitor 'rfc2217://localhost:5001'
        # - python client.py monitor 'alt:///dev/ttyUSB0?class=PosixPollSerial'

        ser_config = SCH_SERIAL_PORT_CONFIG(ser_config or {})

        try:
            ser_obj = serial_for_url(ser_name, **ser_config)
        except SerialException as err:
            _LOGGER.error(
                "Failed to open %s (config: %s): %s", ser_name, ser_config, err
            )
            raise exc.TransportSourceInvalid(
                f"Unable to open the serial port: {ser_name}"
            ) from err

        # FTDI on Posix/Linux would be a common environment for this library...
        with contextlib.suppress(AttributeError, NotImplementedError, ValueError):
            ser_obj.set_low_latency_mode(True)

        return ser_obj

    def issue_warning() -> None:
        """Warn of the perils of semi-supported configurations."""
        _LOGGER.warning(
            f"{'Windows' if os.name == 'nt' else 'This type of serial interface'} "
            "is not fully supported by this library: "
            "please don't report any Transport/Protocol errors/warnings, "
            "unless they are reproducible with a standard configuration "
            "(e.g. linux with a local serial port)"
        )

    if sum(x is not None for x in (packet_dict, packet_log, port_name)) != 1:
        raise exc.TransportSourceInvalid(
            "Packet source must be exactly one of: packet_dict, packet_log, port_name"
        )

    # FIX: test `is not None` explicitly (was `packet_log or packet_dict`), so
    # that a falsy-but-supplied source (e.g. packet_log="") is still routed to
    # the FileTransport branch rather than falling through to the serial code
    pkt_source: str | dict[str, str] | None
    pkt_source = packet_log if packet_log is not None else packet_dict
    if pkt_source is not None:
        return FileTransport(pkt_source, protocol, extra=extra, loop=loop, **kwargs)

    assert port_name is not None  # mypy check

    if port_name.startswith("mqtt"):  # TODO: handle disable_sending
        transport = MqttTransport(port_name, protocol, extra=extra, loop=loop, **kwargs)

        # TODO: remove this? better to invoke timeout after factory returns?
        await protocol.wait_for_connection_made(timeout=_DEFAULT_TIMEOUT_MQTT)
        return transport

    # FIX: was `assert port_config is not None`, but port_config legitimately
    # defaults to None (see signature) & get_serial_instance() tolerates None
    ser_instance = get_serial_instance(port_name, port_config)

    if os.name == "nt" or ser_instance.portstr[:7] in ("rfc2217", "socket:"):
        issue_warning()  # TODO: add tests for these...

    transport = PortTransport(  # type: ignore[assignment]
        ser_instance,
        protocol,
        disable_sending=bool(disable_sending),
        extra=extra,
        loop=loop,
        **kwargs,
    )

    # TODO: remove this? better to invoke timeout after factory returns?
    await protocol.wait_for_connection_made(timeout=_DEFAULT_TIMEOUT_PORT)
    return transport