ramses-rf 0.22.40__py3-none-any.whl → 0.51.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ramses_cli/__init__.py +18 -0
- ramses_cli/client.py +597 -0
- ramses_cli/debug.py +20 -0
- ramses_cli/discovery.py +405 -0
- ramses_cli/utils/cat_slow.py +17 -0
- ramses_cli/utils/convert.py +60 -0
- ramses_rf/__init__.py +31 -10
- ramses_rf/binding_fsm.py +787 -0
- ramses_rf/const.py +124 -105
- ramses_rf/database.py +297 -0
- ramses_rf/device/__init__.py +69 -39
- ramses_rf/device/base.py +187 -376
- ramses_rf/device/heat.py +540 -552
- ramses_rf/device/hvac.py +279 -171
- ramses_rf/dispatcher.py +153 -177
- ramses_rf/entity_base.py +478 -361
- ramses_rf/exceptions.py +82 -0
- ramses_rf/gateway.py +377 -513
- ramses_rf/helpers.py +57 -19
- ramses_rf/py.typed +0 -0
- ramses_rf/schemas.py +148 -194
- ramses_rf/system/__init__.py +16 -23
- ramses_rf/system/faultlog.py +363 -0
- ramses_rf/system/heat.py +295 -302
- ramses_rf/system/schedule.py +312 -198
- ramses_rf/system/zones.py +318 -238
- ramses_rf/version.py +2 -8
- ramses_rf-0.51.2.dist-info/METADATA +72 -0
- ramses_rf-0.51.2.dist-info/RECORD +55 -0
- {ramses_rf-0.22.40.dist-info → ramses_rf-0.51.2.dist-info}/WHEEL +1 -2
- ramses_rf-0.51.2.dist-info/entry_points.txt +2 -0
- {ramses_rf-0.22.40.dist-info → ramses_rf-0.51.2.dist-info/licenses}/LICENSE +1 -1
- ramses_tx/__init__.py +160 -0
- {ramses_rf/protocol → ramses_tx}/address.py +65 -59
- ramses_tx/command.py +1454 -0
- ramses_tx/const.py +903 -0
- ramses_tx/exceptions.py +92 -0
- {ramses_rf/protocol → ramses_tx}/fingerprints.py +56 -15
- {ramses_rf/protocol → ramses_tx}/frame.py +132 -131
- ramses_tx/gateway.py +338 -0
- ramses_tx/helpers.py +883 -0
- {ramses_rf/protocol → ramses_tx}/logger.py +67 -53
- {ramses_rf/protocol → ramses_tx}/message.py +155 -191
- ramses_tx/opentherm.py +1260 -0
- ramses_tx/packet.py +210 -0
- {ramses_rf/protocol → ramses_tx}/parsers.py +1266 -1003
- ramses_tx/protocol.py +801 -0
- ramses_tx/protocol_fsm.py +672 -0
- ramses_tx/py.typed +0 -0
- {ramses_rf/protocol → ramses_tx}/ramses.py +262 -185
- {ramses_rf/protocol → ramses_tx}/schemas.py +150 -133
- ramses_tx/transport.py +1471 -0
- ramses_tx/typed_dicts.py +492 -0
- ramses_tx/typing.py +181 -0
- ramses_tx/version.py +4 -0
- ramses_rf/discovery.py +0 -398
- ramses_rf/protocol/__init__.py +0 -59
- ramses_rf/protocol/backports.py +0 -42
- ramses_rf/protocol/command.py +0 -1576
- ramses_rf/protocol/const.py +0 -697
- ramses_rf/protocol/exceptions.py +0 -111
- ramses_rf/protocol/helpers.py +0 -390
- ramses_rf/protocol/opentherm.py +0 -1170
- ramses_rf/protocol/packet.py +0 -235
- ramses_rf/protocol/protocol.py +0 -613
- ramses_rf/protocol/transport.py +0 -1011
- ramses_rf/protocol/version.py +0 -10
- ramses_rf/system/hvac.py +0 -82
- ramses_rf-0.22.40.dist-info/METADATA +0 -64
- ramses_rf-0.22.40.dist-info/RECORD +0 -42
- ramses_rf-0.22.40.dist-info/top_level.txt +0 -1
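The headline change in this release range is structural: the wire-protocol layer under ramses_rf/protocol was split out into a new top-level ramses_tx package (see the `{ramses_rf/protocol → ramses_tx}` renames above), and the CLI tooling moved into a new ramses_cli package. Downstream imports need updating accordingly; a sketch of the migration, assuming the module-level names are otherwise unchanged (verify against the 0.51.2 sources):

    # Before (0.22.40): the protocol layer lived under ramses_rf.protocol
    # from ramses_rf.protocol.command import Command
    # from ramses_rf.protocol.message import Message

    # After (0.51.2): the same layer is the standalone ramses_tx package
    from ramses_tx.command import Command
    from ramses_tx.message import Message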
ramses_rf/system/schedule.py
CHANGED
@@ -1,36 +1,33 @@
 #!/usr/bin/env python3
-
-
-
+"""RAMSES RF - Expose an 0404 schedule (is a stateful process)."""
+
+# TODO: use schemas from evohome_async
 
-Construct a command (packet that is to be sent).
-"""
 from __future__ import annotations
 
 import asyncio
 import logging
 import struct
 import zlib
+from collections.abc import Iterable
 from datetime import timedelta as td
-from typing import Any,
-
-import voluptuous as vol  # type: ignore[import]
+from typing import TYPE_CHECKING, Any, Final, NotRequired, TypeAlias, TypedDict
 
-
+import voluptuous as vol  # type: ignore[import, unused-ignore]
 
-from
+from ramses_rf.const import (
     SZ_FRAG_NUMBER,
     SZ_FRAGMENT,
     SZ_SCHEDULE,
     SZ_TOTAL_FRAGS,
     SZ_ZONE_IDX,
-    __dev_mode__,
 )
-from
-from
+from ramses_tx.command import Command
+from ramses_tx.const import SZ_CHANGE_COUNTER, Priority
+from ramses_tx.message import Message
+from ramses_tx.packet import Packet
 
-#
-from ..const import (  # noqa: F401, isort: skip, pylint: disable=unused-import
+from ramses_rf.const import (  # noqa: F401, isort: skip, pylint: disable=unused-import
     I_,
     RP,
     RQ,
@@ -38,83 +35,139 @@ from ..const import ( # noqa: F401, isort: skip, pylint: disable=unused-import
     Code,
 )
 
+if TYPE_CHECKING:
+    from ramses_rf.system.zones import DhwZone, Zone
+
+
+class EmptyDictT(TypedDict):
+    pass
+
+
+class SwitchPointDhw(TypedDict):
+    time_of_day: str
+    enabled: bool
+
+
+class SwitchPointZon(TypedDict):
+    time_of_day: str
+    heat_setpoint: float
+
+
+SwitchPointT: TypeAlias = SwitchPointDhw | SwitchPointZon
+SwitchPointsT: TypeAlias = list[SwitchPointDhw] | list[SwitchPointZon]
+
+
+class DayOfWeek(TypedDict):
+    day_of_week: int
+    switchpoints: SwitchPointsT
 
-MSG = "msg"
 
-
-
-
-
-
+DayOfWeekT: TypeAlias = DayOfWeek
+InnerScheduleT: TypeAlias = list[DayOfWeek]
+
+
+class _OuterSchedule(TypedDict):
+    zone_idx: str
+    schedule: InnerScheduleT
+
+
+class _EmptySchedule(TypedDict):
+    zone_idx: str
+    schedule: NotRequired[EmptyDictT | None]
+
+
+OuterScheduleT: TypeAlias = _OuterSchedule | _EmptySchedule
+
+
+_LOGGER = logging.getLogger(__name__)
+
 
 FIVE_MINS = td(minutes=5)
 
-
+SZ_MSG: Final = "msg"
+
+SZ_DAY_OF_WEEK: Final = "day_of_week"
+SZ_HEAT_SETPOINT: Final = "heat_setpoint"
+SZ_SWITCHPOINTS: Final = "switchpoints"
+SZ_TIME_OF_DAY: Final = "time_of_day"
+SZ_ENABLED: Final = "enabled"
+
+REGEX_TIME_OF_DAY: Final = r"^([0-1][0-9]|2[0-3]):[0-5][05]$"
 
 
 def schema_sched(schema_switchpoint: vol.Schema) -> vol.Schema:
     schema_sched_day = vol.Schema(
         {
-            vol.Required(
-            vol.Required(
+            vol.Required(SZ_DAY_OF_WEEK): int,
+            vol.Required(SZ_SWITCHPOINTS): vol.All(
                 [schema_switchpoint], vol.Length(min=1)
             ),
         },
         extra=vol.PREVENT_EXTRA,
     )
     return vol.Schema(
-        vol.
+        vol.All([schema_sched_day], vol.Length(min=0, max=7)),
         extra=vol.PREVENT_EXTRA,
     )
 
 
 SCH_SWITCHPOINT_DHW = vol.Schema(
     {
-        vol.Required(
-        vol.Required(
+        vol.Required(SZ_TIME_OF_DAY): vol.Match(REGEX_TIME_OF_DAY),
+        vol.Required(SZ_ENABLED): bool,
     },
     extra=vol.PREVENT_EXTRA,
 )
+
 SCH_SWITCHPOINT_ZON = vol.Schema(
     {
-        vol.Required(
-        vol.Required(
+        vol.Required(SZ_TIME_OF_DAY): vol.Match(REGEX_TIME_OF_DAY),
+        vol.Required(SZ_HEAT_SETPOINT): vol.All(
             vol.Coerce(float), vol.Range(min=5, max=35)
         ),
     },
     extra=vol.PREVENT_EXTRA,
 )
-
+
+SCH_SCHEDULE_DHW = schema_sched(SCH_SWITCHPOINT_DHW)
+SCH_SCHEDULE_DHW_OUTER = vol.Schema(
     {
         vol.Required(SZ_ZONE_IDX): "HW",
-        vol.Required(SZ_SCHEDULE):
+        vol.Required(SZ_SCHEDULE): SCH_SCHEDULE_DHW,
     },
     extra=vol.PREVENT_EXTRA,
 )
-
+
+SCH_SCHEDULE_ZON = schema_sched(SCH_SWITCHPOINT_ZON)
+SCH_SCHEDULE_ZON_OUTER = vol.Schema(
     {
         vol.Required(SZ_ZONE_IDX): vol.Match(r"0[0-F]"),
-        vol.Required(SZ_SCHEDULE):
+        vol.Required(SZ_SCHEDULE): SCH_SCHEDULE_ZON,
     },
     extra=vol.PREVENT_EXTRA,
 )
-
-
+
+SCH_FULL_SCHEDULE = vol.Schema(
+    vol.Any(SCH_SCHEDULE_DHW_OUTER, SCH_SCHEDULE_ZON_OUTER),
+    extra=vol.PREVENT_EXTRA,
 )
 
-DEV_MODE = __dev_mode__ and False
 
-
-
-
+_PayloadT: TypeAlias = dict[str, Any]  # Message payload
+_PayloadSetT: TypeAlias = list[_PayloadT | None]
+
+_FragmentT: TypeAlias = str
+_FragmentSetT: TypeAlias = list[_FragmentT]
+
+EMPTY_PAYLOAD_SET: _PayloadSetT = [None]
 
 
+# TODO: make stateful (a la binding)
 class Schedule:  # 0404
     """The schedule of a zone."""
 
-    def __init__(self, zone
-
-        self._loop = zone._gwy._loop
+    def __init__(self, zone: DhwZone | Zone) -> None:
+        _LOGGER.debug("Schedule(zon=%s).__init__()", zone)
 
         self.id = zone.id
         self._zone = zone
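The hunk above replaces bare dicts and loose constants with TypedDicts, `Final` string keys, and a time-of-day regex, then composes them into voluptuous validators (per-switchpoint, per-day via schema_sched, and outer schemas keyed on zone_idx). A minimal standalone sketch of the switchpoint validation, with the constants inlined for brevity:

    import voluptuous as vol

    REGEX_TIME_OF_DAY = r"^([0-1][0-9]|2[0-3]):[0-5][05]$"  # HH:MM, on 5-minute boundaries

    SCH_SWITCHPOINT_ZON = vol.Schema(
        {
            vol.Required("time_of_day"): vol.Match(REGEX_TIME_OF_DAY),
            vol.Required("heat_setpoint"): vol.All(
                vol.Coerce(float), vol.Range(min=5, max=35)
            ),
        },
        extra=vol.PREVENT_EXTRA,
    )

    SCH_SWITCHPOINT_ZON({"time_of_day": "06:30", "heat_setpoint": 21})  # ok, coerced to 21.0
    try:
        SCH_SWITCHPOINT_ZON({"time_of_day": "06:31", "heat_setpoint": 21})
    except vol.Invalid:
        pass  # rejected: 06:31 is not on a 5-minute boundary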
@@ -124,14 +177,13 @@ class Schedule: # 0404
         self.tcs = zone.tcs
         self._gwy = zone._gwy
 
-        self.
-        self._schedule_done = None  # TODO: deprecate
+        self._full_schedule: OuterScheduleT | EmptyDictT = {}
 
-        self.
-        self.
+        self._payload_set: _PayloadSetT = EMPTY_PAYLOAD_SET  # Rx'd
+        self._fragments: _FragmentSetT = []  # to Tx
 
-        self._global_ver
-        self._sched_ver
+        self._global_ver = 0  # None is a sentinel for 'dont know'
+        self._sched_ver = 0  # the global_ver when this schedule was retrieved
 
     def __str__(self) -> str:
         return f"{self._zone} (schedule)"
@@ -139,15 +191,19 @@ class Schedule: # 0404
     def _handle_msg(self, msg: Message) -> None:
         """Process a schedule packet: if possible, create the corresponding schedule."""
 
-        if msg.code == Code._0006:
+        if msg.code == Code._0006:  # keep up, in cause is useful to know in future
            self._global_ver = msg.payload[SZ_CHANGE_COUNTER]
            return
 
+        if msg.code != Code._0404:
+            return
+
+        # can do via here, or via gwy.async_send_cmd(cmd)
         # next line also in self._get_schedule(), so protected here with a lock
-        if msg.payload[SZ_TOTAL_FRAGS] !=
-            self.
+        if msg.payload[SZ_TOTAL_FRAGS] != 0xFF and self.tcs.zone_lock_idx != self.idx:
+            self._payload_set = self._update_payload_set(self._payload_set, msg.payload)
 
-    async def _is_dated(self, *, force_io: bool = False) ->
+    async def _is_dated(self, *, force_io: bool = False) -> tuple[bool, bool]:
         """Indicate if it is possible that a more recent schedule is available.
 
         If required, retrieve the latest global version (change counter) from the
@@ -182,7 +238,9 @@ class Schedule: # 0404
 
         return self._global_ver > self._sched_ver, did_io  # is_dated, did_io
 
-    async def get_schedule(
+    async def get_schedule(
+        self, *, force_io: bool = False, timeout: float = 15
+    ) -> InnerScheduleT | None:
         """Retrieve/return the brief schedule of a zone.
 
         Return the cached schedule (which may have been eavesdropped) only if the
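These two hunks implement the staleness check cheaply: each 0006 reply carries a controller-wide change counter, and the cached 0404 schedule is treated as dated once that counter moves past the value captured when the schedule was last retrieved. Reduced to its core (illustrative names, not the package's API):

    class VersionTracker:
        """Illustrative reduction of the _global_ver / _sched_ver bookkeeping."""

        def __init__(self) -> None:
            self.global_ver = 0  # latest 0006 change counter heard from the controller
            self.sched_ver = 0  # counter captured when this schedule was retrieved

        def on_change_counter(self, counter: int) -> None:  # from a 0006 RP
            self.global_ver = counter

        @property
        def is_dated(self) -> bool:
            return self.global_ver > self.sched_ver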
@@ -193,150 +251,159 @@ class Schedule: # 0404
         """
 
         try:
-            await asyncio.wait_for(
-
-
+            await asyncio.wait_for(
+                self._get_schedule(force_io=force_io), timeout=timeout
+            )
+        except TimeoutError as err:
+            raise TimeoutError(
+                f"Failed to obtain schedule within {timeout} secs"
+            ) from err
+        # TODO: raise a more parochial exception
         return self.schedule
 
-    async def _get_schedule(self, *, force_io: bool = False) -> None
-        """Retrieve/return the
+    async def _get_schedule(self, *, force_io: bool = False) -> None:
+        """Retrieve/return the schedule of a zone (sets self._full_schedule)."""
 
-        async def get_fragment(frag_num: int):  # may: TimeoutError?
-            """
+        async def get_fragment(frag_num: int) -> _PayloadT:  # may: TimeoutError?
+            """Retrieve a schedule fragment from the controller."""
 
-            frag_set_size = 0 if frag_num == 1 else
+            frag_set_size = 0 if frag_num == 1 else _len(self._payload_set)
             cmd = Command.get_schedule_fragment(
                 self.ctl.id, self.idx, frag_num, frag_set_size
             )
-
-
-
-
-
-
+            pkt: Packet = await self._gwy.async_send_cmd(
+                cmd, wait_for_reply=True, priority=Priority.HIGH
+            )
+            msg = Message(pkt)
+            assert isinstance(msg.payload, dict)  # mypy check
+            return msg.payload  # may: TimeoutError?
 
         is_dated, did_io = await self._is_dated(force_io=force_io)
         if is_dated:
-            self.
-            if self.
-                return
+            self._full_schedule = {}  # keep frags, maybe only other scheds have changed
+        if self._full_schedule:
+            return
 
         await self.tcs._obtain_lock(self.idx)  # maybe raise TimeOutError
 
         if not did_io:  # must know the version of the schedule about to be RQ'd
             self._global_ver, _ = await self.tcs._schedule_version(force_io=True)
 
-        self.
-        while frag_num := next(
+        self._payload_set[0] = None  # if 1st frag valid: schedule very likely unchanged
+        while frag_num := next(
+            i for i, f in enumerate(self._payload_set, 1) if f is None
+        ):
             fragment = await get_fragment(frag_num)
             # next line also in self._handle_msg(), so protected there with a lock
-            self.
-            if self.
-                self._sched_ver = self._global_ver
+            self._payload_set = self._update_payload_set(self._payload_set, fragment)
+            if self._full_schedule:  # TODO: potential for infinite loop?
+                self._sched_ver = self._global_ver  # type: ignore[unreachable]
                 break
 
         self.tcs._release_lock()
-        return self.schedule
 
-    def
-        """Process a
+    def _proc_payload_set(self, payload_set: _PayloadSetT) -> OuterScheduleT | None:
+        """Process a payload set and return the full schedule (sets `self._schedule`).
 
         If the schedule is for DHW, set the `zone_idx` key to 'HW' (to avoid confusing
         with zone '00').
         """
-
-
-
+
+        # TODO: relying upon caller to ensure set is only empty or full
+
+        if payload_set == EMPTY_PAYLOAD_SET:
+            self._full_schedule = {SZ_ZONE_IDX: self.idx}
+            return self._full_schedule
+
         try:
-            schedule =
+            schedule = fragz_to_full_sched(
+                payload[SZ_FRAGMENT] for payload in payload_set if payload
+            )  # TODO: messy - what is set not full
         except zlib.error:
-            return None
+            return None  # TODO: raise a more parochial exception
+
         if self.idx == "HW":
             schedule[SZ_ZONE_IDX] = "HW"
-        self.
-        return self._schedule  # NOTE: not self.schedule
+        self._full_schedule = schedule
 
-
-    def _init_set(fragment: dict = None) -> list:  # return frag_set
-        """Return a new frag set, after initializing it with an optional fragment."""
-        if fragment is None or fragment[SZ_TOTAL_FRAGS] is None:
-            return [None]
-        frag_set = [None] * fragment[SZ_TOTAL_FRAGS]
-        frag_set[fragment[SZ_FRAG_NUMBER] - 1] = fragment
-        return frag_set
+        return self._full_schedule  # NOTE: not self.schedule
 
-
-
-
-        Return 0 if the expected set size is unknown (sentinel value as per RAMSES II).
-
-        Uses frag_set[i][SZ_TOTAL_FRAGS] instead of `len(frag_set)` (is necessary?).
-        """
-        for frag in (f for f in frag_set if f is not None):  # they will all match
-            assert len(frag_set) == frag[SZ_TOTAL_FRAGS]  # TODO: remove
-            return frag[SZ_TOTAL_FRAGS]
-        assert len(frag_set) == 1 and frag_set == [None]  # TODO: remove
-        return 0  # sentinel value as per RAMSES protocol
-
-    def _incr_set(self, frag_set: list, fragment: dict) -> list:  # return frag_set
+    def _update_payload_set(
+        self, payload_set: _PayloadSetT, payload: _PayloadT
+    ) -> _PayloadSetT:
         """Add a fragment to a frag set and process/return the new set.
 
         If the frag set is complete, check for a schedule (sets `self._schedule`).
 
         If required, start a new frag set with the fragment.
         """
-
-
-
-
-
-
-
-
-
-
-
-
+
+        def init_payload_set(payload: _PayloadT) -> _PayloadSetT:
+            payload_set: _PayloadSetT = [None] * payload[SZ_TOTAL_FRAGS]
+            payload_set[payload[SZ_FRAG_NUMBER] - 1] = payload
+            return payload_set
+
+        if payload[SZ_TOTAL_FRAGS] is None:  # zone has no schedule
+            payload_set = EMPTY_PAYLOAD_SET
+            self._proc_payload_set(payload_set)
+            return payload_set
+
+        if payload[SZ_TOTAL_FRAGS] != _len(payload_set):  # sched has changed
+            return init_payload_set(payload)
+
+        payload_set[payload[SZ_FRAG_NUMBER] - 1] = payload
+        if None in payload_set or self._proc_payload_set(
+            payload_set
+        ):  # sets self._schedule
+            return payload_set
+
+        return init_payload_set(payload)
+
+    async def set_schedule(
+        self, schedule: InnerScheduleT, force_refresh: bool = False
+    ) -> InnerScheduleT | None:
         """Set the schedule of a zone."""
 
-        async def put_fragment(frag_num, frag_cnt, fragment) -> None:
+        async def put_fragment(frag_num: int, frag_cnt: int, fragment: str) -> None:
             """Send a schedule fragment to the controller."""
 
-            #
             cmd = Command.set_schedule_fragment(
                 self.ctl.id, self.idx, frag_num, frag_cnt, fragment
             )
-            await self._gwy.async_send_cmd(
+            await self._gwy.async_send_cmd(
+                cmd, wait_for_reply=True, priority=Priority.HIGH
+            )
+
+        def normalise_validate(schedule: InnerScheduleT) -> _OuterSchedule:
+            full_schedule: _OuterSchedule
 
-        def normalise_validate(schedule) -> dict:
             if self.idx == "HW":
-
-
+                full_schedule = {SZ_ZONE_IDX: "HW", SZ_SCHEDULE: schedule}
+                schedule_schema = SCH_SCHEDULE_DHW_OUTER
             else:
-
-
+                full_schedule = {SZ_ZONE_IDX: self.idx, SZ_SCHEDULE: schedule}
+                schedule_schema = SCH_SCHEDULE_ZON_OUTER
 
             try:
-
-            except vol.MultipleInvalid as
-                raise TypeError(f"failed to set schedule: {
+                full_schedule = schedule_schema(full_schedule)
+            except vol.MultipleInvalid as err:
+                raise TypeError(f"failed to set schedule: {err}") from err
 
-            if self.idx == "HW":
-
+            if self.idx == "HW":  # HACK: to avoid confusing dhw with zone '00'
+                full_schedule[SZ_ZONE_IDX] = "00"
 
-            return
+            return full_schedule
 
-
-        self.
+        full_schedule: _OuterSchedule = normalise_validate(schedule)
+        self._fragments = full_sched_to_fragz(full_schedule)
 
         await self.tcs._obtain_lock(self.idx)  # maybe raise TimeOutError
 
         try:
-            for num, frag in enumerate(self.
-                await put_fragment(num, len(self.
-        except TimeoutError as
-            raise TimeoutError(f"failed to set schedule: {
+            for num, frag in enumerate(self._fragments, 1):
+                await put_fragment(num, len(self._fragments), frag)
+        except TimeoutError as err:
+            raise TimeoutError(f"failed to set schedule: {err}") from err
         else:
             if not force_refresh:
                 self._global_ver, _ = await self.tcs._schedule_version(force_io=True)
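Together, get_schedule and set_schedule are the public surface for 0404 schedules: reads are wrapped in an asyncio timeout, writes are validated, fragmented, and sent under the TCS zone lock. A hedged usage sketch, assuming the imports resolve as laid out in this diff and that the caller already holds a bound Schedule instance:

    from ramses_rf.system.schedule import InnerScheduleT, Schedule

    async def bump_morning_setpoint(sched: Schedule) -> InnerScheduleT | None:
        """Illustrative only: fetch a zone schedule, tweak it, write it back."""
        schedule = await sched.get_schedule(force_io=True, timeout=15)  # may raise TimeoutError
        if schedule is None:  # no schedule was retrieved (or the zone has none)
            return None
        for day in schedule:
            for sp in day["switchpoints"]:
                if sp.get("heat_setpoint") is not None and sp["time_of_day"] == "06:30":
                    sp["heat_setpoint"] = 21.0
        return await sched.set_schedule(schedule, force_refresh=True)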
@@ -346,89 +413,136 @@ class Schedule: # 0404
         self.tcs._release_lock()
 
         if force_refresh:
-
+            await self.get_schedule(force_io=True)  # sets self._full_schedule
         else:
-            self.
+            self._full_schedule = full_schedule
 
         return self.schedule
 
     @property
-    def schedule(self) ->
-        """Return the current schedule, if any."""
-
+    def schedule(self) -> InnerScheduleT | None:
+        """Return the current (not full) schedule, if any."""
+        if not self._full_schedule:  # can be {}
+            return None
+        result: InnerScheduleT = self._full_schedule.get(SZ_SCHEDULE)  # type: ignore[assignment]
+        return result
 
     @property
-    def version(self) ->
+    def version(self) -> int | None:
         """Return the version associated with the current schedule, if any."""
-        return self._sched_ver if self.
+        return self._sched_ver if self._full_schedule else None
+
+
+# TODO: deprecate in favour of len(payload_set)
+def _len(payload_set: _PayloadSetT) -> int:
+    """Return the total number of fragments in the complete frag set.
+
+    Return 0 if the expected set size is unknown (sentinel value as per RAMSES II).
+
+    Uses frag_set[i][SZ_TOTAL_FRAGS] instead of `len(frag_set)` (is necessary?).
+    """
+    # for frag in (f for f in payload_set if f is not None):  # they will all match
+    #     assert len(payload_set) == frag[SZ_TOTAL_FRAGS]  # TODO: remove
+    #     assert isinstance(frag[SZ_TOTAL_FRAGS], int)  # mypy check
+    #     result: int = frag[SZ_TOTAL_FRAGS]
+    #     return result
 
+    # assert payload_set == EMPTY_PAYLOAD_SET  # TODO: remove
+    # return 0  # sentinel value as per RAMSES protocol
+    return len(payload_set)
 
-
-
+
+def fragz_to_full_sched(fragments: Iterable[_FragmentT]) -> _OuterSchedule:
+    """Convert a tuple of fragments strs (a blob) into a schedule.
 
     May raise a `zlib.error` exception.
     """
 
+    def setpoint(value: int) -> dict[str, bool | float]:
+        if value in (0, 1):
+            return {SZ_ENABLED: bool(value)}
+        return {SZ_HEAT_SETPOINT: value / 100}
+
     raw_schedule = zlib.decompress(bytearray.fromhex("".join(fragments)))
 
     old_day = 0
-    schedule = []
-    switchpoints:
-
+    schedule: InnerScheduleT = []
+    switchpoints: SwitchPointsT = []  # type: ignore[assignment, unused-ignore]
+
+    idx: int
+    dow: int
+    tod: int
+    val: int
 
     for i in range(0, len(raw_schedule), 20):
-
-
-
-
-
-
-
-            {
-
-
-
-
-
-
-
-
-
-        schedule.append({DAY_OF_WEEK: old_day, SWITCHPOINTS: switchpoints})
-
-    return {SZ_ZONE_IDX: f"{zone_idx:02X}", SZ_SCHEDULE: schedule}
-
-
-def schedule_to_fragments(schedule: dict) -> list:
+        idx, dow, tod, val = _struct_unpack(raw_schedule[i : i + 20])
+
+        if dow > old_day:
+            schedule.append({SZ_DAY_OF_WEEK: old_day, SZ_SWITCHPOINTS: switchpoints})
+            old_day, switchpoints = dow, []  # type: ignore[assignment, unused-ignore]
+
+        switchpoint: SwitchPointDhw | SwitchPointZon = {
+            SZ_TIME_OF_DAY: "{:02d}:{:02d}".format(*divmod(tod, 60))
+        } | setpoint(val)  # type: ignore[assignment]
+        switchpoints.append(switchpoint)  # type: ignore[arg-type]
+
+    schedule.append({SZ_DAY_OF_WEEK: old_day, SZ_SWITCHPOINTS: switchpoints})
+
+    return {SZ_ZONE_IDX: f"{idx:02X}", SZ_SCHEDULE: schedule}
+
+
+def full_sched_to_fragz(full_schedule: _OuterSchedule) -> list[_FragmentT]:
     """Convert a schedule into a set of fragments (a blob).
 
     May raise `KeyError`, `zlib.error` exceptions.
     """
 
-    frags = [
-        (
-            int(schedule[SZ_ZONE_IDX], 16),
-            int(week_day[DAY_OF_WEEK]),
-            int(setpoint[TIME_OF_DAY][:2]) * 60 + int(setpoint[TIME_OF_DAY][3:]),
-            int(
-                (setpoint[HEAT_SETPOINT] * 100)
-                if setpoint.get(HEAT_SETPOINT)
-                else setpoint[ENABLED]
-            ),
-        )
-        for week_day in schedule[SZ_SCHEDULE]
-        for setpoint in week_day[SWITCHPOINTS]
-    ]
-    frags_: list[bytes] = [struct.pack("<xxxxBxxxBxxxHxxHxx", *s) for s in frags]
-
     cobj = zlib.compressobj(level=9, wbits=14)
-
+    frags: list[bytes] = []
+
+    days_of_week: InnerScheduleT = full_schedule[SZ_SCHEDULE]
+    for week_day in days_of_week:
+        switchpoints: SwitchPointsT = week_day[SZ_SWITCHPOINTS]
+        for switchpoint in switchpoints:
+            frags.append(_struct_pack(full_schedule, week_day, switchpoint))
+
+    blob = (b"".join(cobj.compress(f) for f in frags) + cobj.flush()).hex().upper()
 
     return [blob[i : i + 82] for i in range(0, len(blob), 82)]
 
-
-
-
-
-
-
+
+def _struct_pack(
+    full_schedule: OuterScheduleT,
+    week_day: DayOfWeekT,
+    switchpoint: SwitchPointDhw | SwitchPointZon,
+) -> bytes:
+    idx_: str = full_schedule[SZ_ZONE_IDX]
+    dow_: int = week_day[SZ_DAY_OF_WEEK]
+    tod_: str = switchpoint[SZ_TIME_OF_DAY]
+
+    idx = int(idx_, 16)
+    dow = int(dow_)
+    tod = int(tod_[:2]) * 60 + int(tod_[3:])
+
+    if SZ_HEAT_SETPOINT in switchpoint:
+        val = int(switchpoint[SZ_HEAT_SETPOINT] * 100)  # type: ignore[typeddict-item]
+    else:
+        val = int(bool(switchpoint[SZ_ENABLED]))
+
+    return struct.pack("<xxxxBxxxBxxxHxxHxx", idx, dow, tod, val)
+
+
+def _struct_unpack(raw_schedule: bytes) -> tuple[int, int, int, int]:
+    idx, dow, tod, val, _ = struct.unpack("<xxxxBxxxBxxxHxxHH", raw_schedule)
+    return idx, dow, tod, val
+
+
+# 16:27:56.942 000 RQ --- 18:006402 01:145038 --:------ 0006 001 00
+# 16:27:56.958 038 RP --- 01:145038 18:006402 --:------ 0006 004 00050009
+
+# 16:27:57.005 000 RQ --- 18:006402 01:145038 --:------ 0404 007 0120000800-0100
+# 16:27:57.068 037 RP --- 01:145038 18:006402 --:------ 0404 048 0120000829-0103-68816DCFCB0980301045D1994C3E624916660956604596600516E1D285094112F566F5B80C072222A2
+# 16:27:57.114 000 RQ --- 18:006402 01:145038 --:------ 0404 007 0120000800-0203
+# 16:27:57.161 038 RP --- 01:145038 18:006402 --:------ 0404 048 0120000829-0203-52DF92C79CEA7EDA91C7F06997FDEFC620B287D6143C054FC153F01C780E3C079E03CFC033F00C3C03
+# 16:27:57.202 000 RQ --- 18:006402 01:145038 --:------ 0404 007 0120000800-0303
+# 16:27:57.245 038 RP --- 01:145038 18:006402 --:------ 0404 045 0120000826-0303-CF83E7C1F3E079F0CADC3E5E696BFECC944EED5BF5DEAD7AAD45F0227811BCD87937936E24CF