ramses-rf 0.53.2__py3-none-any.whl → 0.53.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ramses_cli/client.py +15 -11
- ramses_rf/database.py +113 -61
- ramses_rf/gateway.py +14 -10
- ramses_rf/storage.py +168 -0
- ramses_rf/version.py +1 -1
- {ramses_rf-0.53.2.dist-info → ramses_rf-0.53.4.dist-info}/METADATA +1 -1
- {ramses_rf-0.53.2.dist-info → ramses_rf-0.53.4.dist-info}/RECORD +15 -14
- ramses_tx/gateway.py +20 -16
- ramses_tx/parsers.py +3 -1
- ramses_tx/ramses.py +1 -1
- ramses_tx/transport.py +41 -27
- ramses_tx/version.py +1 -1
- {ramses_rf-0.53.2.dist-info → ramses_rf-0.53.4.dist-info}/WHEEL +0 -0
- {ramses_rf-0.53.2.dist-info → ramses_rf-0.53.4.dist-info}/entry_points.txt +0 -0
- {ramses_rf-0.53.2.dist-info → ramses_rf-0.53.4.dist-info}/licenses/LICENSE +0 -0
ramses_cli/client.py
CHANGED
|
@@ -471,13 +471,8 @@ def print_results(gwy: Gateway, **kwargs: Any) -> None:
|
|
|
471
471
|
system_id, _ = kwargs[GET_SCHED]
|
|
472
472
|
|
|
473
473
|
|
|
474
|
-
def
|
|
475
|
-
"""
|
|
476
|
-
|
|
477
|
-
:param gwy: The gateway instance.
|
|
478
|
-
"""
|
|
479
|
-
schema, msgs = gwy.get_state()
|
|
480
|
-
|
|
474
|
+
def _write_state(schema: dict[str, Any], msgs: dict[str, str]) -> None:
|
|
475
|
+
"""Write the state to the file system (blocking)."""
|
|
481
476
|
with open("state_msgs.log", "w") as f:
|
|
482
477
|
[f.write(f"{dtm} {pkt}\r\n") for dtm, pkt in msgs.items()] # if not m._expired
|
|
483
478
|
|
|
@@ -485,13 +480,22 @@ def _save_state(gwy: Gateway) -> None:
|
|
|
485
480
|
f.write(json.dumps(schema, indent=4))
|
|
486
481
|
|
|
487
482
|
|
|
488
|
-
def
|
|
483
|
+
async def _save_state(gwy: Gateway) -> None:
|
|
484
|
+
"""Save the gateway state to files.
|
|
485
|
+
|
|
486
|
+
:param gwy: The gateway instance.
|
|
487
|
+
"""
|
|
488
|
+
schema, msgs = await gwy.get_state()
|
|
489
|
+
await asyncio.to_thread(_write_state, schema, msgs)
|
|
490
|
+
|
|
491
|
+
|
|
492
|
+
async def _print_engine_state(gwy: Gateway, **kwargs: Any) -> None:
|
|
489
493
|
"""Print the current engine state (schema and packets).
|
|
490
494
|
|
|
491
495
|
:param gwy: The gateway instance.
|
|
492
496
|
:param kwargs: Command arguments to determine verbosity.
|
|
493
497
|
"""
|
|
494
|
-
(schema, packets) = gwy.get_state(include_expired=True)
|
|
498
|
+
(schema, packets) = await gwy.get_state(include_expired=True)
|
|
495
499
|
|
|
496
500
|
if kwargs["print_state"] > 0:
|
|
497
501
|
print(f"schema: {json.dumps(schema, indent=4)}\r\n")
|
|
@@ -671,10 +675,10 @@ async def async_main(command: str, lib_kwargs: dict[str, Any], **kwargs: Any) ->
|
|
|
671
675
|
print(f"\r\nclient.py: Engine stopped: {msg}")
|
|
672
676
|
|
|
673
677
|
# if kwargs["save_state"]:
|
|
674
|
-
# _save_state(gwy)
|
|
678
|
+
# await _save_state(gwy)
|
|
675
679
|
|
|
676
680
|
if kwargs["print_state"]:
|
|
677
|
-
_print_engine_state(gwy, **kwargs)
|
|
681
|
+
await _print_engine_state(gwy, **kwargs)
|
|
678
682
|
|
|
679
683
|
elif command == EXECUTE:
|
|
680
684
|
print_results(gwy, **kwargs)
|
ramses_rf/database.py
CHANGED
|
@@ -24,14 +24,19 @@ RAMSES RF - Message database and index.
|
|
|
24
24
|
from __future__ import annotations
|
|
25
25
|
|
|
26
26
|
import asyncio
|
|
27
|
+
import contextlib
|
|
27
28
|
import logging
|
|
29
|
+
import os
|
|
28
30
|
import sqlite3
|
|
31
|
+
import uuid
|
|
29
32
|
from collections import OrderedDict
|
|
30
33
|
from datetime import datetime as dt, timedelta as td
|
|
31
34
|
from typing import TYPE_CHECKING, Any, NewType
|
|
32
35
|
|
|
33
36
|
from ramses_tx import CODES_SCHEMA, RQ, Code, Message, Packet
|
|
34
37
|
|
|
38
|
+
from .storage import StorageWorker
|
|
39
|
+
|
|
35
40
|
if TYPE_CHECKING:
|
|
36
41
|
DtmStrT = NewType("DtmStrT", str)
|
|
37
42
|
MsgDdT = OrderedDict[DtmStrT, Message]
|
|
@@ -89,22 +94,55 @@ class MessageIndex:
|
|
|
89
94
|
|
|
90
95
|
_housekeeping_task: asyncio.Task[None]
|
|
91
96
|
|
|
92
|
-
def __init__(self, maintain: bool = True) -> None:
|
|
97
|
+
def __init__(self, maintain: bool = True, db_path: str = ":memory:") -> None:
|
|
93
98
|
"""Instantiate a message database/index."""
|
|
94
99
|
|
|
95
100
|
self.maintain = maintain
|
|
96
101
|
self._msgs: MsgDdT = OrderedDict() # stores all messages for retrieval.
|
|
97
102
|
# Filled & cleaned up in housekeeping_loop.
|
|
98
103
|
|
|
99
|
-
#
|
|
104
|
+
# For :memory: databases with multiple connections (Reader vs Worker)
|
|
105
|
+
# We must use a Shared Cache URI so both threads see the same data.
|
|
106
|
+
if db_path == ":memory:":
|
|
107
|
+
# Unique ID ensures parallel tests don't share the same in-memory DB
|
|
108
|
+
db_path = f"file:ramses_rf_{uuid.uuid4()}?mode=memory&cache=shared"
|
|
109
|
+
|
|
110
|
+
# Start the Storage Worker (Write Connection)
|
|
111
|
+
# This thread handles all blocking INSERT/UPDATE operations
|
|
112
|
+
self._worker = StorageWorker(db_path)
|
|
113
|
+
|
|
114
|
+
# Wait for the worker to create the tables.
|
|
115
|
+
# This prevents "no such table" errors on immediate reads.
|
|
116
|
+
if not self._worker.wait_for_ready(timeout=10.0):
|
|
117
|
+
_LOGGER.error("MessageIndex: StorageWorker timed out initializing database")
|
|
118
|
+
|
|
119
|
+
# Connect to a SQLite DB (Read Connection)
|
|
100
120
|
self._cx = sqlite3.connect(
|
|
101
|
-
|
|
121
|
+
db_path,
|
|
122
|
+
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
|
|
123
|
+
check_same_thread=False,
|
|
124
|
+
uri=True, # Enable URI parsing for shared memory support
|
|
125
|
+
timeout=10.0, # Increased timeout to reduce 'database locked' errors
|
|
126
|
+
isolation_level=None, # Autocommit mode prevents stale snapshots
|
|
102
127
|
)
|
|
128
|
+
|
|
129
|
+
# Enable Write-Ahead Logging for Reader as well
|
|
130
|
+
if db_path != ":memory:" and "mode=memory" not in db_path:
|
|
131
|
+
with contextlib.suppress(sqlite3.Error):
|
|
132
|
+
self._cx.execute("PRAGMA journal_mode=WAL")
|
|
133
|
+
elif "cache=shared" in db_path:
|
|
134
|
+
# Shared cache (used in tests) requires read_uncommitted to prevent
|
|
135
|
+
# readers from blocking writers (Table Locking).
|
|
136
|
+
with contextlib.suppress(sqlite3.Error):
|
|
137
|
+
self._cx.execute("PRAGMA read_uncommitted = true")
|
|
138
|
+
|
|
103
139
|
# detect_types should retain dt type on store/retrieve
|
|
104
140
|
self._cu = self._cx.cursor() # Create a cursor
|
|
105
141
|
|
|
106
142
|
_setup_db_adapters() # DTM adapter/converter
|
|
107
|
-
|
|
143
|
+
|
|
144
|
+
# Schema creation is now handled safely by the StorageWorker to avoid races.
|
|
145
|
+
# self._setup_db_schema()
|
|
108
146
|
|
|
109
147
|
if self.maintain:
|
|
110
148
|
self._lock = asyncio.Lock()
|
|
@@ -137,6 +175,7 @@ class MessageIndex:
|
|
|
137
175
|
):
|
|
138
176
|
self._housekeeping_task.cancel() # stop the housekeeper
|
|
139
177
|
|
|
178
|
+
self._worker.stop() # Stop the background thread
|
|
140
179
|
self._cx.commit() # just in case
|
|
141
180
|
self._cx.close() # may still need to do queries after engine has stopped?
|
|
142
181
|
|
|
@@ -145,6 +184,13 @@ class MessageIndex:
|
|
|
145
184
|
"""Return the messages in the index in a threadsafe way."""
|
|
146
185
|
return self._msgs
|
|
147
186
|
|
|
187
|
+
def flush(self) -> None:
|
|
188
|
+
"""Flush the storage worker queue.
|
|
189
|
+
|
|
190
|
+
This is primarily for testing to ensure data persistence before querying.
|
|
191
|
+
"""
|
|
192
|
+
self._worker.flush()
|
|
193
|
+
|
|
148
194
|
def _setup_db_schema(self) -> None:
|
|
149
195
|
"""Set up the message database schema.
|
|
150
196
|
|
|
@@ -236,23 +282,30 @@ class MessageIndex:
|
|
|
236
282
|
dup: tuple[Message, ...] = tuple() # avoid UnboundLocalError
|
|
237
283
|
old: Message | None = None # avoid UnboundLocalError
|
|
238
284
|
|
|
285
|
+
# Check in-memory cache for collision instead of blocking SQL
|
|
286
|
+
dtm_str: DtmStrT = msg.dtm.isoformat(timespec="microseconds") # type: ignore[assignment]
|
|
287
|
+
if dtm_str in self._msgs:
|
|
288
|
+
dup = (self._msgs[dtm_str],)
|
|
289
|
+
|
|
239
290
|
try: # TODO: remove this, or apply only when source is a real packet log?
|
|
240
291
|
# await self._lock.acquire()
|
|
241
|
-
dup = self._delete_from( # HACK: because of contrived pkt logs
|
|
242
|
-
|
|
243
|
-
)
|
|
244
|
-
|
|
292
|
+
# dup = self._delete_from( # HACK: because of contrived pkt logs
|
|
293
|
+
# dtm=msg.dtm # stored as such with DTM formatter
|
|
294
|
+
# )
|
|
295
|
+
# We defer the write to the worker; return value (old) is not available synchronously
|
|
296
|
+
self._insert_into(msg) # will delete old msg by hdr (not dtm!)
|
|
245
297
|
|
|
246
298
|
except (
|
|
247
299
|
sqlite3.Error
|
|
248
300
|
): # UNIQUE constraint failed: ? messages.dtm or .hdr (so: HACK)
|
|
249
|
-
self._cx.rollback()
|
|
301
|
+
# self._cx.rollback()
|
|
302
|
+
pass
|
|
250
303
|
|
|
251
304
|
else:
|
|
252
305
|
# _msgs dict requires a timestamp reformat
|
|
253
|
-
dtm: DtmStrT = msg.dtm.isoformat(timespec="microseconds")
|
|
306
|
+
# dtm: DtmStrT = msg.dtm.isoformat(timespec="microseconds")
|
|
254
307
|
# add msg to self._msgs dict
|
|
255
|
-
self._msgs[
|
|
308
|
+
self._msgs[dtm_str] = msg
|
|
256
309
|
|
|
257
310
|
finally:
|
|
258
311
|
pass # self._lock.release()
|
|
@@ -288,39 +341,36 @@ class MessageIndex:
|
|
|
288
341
|
dtm: DtmStrT = _now.isoformat(timespec="microseconds") # type: ignore[assignment]
|
|
289
342
|
hdr = f"{code}|{verb}|{src}|{payload}"
|
|
290
343
|
|
|
291
|
-
dup = self._delete_from(hdr=hdr)
|
|
344
|
+
# dup = self._delete_from(hdr=hdr)
|
|
345
|
+
# Avoid blocking read; worker handles REPLACE on unique constraint collision
|
|
346
|
+
|
|
347
|
+
# Prepare data tuple for worker
|
|
348
|
+
data = (
|
|
349
|
+
_now,
|
|
350
|
+
verb,
|
|
351
|
+
src,
|
|
352
|
+
src,
|
|
353
|
+
code,
|
|
354
|
+
None,
|
|
355
|
+
hdr,
|
|
356
|
+
"|",
|
|
357
|
+
)
|
|
292
358
|
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
self.
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
code,
|
|
306
|
-
None,
|
|
307
|
-
hdr,
|
|
308
|
-
"|",
|
|
309
|
-
),
|
|
310
|
-
)
|
|
311
|
-
except sqlite3.Error:
|
|
312
|
-
self._cx.rollback()
|
|
313
|
-
else:
|
|
314
|
-
# also add dummy 3220 msg to self._msgs dict to allow maintenance loop
|
|
315
|
-
msg: Message = Message._from_pkt(
|
|
316
|
-
Packet(
|
|
317
|
-
_now, f"... {verb} --- {src} --:------ {src} {code} 005 0000000000"
|
|
318
|
-
)
|
|
319
|
-
)
|
|
320
|
-
self._msgs[dtm] = msg
|
|
359
|
+
self._worker.submit_packet(data)
|
|
360
|
+
|
|
361
|
+
# Backward compatibility for Tests:
|
|
362
|
+
# Check specific env var set by pytest, which is more reliable than sys.modules
|
|
363
|
+
if "PYTEST_CURRENT_TEST" in os.environ:
|
|
364
|
+
self.flush()
|
|
365
|
+
|
|
366
|
+
# also add dummy 3220 msg to self._msgs dict to allow maintenance loop
|
|
367
|
+
msg: Message = Message._from_pkt(
|
|
368
|
+
Packet(_now, f"... {verb} --- {src} --:------ {src} {code} 005 0000000000")
|
|
369
|
+
)
|
|
370
|
+
self._msgs[dtm] = msg
|
|
321
371
|
|
|
322
|
-
if dup: # expected when more than one heat system in schema
|
|
323
|
-
|
|
372
|
+
# if dup: # expected when more than one heat system in schema
|
|
373
|
+
# _LOGGER.debug("Replaced record with same hdr: %s", hdr)
|
|
324
374
|
|
|
325
375
|
def _insert_into(self, msg: Message) -> Message | None:
|
|
326
376
|
"""
|
|
@@ -337,29 +387,31 @@ class MessageIndex:
|
|
|
337
387
|
else:
|
|
338
388
|
msg_pkt_ctx = msg._pkt._ctx # can be None
|
|
339
389
|
|
|
340
|
-
_old_msgs = self._delete_from(hdr=msg._pkt._hdr)
|
|
390
|
+
# _old_msgs = self._delete_from(hdr=msg._pkt._hdr)
|
|
391
|
+
# Refactor: Worker uses INSERT OR REPLACE to handle collision
|
|
392
|
+
|
|
393
|
+
data = (
|
|
394
|
+
msg.dtm,
|
|
395
|
+
str(msg.verb),
|
|
396
|
+
msg.src.id,
|
|
397
|
+
msg.dst.id,
|
|
398
|
+
str(msg.code),
|
|
399
|
+
msg_pkt_ctx,
|
|
400
|
+
msg._pkt._hdr,
|
|
401
|
+
payload_keys(msg.payload),
|
|
402
|
+
)
|
|
341
403
|
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
404
|
+
self._worker.submit_packet(data)
|
|
405
|
+
|
|
406
|
+
# Backward compatibility for Tests:
|
|
407
|
+
# Tests assume the DB update is instant. If running in pytest, flush immediately.
|
|
408
|
+
# This effectively makes the operation synchronous during tests to avoid rewriting tests.
|
|
409
|
+
if "PYTEST_CURRENT_TEST" in os.environ:
|
|
410
|
+
self.flush()
|
|
346
411
|
|
|
347
|
-
self._cu.execute(
|
|
348
|
-
sql,
|
|
349
|
-
(
|
|
350
|
-
msg.dtm,
|
|
351
|
-
str(msg.verb),
|
|
352
|
-
msg.src.id,
|
|
353
|
-
msg.dst.id,
|
|
354
|
-
str(msg.code),
|
|
355
|
-
msg_pkt_ctx,
|
|
356
|
-
msg._pkt._hdr,
|
|
357
|
-
payload_keys(msg.payload),
|
|
358
|
-
),
|
|
359
|
-
)
|
|
360
412
|
# _LOGGER.debug(f"Added {msg} to gwy.msg_db")
|
|
361
413
|
|
|
362
|
-
return
|
|
414
|
+
return None
|
|
363
415
|
|
|
364
416
|
def rem(
|
|
365
417
|
self, msg: Message | None = None, **kwargs: str | dt
|
ramses_rf/gateway.py
CHANGED
|
@@ -267,12 +267,14 @@ class Gateway(Engine):
|
|
|
267
267
|
:returns: None
|
|
268
268
|
:rtype: None
|
|
269
269
|
"""
|
|
270
|
+
# Stop the Engine first to ensure no tasks/callbacks try to write
|
|
271
|
+
# to the DB while we are closing it.
|
|
272
|
+
await super().stop()
|
|
270
273
|
|
|
271
274
|
if self.msg_db:
|
|
272
275
|
self.msg_db.stop()
|
|
273
|
-
await super().stop()
|
|
274
276
|
|
|
275
|
-
def _pause(self, *args: Any) -> None:
|
|
277
|
+
async def _pause(self, *args: Any) -> None:
|
|
276
278
|
"""Pause the (unpaused) gateway (disables sending/discovery).
|
|
277
279
|
|
|
278
280
|
There is the option to save other objects, as `args`.
|
|
@@ -288,12 +290,12 @@ class Gateway(Engine):
|
|
|
288
290
|
self.config.disable_discovery, disc_flag = True, self.config.disable_discovery
|
|
289
291
|
|
|
290
292
|
try:
|
|
291
|
-
super()._pause(disc_flag, *args)
|
|
293
|
+
await super()._pause(disc_flag, *args)
|
|
292
294
|
except RuntimeError:
|
|
293
295
|
self.config.disable_discovery = disc_flag
|
|
294
296
|
raise
|
|
295
297
|
|
|
296
|
-
def _resume(self) -> tuple[Any]:
|
|
298
|
+
async def _resume(self) -> tuple[Any]:
|
|
297
299
|
"""Resume the (paused) gateway (enables sending/discovery, if applicable).
|
|
298
300
|
|
|
299
301
|
Will restore other objects, as `args`.
|
|
@@ -305,11 +307,13 @@ class Gateway(Engine):
|
|
|
305
307
|
|
|
306
308
|
_LOGGER.debug("Gateway: Resuming engine...")
|
|
307
309
|
|
|
308
|
-
|
|
310
|
+
# args_tuple = await super()._resume()
|
|
311
|
+
# self.config.disable_discovery, *args = args_tuple # type: ignore[assignment]
|
|
312
|
+
self.config.disable_discovery, *args = await super()._resume() # type: ignore[assignment]
|
|
309
313
|
|
|
310
314
|
return args
|
|
311
315
|
|
|
312
|
-
def get_state(
|
|
316
|
+
async def get_state(
|
|
313
317
|
self, include_expired: bool = False
|
|
314
318
|
) -> tuple[dict[str, Any], dict[str, str]]:
|
|
315
319
|
"""Return the current schema & state (may include expired packets).
|
|
@@ -320,7 +324,7 @@ class Gateway(Engine):
|
|
|
320
324
|
:rtype: tuple[dict[str, Any], dict[str, str]]
|
|
321
325
|
"""
|
|
322
326
|
|
|
323
|
-
self._pause()
|
|
327
|
+
await self._pause()
|
|
324
328
|
|
|
325
329
|
def wanted_msg(msg: Message, include_expired: bool = False) -> bool:
|
|
326
330
|
if msg.code == Code._313F:
|
|
@@ -357,7 +361,7 @@ class Gateway(Engine):
|
|
|
357
361
|
}
|
|
358
362
|
# _LOGGER.warning("Missing MessageIndex")
|
|
359
363
|
|
|
360
|
-
self._resume()
|
|
364
|
+
await self._resume()
|
|
361
365
|
|
|
362
366
|
return self.schema, dict(sorted(pkts.items()))
|
|
363
367
|
|
|
@@ -392,7 +396,7 @@ class Gateway(Engine):
|
|
|
392
396
|
tmp_transport: RamsesTransportT # mypy hint
|
|
393
397
|
|
|
394
398
|
_LOGGER.debug("Gateway: Restoring a cached packet log...")
|
|
395
|
-
self._pause()
|
|
399
|
+
await self._pause()
|
|
396
400
|
|
|
397
401
|
if _clear_state: # only intended for test suite use
|
|
398
402
|
clear_state()
|
|
@@ -428,7 +432,7 @@ class Gateway(Engine):
|
|
|
428
432
|
await tmp_transport.get_extra_info(SZ_READER_TASK)
|
|
429
433
|
|
|
430
434
|
_LOGGER.debug("Gateway: Restored, resuming")
|
|
431
|
-
self._resume()
|
|
435
|
+
await self._resume()
|
|
432
436
|
|
|
433
437
|
def _add_device(self, dev: Device) -> None: # TODO: also: _add_system()
|
|
434
438
|
"""Add a device to the gateway (called by devices during instantiation).
|
ramses_rf/storage.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"""RAMSES RF - Background storage worker for async I/O."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import contextlib
|
|
6
|
+
import logging
|
|
7
|
+
import queue
|
|
8
|
+
import sqlite3
|
|
9
|
+
import threading
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
_LOGGER = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class StorageWorker:
|
|
16
|
+
"""A background worker thread to handle blocking storage I/O asynchronously."""
|
|
17
|
+
|
|
18
|
+
def __init__(self, db_path: str = ":memory:"):
|
|
19
|
+
"""Initialize the storage worker thread."""
|
|
20
|
+
self._db_path = db_path
|
|
21
|
+
self._queue: queue.SimpleQueue[tuple[str, Any] | None] = queue.SimpleQueue()
|
|
22
|
+
self._ready_event = threading.Event()
|
|
23
|
+
|
|
24
|
+
self._thread = threading.Thread(
|
|
25
|
+
target=self._run,
|
|
26
|
+
name="RamsesStorage",
|
|
27
|
+
daemon=True, # FIX: Set to True so the process can exit even if stop() is missed
|
|
28
|
+
)
|
|
29
|
+
self._thread.start()
|
|
30
|
+
|
|
31
|
+
def wait_for_ready(self, timeout: float | None = None) -> bool:
|
|
32
|
+
"""Wait until the database is initialized and ready."""
|
|
33
|
+
return self._ready_event.wait(timeout)
|
|
34
|
+
|
|
35
|
+
def submit_packet(self, packet_data: tuple[Any, ...]) -> None:
|
|
36
|
+
"""Submit a packet tuple for SQL insertion (Non-blocking)."""
|
|
37
|
+
self._queue.put(("SQL", packet_data))
|
|
38
|
+
|
|
39
|
+
def flush(self, timeout: float = 10.0) -> None:
|
|
40
|
+
"""Block until all currently pending tasks are processed."""
|
|
41
|
+
# REMOVED: if self._queue.empty(): return
|
|
42
|
+
# This check caused a race condition where flush() returned before
|
|
43
|
+
# the worker finished committing the last item it just popped.
|
|
44
|
+
|
|
45
|
+
# We inject a special marker into the queue
|
|
46
|
+
sentinel = threading.Event()
|
|
47
|
+
self._queue.put(("MARKER", sentinel))
|
|
48
|
+
|
|
49
|
+
# Wait for the worker to set the sentinel
|
|
50
|
+
if not sentinel.wait(timeout):
|
|
51
|
+
_LOGGER.warning("StorageWorker flush timed out")
|
|
52
|
+
|
|
53
|
+
def stop(self) -> None:
|
|
54
|
+
"""Signal the worker to stop processing and close resources."""
|
|
55
|
+
self._queue.put(None) # Poison pill
|
|
56
|
+
self._thread.join()
|
|
57
|
+
|
|
58
|
+
def _init_db(self, conn: sqlite3.Connection) -> None:
|
|
59
|
+
"""Initialize the database schema."""
|
|
60
|
+
cursor = conn.cursor()
|
|
61
|
+
cursor.execute(
|
|
62
|
+
"""
|
|
63
|
+
CREATE TABLE IF NOT EXISTS messages (
|
|
64
|
+
dtm DTM NOT NULL PRIMARY KEY,
|
|
65
|
+
verb TEXT(2) NOT NULL,
|
|
66
|
+
src TEXT(12) NOT NULL,
|
|
67
|
+
dst TEXT(12) NOT NULL,
|
|
68
|
+
code TEXT(4) NOT NULL,
|
|
69
|
+
ctx TEXT,
|
|
70
|
+
hdr TEXT NOT NULL UNIQUE,
|
|
71
|
+
plk TEXT NOT NULL
|
|
72
|
+
)
|
|
73
|
+
"""
|
|
74
|
+
)
|
|
75
|
+
# Create indexes to speed up future reads
|
|
76
|
+
for col in ("verb", "src", "dst", "code", "ctx", "hdr"):
|
|
77
|
+
cursor.execute(f"CREATE INDEX IF NOT EXISTS idx_{col} ON messages ({col})")
|
|
78
|
+
conn.commit()
|
|
79
|
+
|
|
80
|
+
def _run(self) -> None:
|
|
81
|
+
"""The main loop running in the background thread."""
|
|
82
|
+
_LOGGER.debug("StorageWorker thread started.")
|
|
83
|
+
|
|
84
|
+
# Setup SQLite connection in this thread
|
|
85
|
+
try:
|
|
86
|
+
# uri=True allows opening "file::memory:?cache=shared"
|
|
87
|
+
conn = sqlite3.connect(
|
|
88
|
+
self._db_path,
|
|
89
|
+
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
|
|
90
|
+
check_same_thread=False,
|
|
91
|
+
uri=True,
|
|
92
|
+
timeout=10.0, # Increased timeout for locking
|
|
93
|
+
)
|
|
94
|
+
|
|
95
|
+
# Enable Write-Ahead Logging for concurrency
|
|
96
|
+
if self._db_path != ":memory:" and "mode=memory" not in self._db_path:
|
|
97
|
+
with contextlib.suppress(sqlite3.Error):
|
|
98
|
+
conn.execute("PRAGMA journal_mode=WAL")
|
|
99
|
+
conn.execute("PRAGMA synchronous=NORMAL")
|
|
100
|
+
elif "cache=shared" in self._db_path:
|
|
101
|
+
with contextlib.suppress(sqlite3.Error):
|
|
102
|
+
conn.execute("PRAGMA read_uncommitted = true")
|
|
103
|
+
|
|
104
|
+
self._init_db(conn)
|
|
105
|
+
self._ready_event.set() # Signal that tables exist
|
|
106
|
+
except sqlite3.Error as exc:
|
|
107
|
+
_LOGGER.error("Failed to initialize storage database: %s", exc)
|
|
108
|
+
self._ready_event.set() # Avoid blocking waiters forever
|
|
109
|
+
return
|
|
110
|
+
|
|
111
|
+
while True:
|
|
112
|
+
try:
|
|
113
|
+
# Block here waiting for work
|
|
114
|
+
item = self._queue.get()
|
|
115
|
+
|
|
116
|
+
if item is None: # Shutdown signal
|
|
117
|
+
break
|
|
118
|
+
|
|
119
|
+
task_type, data = item
|
|
120
|
+
|
|
121
|
+
if task_type == "MARKER":
|
|
122
|
+
# Flush requested
|
|
123
|
+
data.set()
|
|
124
|
+
continue
|
|
125
|
+
|
|
126
|
+
if task_type == "SQL":
|
|
127
|
+
# Optimization: Batch processing
|
|
128
|
+
batch = [data]
|
|
129
|
+
# Drain queue of pending SQL tasks to bulk insert
|
|
130
|
+
while not self._queue.empty():
|
|
131
|
+
try:
|
|
132
|
+
# Peek/get next item without blocking
|
|
133
|
+
next_item = self._queue.get_nowait()
|
|
134
|
+
if next_item is None:
|
|
135
|
+
self._queue.put(None) # Re-queue poison pill
|
|
136
|
+
break
|
|
137
|
+
|
|
138
|
+
next_type, next_data = next_item
|
|
139
|
+
if next_type == "SQL":
|
|
140
|
+
batch.append(next_data)
|
|
141
|
+
elif next_type == "MARKER":
|
|
142
|
+
# Handle marker after this batch
|
|
143
|
+
self._queue.put(next_item) # Re-queue marker
|
|
144
|
+
break
|
|
145
|
+
else:
|
|
146
|
+
pass
|
|
147
|
+
except queue.Empty:
|
|
148
|
+
break
|
|
149
|
+
|
|
150
|
+
try:
|
|
151
|
+
conn.executemany(
|
|
152
|
+
"""
|
|
153
|
+
INSERT OR REPLACE INTO messages
|
|
154
|
+
(dtm, verb, src, dst, code, ctx, hdr, plk)
|
|
155
|
+
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
|
156
|
+
""",
|
|
157
|
+
batch,
|
|
158
|
+
)
|
|
159
|
+
conn.commit()
|
|
160
|
+
except sqlite3.Error as err:
|
|
161
|
+
_LOGGER.error("SQL Write Failed: %s", err)
|
|
162
|
+
|
|
163
|
+
except Exception as err:
|
|
164
|
+
_LOGGER.exception("StorageWorker encountered an error: %s", err)
|
|
165
|
+
|
|
166
|
+
# Cleanup
|
|
167
|
+
conn.close()
|
|
168
|
+
_LOGGER.debug("StorageWorker thread stopped.")
|
ramses_rf/version.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: ramses_rf
|
|
3
|
-
Version: 0.53.
|
|
3
|
+
Version: 0.53.4
|
|
4
4
|
Summary: A stateful RAMSES-II protocol decoder & analyser.
|
|
5
5
|
Project-URL: Homepage, https://github.com/ramses-rf/ramses_rf
|
|
6
6
|
Project-URL: Bug Tracker, https://github.com/ramses-rf/ramses_rf/issues
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
ramses_cli/__init__.py,sha256=d_3uIFkK8JnWOxknrBloKCe6-vI9Ouo_KGqR4kfBQW8,417
|
|
2
|
-
ramses_cli/client.py,sha256=
|
|
2
|
+
ramses_cli/client.py,sha256=w95Xv2_kVlYelI5XnGt6D2QVLG3guiSMqo_MO1Ni-dc,25277
|
|
3
3
|
ramses_cli/debug.py,sha256=PLcz-3PjUiMVqtD_p6VqTA92eHUM58lOBFXh_qgQ_wA,576
|
|
4
4
|
ramses_cli/discovery.py,sha256=WTcoFH5hNhQ1AeOZtpdZIVYwdUfmUKlq2iBpa-KcgoI,12512
|
|
5
5
|
ramses_cli/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
@@ -8,15 +8,16 @@ ramses_cli/utils/convert.py,sha256=N3LxGe3_0pclijtmYW-ChqCuPTzbkoJA4XNAnoSnBk0,1
|
|
|
8
8
|
ramses_rf/__init__.py,sha256=AXsCK1Eh9FWeAI9D_zY_2KB0dqrTb9a5TNY1NvyQaDM,1271
|
|
9
9
|
ramses_rf/binding_fsm.py,sha256=fuqvcc9YW-wr8SPH8zadpPqrHAvzl_eeWF-IBtlLppY,26632
|
|
10
10
|
ramses_rf/const.py,sha256=L3z31CZ-xqno6oZp_h-67CB_5tDDqTwSWXsqRtsjMcs,5460
|
|
11
|
-
ramses_rf/database.py,sha256=
|
|
11
|
+
ramses_rf/database.py,sha256=Fv3Xv6S_7qOf6-biHHKZvntkB8ps_SJvjPlKX0pzGfg,23919
|
|
12
12
|
ramses_rf/dispatcher.py,sha256=YjEU-QrBLo9IfoEhJo2ikg_FxOaMYoWvzelr9Vi-JZ8,11398
|
|
13
13
|
ramses_rf/entity_base.py,sha256=L47P_6CRz3tLDzOzII9AgmueKDb-Bp7Ot3vVsr8jo10,59121
|
|
14
14
|
ramses_rf/exceptions.py,sha256=mt_T7irqHSDKir6KLaf6oDglUIdrw0S40JbOrWJk5jc,3657
|
|
15
|
-
ramses_rf/gateway.py,sha256=
|
|
15
|
+
ramses_rf/gateway.py,sha256=3rnKm-OunN-O_T0eS9ZayEvg49WMVe7GF4pJhyGx3Io,30409
|
|
16
16
|
ramses_rf/helpers.py,sha256=TNk_QkpIOB3alOp1sqnA9LOzi4fuDCeapNlW3zTzNas,4250
|
|
17
17
|
ramses_rf/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
18
18
|
ramses_rf/schemas.py,sha256=X1GAK3kttuLMiSCUDY2s-85fgBxPeU8xiDa6gJ1I5mY,13543
|
|
19
|
-
ramses_rf/
|
|
19
|
+
ramses_rf/storage.py,sha256=lGKUgQzXBUwksmEeHMLoVoKPCMPAWWuzwCXM8G2CKmg,6452
|
|
20
|
+
ramses_rf/version.py,sha256=4QvpG_-UTxyow-YNsbbOguS-J5cqoyPKHelB0WfKMkA,125
|
|
20
21
|
ramses_rf/device/__init__.py,sha256=sUbH5dhbYFXSoM_TPFRutpRutBRpup7_cQ9smPtDTy8,4858
|
|
21
22
|
ramses_rf/device/base.py,sha256=Tu5I8Lj7KplfRsIBQAYjilS6YPgTyjpU8qgKugMR2Jk,18281
|
|
22
23
|
ramses_rf/device/heat.py,sha256=CU6GlIgjuYD21braJ_RJlS56zP47TGXNxXnZeavfEMY,54654
|
|
@@ -33,24 +34,24 @@ ramses_tx/const.py,sha256=jiE2UaGBJ5agr68EMrcEHWtVz2KMidU7c7rRYCIiaoM,33010
|
|
|
33
34
|
ramses_tx/exceptions.py,sha256=FJSU9YkvpKjs3yeTqUJX1o3TPFSe_B01gRGIh9b3PNc,2632
|
|
34
35
|
ramses_tx/fingerprints.py,sha256=nfftA1E62HQnb-eLt2EqjEi_la0DAoT0wt-PtTMie0s,11974
|
|
35
36
|
ramses_tx/frame.py,sha256=GzNsXr15YLeidJYGtk_xPqsZQh4ehDDlUCtT6rTDhT8,22046
|
|
36
|
-
ramses_tx/gateway.py,sha256=
|
|
37
|
+
ramses_tx/gateway.py,sha256=kJv3jRI66Ii-kxZO6ItbPODW2oJf-e46ou2JdQt0yTY,11498
|
|
37
38
|
ramses_tx/helpers.py,sha256=96OvSOWYuMcr89_c-3dRnqHZaMOctCO94uo1hETh3bc,33613
|
|
38
39
|
ramses_tx/logger.py,sha256=1iKRHKUaqHqGd76CkE_6mCVR0sYODtxshRRwfY61fTk,10426
|
|
39
40
|
ramses_tx/message.py,sha256=zsyDQztSUYeqj3-P598LSmy9ODQY2BUCzWxSoZds6bM,13953
|
|
40
41
|
ramses_tx/opentherm.py,sha256=58PXz9l5x8Ou6Fm3y-R_UnGHCYahoi2RKIDdYStUMzk,42378
|
|
41
42
|
ramses_tx/packet.py,sha256=_nzuInS_WhdOI26SYvgsdDqIaDvVNguc2YDwdPOVCbU,7661
|
|
42
|
-
ramses_tx/parsers.py,sha256=
|
|
43
|
+
ramses_tx/parsers.py,sha256=CJKdLF1F5KR7MwYxwkwSvrusTONh5FGecj7_eeBWu7A,148609
|
|
43
44
|
ramses_tx/protocol.py,sha256=nBPKCD1tcGp_FiX0qhsY0XoGO_h87w5cYywBjSpum4w,33048
|
|
44
45
|
ramses_tx/protocol_fsm.py,sha256=o9vLvlXor3LkPgsY1zii5P1R01GzYLf_PECDdoxtC24,27520
|
|
45
46
|
ramses_tx/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
46
|
-
ramses_tx/ramses.py,sha256=
|
|
47
|
+
ramses_tx/ramses.py,sha256=V4LqD6IaohU7TTZp-_f1K2SOCJwzRY0v8_-INESh2cU,53986
|
|
47
48
|
ramses_tx/schemas.py,sha256=Hrmf_q9bAZtkKJzGu6GtUO0QV_-K9i4L99EzGWR13eE,13408
|
|
48
|
-
ramses_tx/transport.py,sha256=
|
|
49
|
+
ramses_tx/transport.py,sha256=RIrcNrJwiKB_xmJLgG4Z--V2d83PLsJnLXZK-WFFgsA,76568
|
|
49
50
|
ramses_tx/typed_dicts.py,sha256=w-0V5t2Q3GiNUOrRAWiW9GtSwbta_7luME6GfIb1zhI,10869
|
|
50
51
|
ramses_tx/typing.py,sha256=eF2SlPWhNhEFQj6WX2AhTXiyRQVXYnFutiepllYl2rI,5042
|
|
51
|
-
ramses_tx/version.py,sha256=
|
|
52
|
-
ramses_rf-0.53.
|
|
53
|
-
ramses_rf-0.53.
|
|
54
|
-
ramses_rf-0.53.
|
|
55
|
-
ramses_rf-0.53.
|
|
56
|
-
ramses_rf-0.53.
|
|
52
|
+
ramses_tx/version.py,sha256=hutrhdcMJOwR4LO2siGwJHfwUmCNdXAWpBV8-SaqUVo,123
|
|
53
|
+
ramses_rf-0.53.4.dist-info/METADATA,sha256=GNBabuxJgwv1H80fzFeCO31BSn0yI76V0cTeMKp2HjE,4179
|
|
54
|
+
ramses_rf-0.53.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
55
|
+
ramses_rf-0.53.4.dist-info/entry_points.txt,sha256=NnyK29baOCNg8DinPYiZ368h7MTH7bgTW26z2A1NeIE,50
|
|
56
|
+
ramses_rf-0.53.4.dist-info/licenses/LICENSE,sha256=ptVutrtSMr7X-ek6LduiD8Cce4JsNn_8sR8MYlm-fvo,1086
|
|
57
|
+
ramses_rf-0.53.4.dist-info/RECORD,,
|
ramses_tx/gateway.py
CHANGED
|
@@ -12,7 +12,6 @@ import asyncio
|
|
|
12
12
|
import logging
|
|
13
13
|
from collections.abc import Callable
|
|
14
14
|
from datetime import datetime as dt
|
|
15
|
-
from threading import Lock
|
|
16
15
|
from typing import TYPE_CHECKING, Any, Never
|
|
17
16
|
|
|
18
17
|
from .address import ALL_DEV_ADDR, HGI_DEV_ADDR, NON_DEV_ADDR
|
|
@@ -120,7 +119,7 @@ class Engine:
|
|
|
120
119
|
self._log_all_mqtt = kwargs.pop(SZ_LOG_ALL_MQTT, False)
|
|
121
120
|
self._kwargs: dict[str, Any] = kwargs # HACK
|
|
122
121
|
|
|
123
|
-
self._engine_lock = Lock()
|
|
122
|
+
self._engine_lock = asyncio.Lock()
|
|
124
123
|
self._engine_state: (
|
|
125
124
|
tuple[_MsgHandlerT | None, bool | None, *tuple[Any, ...]] | None
|
|
126
125
|
) = None
|
|
@@ -217,15 +216,13 @@ class Engine:
|
|
|
217
216
|
async def stop(self) -> None:
|
|
218
217
|
"""Close the transport (will stop the protocol)."""
|
|
219
218
|
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
await asyncio.gather(*tasks)
|
|
225
|
-
except asyncio.CancelledError:
|
|
226
|
-
pass
|
|
219
|
+
# Shutdown Safety - wait for tasks to clean up
|
|
220
|
+
tasks = [t for t in self._tasks if not t.done()]
|
|
221
|
+
for t in tasks:
|
|
222
|
+
t.cancel()
|
|
227
223
|
|
|
228
|
-
|
|
224
|
+
if tasks:
|
|
225
|
+
await asyncio.wait(tasks)
|
|
229
226
|
|
|
230
227
|
if self._transport:
|
|
231
228
|
self._transport.close()
|
|
@@ -233,12 +230,14 @@ class Engine:
|
|
|
233
230
|
|
|
234
231
|
return None
|
|
235
232
|
|
|
236
|
-
def _pause(self, *args: Any) -> None:
|
|
233
|
+
async def _pause(self, *args: Any) -> None:
|
|
237
234
|
"""Pause the (active) engine or raise a RuntimeError."""
|
|
238
|
-
|
|
239
|
-
if
|
|
235
|
+
# Async lock handling
|
|
236
|
+
if self._engine_lock.locked():
|
|
240
237
|
raise RuntimeError("Unable to pause engine, failed to acquire lock")
|
|
241
238
|
|
|
239
|
+
await self._engine_lock.acquire()
|
|
240
|
+
|
|
242
241
|
if self._engine_state is not None:
|
|
243
242
|
self._engine_lock.release()
|
|
244
243
|
raise RuntimeError("Unable to pause engine, it is already paused")
|
|
@@ -255,13 +254,18 @@ class Engine:
|
|
|
255
254
|
|
|
256
255
|
self._engine_state = (handler, read_only, *args)
|
|
257
256
|
|
|
258
|
-
def _resume(self) -> tuple[Any]: # FIXME: not atomic
|
|
257
|
+
async def _resume(self) -> tuple[Any]: # FIXME: not atomic
|
|
259
258
|
"""Resume the (paused) engine or raise a RuntimeError."""
|
|
260
259
|
|
|
261
260
|
args: tuple[Any] # mypy
|
|
262
261
|
|
|
263
|
-
|
|
264
|
-
|
|
262
|
+
# Async lock with timeout
|
|
263
|
+
try:
|
|
264
|
+
await asyncio.wait_for(self._engine_lock.acquire(), timeout=0.1)
|
|
265
|
+
except TimeoutError as err:
|
|
266
|
+
raise RuntimeError(
|
|
267
|
+
"Unable to resume engine, failed to acquire lock"
|
|
268
|
+
) from err
|
|
265
269
|
|
|
266
270
|
if self._engine_state is None:
|
|
267
271
|
self._engine_lock.release()
|
ramses_tx/parsers.py
CHANGED
|
@@ -3109,7 +3109,9 @@ def parser_31da(payload: str, msg: Message) -> PayDictT._31DA:
|
|
|
3109
3109
|
if len(payload) == 58:
|
|
3110
3110
|
return result # type: ignore[return-value]
|
|
3111
3111
|
|
|
3112
|
-
result.update(
|
|
3112
|
+
result.update(
|
|
3113
|
+
{"_extra": payload[58:]}
|
|
3114
|
+
) # sporadic [58:60] one of {00, 20, 40} version?
|
|
3113
3115
|
return result # type: ignore[return-value]
|
|
3114
3116
|
|
|
3115
3117
|
# From an Orcon 15RF Display
|
ramses_tx/ramses.py
CHANGED
|
@@ -551,7 +551,7 @@ CODES_SCHEMA: dict[Code, dict[str, Any]] = { # rf_unknown
|
|
|
551
551
|
},
|
|
552
552
|
Code._31DA: { # hvac_state (fan_state_extended)
|
|
553
553
|
SZ_NAME: "hvac_state",
|
|
554
|
-
I_: r"^(00|01|15|16|17|21)[0-9A-F]{56}(00|20)?$",
|
|
554
|
+
I_: r"^(00|01|15|16|17|21)[0-9A-F]{56}(00|20|40)?$",
|
|
555
555
|
RQ: r"^(00|01|15|16|17|21)$",
|
|
556
556
|
# RQ --- 32:168090 30:082155 --:------ 31DA 001 21
|
|
557
557
|
},
|
ramses_tx/transport.py
CHANGED
|
@@ -978,17 +978,21 @@ class FileTransport(_ReadTransport, _FileTransportAbstractor):
|
|
|
978
978
|
if bool(disable_sending) is False:
|
|
979
979
|
raise exc.TransportSourceInvalid("This Transport cannot send packets")
|
|
980
980
|
|
|
981
|
+
self._evt_reading = asyncio.Event()
|
|
982
|
+
|
|
981
983
|
self._extra[SZ_READER_TASK] = self._reader_task = self._loop.create_task(
|
|
982
984
|
self._start_reader(), name="FileTransport._start_reader()"
|
|
983
985
|
)
|
|
984
986
|
|
|
985
987
|
self._make_connection(None)
|
|
986
988
|
|
|
987
|
-
async def _start_reader(self) -> None:
|
|
989
|
+
async def _start_reader(self) -> None:
|
|
988
990
|
"""Start the reader task."""
|
|
989
991
|
self._reading = True
|
|
992
|
+
self._evt_reading.set() # Start in reading state
|
|
993
|
+
|
|
990
994
|
try:
|
|
991
|
-
await self.
|
|
995
|
+
await self._producer_loop()
|
|
992
996
|
except Exception as err:
|
|
993
997
|
self.loop.call_soon_threadsafe(
|
|
994
998
|
functools.partial(self._protocol.connection_lost, err) # type: ignore[arg-type]
|
|
@@ -998,50 +1002,60 @@ class FileTransport(_ReadTransport, _FileTransportAbstractor):
|
|
|
998
1002
|
functools.partial(self._protocol.connection_lost, None)
|
|
999
1003
|
)
|
|
1000
1004
|
|
|
1001
|
-
|
|
1002
|
-
|
|
1005
|
+
def pause_reading(self) -> None:
|
|
1006
|
+
"""Pause the receiving end (no data to protocol.pkt_received())."""
|
|
1007
|
+
self._reading = False
|
|
1008
|
+
self._evt_reading.clear() # Puts the loop to sleep efficiently
|
|
1009
|
+
|
|
1010
|
+
def resume_reading(self) -> None:
|
|
1011
|
+
"""Resume the receiving end."""
|
|
1012
|
+
self._reading = True
|
|
1013
|
+
self._evt_reading.set() # Wakes the loop immediately
|
|
1014
|
+
|
|
1015
|
+
async def _producer_loop(self) -> None:
|
|
1003
1016
|
"""Loop through the packet source for Frames and process them."""
|
|
1017
|
+
# NOTE: fileinput interaction remains synchronous-blocking for simplicity,
|
|
1018
|
+
# but the PAUSE mechanism is now async-non-blocking.
|
|
1004
1019
|
|
|
1005
1020
|
if isinstance(self._pkt_source, dict):
|
|
1006
1021
|
for dtm_str, pkt_line in self._pkt_source.items(): # assume dtm_str is OK
|
|
1007
|
-
|
|
1008
|
-
await asyncio.sleep(0.001)
|
|
1009
|
-
self._frame_read(dtm_str, pkt_line)
|
|
1010
|
-
await asyncio.sleep(0)
|
|
1011
|
-
# NOTE: instable without, big performance penalty if delay >0
|
|
1022
|
+
await self._process_line(dtm_str, pkt_line)
|
|
1012
1023
|
|
|
1013
1024
|
elif isinstance(self._pkt_source, str): # file_name, used in client parse
|
|
1014
1025
|
# open file file_name before reading
|
|
1015
1026
|
try:
|
|
1016
1027
|
with fileinput.input(files=self._pkt_source, encoding="utf-8") as file:
|
|
1017
1028
|
for dtm_pkt_line in file: # self._pkt_source:
|
|
1018
|
-
|
|
1019
|
-
while not self._reading:
|
|
1020
|
-
await asyncio.sleep(0.001)
|
|
1021
|
-
# there may be blank lines in annotated log files
|
|
1022
|
-
if (dtm_pkt_line := dtm_pkt_line.strip()) and dtm_pkt_line[
|
|
1023
|
-
:1
|
|
1024
|
-
] != "#":
|
|
1025
|
-
self._frame_read(dtm_pkt_line[:26], dtm_pkt_line[27:])
|
|
1026
|
-
# this is where the parsing magic happens!
|
|
1027
|
-
await asyncio.sleep(0)
|
|
1028
|
-
# NOTE: instable without, big performance penalty if delay >0
|
|
1029
|
+
await self._process_line_from_raw(dtm_pkt_line)
|
|
1029
1030
|
except FileNotFoundError as err:
|
|
1030
1031
|
_LOGGER.warning(f"Correct the packet file name; {err}")
|
|
1032
|
+
|
|
1031
1033
|
elif isinstance(self._pkt_source, TextIOWrapper): # used by client monitor
|
|
1032
1034
|
for dtm_pkt_line in self._pkt_source: # should check dtm_str is OK
|
|
1033
|
-
|
|
1034
|
-
|
|
1035
|
-
# can be blank lines in annotated log files
|
|
1036
|
-
if (dtm_pkt_line := dtm_pkt_line.strip()) and dtm_pkt_line[:1] != "#":
|
|
1037
|
-
self._frame_read(dtm_pkt_line[:26], dtm_pkt_line[27:])
|
|
1038
|
-
await asyncio.sleep(0)
|
|
1039
|
-
# NOTE: instable without, big performance penalty if delay >0
|
|
1035
|
+
await self._process_line_from_raw(dtm_pkt_line)
|
|
1036
|
+
|
|
1040
1037
|
else:
|
|
1041
1038
|
raise exc.TransportSourceInvalid(
|
|
1042
1039
|
f"Packet source is not dict, TextIOWrapper or str: {self._pkt_source:!r}"
|
|
1043
1040
|
)
|
|
1044
1041
|
|
|
1042
|
+
async def _process_line_from_raw(self, line: str) -> None:
|
|
1043
|
+
"""Helper to process raw lines."""
|
|
1044
|
+
# there may be blank lines in annotated log files
|
|
1045
|
+
if (line := line.strip()) and line[:1] != "#":
|
|
1046
|
+
await self._process_line(line[:26], line[27:])
|
|
1047
|
+
# this is where the parsing magic happens!
|
|
1048
|
+
|
|
1049
|
+
async def _process_line(self, dtm_str: str, frame: str) -> None:
|
|
1050
|
+
"""Push frame to protocol in a thread-safe way."""
|
|
1051
|
+
# Efficient wait - 0% CPU usage while paused
|
|
1052
|
+
await self._evt_reading.wait()
|
|
1053
|
+
|
|
1054
|
+
self._frame_read(dtm_str, frame)
|
|
1055
|
+
|
|
1056
|
+
# Yield control to the event loop to prevent starvation during large file reads
|
|
1057
|
+
await asyncio.sleep(0)
|
|
1058
|
+
|
|
1045
1059
|
def _close(self, exc: exc.RamsesException | None = None) -> None:
|
|
1046
1060
|
"""Close the transport (cancel any outstanding tasks).
|
|
1047
1061
|
|
ramses_tx/version.py
CHANGED
|
File without changes
|
|
File without changes
|
|
File without changes
|