marilib-pkg 0.6.0__py3-none-any.whl → 0.7.0rc1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the changes between those versions exactly as they appear in the registry.
marilib/model.py CHANGED
@@ -5,7 +5,7 @@ from datetime import datetime, timedelta
 from enum import IntEnum
 import rich
 
-from marilib.mari_protocol import Frame
+from marilib.mari_protocol import Frame, MetricsProbePayload
 from marilib.protocol import Packet, PacketFieldMetadata
 
 # schedules taken from: https://github.com/DotBots/mari-evaluation/blob/main/simulations/radio-schedule.ipynb
@@ -52,6 +52,10 @@ EMPTY_SCHEDULE_DATA = {
 MARI_TIMEOUT_NODE_IS_ALIVE = 3  # seconds
 MARI_TIMEOUT_GATEWAY_IS_ALIVE = 3  # seconds
 
+# MARI_PROBE_STATS_EPOCH_DURATION_ASN = 565 * 20  # about 10 seconds
+MARI_PROBE_STATS_EPOCH_DURATION_ASN = 565 * 60  # about 30 seconds
+# MARI_PROBE_STATS_EPOCH_DURATION_ASN = 565 * 2  # about 1 second
+
 
 @dataclass
 class TestState:
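For orientation: taking the inline comments at face value (565 × 20 ASN ≈ 10 s), one ASN tick lasts roughly 0.9 ms. The slot duration used below is inferred from those comments, not stated anywhere in the package, so treat it as an assumption.

# Back-of-the-envelope check of the epoch constants above; the slot duration is
# an assumption derived from the "about 10 seconds" comment, not a marilib value.
ASSUMED_SLOT_DURATION_S = 10 / (565 * 20)  # ~0.000885 s per ASN tick

for multiplier in (2, 20, 60):
    epoch_asn = 565 * multiplier
    print(f"565 * {multiplier:>2} = {epoch_asn:>5} ASN ~= {epoch_asn * ASSUMED_SLOT_DURATION_S:.0f} s")
# 565 *  2 =  1130 ASN ~= 1 s
# 565 * 20 = 11300 ASN ~= 10 s
# 565 * 60 = 33900 ASN ~= 30 s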
@@ -117,12 +121,12 @@ class NodeStatsReply(Packet):
 class FrameLogEntry:
     frame: Frame
     ts: datetime = field(default_factory=lambda: datetime.now())
-    is_test_packet: bool = False
 
 
 @dataclass
-class LatencyStats:
+class MetricsStats:
     latencies: deque = field(default_factory=lambda: deque(maxlen=50))
+    # TODO: Add PDR stats
 
     def add_latency(self, rtt_seconds: float):
         self.latencies.append(rtt_seconds * 1000)
@@ -143,6 +147,8 @@ class LatencyStats:
     def max_ms(self) -> float:
         return max(self.latencies) if self.latencies else 0.0
 
+    # TODO: Add PDR stats
+
 
 @dataclass
 class FrameStats:
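MetricsStats is the renamed LatencyStats and keeps its rolling window: add_latency() stores round-trip times in milliseconds in a deque capped at 50 samples, so old samples age out automatically. A standalone sketch of that pattern (not the marilib class itself):

from collections import deque

# Standalone sketch of the rolling-latency pattern used by MetricsStats above;
# the method names mirror the diff, but this is not the marilib class.
class RollingLatency:
    def __init__(self, maxlen: int = 50):
        self.latencies: deque = deque(maxlen=maxlen)  # keeps only the newest samples

    def add_latency(self, rtt_seconds: float) -> None:
        self.latencies.append(rtt_seconds * 1000)  # stored in milliseconds

    def max_ms(self) -> float:
        return max(self.latencies) if self.latencies else 0.0

stats = RollingLatency()
for rtt in (0.012, 0.034, 0.021):
    stats.add_latency(rtt)
print(round(stats.max_ms(), 1))  # 34.0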
@@ -154,40 +160,49 @@ class FrameStats:
     cumulative_sent_non_test: int = 0
     cumulative_received_non_test: int = 0
 
-    def add_sent(self, frame: Frame, is_test_packet: bool):
+    def add_sent(self, frame: Frame):
         """Adds a sent frame, prunes old entries, and updates counters."""
         self.cumulative_sent += 1
+        if not frame.is_test_packet:
+            self.cumulative_sent_non_test += 1  # NOTE: do we need this?
 
-        if not is_test_packet:
-            self.cumulative_sent_non_test += 1
+        entry = FrameLogEntry(frame=frame)
+        self.sent.append(entry)
 
-        entry = FrameLogEntry(frame=frame, is_test_packet=is_test_packet)
-        self.sent.append(entry)
-        while self.sent and (entry.ts - self.sent[0].ts).total_seconds() > self.window_seconds:
-            self.sent.popleft()
+        # remove old entries
+        while self.sent and (entry.ts - self.sent[0].ts).total_seconds() > self.window_seconds:
+            self.sent.popleft()
 
-    def add_received(self, frame: Frame, is_test_packet: bool):
+    def add_received(self, frame: Frame):
         """Adds a received frame and prunes old entries."""
         self.cumulative_received += 1
+        if not frame.is_test_packet:
+            self.cumulative_received_non_test += 1  # NOTE: do we need this?
+
+        entry = FrameLogEntry(frame=frame)
+        self.received.append(entry)
 
-        if not is_test_packet:
-            self.cumulative_received_non_test += 1
-        entry = FrameLogEntry(frame=frame, is_test_packet=is_test_packet)
-        self.received.append(entry)
-        while (
-            self.received
-            and (entry.ts - self.received[0].ts).total_seconds() > self.window_seconds
-        ):
-            self.received.popleft()
+        # remove old entries
+        while (
+            self.received and (entry.ts - self.received[0].ts).total_seconds() > self.window_seconds
+        ):
+            self.received.popleft()
 
     def sent_count(self, window_secs: int = 0, include_test_packets: bool = True) -> int:
         if window_secs == 0:
            return self.cumulative_sent if include_test_packets else self.cumulative_sent_non_test
 
        now = datetime.now()
-        # Windowed count is always for non-test packets.
-        entries = [e for e in self.sent if now - e.ts < timedelta(seconds=window_secs)]
-        return len(entries)
+        if include_test_packets:
+            return len([e for e in self.sent if now - e.ts < timedelta(seconds=window_secs)])
+        else:
+            return len(
+                [
+                    e
+                    for e in self.sent
+                    if now - e.ts < timedelta(seconds=window_secs) and not e.frame.is_test_packet
+                ]
+            )
 
     def received_count(self, window_secs: int = 0, include_test_packets: bool = True) -> int:
         if window_secs == 0:
@@ -198,14 +213,21 @@ class FrameStats:
         )
 
         now = datetime.now()
-        entries = [e for e in self.received if now - e.ts < timedelta(seconds=window_secs)]
+        if include_test_packets:
+            entries = [e for e in self.received if now - e.ts < timedelta(seconds=window_secs)]
+        else:
+            entries = [
+                e
+                for e in self.received
+                if now - e.ts < timedelta(seconds=window_secs) and not e.frame.is_test_packet
+            ]
         return len(entries)
 
     def success_rate(self, window_secs: int = 0) -> float:
-        s = self.sent_count(window_secs, include_test_packets=False)
+        s = self.sent_count(window_secs, include_test_packets=True)
         if s == 0:
             return 1.0
-        r = self.received_count(window_secs, include_test_packets=False)
+        r = self.received_count(window_secs, include_test_packets=True)
         return min(r / s, 1.0)
 
     def received_rssi_dbm(self, window_secs: int = 0) -> float:
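A few behavioural changes are visible in this class: the test-packet flag now lives on the Frame itself rather than on FrameLogEntry, windowed counts can include or exclude test traffic, and success_rate() now compares all traffic instead of only non-test traffic. A small self-contained sketch of the windowed filter, with stub types standing in for marilib's Frame and FrameLogEntry:

from collections import deque
from dataclasses import dataclass, field
from datetime import datetime, timedelta

# Stub types standing in for marilib's Frame/FrameLogEntry, just to show the filter.
@dataclass
class StubFrame:
    is_test_packet: bool = False

@dataclass
class StubEntry:
    frame: StubFrame
    ts: datetime = field(default_factory=datetime.now)

sent = deque(StubEntry(StubFrame(t)) for t in (True, False, False))
now, window = datetime.now(), timedelta(seconds=60)

total = len([e for e in sent if now - e.ts < window])
app_only = len([e for e in sent if now - e.ts < window and not e.frame.is_test_packet])
print(total, app_only)  # 3 2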
@@ -228,22 +250,111 @@ class MariNode:
     address: int
     gateway_address: int
     last_seen: datetime = field(default_factory=lambda: datetime.now())
+    probe_stats: deque[MetricsProbePayload] = field(
+        default_factory=lambda: deque(maxlen=15)
+    )  # NOTE: related to frequency of probe stats
     stats: FrameStats = field(default_factory=FrameStats)
-    latency_stats: LatencyStats = field(default_factory=LatencyStats)
+    metrics_stats: MetricsStats = field(default_factory=MetricsStats)
     last_reported_rx_count: int = 0
     last_reported_tx_count: int = 0
     pdr_downlink: float = 0.0
     pdr_uplink: float = 0.0
+    probe_tx_count: int = 0
+    probe_rx_count: int = 0
 
     @property
     def is_alive(self) -> bool:
         return datetime.now() - self.last_seen < timedelta(seconds=MARI_TIMEOUT_NODE_IS_ALIVE)
 
-    def register_received_frame(self, frame: Frame, is_test_packet: bool):
-        self.stats.add_received(frame, is_test_packet)
+    def save_probe_stats(self, probe_stats: MetricsProbePayload):
+        # save the current probe stats
+        self.probe_stats.append(probe_stats)
+
+    @property
+    def probe_stats_latest(self) -> MetricsProbePayload | None:
+        if not self.probe_stats:
+            return None
+        return self.probe_stats[-1]
+
+    @property
+    def probe_stats_start_epoch(self) -> MetricsProbePayload | None:
+        if len(self.probe_stats) < 2:
+            return None
+        return self.probe_stats[0]
+
+    def probe_increment_tx_count(self) -> int:
+        self.probe_tx_count += 1
+        return self.probe_tx_count
+
+    def probe_increment_rx_count(self) -> int:
+        self.probe_rx_count += 1
+        return self.probe_rx_count
+
+    def stats_pdr_downlink_radio(self) -> float:
+        if not self.probe_stats_latest:
+            return 0
+        return self.probe_stats_latest.pdr_downlink_radio(self.probe_stats_start_epoch)
 
-    def register_sent_frame(self, frame: Frame, is_test_packet: bool):
-        self.stats.add_sent(frame, is_test_packet)
+    def stats_pdr_uplink_radio(self) -> float:
+        if not self.probe_stats_latest:
+            return 0
+        return self.probe_stats_latest.pdr_uplink_radio(self.probe_stats_start_epoch)
+
+    def stats_pdr_uplink_uart(self) -> float:
+        if not self.probe_stats_latest:
+            return 0
+        return self.probe_stats_latest.pdr_uplink_uart(self.probe_stats_start_epoch)
+
+    def stats_pdr_downlink_uart(self) -> float:
+        if not self.probe_stats_latest:
+            return 0
+        return self.probe_stats_latest.pdr_downlink_uart(self.probe_stats_start_epoch)
+
+    def stats_rssi_node_dbm(self) -> float:
+        if not self.probe_stats_latest:
+            return None
+        return self.probe_stats_latest.rssi_at_node_dbm()
+
+    def stats_rssi_gw_dbm(self) -> float:
+        if not self.probe_stats_latest:
+            return None
+        return self.probe_stats_latest.rssi_at_gw_dbm()
+
+    def stats_avg_latency_roundtrip_node_edge_ms(self) -> float:
+        """Average latency between node and edge in milliseconds"""
+        # compute average latency between node and edge, using all probe stats
+        if not self.probe_stats:
+            return 0
+        return sum(p.latency_roundtrip_node_edge_ms() for p in self.probe_stats) / len(
+            self.probe_stats
+        )
+
+    def stats_avg_latency_roundtrip_node_cloud_ms(self) -> float:
+        """Average latency between node and cloud in milliseconds"""
+        if not self.probe_stats:
+            return 0
+        return sum(p.latency_roundtrip_node_cloud_ms() for p in self.probe_stats) / len(
+            self.probe_stats
+        )
+
+    def stats_latest_latency_roundtrip_node_edge_ms(self) -> float:
+        """Last latency between node and edge in milliseconds"""
+        # compute average latency between node and edge, using all probe stats
+        if not self.probe_stats:
+            return 0
+        return self.probe_stats_latest.latency_roundtrip_node_edge_ms()
+
+    def stats_latest_latency_roundtrip_node_cloud_ms(self) -> float:
+        """Last latency between node and cloud in milliseconds"""
+        if not self.probe_stats:
+            return 0
+        return self.probe_stats_latest.latency_roundtrip_node_cloud_ms()
+
+    def register_received_frame(self, frame: Frame):
+        self.stats.add_received(frame)
+
+    def register_sent_frame(self, frame: Frame):
+        self.stats.add_sent(frame)
 
     def as_node_info_cloud(self) -> NodeInfoCloud:
         return NodeInfoCloud(address=self.address, gateway_address=self.gateway_address)
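The PDR helpers above compute deltas between the newest probe payload and the oldest one still in the 15-entry window (probe_stats_start_epoch). MetricsProbePayload lives in marilib.mari_protocol and is not shown in this diff, so the snapshot type below is a hypothetical stand-in; only the gw_tx_count / node_rx_count names are borrowed from fields referenced elsewhere in this diff, and the real pdr_* signatures may differ.

from collections import deque
from dataclasses import dataclass

# Hypothetical stand-in for MetricsProbePayload, only to illustrate the
# "latest minus start-of-epoch" calculation used by stats_pdr_*_radio above.
@dataclass
class ProbeSnapshot:
    gw_tx_count: int    # frames the gateway has sent towards the node so far
    node_rx_count: int  # frames the node reports having received so far

    def pdr_downlink_radio(self, start: "ProbeSnapshot | None") -> float:
        if start is None:
            return 0.0
        sent = self.gw_tx_count - start.gw_tx_count
        received = self.node_rx_count - start.node_rx_count
        return received / sent if sent > 0 else 0.0

probe_stats: deque = deque(maxlen=15)
probe_stats.append(ProbeSnapshot(gw_tx_count=100, node_rx_count=97))
probe_stats.append(ProbeSnapshot(gw_tx_count=160, node_rx_count=154))

latest = probe_stats[-1]
start = probe_stats[0] if len(probe_stats) >= 2 else None
print(f"{latest.pdr_downlink_radio(start):.1%}")  # 95.0% (57 of 60 frames in the window)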
@@ -257,12 +368,16 @@ class GatewayInfo(Packet):
             PacketFieldMetadata(name="network_id", length=2),
             PacketFieldMetadata(name="schedule_id", length=1),
             PacketFieldMetadata(name="schedule_stats", length=4 * 8),  # 4 uint64_t values
+            PacketFieldMetadata(name="asn", length=8),
+            PacketFieldMetadata(name="timer", length=4),
         ]
     )
     address: int = 0
     network_id: int = 0
     schedule_id: int = 0
     schedule_stats: bytes = b""
+    asn: int = 0
+    timer: int = 0
 
     # NOTE: maybe move to a separate class, dedicated to schedule stuff
    def repr_schedule_stats(self):
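A note on the wire format: the two new fields grow the serialized GatewayInfo by 12 bytes (8 for asn, 4 for timer). The sketch below only walks the (name, length) pairs visible in this hunk; it is not marilib's Packet parser, and the fields preceding network_id are omitted.

# Fixed-length layout implied by the metadata above (leading fields omitted).
fields = [
    ("network_id", 2),
    ("schedule_id", 1),
    ("schedule_stats", 4 * 8),
    ("asn", 8),    # new in 0.7.0rc1
    ("timer", 4),  # new in 0.7.0rc1
]

payload = bytes(sum(length for _, length in fields))  # zero-filled example buffer
offset, decoded = 0, {}
for name, length in fields:
    decoded[name] = payload[offset : offset + length]
    offset += length
print(offset)  # 47 bytes for these fields; asn + timer account for the 12 added bytes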
@@ -298,7 +413,7 @@ class GatewayInfo(Packet):
             )
         elif cell == "U":
             return rich.text.Text(
-                " ", style=f'bold white on {"yellow" if is_used else "light_yellow3"}'
+                "U", style=f'bold white on {"yellow" if is_used else "light_yellow3"}'
             )
 
     def repr_schedule_cells_with_colors(self):
@@ -316,6 +431,10 @@ class GatewayInfo(Packet):
         schedule_data = SCHEDULES.get(self.schedule_id)
         return schedule_data["name"] if schedule_data else "unknown"
 
+    @property
+    def max_nodes(self) -> int:
+        return SCHEDULES.get(self.schedule_id, EMPTY_SCHEDULE_DATA)["max_nodes"]
+
     @property
     def network_id_str(self) -> str:
         return f"{self.network_id:04X}"
@@ -334,7 +453,7 @@ class MariGateway:
     info: GatewayInfo = field(default_factory=GatewayInfo)
     node_registry: dict[int, MariNode] = field(default_factory=dict)
     stats: FrameStats = field(default_factory=FrameStats)
-    latency_stats: LatencyStats = field(default_factory=LatencyStats)
+    metrics_stats: MetricsStats = field(default_factory=MetricsStats)
    last_seen: datetime = field(default_factory=lambda: datetime.now())
 
     def __post_init__(self):
@@ -352,6 +471,70 @@ class MariGateway:
     def is_alive(self) -> bool:
         return datetime.now() - self.last_seen < timedelta(seconds=MARI_TIMEOUT_GATEWAY_IS_ALIVE)
 
+    def stats_avg_pdr_downlink_radio(self) -> float:
+        if not self.nodes:
+            return 0.0
+        res = sum(n.stats_pdr_downlink_radio() for n in self.nodes) / len(self.nodes)
+        return res if res >= 0 and res <= 1.0 else 0.0
+
+    def stats_avg_pdr_uplink_radio(self) -> float:
+        if not self.nodes:
+            return 0.0
+        res = sum(n.stats_pdr_uplink_radio() for n in self.nodes) / len(self.nodes)
+        return res if res >= 0 and res <= 1.0 else 0.0
+
+    def stats_avg_pdr_downlink_uart(self) -> float:
+        if not self.nodes:
+            return 0.0
+        res = sum(n.stats_pdr_downlink_uart() for n in self.nodes) / len(self.nodes)
+        return res if res >= 0 and res <= 1.0 else 0.0
+
+    def stats_avg_pdr_uplink_uart(self) -> float:
+        if not self.nodes:
+            return 0.0
+        res = sum(n.stats_pdr_uplink_uart() for n in self.nodes) / len(self.nodes)
+        return res if res >= 0 and res <= 1.0 else 0.0
+
+    def stats_avg_latency_roundtrip_node_edge_ms(self) -> float:
+        if not self.nodes:
+            return 0.0
+        res = sum(n.stats_avg_latency_roundtrip_node_edge_ms() for n in self.nodes) / len(
+            self.nodes
+        )
+        return res if res >= 0 else 0.0
+
+    def stats_avg_latency_roundtrip_node_cloud_ms(self) -> float:
+        if not self.nodes:
+            return 0.0
+        res = sum(n.stats_avg_latency_roundtrip_node_cloud_ms() for n in self.nodes) / len(
+            self.nodes
+        )
+        return res if res >= 0 else 0.0
+
+    def stats_latest_node_tx_count(self) -> int:
+        """Returns sum of tx counts for all nodes"""
+        if not self.nodes:
+            return 0
+        return sum(n.probe_stats_latest.node_tx_count for n in self.nodes if n.probe_stats_latest)
+
+    def stats_latest_node_rx_count(self) -> int:
+        """Returns sum of rx counts for all nodes"""
+        if not self.nodes:
+            return 0
+        return sum(n.probe_stats_latest.node_rx_count for n in self.nodes if n.probe_stats_latest)
+
+    def stats_latest_gw_tx_count(self) -> int:
+        """Returns sum of tx counts for all nodes"""
+        if not self.nodes:
+            return 0
+        return sum(n.probe_stats_latest.gw_tx_count for n in self.nodes if n.probe_stats_latest)
+
+    def stats_latest_gw_rx_count(self) -> int:
+        """Returns sum of rx counts for all nodes"""
+        if not self.nodes:
+            return 0
+        return sum(n.probe_stats_latest.gw_rx_count for n in self.nodes if n.probe_stats_latest)
+
     def update(self):
         """Recurrent bookkeeping. Don't forget to call this periodically on your main loop."""
         self.node_registry = {
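The gateway-level aggregates all follow the same guarded pattern: return 0.0 when there are no nodes, average across nodes otherwise, and clamp implausible results (for example a PDR above 1.0 from inconsistent probe counters) back to 0.0. A standalone sketch of that pattern:

# Standalone sketch of the guarded averaging used by the stats_avg_* methods above.
def avg_pdr(values: list) -> float:
    if not values:
        return 0.0
    res = sum(values) / len(values)
    return res if 0 <= res <= 1.0 else 0.0

print(avg_pdr([]))           # 0.0 -> no nodes yet
print(avg_pdr([0.75, 1.0]))  # 0.875
print(avg_pdr([1.5, 1.0]))   # 0.0 -> an implausible average is discarded, not reported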
@@ -384,10 +567,10 @@ class MariGateway:
             node = self.add_node(addr)
         return node
 
-    def register_received_frame(self, frame: Frame, is_test_packet: bool):
+    def register_received_frame(self, frame: Frame):
         if n := self.get_node(frame.header.source):
-            n.register_received_frame(frame, is_test_packet)
-        self.stats.add_received(frame, is_test_packet)
+            n.register_received_frame(frame)
+        self.stats.add_received(frame)
 
-    def register_sent_frame(self, frame: Frame, is_test_packet: bool):
-        self.stats.add_sent(frame, is_test_packet)
+    def register_sent_frame(self, frame: Frame):
+        self.stats.add_sent(frame)
marilib/pdr.py ADDED
@@ -0,0 +1,99 @@
+import threading
+from typing import TYPE_CHECKING
+from rich import print
+
+from marilib.model import NodeStatsReply
+from marilib.mari_protocol import Frame
+from marilib.protocol import ProtocolPayloadParserException
+
+
+if TYPE_CHECKING:
+    from marilib.marilib_edge import MarilibEdge
+
+
+PDR_STATS_REQUEST_PAYLOAD = b"S"
+
+
+class PDRTester:
+    """A thread-based class to periodically test PDR to all nodes."""
+
+    def __init__(self, marilib: "MarilibEdge", interval: float = 15.0):
+        self.marilib = marilib
+        self.interval = interval
+        self._stop_event = threading.Event()
+        self._thread = threading.Thread(target=self._run, daemon=True)
+
+    def start(self):
+        """Starts the PDR testing thread."""
+        print("[yellow]PDR tester started.[/]")
+        self._thread.start()
+
+    def stop(self):
+        """Stops the PDR testing thread."""
+        self._stop_event.set()
+        if self._thread.is_alive():
+            self._thread.join()
+        print("[yellow]PDR tester stopped.[/]")
+
+    def _run(self):
+        """The main loop for the testing thread."""
+        self._stop_event.wait(self.interval)
+
+        while not self._stop_event.is_set():
+            nodes = list(self.marilib.nodes)
+            if not nodes:
+                self._stop_event.wait(self.interval)
+                continue
+
+            for node in nodes:
+                if self._stop_event.is_set():
+                    break
+                self.send_pdr_request(node.address)
+
+                # Spread requests evenly over the interval
+                sleep_duration = self.interval / len(nodes)
+                self._stop_event.wait(sleep_duration)
+
+    def send_pdr_request(self, address: int):
+        """Sends a PDR stats request to a specific address."""
+        self.marilib.send_frame(address, PDR_STATS_REQUEST_PAYLOAD)
+
+    def handle_response(self, frame: Frame) -> bool:
+        """
+        Handles a PDR stats response frame and calculates PDR values.
+        Returns True if the frame was a valid stats reply, False otherwise.
+        """
+        if len(frame.payload) != 8:
+            return False
+
+        try:
+            stats_reply = NodeStatsReply().from_bytes(frame.payload)
+            node = self.marilib.gateway.get_node(frame.header.source)
+
+            if node:
+                # Update with the latest stats reported by the node
+                node.last_reported_rx_count = stats_reply.rx_app_packets
+                node.last_reported_tx_count = stats_reply.tx_app_packets
+
+                # Calculate Downlink PDR
+                sent_count = node.stats.sent_count(include_test_packets=False)
+                if sent_count > 0:
+                    pdr = node.last_reported_rx_count / sent_count
+                    node.pdr_downlink = min(pdr, 1.0)
+                else:
+                    node.pdr_downlink = 0.0
+
+                # Calculate Uplink PDR
+                received_count = node.stats.received_count(include_test_packets=False)
+                if node.last_reported_tx_count > 0:
+                    pdr = received_count / node.last_reported_tx_count
+                    node.pdr_uplink = min(pdr, 1.0)
+                else:
+                    node.pdr_uplink = 0.0
+
+                return True
+
+        except (ValueError, ProtocolPayloadParserException):
+            return False
+
+        return False
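A hypothetical wiring example for the new tester, assuming marilib-pkg 0.7.0rc1 is installed. PDRTester only needs an object exposing .nodes and .send_frame() for its request path, so a stub is enough to observe the thread lifecycle; in a real application the stub would be a MarilibEdge instance, and inbound frames should be passed to handle_response() so that node.pdr_uplink / node.pdr_downlink get updated.

import time

from marilib.pdr import PDRTester

# Hypothetical stand-in for MarilibEdge, exposing just what PDRTester touches here.
class FakeEdge:
    nodes = []  # no nodes: the worker simply wakes up every `interval` seconds

    def send_frame(self, address: int, payload: bytes) -> None:
        print(f"would send {payload!r} to 0x{address:016X}")

tester = PDRTester(FakeEdge(), interval=1.0)
tester.start()   # prints "PDR tester started."
time.sleep(2.5)  # let the worker idle through a couple of cycles
tester.stop()    # sets the stop event and joins the thread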
marilib/serial_uart.py CHANGED
@@ -16,7 +16,7 @@ import serial
 from serial.tools import list_ports
 
 SERIAL_PAYLOAD_CHUNK_SIZE = 64
-SERIAL_PAYLOAD_CHUNK_DELAY = 0.002  # 2 ms
+SERIAL_PAYLOAD_CHUNK_DELAY = 0.001  # 1 ms
 SERIAL_DEFAULT_PORT = "/dev/ttyACM0"
 SERIAL_DEFAULT_BAUDRATE = 1_000_000
 
@@ -76,9 +76,10 @@ class SerialInterface(threading.Thread):
         """Write bytes on serial."""
         # Send 64 bytes at a time
         pos = 0
-        while (pos % SERIAL_PAYLOAD_CHUNK_SIZE) == 0 and pos < len(bytes_):
-            self.serial.write(bytes_[pos : pos + SERIAL_PAYLOAD_CHUNK_SIZE])
+        while pos < len(bytes_):
+            chunk_end = min(pos + SERIAL_PAYLOAD_CHUNK_SIZE, len(bytes_))
+            self.serial.write(bytes_[pos:chunk_end])
             self.serial.flush()
-            pos += SERIAL_PAYLOAD_CHUNK_SIZE
-            time.sleep(SERIAL_PAYLOAD_CHUNK_DELAY)
-        # self.serial.flush()
+            pos = chunk_end
+            if pos < len(bytes_):  # Only sleep if there are more chunks
+                time.sleep(SERIAL_PAYLOAD_CHUNK_DELAY)
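The rewritten loop drops the confusing pos % SERIAL_PAYLOAD_CHUNK_SIZE condition, slices each chunk explicitly, and only sleeps between chunks, so the final chunk no longer pays the inter-chunk delay. A standalone sketch of the same slicing logic, printing chunk sizes instead of writing to a serial port:

# Standalone illustration of the chunked-write loop above (no serial port involved).
SERIAL_PAYLOAD_CHUNK_SIZE = 64

def chunks(payload: bytes) -> list:
    out, pos = [], 0
    while pos < len(payload):
        chunk_end = min(pos + SERIAL_PAYLOAD_CHUNK_SIZE, len(payload))
        out.append(payload[pos:chunk_end])
        pos = chunk_end  # the sleep would go here, only while pos < len(payload)
    return out

print([len(c) for c in chunks(b"x" * 150)])  # [64, 64, 22]
print([len(c) for c in chunks(b"x" * 64)])   # [64] -- exactly one chunk, no delay at all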
marilib/tui_cloud.py CHANGED
@@ -88,9 +88,34 @@ class MarilibTUICloud(MarilibTUI):
         # Row 1: Gateway info
         node_count = f"{len(gateway.nodes)} / {gateway.info.schedule_uplink_cells}"
         schedule_info = f"#{gateway.info.schedule_id} {gateway.info.schedule_name}"
+
+        # --- Latency and PDR Display ---
+        avg_latency_edge = gateway.stats_avg_latency_roundtrip_node_edge_ms()
+        has_latency_info = avg_latency_edge > 0
+
+        # Check if we have PDR info by looking at the gateway averages
+        avg_uart_pdr_up = gateway.stats_avg_pdr_uplink_uart()
+        avg_uart_pdr_down = gateway.stats_avg_pdr_downlink_uart()
+        has_uart_pdr_info = avg_uart_pdr_up > 0 or avg_uart_pdr_down > 0
+
+        avg_radio_pdr_down = gateway.stats_avg_pdr_downlink_radio()
+        avg_radio_pdr_up = gateway.stats_avg_pdr_uplink_radio()
+        has_radio_pdr_info = avg_radio_pdr_down > 0 or avg_radio_pdr_up > 0
+
+        latency_info = f" | Latency: {avg_latency_edge:.1f}ms" if has_latency_info else ""
+        pdr_info = " | PDR:" if has_uart_pdr_info or has_radio_pdr_info else ""
+        radio_pdr_info = (
+            f" Radio ↓ {avg_radio_pdr_down:.1%} ↑ {avg_radio_pdr_up:.1%}"
+            if has_radio_pdr_info
+            else ""
+        )
+        uart_pdr_info = (
+            f" UART ↓ {avg_uart_pdr_down:.1%} ↑ {avg_uart_pdr_up:.1%}" if has_uart_pdr_info else ""
+        )
+
         table.add_row(
             f"[bold cyan]0x{gateway.info.address:016X}[/bold cyan]",
-            f"Nodes: {node_count} | Schedule: {schedule_info}",
+            f"Nodes: {node_count} | Schedule: {schedule_info}{latency_info}{pdr_info}{radio_pdr_info}{uart_pdr_info}",
         )
 
         # Row 2: Schedule usage