mmrelay 1.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mmrelay/__init__.py +5 -0
- mmrelay/__main__.py +29 -0
- mmrelay/cli.py +2013 -0
- mmrelay/cli_utils.py +746 -0
- mmrelay/config.py +956 -0
- mmrelay/constants/__init__.py +65 -0
- mmrelay/constants/app.py +29 -0
- mmrelay/constants/config.py +78 -0
- mmrelay/constants/database.py +22 -0
- mmrelay/constants/formats.py +20 -0
- mmrelay/constants/messages.py +45 -0
- mmrelay/constants/network.py +45 -0
- mmrelay/constants/plugins.py +42 -0
- mmrelay/constants/queue.py +20 -0
- mmrelay/db_runtime.py +269 -0
- mmrelay/db_utils.py +1017 -0
- mmrelay/e2ee_utils.py +400 -0
- mmrelay/log_utils.py +274 -0
- mmrelay/main.py +439 -0
- mmrelay/matrix_utils.py +3091 -0
- mmrelay/meshtastic_utils.py +1245 -0
- mmrelay/message_queue.py +647 -0
- mmrelay/plugin_loader.py +1933 -0
- mmrelay/plugins/__init__.py +3 -0
- mmrelay/plugins/base_plugin.py +638 -0
- mmrelay/plugins/debug_plugin.py +30 -0
- mmrelay/plugins/drop_plugin.py +127 -0
- mmrelay/plugins/health_plugin.py +64 -0
- mmrelay/plugins/help_plugin.py +79 -0
- mmrelay/plugins/map_plugin.py +353 -0
- mmrelay/plugins/mesh_relay_plugin.py +222 -0
- mmrelay/plugins/nodes_plugin.py +92 -0
- mmrelay/plugins/ping_plugin.py +128 -0
- mmrelay/plugins/telemetry_plugin.py +179 -0
- mmrelay/plugins/weather_plugin.py +312 -0
- mmrelay/runtime_utils.py +35 -0
- mmrelay/setup_utils.py +828 -0
- mmrelay/tools/__init__.py +27 -0
- mmrelay/tools/mmrelay.service +19 -0
- mmrelay/tools/sample-docker-compose-prebuilt.yaml +30 -0
- mmrelay/tools/sample-docker-compose.yaml +30 -0
- mmrelay/tools/sample.env +10 -0
- mmrelay/tools/sample_config.yaml +120 -0
- mmrelay/windows_utils.py +346 -0
- mmrelay-1.2.6.dist-info/METADATA +145 -0
- mmrelay-1.2.6.dist-info/RECORD +50 -0
- mmrelay-1.2.6.dist-info/WHEEL +5 -0
- mmrelay-1.2.6.dist-info/entry_points.txt +2 -0
- mmrelay-1.2.6.dist-info/licenses/LICENSE +675 -0
- mmrelay-1.2.6.dist-info/top_level.txt +1 -0
mmrelay/message_queue.py
ADDED
|
@@ -0,0 +1,647 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Message queue system for MMRelay.
|
|
3
|
+
|
|
4
|
+
Provides transparent message queuing with rate limiting to prevent overwhelming
|
|
5
|
+
the Meshtastic network. Messages are queued in memory and sent at the configured
|
|
6
|
+
rate, respecting connection state and firmware constraints.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
import contextlib
|
|
11
|
+
import threading
|
|
12
|
+
import time
|
|
13
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
14
|
+
from dataclasses import dataclass
|
|
15
|
+
from functools import partial
|
|
16
|
+
from queue import Empty, Full, Queue
|
|
17
|
+
from typing import Callable, Optional
|
|
18
|
+
|
|
19
|
+
from mmrelay.constants.database import DEFAULT_MSGS_TO_KEEP
|
|
20
|
+
from mmrelay.constants.network import MINIMUM_MESSAGE_DELAY, RECOMMENDED_MINIMUM_DELAY
|
|
21
|
+
from mmrelay.constants.queue import (
|
|
22
|
+
DEFAULT_MESSAGE_DELAY,
|
|
23
|
+
MAX_QUEUE_SIZE,
|
|
24
|
+
QUEUE_HIGH_WATER_MARK,
|
|
25
|
+
QUEUE_MEDIUM_WATER_MARK,
|
|
26
|
+
)
|
|
27
|
+
from mmrelay.log_utils import get_logger
|
|
28
|
+
|
|
29
|
+
logger = get_logger(name="MessageQueue")
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@dataclass
class QueuedMessage:
    """Represents a message in the queue with metadata.

    The fields mirror the deferred call that the processor eventually makes:
    ``send_function(*args, **kwargs)``.
    """

    # Wall-clock time (time.time()) at which the message was enqueued.
    timestamp: float
    # Callable executed in a worker thread to actually send the message.
    send_function: Callable
    # Positional arguments forwarded to send_function.
    args: tuple
    # Keyword arguments forwarded to send_function.
    kwargs: dict
    # Human-readable label used only for log output.
    description: str
    # Optional message mapping information for replies/reactions
    mapping_info: Optional[dict] = None
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class MessageQueue:
    """
    FIFO message queue with rate limiting for Meshtastic messages.

    Messages are held in a bounded in-memory queue and dispatched in arrival
    order, no faster than the configured delay, so the mesh network is never
    overwhelmed. Sending automatically pauses while the connection is down or
    reconnecting.
    """

    def __init__(self):
        """
        Initialize queue storage, timing markers, and concurrency state.

        No background work is started here; call start() to launch the
        processor task and the send executor.
        """
        # Bounded FIFO of QueuedMessage instances.
        self._queue = Queue(maxsize=MAX_QUEUE_SIZE)
        # Guards start/stop and other state transitions across threads.
        self._lock = threading.Lock()

        # Processor lifecycle.
        self._running = False
        self._processor_task = None  # asyncio.Task created when started
        self._executor = None  # dedicated single-worker pool for blocking sends

        # Rate-limiting state: wall clock for reporting, monotonic for math.
        self._last_send_time = 0.0
        self._last_send_mono = 0.0
        self._message_delay = DEFAULT_MESSAGE_DELAY

        # Progress tracking consumed by drain().
        self._in_flight = False  # a send is actively running in the executor
        self._has_current = False  # a message is dequeued but not yet finished
        self._dropped_messages = 0  # messages rejected because the queue was full
|
|
81
|
+
|
|
82
|
+
def start(self, message_delay: float = DEFAULT_MESSAGE_DELAY):
|
|
83
|
+
"""
|
|
84
|
+
Start the message queue processor and set the inter-message delay.
|
|
85
|
+
|
|
86
|
+
Activate the queue, apply the provided inter-message delay, ensure a single-worker executor exists for send operations, and schedule the background processor when an asyncio event loop is available. Logs a warning if the provided delay is less than or equal to MINIMUM_MESSAGE_DELAY.
|
|
87
|
+
|
|
88
|
+
Parameters:
|
|
89
|
+
message_delay (float): Delay between consecutive sends in seconds; applied as provided and may trigger a warning if <= MINIMUM_MESSAGE_DELAY.
|
|
90
|
+
"""
|
|
91
|
+
with self._lock:
|
|
92
|
+
if self._running:
|
|
93
|
+
return
|
|
94
|
+
|
|
95
|
+
# Set the message delay as requested
|
|
96
|
+
self._message_delay = message_delay
|
|
97
|
+
|
|
98
|
+
# Log warning if delay is at or below MINIMUM_MESSAGE_DELAY seconds due to firmware rate limiting
|
|
99
|
+
if message_delay <= MINIMUM_MESSAGE_DELAY:
|
|
100
|
+
logger.warning(
|
|
101
|
+
f"Message delay {message_delay}s is at or below {MINIMUM_MESSAGE_DELAY}s. "
|
|
102
|
+
f"Due to rate limiting in the Meshtastic Firmware, {RECOMMENDED_MINIMUM_DELAY}s or higher is recommended. "
|
|
103
|
+
f"Messages may be dropped by the firmware if sent too frequently."
|
|
104
|
+
)
|
|
105
|
+
|
|
106
|
+
self._running = True
|
|
107
|
+
|
|
108
|
+
# Create dedicated executor for this MessageQueue
|
|
109
|
+
if self._executor is None:
|
|
110
|
+
self._executor = ThreadPoolExecutor(
|
|
111
|
+
max_workers=1, thread_name_prefix=f"MessageQueue-{id(self)}"
|
|
112
|
+
)
|
|
113
|
+
|
|
114
|
+
# Start the processor in the event loop
|
|
115
|
+
try:
|
|
116
|
+
try:
|
|
117
|
+
loop = asyncio.get_running_loop()
|
|
118
|
+
except RuntimeError:
|
|
119
|
+
loop = None
|
|
120
|
+
if loop and loop.is_running():
|
|
121
|
+
self._processor_task = loop.create_task(self._process_queue())
|
|
122
|
+
logger.info(
|
|
123
|
+
f"Message queue started with {self._message_delay}s message delay"
|
|
124
|
+
)
|
|
125
|
+
else:
|
|
126
|
+
# Event loop exists but not running yet, defer startup
|
|
127
|
+
logger.debug(
|
|
128
|
+
"Event loop not running yet, will start processor later"
|
|
129
|
+
)
|
|
130
|
+
except RuntimeError:
|
|
131
|
+
# No event loop running, will start when one is available
|
|
132
|
+
logger.debug(
|
|
133
|
+
"No event loop available, queue processor will start later"
|
|
134
|
+
)
|
|
135
|
+
|
|
136
|
+
    def stop(self):
        """
        Stop the message queue processor and clean up internal resources.

        Cancels the background processor task (if running) and attempts to wait
        for it to finish on the task's owning event loop without blocking the
        caller's event loop. Shuts down the dedicated ThreadPoolExecutor used
        for blocking I/O; when called from an asyncio event loop the executor
        shutdown is performed on a background thread to avoid blocking. Clears
        internal state so the queue can be restarted later.

        Notes:
            - Thread-safe.
            - May block briefly (up to ~1 second when awaiting task completion)
              but avoids blocking the current asyncio event loop when possible.
            - Exceptions during normal cancellation/shutdown are suppressed.
        """
        with self._lock:
            if not self._running:
                return

            self._running = False

            if self._processor_task:
                self._processor_task.cancel()

                # Wait for the task to complete on its owning loop. How we do
                # that depends on which thread/loop we are currently on.
                task_loop = self._processor_task.get_loop()
                current_loop = None
                with contextlib.suppress(RuntimeError):
                    current_loop = asyncio.get_running_loop()
                if task_loop.is_closed():
                    # Owning loop is closed; nothing we can do to await it
                    pass
                elif current_loop is task_loop:
                    # Avoid blocking the event loop thread; cancellation will finish naturally
                    pass
                elif task_loop.is_running():
                    # Different thread, loop still alive: hop onto it and wait.
                    from asyncio import run_coroutine_threadsafe, shield

                    with contextlib.suppress(Exception):
                        # shield() keeps our wait from re-cancelling the task.
                        fut = run_coroutine_threadsafe(
                            shield(self._processor_task), task_loop
                        )
                        # Wait for completion; ignore exceptions raised due to cancellation
                        fut.result(timeout=1.0)
                else:
                    # Loop exists but is idle: drive it ourselves to finish the task.
                    with contextlib.suppress(
                        asyncio.CancelledError, RuntimeError, Exception
                    ):
                        task_loop.run_until_complete(self._processor_task)

                self._processor_task = None

            # Shut down our dedicated executor without blocking the event loop
            if self._executor:
                on_loop_thread = False
                with contextlib.suppress(RuntimeError):
                    loop_chk = asyncio.get_running_loop()
                    on_loop_thread = loop_chk.is_running()

                def _shutdown(exec_ref):
                    """
                    Shut down an executor, waiting for running tasks to finish.

                    Tries executor.shutdown(wait=True, cancel_futures=True) and
                    falls back to shutdown(wait=True) on TypeError (Python
                    versions or executors without `cancel_futures`). Blocks
                    until shutdown completes.
                    """
                    try:
                        exec_ref.shutdown(wait=True, cancel_futures=True)
                    except TypeError:
                        exec_ref.shutdown(wait=True)

                if on_loop_thread:
                    # We're on a running event loop: do the blocking shutdown
                    # on a daemon thread instead of stalling the loop.
                    threading.Thread(
                        target=_shutdown,
                        args=(self._executor,),
                        name="MessageQueueExecutorShutdown",
                        daemon=True,
                    ).start()
                else:
                    _shutdown(self._executor)
                self._executor = None

            logger.info("Message queue stopped")
|
|
214
|
+
|
|
215
|
+
def enqueue(
|
|
216
|
+
self,
|
|
217
|
+
send_function: Callable,
|
|
218
|
+
*args,
|
|
219
|
+
description: str = "",
|
|
220
|
+
mapping_info: Optional[dict] = None,
|
|
221
|
+
**kwargs,
|
|
222
|
+
) -> bool:
|
|
223
|
+
"""
|
|
224
|
+
Enqueue a message for ordered, rate-limited sending.
|
|
225
|
+
|
|
226
|
+
Ensures the queue processor is started (if an event loop is available) and attempts to add a QueuedMessage (containing the provided send function and its arguments) to the bounded in-memory queue. If the queue is not running or has reached capacity the message is not added and the method returns False. Optionally attach mapping_info metadata (used later to correlate sent messages with external IDs).
|
|
227
|
+
|
|
228
|
+
Parameters:
|
|
229
|
+
send_function (Callable): Callable to execute when the message is sent.
|
|
230
|
+
*args: Positional arguments to pass to send_function.
|
|
231
|
+
description (str, optional): Human-readable description used for logging.
|
|
232
|
+
mapping_info (dict | None, optional): Optional metadata to record after a successful send.
|
|
233
|
+
**kwargs: Keyword arguments to pass to send_function.
|
|
234
|
+
|
|
235
|
+
Returns:
|
|
236
|
+
bool: True if the message was successfully enqueued; False if the queue is not running or is full.
|
|
237
|
+
"""
|
|
238
|
+
# Ensure processor is started if event loop is now available.
|
|
239
|
+
# This is called outside the lock to prevent potential deadlocks.
|
|
240
|
+
self.ensure_processor_started()
|
|
241
|
+
|
|
242
|
+
with self._lock:
|
|
243
|
+
if not self._running:
|
|
244
|
+
# Refuse to send to prevent blocking the event loop
|
|
245
|
+
logger.error(
|
|
246
|
+
"Queue not running; cannot send message: %s. Start the message queue before sending.",
|
|
247
|
+
description,
|
|
248
|
+
)
|
|
249
|
+
return False
|
|
250
|
+
|
|
251
|
+
message = QueuedMessage(
|
|
252
|
+
timestamp=time.time(),
|
|
253
|
+
send_function=send_function,
|
|
254
|
+
args=args,
|
|
255
|
+
kwargs=kwargs,
|
|
256
|
+
description=description,
|
|
257
|
+
mapping_info=mapping_info,
|
|
258
|
+
)
|
|
259
|
+
# Enforce capacity via bounded queue
|
|
260
|
+
try:
|
|
261
|
+
self._queue.put_nowait(message)
|
|
262
|
+
except Full:
|
|
263
|
+
logger.warning(
|
|
264
|
+
f"Message queue full ({self._queue.qsize()}/{MAX_QUEUE_SIZE}), dropping message: {description}"
|
|
265
|
+
)
|
|
266
|
+
self._dropped_messages += 1
|
|
267
|
+
return False
|
|
268
|
+
# Only log queue status when there are multiple messages
|
|
269
|
+
queue_size = self._queue.qsize()
|
|
270
|
+
if queue_size >= 2:
|
|
271
|
+
logger.debug(
|
|
272
|
+
f"Queued message ({queue_size}/{MAX_QUEUE_SIZE}): {description}"
|
|
273
|
+
)
|
|
274
|
+
return True
|
|
275
|
+
|
|
276
|
+
def get_queue_size(self) -> int:
|
|
277
|
+
"""
|
|
278
|
+
Return the number of messages currently in the queue.
|
|
279
|
+
|
|
280
|
+
Returns:
|
|
281
|
+
int: The current queue size.
|
|
282
|
+
"""
|
|
283
|
+
return self._queue.qsize()
|
|
284
|
+
|
|
285
|
+
def is_running(self) -> bool:
|
|
286
|
+
"""
|
|
287
|
+
Return whether the message queue processor is currently active.
|
|
288
|
+
"""
|
|
289
|
+
return self._running
|
|
290
|
+
|
|
291
|
+
def get_status(self) -> dict:
|
|
292
|
+
"""
|
|
293
|
+
Return current status of the message queue.
|
|
294
|
+
|
|
295
|
+
Provides a snapshot useful for monitoring and debugging.
|
|
296
|
+
|
|
297
|
+
Returns:
|
|
298
|
+
dict: Mapping with the following keys:
|
|
299
|
+
- running (bool): Whether the queue processor is active.
|
|
300
|
+
- queue_size (int): Number of messages currently queued.
|
|
301
|
+
- message_delay (float): Configured minimum delay (seconds) between sends.
|
|
302
|
+
- processor_task_active (bool): True if the internal processor task exists and is not finished.
|
|
303
|
+
- last_send_time (float or None): Wall-clock time (seconds since the epoch) of the last successful send, or None if no send has occurred.
|
|
304
|
+
- time_since_last_send (float or None): Seconds elapsed since last_send_time, or None if no send has occurred.
|
|
305
|
+
- in_flight (bool): True when a message is currently being sent.
|
|
306
|
+
- dropped_messages (int): Number of messages dropped due to queue being full.
|
|
307
|
+
- default_msgs_to_keep (int): Default retention setting for message mappings.
|
|
308
|
+
"""
|
|
309
|
+
return {
|
|
310
|
+
"running": self._running,
|
|
311
|
+
"queue_size": self._queue.qsize(),
|
|
312
|
+
"message_delay": self._message_delay,
|
|
313
|
+
"processor_task_active": self._processor_task is not None
|
|
314
|
+
and not self._processor_task.done(),
|
|
315
|
+
"last_send_time": self._last_send_time,
|
|
316
|
+
"time_since_last_send": (
|
|
317
|
+
time.monotonic() - self._last_send_mono
|
|
318
|
+
if self._last_send_mono > 0
|
|
319
|
+
else None
|
|
320
|
+
),
|
|
321
|
+
"in_flight": self._in_flight,
|
|
322
|
+
"dropped_messages": getattr(self, "_dropped_messages", 0),
|
|
323
|
+
"default_msgs_to_keep": DEFAULT_MSGS_TO_KEEP,
|
|
324
|
+
}
|
|
325
|
+
|
|
326
|
+
async def drain(self, timeout: Optional[float] = None) -> bool:
|
|
327
|
+
"""
|
|
328
|
+
Asynchronously wait until the queue has fully drained (no queued messages and no in-flight or current message) or until an optional timeout elapses.
|
|
329
|
+
|
|
330
|
+
If `timeout` is provided, it is interpreted in seconds. Returns True when the queue is empty and there are no messages being processed; returns False if the queue was stopped before draining or the timeout was reached.
|
|
331
|
+
"""
|
|
332
|
+
deadline = (time.monotonic() + timeout) if timeout is not None else None
|
|
333
|
+
while (not self._queue.empty()) or self._in_flight or self._has_current:
|
|
334
|
+
if not self._running:
|
|
335
|
+
return False
|
|
336
|
+
if deadline is not None and time.monotonic() > deadline:
|
|
337
|
+
return False
|
|
338
|
+
await asyncio.sleep(0.1)
|
|
339
|
+
return True
|
|
340
|
+
|
|
341
|
+
def ensure_processor_started(self):
|
|
342
|
+
"""
|
|
343
|
+
Start the queue processor task if the queue is running and no processor task exists.
|
|
344
|
+
|
|
345
|
+
This method checks if the queue is active and, if so, attempts to create and start the asynchronous processor task within the current event loop.
|
|
346
|
+
"""
|
|
347
|
+
with self._lock:
|
|
348
|
+
if self._running and self._processor_task is None:
|
|
349
|
+
try:
|
|
350
|
+
loop = asyncio.get_running_loop()
|
|
351
|
+
except RuntimeError:
|
|
352
|
+
loop = None
|
|
353
|
+
if loop and loop.is_running():
|
|
354
|
+
self._processor_task = loop.create_task(self._process_queue())
|
|
355
|
+
logger.info(
|
|
356
|
+
f"Message queue processor started with {self._message_delay}s message delay"
|
|
357
|
+
)
|
|
358
|
+
|
|
359
|
+
    async def _process_queue(self):
        """
        Process queued messages in FIFO order, sending each when the connection
        is ready and the configured inter-message delay has elapsed.

        Runs until the queue is stopped or the task is cancelled. After a
        successful send, updates last-send timestamps and, when mapping
        information is present and the send result exposes an `id`, persists
        the message mapping. Cancellation may drop an in-flight message.
        """
        logger.debug("Message queue processor started")
        # Message currently being worked on; held across loop iterations so a
        # not-yet-ready message is retried rather than re-queued (keeps FIFO).
        current_message = None

        while self._running:
            try:
                # Get next message if we don't have one waiting
                if current_message is None:
                    # Monitor queue depth for operational awareness
                    queue_size = self._queue.qsize()
                    if queue_size > QUEUE_HIGH_WATER_MARK:
                        logger.warning(
                            f"Queue depth high: {queue_size} messages pending"
                        )
                    elif queue_size > QUEUE_MEDIUM_WATER_MARK:
                        logger.info(
                            f"Queue depth moderate: {queue_size} messages pending"
                        )

                    # Get next message (non-blocking)
                    try:
                        current_message = self._queue.get_nowait()
                        # Flag for drain(): a message is dequeued but unfinished.
                        self._has_current = True
                    except Empty:
                        # No messages, wait a bit and continue
                        await asyncio.sleep(0.1)
                        continue

                # Check if we should send (connection state, etc.)
                if not self._should_send_message():
                    # Keep the message and wait - don't requeue to maintain FIFO order
                    logger.debug(
                        f"Connection not ready, waiting to send: {current_message.description}"
                    )
                    await asyncio.sleep(1.0)
                    continue

                # Check if we need to wait for message delay (only if we've sent before)
                if self._last_send_mono > 0:
                    time_since_last = time.monotonic() - self._last_send_mono
                    if time_since_last < self._message_delay:
                        wait_time = self._message_delay - time_since_last
                        logger.debug(
                            f"Rate limiting: waiting {wait_time:.1f}s before sending"
                        )
                        await asyncio.sleep(wait_time)
                        # Re-check readiness after sleeping rather than sending blind.
                        continue
                    elif time_since_last < MINIMUM_MESSAGE_DELAY:
                        # Warn when messages are sent less than MINIMUM_MESSAGE_DELAY seconds apart
                        logger.warning(
                            f"[Runtime] Messages sent {time_since_last:.1f}s apart, which is below {MINIMUM_MESSAGE_DELAY}s. "
                            f"Due to rate limiting in the Meshtastic Firmware, messages may be dropped."
                        )

                # Send the message
                try:
                    self._in_flight = True
                    logger.debug(
                        f"Sending queued message: {current_message.description}"
                    )
                    # Run synchronous Meshtastic I/O operations in executor to prevent blocking event loop
                    loop = asyncio.get_running_loop()
                    exec_ref = self._executor
                    if exec_ref is None:
                        raise RuntimeError("MessageQueue executor is not initialized")
                    result = await loop.run_in_executor(
                        exec_ref,
                        partial(
                            current_message.send_function,
                            *current_message.args,
                            **current_message.kwargs,
                        ),
                    )

                    # Update last send time (wall clock for reporting, monotonic for rate limiting)
                    self._last_send_time = time.time()
                    self._last_send_mono = time.monotonic()

                    if result is None:
                        logger.warning(
                            f"Message send returned None: {current_message.description}"
                        )
                    else:
                        logger.debug(
                            f"Successfully sent queued message: {current_message.description}"
                        )

                    # Handle message mapping if provided
                    if current_message.mapping_info and hasattr(result, "id"):
                        await self._handle_message_mapping(
                            result, current_message.mapping_info
                        )

                except Exception as e:
                    # Send failures are logged and the message is dropped;
                    # the loop continues with the next queued message.
                    logger.error(
                        f"Error sending queued message '{current_message.description}': {e}"
                    )

                # Mark task as done and clear current message
                self._queue.task_done()
                current_message = None
                self._in_flight = False
                self._has_current = False

            except asyncio.CancelledError:
                logger.debug("Message queue processor cancelled")
                if current_message:
                    logger.warning(
                        f"Message in flight was dropped during shutdown: {current_message.description}"
                    )
                    with contextlib.suppress(Exception):
                        self._queue.task_done()
                self._in_flight = False
                self._has_current = False
                break
            except Exception:
                logger.exception("Error in message queue processor")
                await asyncio.sleep(1.0)  # Prevent tight error loop
|
|
482
|
+
|
|
483
|
+
def _should_send_message(self) -> bool:
|
|
484
|
+
"""
|
|
485
|
+
Determine whether conditions allow sending a Meshtastic message.
|
|
486
|
+
|
|
487
|
+
Performs runtime checks: verifies the global reconnecting flag is not set, a Meshtastic client object exists, and—if the client exposes a connectivity indicator—that indicator reports connected. If importing Meshtastic utilities fails, logs a critical error and asynchronously stops the queue.
|
|
488
|
+
|
|
489
|
+
Returns:
|
|
490
|
+
`True` if not reconnecting, a Meshtastic client exists, and the client is connected when checkable; `False` otherwise.
|
|
491
|
+
"""
|
|
492
|
+
# Import here to avoid circular imports
|
|
493
|
+
try:
|
|
494
|
+
from mmrelay.meshtastic_utils import meshtastic_client, reconnecting
|
|
495
|
+
|
|
496
|
+
# Don't send during reconnection
|
|
497
|
+
if reconnecting:
|
|
498
|
+
logger.debug("Not sending - reconnecting is True")
|
|
499
|
+
return False
|
|
500
|
+
|
|
501
|
+
# Don't send if no client
|
|
502
|
+
if meshtastic_client is None:
|
|
503
|
+
logger.debug("Not sending - meshtastic_client is None")
|
|
504
|
+
return False
|
|
505
|
+
|
|
506
|
+
# Check if client is connected
|
|
507
|
+
if hasattr(meshtastic_client, "is_connected"):
|
|
508
|
+
is_conn = meshtastic_client.is_connected
|
|
509
|
+
if not (is_conn() if callable(is_conn) else is_conn):
|
|
510
|
+
logger.debug("Not sending - client not connected")
|
|
511
|
+
return False
|
|
512
|
+
|
|
513
|
+
logger.debug("Connection check passed - ready to send")
|
|
514
|
+
return True
|
|
515
|
+
|
|
516
|
+
except ImportError as e:
|
|
517
|
+
# ImportError indicates a serious problem with application structure,
|
|
518
|
+
# often during shutdown as modules are unloaded.
|
|
519
|
+
logger.critical(
|
|
520
|
+
f"Cannot import meshtastic_utils - serious application error: {e}. Stopping message queue."
|
|
521
|
+
)
|
|
522
|
+
# Stop asynchronously to avoid blocking the event loop thread.
|
|
523
|
+
threading.Thread(
|
|
524
|
+
target=self.stop, name="MessageQueueStopper", daemon=True
|
|
525
|
+
).start()
|
|
526
|
+
return False
|
|
527
|
+
|
|
528
|
+
async def _handle_message_mapping(self, result, mapping_info):
|
|
529
|
+
"""
|
|
530
|
+
Persist a mapping from a sent Meshtastic message to a Matrix event and optionally prune old mappings.
|
|
531
|
+
|
|
532
|
+
Stores a mapping when `mapping_info` contains `matrix_event_id`, `room_id`, and `text`, using `result.id` as the Meshtastic message id. If `mapping_info["msgs_to_keep"]` is present and greater than 0, prunes older mappings to retain that many entries; otherwise uses DEFAULT_MSGS_TO_KEEP.
|
|
533
|
+
|
|
534
|
+
Parameters:
|
|
535
|
+
result: An object returned by the send function with an `id` attribute representing the Meshtastic message id.
|
|
536
|
+
mapping_info (dict): Mapping details. Relevant keys:
|
|
537
|
+
- matrix_event_id (str): Matrix event ID to map to.
|
|
538
|
+
- room_id (str): Matrix room ID where the event was sent.
|
|
539
|
+
- text (str): Message text to associate with the mapping.
|
|
540
|
+
- meshnet (optional): Mesh network identifier to pass to storage.
|
|
541
|
+
- msgs_to_keep (optional, int): Number of mappings to retain when pruning.
|
|
542
|
+
"""
|
|
543
|
+
try:
|
|
544
|
+
# Import here to avoid circular imports
|
|
545
|
+
from mmrelay.db_utils import (
|
|
546
|
+
async_prune_message_map,
|
|
547
|
+
async_store_message_map,
|
|
548
|
+
)
|
|
549
|
+
|
|
550
|
+
# Extract mapping information
|
|
551
|
+
matrix_event_id = mapping_info.get("matrix_event_id")
|
|
552
|
+
room_id = mapping_info.get("room_id")
|
|
553
|
+
text = mapping_info.get("text")
|
|
554
|
+
meshnet = mapping_info.get("meshnet")
|
|
555
|
+
|
|
556
|
+
if matrix_event_id and room_id and text:
|
|
557
|
+
# Store the message mapping
|
|
558
|
+
await async_store_message_map(
|
|
559
|
+
result.id,
|
|
560
|
+
matrix_event_id,
|
|
561
|
+
room_id,
|
|
562
|
+
text,
|
|
563
|
+
meshtastic_meshnet=meshnet,
|
|
564
|
+
)
|
|
565
|
+
logger.debug(f"Stored message map for meshtastic_id: {result.id}")
|
|
566
|
+
|
|
567
|
+
# Handle pruning if configured
|
|
568
|
+
msgs_to_keep = mapping_info.get("msgs_to_keep", DEFAULT_MSGS_TO_KEEP)
|
|
569
|
+
if msgs_to_keep > 0:
|
|
570
|
+
await async_prune_message_map(msgs_to_keep)
|
|
571
|
+
|
|
572
|
+
except Exception:
|
|
573
|
+
logger.exception("Error handling message mapping")
|
|
574
|
+
|
|
575
|
+
|
|
576
|
+
# Module-level singleton shared by the convenience wrappers below.
_message_queue = MessageQueue()
|
|
578
|
+
|
|
579
|
+
|
|
580
|
+
def get_message_queue() -> MessageQueue:
    """
    Return the module-wide MessageQueue singleton used for rate-limited sending.

    Returns:
        MessageQueue: The shared queue instance.
    """
    shared = _message_queue
    return shared
|
|
585
|
+
|
|
586
|
+
|
|
587
|
+
def start_message_queue(message_delay: float = DEFAULT_MESSAGE_DELAY):
    """
    Start the global message queue processor.

    Parameters:
        message_delay (float): Minimum seconds between consecutive sends.
    """
    get_message_queue().start(message_delay)
|
|
595
|
+
|
|
596
|
+
|
|
597
|
+
def stop_message_queue():
    """
    Stop the global message queue processor.

    No further messages are processed until the queue is started again.
    """
    get_message_queue().stop()
|
|
602
|
+
|
|
603
|
+
|
|
604
|
+
def queue_message(
    send_function: Callable,
    *args,
    description: str = "",
    mapping_info: Optional[dict] = None,
    **kwargs,
) -> bool:
    """
    Queue a message for sending via the global message queue.

    Parameters:
        send_function (Callable): Function invoked to send the message.
        *args: Positional arguments forwarded to send_function.
        description (str, optional): Human-readable label for logging.
        mapping_info (dict, optional): Reply/reaction mapping metadata.
        **kwargs: Keyword arguments forwarded to send_function.

    Returns:
        bool: True when enqueued; False when the queue is not running or full.
    """
    return get_message_queue().enqueue(
        send_function,
        *args,
        description=description,
        mapping_info=mapping_info,
        **kwargs,
    )
|
|
629
|
+
|
|
630
|
+
|
|
631
|
+
def get_queue_status() -> dict:
    """
    Return a status snapshot of the global message queue.

    Returns:
        dict: Status fields: running, queue_size, message_delay,
        processor_task_active, last_send_time, time_since_last_send,
        in_flight, dropped_messages, default_msgs_to_keep.
    """
    return get_message_queue().get_status()
|