mmrelay 1.2.0-py3-none-any.whl → 1.2.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mmrelay might be problematic.

mmrelay/message_queue.py CHANGED
@@ -7,10 +7,13 @@ rate, respecting connection state and firmware constraints.
  """

  import asyncio
+ import contextlib
  import threading
  import time
+ from concurrent.futures import ThreadPoolExecutor
  from dataclasses import dataclass
- from queue import Empty, Queue
+ from functools import partial
+ from queue import Empty, Full, Queue
  from typing import Callable, Optional

  from mmrelay.constants.database import DEFAULT_MSGS_TO_KEEP
@@ -50,20 +53,40 @@ class MessageQueue:

  def __init__(self):
  """
- Initialize the MessageQueue with an empty queue, state variables, and a thread lock for safe operation.
+ Create a new MessageQueue, initializing its internal queue, timing and state variables, and a thread lock.
+
+ Attributes:
+ _queue (Queue): Bounded FIFO holding queued messages (maxsize=MAX_QUEUE_SIZE).
+ _processor_task (Optional[asyncio.Task]): Async task that processes the queue, created when started.
+ _running (bool): Whether the processor is active.
+ _lock (threading.Lock): Protects start/stop and other state transitions.
+ _last_send_time (float): Wall-clock timestamp of the last successful send.
+ _last_send_mono (float): Monotonic timestamp of the last successful send (used for rate limiting).
+ _message_delay (float): Minimum delay between sends; starts at DEFAULT_MESSAGE_DELAY and may be adjusted.
+ _executor (Optional[concurrent.futures.ThreadPoolExecutor]): Dedicated single-worker executor for blocking send operations (created on start).
+ _in_flight (bool): True while a message send is actively running in the executor.
+ _has_current (bool): True when there is a current message being processed (even if not yet dispatched to the executor).
  """
- self._queue = Queue()
+ self._queue = Queue(maxsize=MAX_QUEUE_SIZE)
  self._processor_task = None
  self._running = False
  self._lock = threading.Lock()
  self._last_send_time = 0.0
+ self._last_send_mono = 0.0
  self._message_delay = DEFAULT_MESSAGE_DELAY
+ self._executor = None # Dedicated ThreadPoolExecutor for this MessageQueue
+ self._in_flight = False
+ self._has_current = False
+ self._dropped_messages = 0

  def start(self, message_delay: float = DEFAULT_MESSAGE_DELAY):
  """
- Start the message queue processor with a specified minimum delay between messages.
+ Activate the message queue processor with a minimum inter-message delay.

- If the provided delay is below the firmware-enforced minimum, the minimum is used instead. The processor task is started immediately if the asyncio event loop is running; otherwise, startup is deferred until the event loop becomes available.
+ Enables processing, sets the configured message delay (raised to MINIMUM_MESSAGE_DELAY if a smaller value is provided), creates a dedicated ThreadPoolExecutor for send operations if one does not exist, and starts the asyncio processor task immediately when a running event loop is available; if no running loop is available startup is deferred until a loop is present.
+
+ Parameters:
+ message_delay (float): Requested minimum delay between sends, in seconds. Values below MINIMUM_MESSAGE_DELAY are replaced with MINIMUM_MESSAGE_DELAY.
  """
  with self._lock:
  if self._running:
@@ -79,10 +102,19 @@ class MessageQueue:
  self._message_delay = message_delay
  self._running = True

+ # Create dedicated executor for this MessageQueue
+ if self._executor is None:
+ self._executor = ThreadPoolExecutor(
+ max_workers=1, thread_name_prefix=f"MessageQueue-{id(self)}"
+ )
+
  # Start the processor in the event loop
  try:
- loop = asyncio.get_event_loop()
- if loop.is_running():
+ try:
+ loop = asyncio.get_running_loop()
+ except RuntimeError:
+ loop = None
+ if loop and loop.is_running():
  self._processor_task = loop.create_task(self._process_queue())
  logger.info(
  f"Message queue started with {self._message_delay}s message delay"
@@ -100,7 +132,14 @@ class MessageQueue:

  def stop(self):
  """
- Stops the message queue processor and cancels the processing task if active.
+ Stop the message queue processor and clean up internal resources.
+
+ Cancels the background processor task (if running) and attempts to wait for it to finish on the task's owning event loop without blocking the caller's event loop. Shuts down the dedicated ThreadPoolExecutor used for blocking I/O; when called from an asyncio event loop the executor shutdown is performed on a background thread to avoid blocking. Clears internal state flags and resources so the queue can be restarted later.
+
+ Notes:
+ - This method is thread-safe.
+ - It may block briefly (the implementation waits up to ~1 second when awaiting task completion) but will avoid blocking the current asyncio event loop when possible.
+ - No exceptions are propagated for normal cancellation/shutdown paths; internal exceptions during shutdown are suppressed.
  """
  with self._lock:
  if not self._running:
@@ -110,8 +149,64 @@ class MessageQueue:

  if self._processor_task:
  self._processor_task.cancel()
+
+ # Wait for the task to complete on its owning loop
+ task_loop = self._processor_task.get_loop()
+ current_loop = None
+ with contextlib.suppress(RuntimeError):
+ current_loop = asyncio.get_running_loop()
+ if task_loop.is_closed():
+ # Owning loop is closed; nothing we can do to await it
+ pass
+ elif current_loop is task_loop:
+ # Avoid blocking the event loop thread; cancellation will finish naturally
+ pass
+ elif task_loop.is_running():
+ from asyncio import run_coroutine_threadsafe, shield
+
+ with contextlib.suppress(Exception):
+ fut = run_coroutine_threadsafe(
+ shield(self._processor_task), task_loop
+ )
+ # Wait for completion; ignore exceptions raised due to cancellation
+ fut.result(timeout=1.0)
+ else:
+ with contextlib.suppress(
+ asyncio.CancelledError, RuntimeError, Exception
+ ):
+ task_loop.run_until_complete(self._processor_task)
+
  self._processor_task = None

+ # Shut down our dedicated executor without blocking the event loop
+ if self._executor:
+ on_loop_thread = False
+ with contextlib.suppress(RuntimeError):
+ loop_chk = asyncio.get_running_loop()
+ on_loop_thread = loop_chk.is_running()
+
+ def _shutdown(exec_ref):
+ """
+ Shut down an executor, waiting for running tasks to finish; falls back for executors that don't support `cancel_futures`.
+
+ Attempts to call executor.shutdown(wait=True, cancel_futures=True) and, if that raises a TypeError (older Python versions or executors without the `cancel_futures` parameter), retries with executor.shutdown(wait=True). This call blocks until shutdown completes.
+ """
+ try:
+ exec_ref.shutdown(wait=True, cancel_futures=True)
+ except TypeError:
+ exec_ref.shutdown(wait=True)
+
+ if on_loop_thread:
+ threading.Thread(
+ target=_shutdown,
+ args=(self._executor,),
+ name="MessageQueueExecutorShutdown",
+ daemon=True,
+ ).start()
+ else:
+ _shutdown(self._executor)
+ self._executor = None
+
  logger.info("Message queue stopped")

  def enqueue(
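Note: with the changes above, stop() is intended to be safe both from a plain thread and from inside the running event loop (executor shutdown is pushed to a background thread in the latter case). A minimal lifecycle sketch under that assumption, using only the methods shown in this diff; the send callable is a placeholder:

    import asyncio
    from mmrelay.message_queue import MessageQueue

    def fake_send(text):
        # Placeholder for a blocking Meshtastic send call.
        print(f"sending: {text}")
        return object()  # a non-None result avoids the "returned None" warning

    async def main():
        queue = MessageQueue()
        queue.start(message_delay=2.0)            # creates the executor and processor task
        queue.enqueue(fake_send, "hello mesh", description="demo message")
        await queue.drain(timeout=10)             # drain() is added later in this diff
        queue.stop()                              # called from inside the loop

    asyncio.run(main())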
@@ -119,18 +214,20 @@ class MessageQueue:
  send_function: Callable,
  *args,
  description: str = "",
- mapping_info: dict = None,
+ mapping_info: Optional[dict] = None,
  **kwargs,
  ) -> bool:
  """
- Adds a message to the queue for rate-limited, ordered sending.
+ Enqueue a message for ordered, rate-limited sending.
+
+ Ensures the queue processor is started (if an event loop is available) and attempts to add a QueuedMessage (containing the provided send function and its arguments) to the bounded in-memory queue. If the queue is not running or has reached capacity the message is not added and the method returns False. Optionally attach mapping_info metadata (used later to correlate sent messages with external IDs).

  Parameters:
- send_function (Callable): The function to call to send the message.
- *args: Positional arguments for the send function.
- description (str, optional): Human-readable description for logging purposes.
- mapping_info (dict, optional): Optional metadata for message mapping (e.g., replies or reactions).
- **kwargs: Keyword arguments for the send function.
+ send_function (Callable): Callable to execute when the message is sent.
+ *args: Positional arguments to pass to send_function.
+ description (str, optional): Human-readable description used for logging.
+ mapping_info (dict | None, optional): Optional metadata to record after a successful send.
+ **kwargs: Keyword arguments to pass to send_function.

  Returns:
  bool: True if the message was successfully enqueued; False if the queue is not running or is full.
@@ -142,16 +239,9 @@ class MessageQueue:
  with self._lock:
  if not self._running:
  # Refuse to send to prevent blocking the event loop
- logger.error(f"Queue not running, cannot send message: {description}")
  logger.error(
- "Application is in invalid state - message queue should be started before sending messages"
- )
- return False
-
- # Check queue size to prevent memory issues
- if self._queue.qsize() >= MAX_QUEUE_SIZE:
- logger.warning(
- f"Message queue full ({self._queue.qsize()}/{MAX_QUEUE_SIZE}), dropping message: {description}"
+ "Queue not running; cannot send message: %s. Start the message queue before sending.",
+ description,
  )
  return False

@@ -163,8 +253,15 @@ class MessageQueue:
  description=description,
  mapping_info=mapping_info,
  )
-
- self._queue.put(message)
+ # Enforce capacity via bounded queue
+ try:
+ self._queue.put_nowait(message)
+ except Full:
+ logger.warning(
+ f"Message queue full ({self._queue.qsize()}/{MAX_QUEUE_SIZE}), dropping message: {description}"
+ )
+ self._dropped_messages += 1
+ return False
  # Only log queue status when there are multiple messages
  queue_size = self._queue.qsize()
  if queue_size >= 2:
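Note: capacity enforcement moves from a manual qsize() check at enqueue time to a bounded Queue plus put_nowait()/Full, which avoids the check-then-act race between qsize() and put(). The pattern in isolation (the constant value is illustrative; mmrelay uses its own MAX_QUEUE_SIZE):

    from queue import Full, Queue

    MAX_ITEMS = 50          # illustrative capacity
    _queue = Queue(maxsize=MAX_ITEMS)
    _dropped = 0

    def offer(item) -> bool:
        # Non-blocking enqueue that drops (and counts) items when the queue is full.
        global _dropped
        try:
            _queue.put_nowait(item)
            return True
        except Full:
            _dropped += 1
            return False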
@@ -190,10 +287,21 @@ class MessageQueue:

  def get_status(self) -> dict:
  """
- Return a dictionary with the current status of the message queue, including running state, queue size, message delay, processor activity, last send time, and time since last send.
+ Return current status of the message queue.
+
+ Provides a snapshot useful for monitoring and debugging.

  Returns:
- dict: Status information about the message queue for debugging and monitoring.
+ dict: Mapping with the following keys:
+ - running (bool): Whether the queue processor is active.
+ - queue_size (int): Number of messages currently queued.
+ - message_delay (float): Configured minimum delay (seconds) between sends.
+ - processor_task_active (bool): True if the internal processor task exists and is not finished.
+ - last_send_time (float or None): Wall-clock time (seconds since the epoch) of the last successful send, or None if no send has occurred.
+ - time_since_last_send (float or None): Seconds elapsed since last_send_time, or None if no send has occurred.
+ - in_flight (bool): True when a message is currently being sent.
+ - dropped_messages (int): Number of messages dropped due to queue being full.
+ - default_msgs_to_keep (int): Default retention setting for message mappings.
  """
  return {
  "running": self._running,
@@ -203,10 +311,30 @@ class MessageQueue:
  and not self._processor_task.done(),
  "last_send_time": self._last_send_time,
  "time_since_last_send": (
- time.time() - self._last_send_time if self._last_send_time > 0 else None
+ time.monotonic() - self._last_send_mono
+ if self._last_send_mono > 0
+ else None
  ),
+ "in_flight": self._in_flight,
+ "dropped_messages": getattr(self, "_dropped_messages", 0),
+ "default_msgs_to_keep": DEFAULT_MSGS_TO_KEEP,
  }

+ async def drain(self, timeout: Optional[float] = None) -> bool:
+ """
+ Asynchronously wait until the queue has fully drained (no queued messages and no in-flight or current message) or until an optional timeout elapses.
+
+ If `timeout` is provided, it is interpreted in seconds. Returns True when the queue is empty and there are no messages being processed; returns False if the queue was stopped before draining or the timeout was reached.
+ """
+ deadline = (time.monotonic() + timeout) if timeout is not None else None
+ while (not self._queue.empty()) or self._in_flight or self._has_current:
+ if not self._running:
+ return False
+ if deadline is not None and time.monotonic() > deadline:
+ return False
+ await asyncio.sleep(0.1)
+ return True
+
  def ensure_processor_started(self):
  """
  Start the queue processor task if the queue is running and no processor task exists.
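Note: drain() is a simple poll against a monotonic deadline rather than an event or condition variable, and the same pattern can wait on any predicate. A generic sketch (the helper name is illustrative, not part of mmrelay):

    import asyncio
    import time

    async def wait_for(predicate, timeout=None, interval=0.1) -> bool:
        # Poll predicate() until it returns True; give up and return False once the
        # optional timeout (seconds, measured with time.monotonic()) expires.
        deadline = (time.monotonic() + timeout) if timeout is not None else None
        while not predicate():
            if deadline is not None and time.monotonic() > deadline:
                return False
            await asyncio.sleep(interval)
        return True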
@@ -216,21 +344,20 @@ class MessageQueue:
  with self._lock:
  if self._running and self._processor_task is None:
  try:
- loop = asyncio.get_event_loop()
- if loop.is_running():
- self._processor_task = loop.create_task(self._process_queue())
- logger.info(
- f"Message queue processor started with {self._message_delay}s message delay"
- )
+ loop = asyncio.get_running_loop()
  except RuntimeError:
- # Still no event loop available
- pass
+ loop = None
+ if loop and loop.is_running():
+ self._processor_task = loop.create_task(self._process_queue())
+ logger.info(
+ f"Message queue processor started with {self._message_delay}s message delay"
+ )

  async def _process_queue(self):
  """
- Asynchronously processes messages from the queue, sending each in order while enforcing rate limiting and connection readiness.
+ Process queued messages in FIFO order, sending each when the connection is ready and the configured inter-message delay has elapsed.

- This method runs as a background task, monitoring the queue, waiting for the connection to be ready, and ensuring a minimum delay between sends. Messages are sent using their provided callable, and optional message mapping is handled after successful sends. The processor logs queue depth warnings, handles errors gracefully, and maintains FIFO order even when waiting for connection or rate limits.
+ This background coroutine continuously pulls QueuedMessage items from the internal queue and executes their send_function in the configured executor, enforcing rate limiting (_message_delay) and checking connection/readiness via _should_send_message. After a successful send, it updates last-send timestamps and optionally persists message mapping information via _handle_message_mapping when mapping_info is present and the send result exposes an `id`. The coroutine exits when the queue is stopped or when cancelled; cancellation may drop an in-flight message, which will be logged.
  """
  logger.debug("Message queue processor started")
  current_message = None
@@ -253,6 +380,7 @@ class MessageQueue:
  # Get next message (non-blocking)
  try:
  current_message = self._queue.get_nowait()
+ self._has_current = True
  except Empty:
  # No messages, wait a bit and continue
  await asyncio.sleep(0.1)
@@ -268,8 +396,8 @@ class MessageQueue:
  continue

  # Check if we need to wait for message delay (only if we've sent before)
- if self._last_send_time > 0:
- time_since_last = time.time() - self._last_send_time
+ if self._last_send_mono > 0:
+ time_since_last = time.monotonic() - self._last_send_mono
  if time_since_last < self._message_delay:
  wait_time = self._message_delay - time_since_last
  logger.debug(
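Note: rate limiting now keys off time.monotonic() instead of time.time(), so NTP corrections or manual clock changes cannot shorten or stretch the enforced gap between sends. The delay computation in isolation (values are illustrative):

    import time

    MESSAGE_DELAY = 2.0      # seconds between sends; illustrative value
    last_send_mono = 0.0     # 0.0 means nothing has been sent yet

    def seconds_until_next_send() -> float:
        # How long to sleep before the next send under a monotonic rate limit.
        if last_send_mono <= 0:
            return 0.0
        elapsed = time.monotonic() - last_send_mono
        return max(0.0, MESSAGE_DELAY - elapsed)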
@@ -280,20 +408,27 @@ class MessageQueue:

  # Send the message
  try:
+ self._in_flight = True
  logger.debug(
  f"Sending queued message: {current_message.description}"
  )
  # Run synchronous Meshtastic I/O operations in executor to prevent blocking event loop
- # Use lambda with default arguments to properly capture loop variables
- result = await asyncio.get_running_loop().run_in_executor(
- None,
- lambda msg=current_message: msg.send_function(
- *msg.args, **msg.kwargs
+ loop = asyncio.get_running_loop()
+ exec_ref = self._executor
+ if exec_ref is None:
+ raise RuntimeError("MessageQueue executor is not initialized")
+ result = await loop.run_in_executor(
+ exec_ref,
+ partial(
+ current_message.send_function,
+ *current_message.args,
+ **current_message.kwargs,
  ),
  )

  # Update last send time
  self._last_send_time = time.time()
+ self._last_send_mono = time.monotonic()

  if result is None:
  logger.warning(
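Note: the blocking send now runs on the queue's own single-worker ThreadPoolExecutor via functools.partial, instead of a lambda on the default executor. Binding the arguments up front with partial avoids late-binding surprises and keeps all sends serialized on one thread. A self-contained sketch of the pattern (the send function is a stand-in):

    import asyncio
    from concurrent.futures import ThreadPoolExecutor
    from functools import partial

    def blocking_send(text, channel=0):
        # Stand-in for a synchronous Meshtastic send call.
        return f"sent {text!r} on channel {channel}"

    async def main():
        executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="demo-sender")
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(
            executor, partial(blocking_send, "hello", channel=2)
        )
        print(result)
        executor.shutdown(wait=True)

    asyncio.run(main())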
@@ -318,6 +453,8 @@ class MessageQueue:
  # Mark task as done and clear current message
  self._queue.task_done()
  current_message = None
+ self._in_flight = False
+ self._has_current = False

  except asyncio.CancelledError:
  logger.debug("Message queue processor cancelled")
@@ -325,17 +462,22 @@ class MessageQueue:
  logger.warning(
  f"Message in flight was dropped during shutdown: {current_message.description}"
  )
+ with contextlib.suppress(Exception):
+ self._queue.task_done()
+ self._in_flight = False
+ self._has_current = False
  break
- except Exception as e:
- logger.error(f"Error in message queue processor: {e}")
+ except Exception:
+ logger.exception("Error in message queue processor")
  await asyncio.sleep(1.0) # Prevent tight error loop

  def _should_send_message(self) -> bool:
  """
- Determine whether it is currently safe to send a message based on Meshtastic client connection and reconnection state.
-
- Returns:
- bool: True if the client is connected and not reconnecting; False otherwise.
+ Return True if it is currently safe to send a message via Meshtastic.
+
+ Performs runtime checks: ensures the global reconnecting flag is not set, a Meshtastic client object is available, and — if the client exposes `is_connected` (callable or boolean) — that it reports connected. Returns False if any check fails.
+
+ If importing the Meshtastic utilities raises ImportError, the method will asynchronously stop this MessageQueue and return False.
  """
  # Import here to avoid circular imports
  try:
@@ -367,18 +509,26 @@ class MessageQueue:
  logger.critical(
  f"Cannot import meshtastic_utils - serious application error: {e}. Stopping message queue."
  )
- self.stop()
+ # Stop asynchronously to avoid blocking the event loop thread.
+ threading.Thread(
+ target=self.stop, name="MessageQueueStopper", daemon=True
+ ).start()
  return False

  def _handle_message_mapping(self, result, mapping_info):
  """
- Update the message mapping database with information about a sent message and prune old mappings if configured.
-
+ Persist a sent message mapping (mesh message id → Matrix event) and optionally prune old mappings.
+
+ If mapping_info contains 'matrix_event_id', 'room_id', and 'text', stores a mapping using result.id as the mesh message id. If 'msgs_to_keep' is present and > 0 it prunes older mappings to retain that many entries; otherwise DEFAULT_MSGS_TO_KEEP is used.
+
  Parameters:
- result: The result object from the send function, expected to have an `id` attribute.
- mapping_info (dict): Contains mapping details such as `matrix_event_id`, `room_id`, `text`, and optionally `meshnet` and `msgs_to_keep`.
-
- If required mapping fields are present, stores the mapping and prunes old entries based on the specified or default retention count.
+ result: Send function result object with an `id` attribute (the mesh message id).
+ mapping_info (dict): Mapping details. Relevant keys:
+ - matrix_event_id (str)
+ - room_id (str)
+ - text (str)
+ - meshnet (optional): passed to the store operation
+ - msgs_to_keep (optional, int): number of mappings to retain for pruning
  """
  try:
  # Import here to avoid circular imports
@@ -406,8 +556,8 @@ class MessageQueue:
  if msgs_to_keep > 0:
  prune_message_map(msgs_to_keep)

- except Exception as e:
- logger.error(f"Error handling message mapping: {e}")
+ except Exception:
+ logger.exception("Error handling message mapping")


  # Global message queue instance
@@ -442,7 +592,7 @@ def queue_message(
  send_function: Callable,
  *args,
  description: str = "",
- mapping_info: dict = None,
+ mapping_info: Optional[dict] = None,
  **kwargs,
  ) -> bool:
  """