mmrelay 1.1.2__py3-none-any.whl → 1.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mmrelay might be problematic.

@@ -340,7 +340,7 @@ async def reconnect():
 
 def on_meshtastic_message(packet, interface):
     """
-    Process incoming Meshtastic messages and relay them to Matrix rooms or plugins according to message type and configuration.
+    Processes incoming Meshtastic messages and relays them to Matrix rooms or plugins based on message type and configuration.
 
     Handles reactions and replies by relaying them to Matrix if enabled. Normal text messages are relayed to all mapped Matrix rooms unless handled by a plugin or directed to the relay node. Non-text messages are passed to plugins for processing. Messages from unmapped channels or disabled detection sensors are ignored. Ensures sender information is retrieved or stored as needed.
     """
@@ -432,11 +432,16 @@ def on_meshtastic_message(packet, interface):
                 else meshtastic_text
             )
 
-            # Ensure that meshnet_name is always included, using our own meshnet for accuracy.
-            full_display_name = f"{longname}/{meshnet_name}"
+            # Import the matrix prefix function
+            from mmrelay.matrix_utils import get_matrix_prefix
+
+            # Get the formatted prefix for the reaction
+            prefix = get_matrix_prefix(config, longname, shortname, meshnet_name)
 
             reaction_symbol = text.strip() if (text and text.strip()) else "⚠️"
-            reaction_message = f'\n [{full_display_name}] reacted {reaction_symbol} to "{abbreviated_text}"'
+            reaction_message = (
+                f'\n {prefix}reacted {reaction_symbol} to "{abbreviated_text}"'
+            )
 
             # Relay the reaction as emote to Matrix, preserving the original meshnet name
             asyncio.run_coroutine_threadsafe(
@@ -469,9 +474,12 @@ def on_meshtastic_message(packet, interface):
             # orig = (matrix_event_id, matrix_room_id, meshtastic_text, meshtastic_meshnet)
             matrix_event_id, matrix_room_id, meshtastic_text, meshtastic_meshnet = orig
 
-            # Format the reply message for Matrix
-            full_display_name = f"{longname}/{meshnet_name}"
-            formatted_message = f"[{full_display_name}]: {text}"
+            # Import the matrix prefix function
+            from mmrelay.matrix_utils import get_matrix_prefix
+
+            # Get the formatted prefix for the reply
+            prefix = get_matrix_prefix(config, longname, shortname, meshnet_name)
+            formatted_message = f"{prefix}{text}"
 
             logger.info(f"Relaying Meshtastic reply from {longname} to Matrix")
 
@@ -562,7 +570,12 @@ def on_meshtastic_message(packet, interface):
     if not shortname:
         shortname = str(sender)
 
-    formatted_message = f"[{longname}/{meshnet_name}]: {text}"
+    # Import the matrix prefix function
+    from mmrelay.matrix_utils import get_matrix_prefix
+
+    # Get the formatted prefix
+    prefix = get_matrix_prefix(config, longname, shortname, meshnet_name)
+    formatted_message = f"{prefix}{text}"
 
     # Plugin functionality - Check if any plugin handles this message before relaying
     from mmrelay.plugin_loader import load_plugins
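
All three hunks above replace hard-coded "[longname/meshnet]" formatting with a call to get_matrix_prefix from mmrelay.matrix_utils, so the prefix becomes one configurable function instead of three separately built f-strings. That helper's implementation is not part of this diff; purely as a hypothetical sketch of a prefix builder that would satisfy these call sites (the real function and whatever config keys it reads may differ):

# Hypothetical sketch only - not the code shipped in mmrelay.matrix_utils.
def get_matrix_prefix(config, longname, shortname, meshnet_name):
    # "prefix_enabled" is an assumed option name; the actual keys are not shown in this diff.
    matrix_cfg = (config or {}).get("matrix", {})
    if not matrix_cfg.get("prefix_enabled", True):
        return ""
    # Default shape mirrors the formatting the removed lines produced.
    return f"[{longname}/{meshnet_name}]: "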
@@ -0,0 +1,475 @@
+"""
+Message queue system for MMRelay.
+
+Provides transparent message queuing with rate limiting to prevent overwhelming
+the Meshtastic network. Messages are queued in memory and sent at the configured
+rate, respecting connection state and firmware constraints.
+"""
+
+import asyncio
+import threading
+import time
+from dataclasses import dataclass
+from queue import Empty, Queue
+from typing import Callable, Optional
+
+from mmrelay.log_utils import get_logger
+
+logger = get_logger(name="MessageQueue")
+
+# Default message delay in seconds (minimum 2.0 due to firmware constraints)
+DEFAULT_MESSAGE_DELAY = 2.2
+
+# Queue size configuration
+MAX_QUEUE_SIZE = 100
+QUEUE_HIGH_WATER_MARK = 75  # 75% of MAX_QUEUE_SIZE
+QUEUE_MEDIUM_WATER_MARK = 50  # 50% of MAX_QUEUE_SIZE
+
+
+@dataclass
+class QueuedMessage:
+    """Represents a message in the queue with metadata."""
+
+    timestamp: float
+    send_function: Callable
+    args: tuple
+    kwargs: dict
+    description: str
+    # Optional message mapping information for replies/reactions
+    mapping_info: Optional[dict] = None
+
+
+class MessageQueue:
+    """
+    Simple FIFO message queue with rate limiting for Meshtastic messages.
+
+    Queues messages in memory and sends them in order at the configured rate to prevent
+    overwhelming the mesh network. Respects connection state and automatically
+    pauses during reconnections.
+    """
+
+    def __init__(self):
+        """
+        Initialize the MessageQueue with an empty queue, state variables, and a thread lock for safe operation.
+        """
+        self._queue = Queue()
+        self._processor_task = None
+        self._running = False
+        self._lock = threading.Lock()
+        self._last_send_time = 0.0
+        self._message_delay = DEFAULT_MESSAGE_DELAY
+
+    def start(self, message_delay: float = DEFAULT_MESSAGE_DELAY):
+        """
+        Starts the message queue processor with the specified minimum delay between messages.
+
+        Enforces a minimum delay of 2.0 seconds due to firmware requirements. If the event loop is running, the processor task is started immediately; otherwise, startup is deferred until the event loop becomes available.
+        """
+        with self._lock:
+            if self._running:
+                return
+
+            # Validate and enforce firmware minimum
+            if message_delay < 2.0:
+                logger.warning(
+                    f"Message delay {message_delay}s below firmware minimum (2.0s), using 2.0s"
+                )
+                self._message_delay = 2.0
+            else:
+                self._message_delay = message_delay
+            self._running = True
+
+            # Start the processor in the event loop
+            try:
+                loop = asyncio.get_event_loop()
+                if loop.is_running():
+                    self._processor_task = loop.create_task(self._process_queue())
+                    logger.info(
+                        f"Message queue started with {self._message_delay}s message delay"
+                    )
+                else:
+                    # Event loop exists but not running yet, defer startup
+                    logger.debug(
+                        "Event loop not running yet, will start processor later"
+                    )
+            except RuntimeError:
+                # No event loop running, will start when one is available
+                logger.debug(
+                    "No event loop available, queue processor will start later"
+                )
+
+    def stop(self):
+        """
+        Stops the message queue processor and cancels the processing task if active.
+        """
+        with self._lock:
+            if not self._running:
+                return
+
+            self._running = False
+
+            if self._processor_task:
+                self._processor_task.cancel()
+                self._processor_task = None
+
+            logger.info("Message queue stopped")
+
+    def enqueue(
+        self,
+        send_function: Callable,
+        *args,
+        description: str = "",
+        mapping_info: dict = None,
+        **kwargs,
+    ) -> bool:
+        """
+        Adds a message to the queue for rate-limited, ordered sending.
+
+        Parameters:
+            send_function (Callable): The function to call to send the message.
+            *args: Positional arguments for the send function.
+            description (str, optional): Human-readable description for logging purposes.
+            mapping_info (dict, optional): Optional metadata for message mapping (e.g., replies or reactions).
+            **kwargs: Keyword arguments for the send function.
+
+        Returns:
+            bool: True if the message was successfully enqueued; False if the queue is not running or is full.
+        """
+        # Ensure processor is started if event loop is now available.
+        # This is called outside the lock to prevent potential deadlocks.
+        self.ensure_processor_started()
+
+        with self._lock:
+            if not self._running:
+                # Refuse to send to prevent blocking the event loop
+                logger.error(f"Queue not running, cannot send message: {description}")
+                logger.error(
+                    "Application is in invalid state - message queue should be started before sending messages"
+                )
+                return False
+
+            # Check queue size to prevent memory issues
+            if self._queue.qsize() >= MAX_QUEUE_SIZE:
+                logger.warning(
+                    f"Message queue full ({self._queue.qsize()}/{MAX_QUEUE_SIZE}), dropping message: {description}"
+                )
+                return False
+
+            message = QueuedMessage(
+                timestamp=time.time(),
+                send_function=send_function,
+                args=args,
+                kwargs=kwargs,
+                description=description,
+                mapping_info=mapping_info,
+            )
+
+            self._queue.put(message)
+            # Only log queue status when there are multiple messages
+            queue_size = self._queue.qsize()
+            if queue_size >= 2:
+                logger.debug(
+                    f"Queued message ({queue_size}/{MAX_QUEUE_SIZE}): {description}"
+                )
+            return True
+
+    def get_queue_size(self) -> int:
+        """
+        Return the number of messages currently in the queue.
+
+        Returns:
+            int: The current queue size.
+        """
+        return self._queue.qsize()
+
+    def is_running(self) -> bool:
+        """
+        Return whether the message queue processor is currently active.
+        """
+        return self._running
+
+    def get_status(self) -> dict:
+        """
+        Return a dictionary with the current status of the message queue, including running state, queue size, message delay, processor activity, last send time, and time since last send.
+
+        Returns:
+            dict: Status information about the message queue for debugging and monitoring.
+        """
+        return {
+            "running": self._running,
+            "queue_size": self._queue.qsize(),
+            "message_delay": self._message_delay,
+            "processor_task_active": self._processor_task is not None
+            and not self._processor_task.done(),
+            "last_send_time": self._last_send_time,
+            "time_since_last_send": (
+                time.time() - self._last_send_time if self._last_send_time > 0 else None
+            ),
+        }
+
+    def ensure_processor_started(self):
+        """
+        Start the queue processor task if the queue is running and no processor task exists.
+
+        This method checks if the queue is active and, if so, attempts to create and start the asynchronous processor task within the current event loop.
+        """
+        with self._lock:
+            if self._running and self._processor_task is None:
+                try:
+                    loop = asyncio.get_event_loop()
+                    if loop.is_running():
+                        self._processor_task = loop.create_task(self._process_queue())
+                        logger.info(
+                            f"Message queue processor started with {self._message_delay}s message delay"
+                        )
+                except RuntimeError:
+                    # Still no event loop available
+                    pass
+
+    async def _process_queue(self):
+        """
+        Asynchronously processes messages from the queue, sending each in order while enforcing rate limiting and connection readiness.
+
+        This method runs as a background task, monitoring the queue, waiting for the connection to be ready, and ensuring a minimum delay between sends. Messages are sent using their provided callable, and optional message mapping is handled after successful sends. The processor logs queue depth warnings, handles errors gracefully, and maintains FIFO order even when waiting for connection or rate limits.
+        """
+        logger.debug("Message queue processor started")
+        current_message = None
+
+        while self._running:
+            try:
+                # Get next message if we don't have one waiting
+                if current_message is None:
+                    # Monitor queue depth for operational awareness
+                    queue_size = self._queue.qsize()
+                    if queue_size > QUEUE_HIGH_WATER_MARK:
+                        logger.warning(
+                            f"Queue depth high: {queue_size} messages pending"
+                        )
+                    elif queue_size > QUEUE_MEDIUM_WATER_MARK:
+                        logger.info(
+                            f"Queue depth moderate: {queue_size} messages pending"
+                        )
+
+                    # Get next message (non-blocking)
+                    try:
+                        current_message = self._queue.get_nowait()
+                    except Empty:
+                        # No messages, wait a bit and continue
+                        await asyncio.sleep(0.1)
+                        continue
+
+                # Check if we should send (connection state, etc.)
+                if not self._should_send_message():
+                    # Keep the message and wait - don't requeue to maintain FIFO order
+                    logger.debug(
+                        f"Connection not ready, waiting to send: {current_message.description}"
+                    )
+                    await asyncio.sleep(1.0)
+                    continue
+
+                # Check if we need to wait for message delay (only if we've sent before)
+                if self._last_send_time > 0:
+                    time_since_last = time.time() - self._last_send_time
+                    if time_since_last < self._message_delay:
+                        wait_time = self._message_delay - time_since_last
+                        logger.debug(
+                            f"Rate limiting: waiting {wait_time:.1f}s before sending"
+                        )
+                        await asyncio.sleep(wait_time)
+                        continue
+
+                # Send the message
+                try:
+                    logger.debug(
+                        f"Sending queued message: {current_message.description}"
+                    )
+                    # Run synchronous Meshtastic I/O operations in executor to prevent blocking event loop
+                    # Use lambda with default arguments to properly capture loop variables
+                    result = await asyncio.get_running_loop().run_in_executor(
+                        None,
+                        lambda msg=current_message: msg.send_function(
+                            *msg.args, **msg.kwargs
+                        ),
+                    )
+
+                    # Update last send time
+                    self._last_send_time = time.time()
+
+                    if result is None:
+                        logger.warning(
+                            f"Message send returned None: {current_message.description}"
+                        )
+                    else:
+                        logger.debug(
+                            f"Successfully sent queued message: {current_message.description}"
+                        )
+
+                    # Handle message mapping if provided
+                    if current_message.mapping_info and hasattr(result, "id"):
+                        self._handle_message_mapping(
+                            result, current_message.mapping_info
+                        )
+
+                except Exception as e:
+                    logger.error(
+                        f"Error sending queued message '{current_message.description}': {e}"
+                    )
+
+                # Mark task as done and clear current message
+                self._queue.task_done()
+                current_message = None
+
+            except asyncio.CancelledError:
+                logger.debug("Message queue processor cancelled")
+                if current_message:
+                    logger.warning(
+                        f"Message in flight was dropped during shutdown: {current_message.description}"
+                    )
+                break
+            except Exception as e:
+                logger.error(f"Error in message queue processor: {e}")
+                await asyncio.sleep(1.0)  # Prevent tight error loop
+
+    def _should_send_message(self) -> bool:
+        """
+        Determine whether it is currently safe to send a message based on Meshtastic client connection and reconnection state.
+
+        Returns:
+            bool: True if the client is connected and not reconnecting; False otherwise.
+        """
+        # Import here to avoid circular imports
+        try:
+            from mmrelay.meshtastic_utils import meshtastic_client, reconnecting
+
+            # Don't send during reconnection
+            if reconnecting:
+                logger.debug("Not sending - reconnecting is True")
+                return False
+
+            # Don't send if no client
+            if meshtastic_client is None:
+                logger.debug("Not sending - meshtastic_client is None")
+                return False
+
+            # Check if client is connected
+            if hasattr(meshtastic_client, "is_connected"):
+                is_conn = meshtastic_client.is_connected
+                if not (is_conn() if callable(is_conn) else is_conn):
+                    logger.debug("Not sending - client not connected")
+                    return False
+
+            logger.debug("Connection check passed - ready to send")
+            return True
+
+        except ImportError as e:
+            # ImportError indicates a serious problem with application structure,
+            # often during shutdown as modules are unloaded.
+            logger.critical(
+                f"Cannot import meshtastic_utils - serious application error: {e}. Stopping message queue."
+            )
+            self.stop()
+            return False
+
+    def _handle_message_mapping(self, result, mapping_info):
+        """
+        Stores and prunes message mapping information after a message is sent.
+
+        Parameters:
+            result: The result object from the send function, expected to have an `id` attribute.
+            mapping_info (dict): Dictionary containing mapping details such as `matrix_event_id`, `room_id`, `text`, and optional `meshnet` and `msgs_to_keep`.
+
+        This method updates the message mapping database with the new mapping and prunes old mappings if configured.
+        """
+        try:
+            # Import here to avoid circular imports
+            from mmrelay.db_utils import prune_message_map, store_message_map
+
+            # Extract mapping information
+            matrix_event_id = mapping_info.get("matrix_event_id")
+            room_id = mapping_info.get("room_id")
+            text = mapping_info.get("text")
+            meshnet = mapping_info.get("meshnet")
+
+            if matrix_event_id and room_id and text:
+                # Store the message mapping
+                store_message_map(
+                    result.id,
+                    matrix_event_id,
+                    room_id,
+                    text,
+                    meshtastic_meshnet=meshnet,
+                )
+                logger.debug(f"Stored message map for meshtastic_id: {result.id}")
+
+                # Handle pruning if configured
+                msgs_to_keep = mapping_info.get("msgs_to_keep", 500)
+                if msgs_to_keep > 0:
+                    prune_message_map(msgs_to_keep)
+
+        except Exception as e:
+            logger.error(f"Error handling message mapping: {e}")
+
+
+# Global message queue instance
+_message_queue = MessageQueue()
+
+
+def get_message_queue() -> MessageQueue:
+    """
+    Return the global instance of the message queue used for managing and rate-limiting message sending.
+    """
+    return _message_queue
+
+
+def start_message_queue(message_delay: float = DEFAULT_MESSAGE_DELAY):
+    """
+    Start the global message queue processor with the given minimum delay between messages.
+
+    Parameters:
+        message_delay (float): Minimum number of seconds to wait between sending messages.
+    """
+    _message_queue.start(message_delay)
+
+
+def stop_message_queue():
+    """
+    Stops the global message queue processor, preventing further message processing until restarted.
+    """
+    _message_queue.stop()
+
+
+def queue_message(
+    send_function: Callable,
+    *args,
+    description: str = "",
+    mapping_info: dict = None,
+    **kwargs,
+) -> bool:
+    """
+    Enqueues a message for sending via the global message queue.
+
+    Parameters:
+        send_function (Callable): The function to execute for sending the message.
+        description (str, optional): Human-readable description of the message for logging purposes.
+        mapping_info (dict, optional): Additional metadata for message mapping, such as reply or reaction information.
+
+    Returns:
+        bool: True if the message was successfully enqueued; False if the queue is not running or full.
+    """
+    return _message_queue.enqueue(
+        send_function,
+        *args,
+        description=description,
+        mapping_info=mapping_info,
+        **kwargs,
+    )
+
+
+def get_queue_status() -> dict:
+    """
+    Return detailed status information about the global message queue.
+
+    Returns:
+        dict: A dictionary containing the running state, queue size, message delay, processor task activity, last send time, and time since last send.
+    """
+    return _message_queue.get_status()
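
The new module above exposes both the MessageQueue class and module-level helpers (queue_message, start_message_queue, stop_message_queue, get_queue_status) around a single global instance. A minimal usage sketch follows; it assumes the file ships as mmrelay.message_queue (the file header is not shown in this diff) and that a Meshtastic TCP interface is reachable at a placeholder address. Inside mmrelay itself the relay wires this up during startup.

# Usage sketch under the stated assumptions, not the relay's own startup code.
import asyncio

import meshtastic.tcp_interface

import mmrelay.meshtastic_utils as meshtastic_utils
from mmrelay.message_queue import get_queue_status, queue_message, start_message_queue


async def main():
    # Placeholder address; any connected Meshtastic interface works the same way.
    interface = meshtastic.tcp_interface.TCPInterface(hostname="192.168.1.50")

    # The processor only transmits when mmrelay's own client global is set and
    # not reconnecting (see _should_send_message above).
    meshtastic_utils.meshtastic_client = interface

    # Attaches the processor task to the running event loop; delays under 2.0s are clamped.
    start_message_queue(message_delay=2.2)

    queued = queue_message(
        interface.sendText,          # send_function, later invoked in an executor
        "Hello from the relay",      # positional argument forwarded to sendText
        channelIndex=0,
        description="test broadcast",
    )
    if not queued:
        print("Queue not running or full:", get_queue_status())

    # Give the rate-limited processor a moment to drain before exiting.
    await asyncio.sleep(5)


asyncio.run(main())

queue_message returns immediately; the actual sendText call happens later on the processor task, paced by message_delay, which is the point of the queue.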
mmrelay/plugin_loader.py CHANGED
@@ -896,8 +896,8 @@ def load_plugins(passed_config=None):
             getattr(plugin, "plugin_name", plugin.__class__.__name__)
             for plugin in sorted_active_plugins
         ]
-        logger.info(f"Plugins loaded: {', '.join(plugin_names)}")
+        logger.info(f"Loaded: {', '.join(plugin_names)}")
     else:
-        logger.info("Plugins loaded: none")
+        logger.info("Loaded: none")
 
     plugins_loaded = True  # Set the flag to indicate that plugins have been load