mmrelay 1.1.2__py3-none-any.whl → 1.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mmrelay might be problematic. Click here for more details.
- mmrelay/__init__.py +1 -13
- mmrelay/cli.py +124 -64
- mmrelay/config.py +63 -36
- mmrelay/config_checker.py +41 -12
- mmrelay/constants/__init__.py +54 -0
- mmrelay/constants/app.py +17 -0
- mmrelay/constants/config.py +73 -0
- mmrelay/constants/database.py +22 -0
- mmrelay/constants/formats.py +20 -0
- mmrelay/constants/messages.py +36 -0
- mmrelay/constants/network.py +35 -0
- mmrelay/constants/queue.py +17 -0
- mmrelay/db_utils.py +281 -132
- mmrelay/log_utils.py +38 -14
- mmrelay/main.py +23 -4
- mmrelay/matrix_utils.py +413 -162
- mmrelay/meshtastic_utils.py +223 -106
- mmrelay/message_queue.py +475 -0
- mmrelay/plugin_loader.py +56 -53
- mmrelay/plugins/base_plugin.py +139 -39
- mmrelay/plugins/drop_plugin.py +13 -5
- mmrelay/plugins/mesh_relay_plugin.py +7 -10
- mmrelay/plugins/weather_plugin.py +10 -1
- mmrelay/setup_utils.py +67 -30
- mmrelay/tools/sample_config.yaml +13 -3
- {mmrelay-1.1.2.dist-info → mmrelay-1.1.4.dist-info}/METADATA +12 -14
- mmrelay-1.1.4.dist-info/RECORD +43 -0
- mmrelay-1.1.4.dist-info/licenses/LICENSE +675 -0
- mmrelay-1.1.2.dist-info/RECORD +0 -34
- mmrelay-1.1.2.dist-info/licenses/LICENSE +0 -21
- {mmrelay-1.1.2.dist-info → mmrelay-1.1.4.dist-info}/WHEEL +0 -0
- {mmrelay-1.1.2.dist-info → mmrelay-1.1.4.dist-info}/entry_points.txt +0 -0
- {mmrelay-1.1.2.dist-info → mmrelay-1.1.4.dist-info}/top_level.txt +0 -0
mmrelay/message_queue.py
ADDED
|
@@ -0,0 +1,475 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Message queue system for MMRelay.
|
|
3
|
+
|
|
4
|
+
Provides transparent message queuing with rate limiting to prevent overwhelming
|
|
5
|
+
the Meshtastic network. Messages are queued in memory and sent at the configured
|
|
6
|
+
rate, respecting connection state and firmware constraints.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
import threading
|
|
11
|
+
import time
|
|
12
|
+
from dataclasses import dataclass
|
|
13
|
+
from queue import Empty, Queue
|
|
14
|
+
from typing import Callable, Optional
|
|
15
|
+
|
|
16
|
+
from mmrelay.constants.database import DEFAULT_MSGS_TO_KEEP
|
|
17
|
+
from mmrelay.constants.network import MINIMUM_MESSAGE_DELAY
|
|
18
|
+
from mmrelay.constants.queue import (
|
|
19
|
+
DEFAULT_MESSAGE_DELAY,
|
|
20
|
+
MAX_QUEUE_SIZE,
|
|
21
|
+
QUEUE_HIGH_WATER_MARK,
|
|
22
|
+
QUEUE_MEDIUM_WATER_MARK,
|
|
23
|
+
)
|
|
24
|
+
from mmrelay.log_utils import get_logger
|
|
25
|
+
|
|
26
|
+
logger = get_logger(name="MessageQueue")
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass
class QueuedMessage:
    """A single pending send, captured with everything needed to execute it later.

    Attributes:
        timestamp: Wall-clock time (``time.time()``) when the message was enqueued.
        send_function: Callable invoked to perform the actual send.
        args: Positional arguments forwarded to ``send_function``.
        kwargs: Keyword arguments forwarded to ``send_function``.
        description: Human-readable label used in log output.
        mapping_info: Optional reply/reaction mapping metadata, or ``None``.
    """

    timestamp: float
    send_function: Callable
    args: tuple
    kwargs: dict
    description: str
    # Optional message mapping information for replies/reactions
    mapping_info: Optional[dict] = None
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class MessageQueue:
    """
    Simple FIFO message queue with rate limiting for Meshtastic messages.

    Queues messages in memory and sends them in order at the configured rate to
    prevent overwhelming the mesh network. Respects connection state and
    automatically pauses during reconnections.
    """

    def __init__(self):
        """
        Initialize the MessageQueue with an empty queue, state variables, and a
        thread lock for safe operation.
        """
        self._queue = Queue()
        # asyncio.Task running _process_queue(), or None when not started
        self._processor_task = None
        self._running = False
        # Guards start/stop/enqueue state transitions across threads
        self._lock = threading.Lock()
        # Wall-clock time of the last send attempt (0.0 = never sent)
        self._last_send_time = 0.0
        self._message_delay = DEFAULT_MESSAGE_DELAY

    def start(self, message_delay: float = DEFAULT_MESSAGE_DELAY):
        """
        Start the message queue processor with a specified minimum delay between
        messages.

        If the provided delay is below the firmware-enforced minimum, the minimum
        is used instead. The processor task is started immediately if an asyncio
        event loop is running in the current thread; otherwise startup is
        deferred until ensure_processor_started() is called from within a
        running loop.

        Parameters:
            message_delay (float): Minimum number of seconds between sends.
        """
        with self._lock:
            if self._running:
                return

            # Validate and enforce firmware minimum
            if message_delay < MINIMUM_MESSAGE_DELAY:
                logger.warning(
                    f"Message delay {message_delay}s below firmware minimum ({MINIMUM_MESSAGE_DELAY}s), using {MINIMUM_MESSAGE_DELAY}s"
                )
                self._message_delay = MINIMUM_MESSAGE_DELAY
            else:
                self._message_delay = message_delay
            self._running = True

            # Start the processor in the event loop.
            # NOTE: asyncio.get_running_loop() replaces the deprecated
            # get_event_loop() pattern; it raises RuntimeError unless a loop is
            # actually running in this thread, which covers both the
            # "no loop" and "loop exists but not running" cases.
            try:
                loop = asyncio.get_running_loop()
                self._processor_task = loop.create_task(self._process_queue())
                logger.info(
                    f"Message queue started with {self._message_delay}s message delay"
                )
            except RuntimeError:
                # No running event loop, will start when one is available
                logger.debug(
                    "No event loop available, queue processor will start later"
                )

    def stop(self):
        """
        Stops the message queue processor and cancels the processing task if active.
        """
        with self._lock:
            if not self._running:
                return

            self._running = False

            if self._processor_task:
                self._processor_task.cancel()
                self._processor_task = None

            logger.info("Message queue stopped")

    def enqueue(
        self,
        send_function: Callable,
        *args,
        description: str = "",
        mapping_info: Optional[dict] = None,
        **kwargs,
    ) -> bool:
        """
        Adds a message to the queue for rate-limited, ordered sending.

        Parameters:
            send_function (Callable): The function to call to send the message.
            *args: Positional arguments for the send function.
            description (str, optional): Human-readable description for logging purposes.
            mapping_info (dict, optional): Optional metadata for message mapping
                (e.g., replies or reactions).
            **kwargs: Keyword arguments for the send function.

        Returns:
            bool: True if the message was successfully enqueued; False if the
                queue is not running or is full.
        """
        # Ensure processor is started if event loop is now available.
        # This is called outside the lock to prevent potential deadlocks.
        self.ensure_processor_started()

        with self._lock:
            if not self._running:
                # Refuse to send to prevent blocking the event loop
                logger.error(f"Queue not running, cannot send message: {description}")
                logger.error(
                    "Application is in invalid state - message queue should be started before sending messages"
                )
                return False

            # Check queue size to prevent memory issues
            if self._queue.qsize() >= MAX_QUEUE_SIZE:
                logger.warning(
                    f"Message queue full ({self._queue.qsize()}/{MAX_QUEUE_SIZE}), dropping message: {description}"
                )
                return False

            message = QueuedMessage(
                timestamp=time.time(),
                send_function=send_function,
                args=args,
                kwargs=kwargs,
                description=description,
                mapping_info=mapping_info,
            )

            self._queue.put(message)
            # Only log queue status when there are multiple messages
            queue_size = self._queue.qsize()
            if queue_size >= 2:
                logger.debug(
                    f"Queued message ({queue_size}/{MAX_QUEUE_SIZE}): {description}"
                )
            return True

    def get_queue_size(self) -> int:
        """
        Return the number of messages currently in the queue.

        Returns:
            int: The current queue size.
        """
        return self._queue.qsize()

    def is_running(self) -> bool:
        """
        Return whether the message queue processor is currently active.
        """
        return self._running

    def get_status(self) -> dict:
        """
        Return a dictionary with the current status of the message queue,
        including running state, queue size, message delay, processor activity,
        last send time, and time since last send.

        Returns:
            dict: Status information about the message queue for debugging and monitoring.
        """
        return {
            "running": self._running,
            "queue_size": self._queue.qsize(),
            "message_delay": self._message_delay,
            "processor_task_active": self._processor_task is not None
            and not self._processor_task.done(),
            "last_send_time": self._last_send_time,
            "time_since_last_send": (
                time.time() - self._last_send_time if self._last_send_time > 0 else None
            ),
        }

    def ensure_processor_started(self):
        """
        Start the queue processor task if the queue is running and no processor
        task exists.

        This method checks if the queue is active and, if so, attempts to create
        and start the asynchronous processor task within the currently running
        event loop. It is a no-op when no loop is running in this thread.
        """
        with self._lock:
            if self._running and self._processor_task is None:
                try:
                    # get_running_loop() raises RuntimeError when no loop is
                    # running here, avoiding the deprecated get_event_loop().
                    loop = asyncio.get_running_loop()
                    self._processor_task = loop.create_task(self._process_queue())
                    logger.info(
                        f"Message queue processor started with {self._message_delay}s message delay"
                    )
                except RuntimeError:
                    # Still no event loop available
                    pass

    async def _process_queue(self):
        """
        Asynchronously processes messages from the queue, sending each in order
        while enforcing rate limiting and connection readiness.

        This method runs as a background task, monitoring the queue, waiting for
        the connection to be ready, and ensuring a minimum delay between sends.
        Messages are sent using their provided callable, and optional message
        mapping is handled after successful sends. The processor logs queue
        depth warnings, handles errors gracefully, and maintains FIFO order even
        when waiting for connection or rate limits.
        """
        logger.debug("Message queue processor started")
        current_message = None

        while self._running:
            try:
                # Get next message if we don't have one waiting
                if current_message is None:
                    # Monitor queue depth for operational awareness
                    queue_size = self._queue.qsize()
                    if queue_size > QUEUE_HIGH_WATER_MARK:
                        logger.warning(
                            f"Queue depth high: {queue_size} messages pending"
                        )
                    elif queue_size > QUEUE_MEDIUM_WATER_MARK:
                        logger.info(
                            f"Queue depth moderate: {queue_size} messages pending"
                        )

                    # Get next message (non-blocking)
                    try:
                        current_message = self._queue.get_nowait()
                    except Empty:
                        # No messages, wait a bit and continue
                        await asyncio.sleep(0.1)
                        continue

                # Check if we should send (connection state, etc.)
                if not self._should_send_message():
                    # Keep the message and wait - don't requeue to maintain FIFO order
                    logger.debug(
                        f"Connection not ready, waiting to send: {current_message.description}"
                    )
                    await asyncio.sleep(1.0)
                    continue

                # Check if we need to wait for message delay (only if we've sent before)
                if self._last_send_time > 0:
                    time_since_last = time.time() - self._last_send_time
                    if time_since_last < self._message_delay:
                        wait_time = self._message_delay - time_since_last
                        logger.debug(
                            f"Rate limiting: waiting {wait_time:.1f}s before sending"
                        )
                        await asyncio.sleep(wait_time)
                        continue

                # Send the message
                try:
                    logger.debug(
                        f"Sending queued message: {current_message.description}"
                    )
                    # Run synchronous Meshtastic I/O operations in executor to prevent blocking event loop
                    # Use lambda with default arguments to properly capture loop variables
                    result = await asyncio.get_running_loop().run_in_executor(
                        None,
                        lambda msg=current_message: msg.send_function(
                            *msg.args, **msg.kwargs
                        ),
                    )

                    # Update last send time
                    self._last_send_time = time.time()

                    if result is None:
                        logger.warning(
                            f"Message send returned None: {current_message.description}"
                        )
                    else:
                        logger.debug(
                            f"Successfully sent queued message: {current_message.description}"
                        )

                        # Handle message mapping if provided
                        if current_message.mapping_info and hasattr(result, "id"):
                            self._handle_message_mapping(
                                result, current_message.mapping_info
                            )

                except Exception as e:
                    logger.error(
                        f"Error sending queued message '{current_message.description}': {e}"
                    )

                # Mark task as done and clear current message
                self._queue.task_done()
                current_message = None

            except asyncio.CancelledError:
                logger.debug("Message queue processor cancelled")
                if current_message:
                    logger.warning(
                        f"Message in flight was dropped during shutdown: {current_message.description}"
                    )
                break
            except Exception as e:
                logger.error(f"Error in message queue processor: {e}")
                await asyncio.sleep(1.0)  # Prevent tight error loop

    def _should_send_message(self) -> bool:
        """
        Determine whether it is currently safe to send a message based on
        Meshtastic client connection and reconnection state.

        Returns:
            bool: True if the client is connected and not reconnecting; False otherwise.
        """
        # Import here to avoid circular imports
        try:
            from mmrelay.meshtastic_utils import meshtastic_client, reconnecting

            # Don't send during reconnection
            if reconnecting:
                logger.debug("Not sending - reconnecting is True")
                return False

            # Don't send if no client
            if meshtastic_client is None:
                logger.debug("Not sending - meshtastic_client is None")
                return False

            # Check if client is connected
            if hasattr(meshtastic_client, "is_connected"):
                is_conn = meshtastic_client.is_connected
                # is_connected may be a property or a method depending on client type
                if not (is_conn() if callable(is_conn) else is_conn):
                    logger.debug("Not sending - client not connected")
                    return False

            logger.debug("Connection check passed - ready to send")
            return True

        except ImportError as e:
            # ImportError indicates a serious problem with application structure,
            # often during shutdown as modules are unloaded.
            logger.critical(
                f"Cannot import meshtastic_utils - serious application error: {e}. Stopping message queue."
            )
            self.stop()
            return False

    def _handle_message_mapping(self, result, mapping_info):
        """
        Update the message mapping database with information about a sent
        message and prune old mappings if configured.

        Parameters:
            result: The result object from the send function, expected to have an `id` attribute.
            mapping_info (dict): Contains mapping details such as `matrix_event_id`,
                `room_id`, `text`, and optionally `meshnet` and `msgs_to_keep`.

        If required mapping fields are present, stores the mapping and prunes
        old entries based on the specified or default retention count.
        """
        try:
            # Import here to avoid circular imports
            from mmrelay.db_utils import prune_message_map, store_message_map

            # Extract mapping information
            matrix_event_id = mapping_info.get("matrix_event_id")
            room_id = mapping_info.get("room_id")
            text = mapping_info.get("text")
            meshnet = mapping_info.get("meshnet")

            if matrix_event_id and room_id and text:
                # Store the message mapping
                store_message_map(
                    result.id,
                    matrix_event_id,
                    room_id,
                    text,
                    meshtastic_meshnet=meshnet,
                )
                logger.debug(f"Stored message map for meshtastic_id: {result.id}")

                # Handle pruning if configured
                msgs_to_keep = mapping_info.get("msgs_to_keep", DEFAULT_MSGS_TO_KEEP)
                if msgs_to_keep > 0:
                    prune_message_map(msgs_to_keep)

        except Exception as e:
            logger.error(f"Error handling message mapping: {e}")
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
# Global message queue instance, shared by the module-level helper functions below.
_message_queue = MessageQueue()
|
|
415
|
+
|
|
416
|
+
|
|
417
|
+
def get_message_queue() -> MessageQueue:
    """
    Return the process-wide MessageQueue singleton that manages rate-limited sending.

    Returns:
        MessageQueue: The shared global queue instance.
    """
    return _message_queue
|
|
422
|
+
|
|
423
|
+
|
|
424
|
+
def start_message_queue(message_delay: float = DEFAULT_MESSAGE_DELAY):
    """
    Start the global message queue processor.

    Parameters:
        message_delay (float): Minimum number of seconds to wait between sending messages.
    """
    get_message_queue().start(message_delay)
|
|
432
|
+
|
|
433
|
+
|
|
434
|
+
def stop_message_queue():
    """
    Halt the global message queue processor; no further messages are processed
    until it is started again.
    """
    get_message_queue().stop()
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
def queue_message(
    send_function: Callable,
    *args,
    description: str = "",
    mapping_info: dict = None,
    **kwargs,
) -> bool:
    """
    Enqueue a message on the global message queue for rate-limited sending.

    Parameters:
        send_function (Callable): The function to execute for sending the message.
        *args: Positional arguments forwarded to the send function.
        description (str, optional): Human-readable description of the message for logging.
        mapping_info (dict, optional): Metadata for message mapping, such as
            reply or reaction information.
        **kwargs: Keyword arguments forwarded to the send function.

    Returns:
        bool: True if the message was successfully enqueued; False if the queue
            is not running or full.
    """
    queue = get_message_queue()
    return queue.enqueue(
        send_function,
        *args,
        description=description,
        mapping_info=mapping_info,
        **kwargs,
    )
|
|
466
|
+
|
|
467
|
+
|
|
468
|
+
def get_queue_status() -> dict:
    """
    Return detailed status information about the global message queue.

    Returns:
        dict: Running state, queue size, message delay, processor task activity,
            last send time, and time since last send.
    """
    return get_message_queue().get_status()
|
mmrelay/plugin_loader.py
CHANGED
|
@@ -519,26 +519,17 @@ def clone_or_update_repo(repo_url, ref, plugins_dir):
|
|
|
519
519
|
|
|
520
520
|
|
|
521
521
|
def load_plugins_from_directory(directory, recursive=False):
|
|
522
|
-
"""
|
|
523
|
-
|
|
524
|
-
Args:
|
|
525
|
-
directory (str): Directory path to search for plugin files
|
|
526
|
-
recursive (bool): Whether to search subdirectories recursively
|
|
527
|
-
|
|
528
|
-
Returns:
|
|
529
|
-
list: List of instantiated plugin objects found in the directory
|
|
522
|
+
"""
|
|
523
|
+
Dynamically loads and instantiates plugin classes from Python files in a specified directory.
|
|
530
524
|
|
|
531
|
-
Scans for
|
|
532
|
-
a 'Plugin' class in each module and instantiates it if found.
|
|
525
|
+
Scans the given directory (and subdirectories if `recursive` is True) for `.py` files, importing each as a module and instantiating its `Plugin` class if present. Automatically attempts to install missing dependencies when a `ModuleNotFoundError` occurs, supporting both pip and pipx environments. Provides compatibility for plugins importing from either `plugins` or `mmrelay.plugins`. Skips files without a `Plugin` class or with unresolved import errors.
|
|
533
526
|
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
- Proper sys.path management for plugin directory imports
|
|
538
|
-
- Comprehensive error handling and logging
|
|
527
|
+
Parameters:
|
|
528
|
+
directory (str): Path to the directory containing plugin files.
|
|
529
|
+
recursive (bool): If True, searches subdirectories recursively.
|
|
539
530
|
|
|
540
|
-
|
|
541
|
-
|
|
531
|
+
Returns:
|
|
532
|
+
list: Instantiated plugin objects found in the directory.
|
|
542
533
|
"""
|
|
543
534
|
plugins = []
|
|
544
535
|
if os.path.isdir(directory):
|
|
@@ -626,13 +617,23 @@ def load_plugins_from_directory(directory, recursive=False):
|
|
|
626
617
|
)
|
|
627
618
|
|
|
628
619
|
# Try to load the module again
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
620
|
+
try:
|
|
621
|
+
spec.loader.exec_module(plugin_module)
|
|
622
|
+
|
|
623
|
+
if hasattr(plugin_module, "Plugin"):
|
|
624
|
+
plugins.append(plugin_module.Plugin())
|
|
625
|
+
else:
|
|
626
|
+
logger.warning(
|
|
627
|
+
f"{plugin_path} does not define a Plugin class."
|
|
628
|
+
)
|
|
629
|
+
except ModuleNotFoundError:
|
|
630
|
+
logger.error(
|
|
631
|
+
f"Module {missing_module} still not available after installation. "
|
|
632
|
+
f"The package name might be different from the import name."
|
|
633
|
+
)
|
|
634
|
+
except Exception as retry_error:
|
|
635
|
+
logger.error(
|
|
636
|
+
f"Error loading plugin {plugin_path} after dependency installation: {retry_error}"
|
|
636
637
|
)
|
|
637
638
|
|
|
638
639
|
except subprocess.CalledProcessError:
|
|
@@ -660,25 +661,16 @@ def load_plugins_from_directory(directory, recursive=False):
|
|
|
660
661
|
|
|
661
662
|
|
|
662
663
|
def load_plugins(passed_config=None):
|
|
663
|
-
"""
|
|
664
|
+
"""
|
|
665
|
+
Discovers, loads, and initializes all active plugins according to the configuration.
|
|
664
666
|
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
667
|
+
This function manages the full plugin lifecycle: it loads core, custom, and community plugins as specified in the configuration, handles cloning and updating of community plugin repositories, installs dependencies as needed, and starts each active plugin. Plugins are filtered and sorted by priority before being returned. If plugins have already been loaded, returns the cached list.
|
|
668
|
+
|
|
669
|
+
Parameters:
|
|
670
|
+
passed_config (dict, optional): Configuration dictionary to use instead of the global config.
|
|
668
671
|
|
|
669
672
|
Returns:
|
|
670
|
-
list:
|
|
671
|
-
|
|
672
|
-
This is the main plugin loading function that:
|
|
673
|
-
- Loads core plugins from mmrelay.plugins package
|
|
674
|
-
- Processes custom plugins from ~/.mmrelay/plugins/custom and plugins/custom
|
|
675
|
-
- Downloads and loads community plugins from configured Git repositories
|
|
676
|
-
- Filters plugins based on active status in configuration
|
|
677
|
-
- Sorts active plugins by priority and calls their start() method
|
|
678
|
-
- Sets up proper plugin configuration and channel mapping
|
|
679
|
-
|
|
680
|
-
Only plugins explicitly marked as active=true in config are loaded.
|
|
681
|
-
Custom and community plugins are cloned/updated automatically.
|
|
673
|
+
list: Active plugin instances sorted by priority.
|
|
682
674
|
"""
|
|
683
675
|
global sorted_active_plugins
|
|
684
676
|
global plugins_loaded
|
|
@@ -750,11 +742,15 @@ def load_plugins(passed_config=None):
|
|
|
750
742
|
plugin_path = os.path.join(custom_dir, plugin_name)
|
|
751
743
|
if os.path.exists(plugin_path):
|
|
752
744
|
logger.debug(f"Loading custom plugin from: {plugin_path}")
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
745
|
+
try:
|
|
746
|
+
plugins.extend(
|
|
747
|
+
load_plugins_from_directory(plugin_path, recursive=False)
|
|
748
|
+
)
|
|
749
|
+
plugin_found = True
|
|
750
|
+
break
|
|
751
|
+
except Exception as e:
|
|
752
|
+
logger.error(f"Failed to load custom plugin {plugin_name}: {e}")
|
|
753
|
+
continue
|
|
758
754
|
|
|
759
755
|
if not plugin_found:
|
|
760
756
|
logger.warning(
|
|
@@ -842,11 +838,17 @@ def load_plugins(passed_config=None):
|
|
|
842
838
|
plugin_path = os.path.join(dir_path, repo_name)
|
|
843
839
|
if os.path.exists(plugin_path):
|
|
844
840
|
logger.info(f"Loading community plugin from: {plugin_path}")
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
841
|
+
try:
|
|
842
|
+
plugins.extend(
|
|
843
|
+
load_plugins_from_directory(plugin_path, recursive=True)
|
|
844
|
+
)
|
|
845
|
+
plugin_found = True
|
|
846
|
+
break
|
|
847
|
+
except Exception as e:
|
|
848
|
+
logger.error(
|
|
849
|
+
f"Failed to load community plugin {repo_name}: {e}"
|
|
850
|
+
)
|
|
851
|
+
continue
|
|
850
852
|
|
|
851
853
|
if not plugin_found:
|
|
852
854
|
logger.warning(
|
|
@@ -896,8 +898,9 @@ def load_plugins(passed_config=None):
|
|
|
896
898
|
getattr(plugin, "plugin_name", plugin.__class__.__name__)
|
|
897
899
|
for plugin in sorted_active_plugins
|
|
898
900
|
]
|
|
899
|
-
logger.info(f"
|
|
901
|
+
logger.info(f"Loaded: {', '.join(plugin_names)}")
|
|
900
902
|
else:
|
|
901
|
-
logger.info("
|
|
903
|
+
logger.info("Loaded: none")
|
|
902
904
|
|
|
903
|
-
plugins_loaded = True # Set the flag to indicate that plugins have been
|
|
905
|
+
plugins_loaded = True # Set the flag to indicate that plugins have been loaded
|
|
906
|
+
return sorted_active_plugins
|