nost-tools 2.0.0-py3-none-any.whl → 2.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of nost-tools might be problematic.

nost_tools/application.py CHANGED
@@ -14,6 +14,5 @@
 import ntplib
 import pika
-import pika.connection
 import urllib3
 from keycloak.exceptions import KeycloakAuthenticationError
 from keycloak.keycloak_openid import KeycloakOpenID
@@ -269,12 +268,20 @@
             heartbeat=config.rc.server_configuration.servers.rabbitmq.heartbeat,
             connection_attempts=config.rc.server_configuration.servers.rabbitmq.connection_attempts,
             retry_delay=config.rc.server_configuration.servers.rabbitmq.retry_delay,
+            socket_timeout=config.rc.server_configuration.servers.rabbitmq.socket_timeout,
+            stack_timeout=config.rc.server_configuration.servers.rabbitmq.stack_timeout,
+            locale=config.rc.server_configuration.servers.rabbitmq.locale,
         )
 
         # Configure transport layer security (TLS) if needed
         if self.config.rc.server_configuration.servers.rabbitmq.tls:
             logger.info("Using TLS/SSL.")
-            parameters.ssl_options = pika.SSLOptions(ssl.SSLContext())
+            # Create an SSL context
+            context = ssl.create_default_context()
+            context.check_hostname = False
+            context.verify_mode = ssl.CERT_NONE
+            # Set SSL options
+            parameters.ssl_options = pika.SSLOptions(context)
 
             # Callback functions for connection
             def on_connection_open(connection):
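For orientation, the substantive changes in 2.0.1 are the extra pika.ConnectionParameters options (socket_timeout, stack_timeout, locale) and the switch from a bare ssl.SSLContext() to a default SSL context with hostname checking and certificate verification disabled. The standalone sketch below (not part of the package) mirrors that connection setup against a hypothetical broker at localhost:5671 with placeholder credentials; the host, port, and timeout values are illustrative assumptions, not values taken from the package configuration.

import ssl

import pika

# Hypothetical broker settings; replace with values from your own configuration.
HOST, PORT = "localhost", 5671
credentials = pika.PlainCredentials("guest", "guest")

# Mirror of the 2.0.1 TLS setup: a default SSL context with hostname checking
# and certificate verification turned off.
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE

# Connection parameters, including the options newly passed through in 2.0.1
# (socket_timeout, stack_timeout, locale); the numbers here are illustrative.
parameters = pika.ConnectionParameters(
    host=HOST,
    port=PORT,
    credentials=credentials,
    heartbeat=600,
    connection_attempts=3,
    retry_delay=5,
    socket_timeout=10,
    stack_timeout=15,
    locale="en_US",
    ssl_options=pika.SSLOptions(context),
)

# A blocking connection keeps the sketch short; the package itself uses
# pika.SelectConnection with open/close callbacks and a background I/O loop.
connection = pika.BlockingConnection(parameters)
connection.close()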