nost_tools-2.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of nost-tools might be problematic.

@@ -0,0 +1,793 @@
1
+ """
2
+ Provides a base application that publishes messages from a simulator to a broker.
3
+ """
4
+
5
+ import functools
6
+ import logging
7
+ import ssl
8
+ import sys
9
+ import threading
10
+ import time
11
+ from datetime import datetime, timedelta
12
+ from typing import Callable
13
+
14
+ import ntplib
15
+ import pika
16
+ import pika.connection
17
+ import urllib3
18
+ from keycloak.exceptions import KeycloakAuthenticationError
19
+ from keycloak.keycloak_openid import KeycloakOpenID
20
+
21
+ from .application_utils import ( # ConnectionConfig,
22
+ ModeStatusObserver,
23
+ ShutDownObserver,
24
+ TimeStatusPublisher,
25
+ )
26
+ from .configuration import ConnectionConfig
27
+ from .schemas import ReadyStatus
28
+ from .simulator import Simulator
29
+
30
+ logging.captureWarnings(True)
31
+ logger = logging.getLogger(__name__)
32
+ urllib3.disable_warnings()
33
+
34
+
35
+ class Application:
36
+ """
37
+ Base class for a member application.
38
+
39
+ This class defines the main functionality of a NOS-T application and can be extended to meet user needs.
40
+
41
+ Attributes:
42
+ prefix (str): The test run namespace (prefix)
43
+ simulator (:obj:`Simulator`): Application simulator (see the :obj:`Simulator` class for functionality)
44
+ connection (:obj:`pika.SelectConnection`): Application connection to the RabbitMQ broker
45
+ app_name (str): Test run application name
46
+ app_description (str): Test run application description (optional)
47
+ time_status_step (:obj:`timedelta`): Scenario duration between time status messages
48
+ time_status_init (:obj:`datetime`): Scenario time of first time status message
49
+ """
50
+
51
+ def __init__(self, app_name: str, app_description: str = None):
52
+ """
53
+ Initializes a new application.
54
+
55
+ Args:
56
+ app_name (str): application name
57
+ app_description (str): application description (optional)
58
+ """
59
+ self.simulator = Simulator()
60
+ self.connection = None
61
+ self.channel = None
62
+ self.prefix = None
63
+ self.app_name = app_name
64
+ self.app_description = app_description
65
+ self._time_status_publisher = None
66
+ self._mode_status_observer = None
67
+ self._shut_down_observer = None
68
+ self.config = None
69
+ # Connection status
70
+ self._is_connected = threading.Event()
71
+ self._is_running = False
72
+ self._io_thread = None
73
+ self._consuming = False
74
+ self._should_stop = threading.Event()
75
+ self._closing = False
76
+ # Queues
77
+ self.channel_configs = []
78
+ self.unique_exchanges = {}
79
+ self.declared_queues = set()
80
+ self.declared_exchanges = set()
81
+ self.predefined_exchanges_queues = False
82
+ self._callbacks_per_topic = {}
83
+ # Token
84
+ self.refresh_token = None
85
+ self._token_refresh_thread = None
86
+ self.token_refresh_interval = None
87
+
88
+ def ready(self) -> None:
89
+ """
90
+ Signals the application is ready to initialize scenario execution.
91
+ Publishes a :obj:`ReadyStatus` message to the topic `prefix.app_name.status.ready`.
92
+ """
93
+ status = ReadyStatus.model_validate(
94
+ {
95
+ "name": self.app_name,
96
+ "description": self.app_description,
97
+ "properties": {"ready": True},
98
+ }
99
+ )
100
+ self.send_message(
101
+ app_name=self.app_name,
102
+ app_topics="status.ready",
103
+ payload=status.model_dump_json(by_alias=True, exclude_none=True),
104
+ )
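For illustration, a minimal sketch of what ready() produces, assuming a prefix of "test" and an application named "manager" (both hypothetical values, not defined by this module):

# Sketch only (hypothetical prefix "test" and app_name "manager"): ready()
# publishes a JSON body equivalent to the one below to the routing key
# "test.manager.status.ready" on the exchange named after the prefix.
import json

ready_routing_key = "test.manager.status.ready"
ready_payload = json.dumps(
    {"name": "manager", "description": "Example manager", "properties": {"ready": True}}
)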
105
+
106
+ def new_access_token(self, refresh_token=None):
107
+ """
108
+ Obtains a new access token and refresh token from Keycloak. If a refresh token is provided,
109
+ the access token is refreshed using the refresh token. Otherwise, the access token is obtained
110
+ using the username and password provided in the configuration.
111
+
112
+ Args:
113
+ refresh_token (str): refresh token (optional)
114
+ """
115
+ logger.debug(
116
+ "Acquiring access token."
117
+ if not refresh_token
118
+ else "Refreshing access token."
119
+ )
120
+ keycloak_openid = KeycloakOpenID(
121
+ server_url=f"{'http' if 'localhost' in self.config.rc.server_configuration.servers.keycloak.host or '127.0.0.1' in self.config.rc.server_configuration.servers.keycloak.host else 'https'}://{self.config.rc.server_configuration.servers.keycloak.host}:{self.config.rc.server_configuration.servers.keycloak.port}",
122
+ client_id=self.config.rc.credentials.client_id,
123
+ realm_name=self.config.rc.server_configuration.servers.keycloak.realm,
124
+ client_secret_key=self.config.rc.credentials.client_secret_key,
125
+ verify=False,
126
+ )
127
+ try:
128
+ if refresh_token:
129
+ token = keycloak_openid.refresh_token(refresh_token)
130
+ else:
131
+ try:
132
+ token = keycloak_openid.token(
133
+ grant_type="password",
134
+ username=self.config.rc.credentials.username,
135
+ password=self.config.rc.credentials.password,
136
+ )
137
+ except KeycloakAuthenticationError as e:
138
+ logger.error(f"Authentication error without OTP: {e}")
139
+ otp = input("Enter OTP: ")
140
+ token = keycloak_openid.token(
141
+ grant_type="password",
142
+ username=self.config.rc.credentials.username,
143
+ password=self.config.rc.credentials.password,
144
+ totp=otp,
145
+ )
146
+ if "access_token" in token:
147
+ logger.debug(
148
+ "Acquiring access token successfully completed."
149
+ if not refresh_token
150
+ else "Refreshing access token successfully completed."
151
+ )
152
+ return token["access_token"], token["refresh_token"]
153
+ else:
154
+ raise Exception("Error: The request was unsuccessful.")
155
+ except Exception as e:
156
+ logger.error(f"An error occurred: {e}")
157
+ raise
158
+
159
+ def start_token_refresh_thread(self):
160
+ """
161
+ Starts a background thread that periodically refreshes the access token using the stored refresh token. The refresh interval is taken from the Keycloak server configuration.
162
+
165
+ """
166
+ logger.debug("Starting refresh token thread.")
167
+
168
+ def refresh_token_periodically():
169
+ while not self._should_stop.wait(timeout=self.token_refresh_interval):
170
+ try:
171
+ access_token, refresh_token = self.new_access_token(
172
+ self.refresh_token
173
+ )
174
+ self.refresh_token = refresh_token
175
+ self.update_connection_credentials(access_token)
176
+ except Exception as e:
177
+ logger.error(f"Failed to refresh access token: {e}")
178
+
179
+ self._token_refresh_thread = threading.Thread(target=refresh_token_periodically)
180
+ self._token_refresh_thread.start()
181
+ logger.debug("Starting refresh token thread successfully completed.")
182
+
183
+ def update_connection_credentials(self, access_token):
184
+ """
185
+ Updates the connection credentials with the new access token.
186
+
187
+ Args:
188
+ access_token (str): new access token
189
+ """
190
+ self.connection.update_secret(access_token, "secret")
191
+
192
+ def start_up(
193
+ self,
194
+ prefix: str,
195
+ config: ConnectionConfig,
196
+ set_offset: bool = None,
197
+ time_status_step: timedelta = None,
198
+ time_status_init: datetime = None,
199
+ shut_down_when_terminated: bool = None,
200
+ ) -> None:
201
+ """
202
+ Starts up the application to prepare for scenario execution.
203
+ Establishes the simulation prefix, the connection configuration, and the intervals for publishing
204
+ time status messages, then connects to the message broker and starts a background event loop.
205
+
206
+ Args:
207
+ prefix (str): messaging namespace (prefix)
208
+ config (:obj:`ConnectionConfig`): connection configuration
209
+ set_offset (bool): True, if the system clock offset should be set using an NTP request prior to execution
210
+ time_status_step (:obj:`timedelta`): scenario duration between time status messages
211
+ time_status_init (:obj:`datetime`): scenario time for first time status message
212
+ shut_down_when_terminated (bool): True, if the application should shut down when the simulation is terminated
213
+ """
214
+ if (
215
+ set_offset is not None
216
+ and time_status_step is not None
217
+ and time_status_init is not None
218
+ and shut_down_when_terminated is not None
219
+ ):
220
+ self.set_offset = set_offset
221
+ self.time_status_step = time_status_step
222
+ self.time_status_init = time_status_init
223
+ self.shut_down_when_terminated = shut_down_when_terminated
224
+ else:
225
+ self.config = config
226
+ parameters = getattr(
227
+ self.config.rc.simulation_configuration.execution_parameters,
228
+ self.app_name,
229
+ None,
230
+ )
231
+ self.set_offset = parameters.set_offset
232
+ self.time_status_step = parameters.time_status_step
233
+ self.time_status_init = parameters.time_status_init
234
+ self.shut_down_when_terminated = parameters.shut_down_when_terminated
235
+
236
+ if self.set_offset:
237
+ # Set the system clock offset
238
+ self.set_wallclock_offset()
239
+
240
+ # Set the prefix and configuration parameters
241
+ self.prefix = prefix
242
+ self.config = config
243
+ self._is_running = True
244
+
245
+ if self.config.rc.server_configuration.servers.rabbitmq.keycloak_authentication:
246
+ # Get the access token and refresh token
247
+ self.token_refresh_interval = (
248
+ self.config.rc.server_configuration.servers.keycloak.token_refresh_interval
249
+ )
250
+ logger.info(
251
+ f"Keycloak authentication is enabled. Access token will be refreshed every {self.token_refresh_interval} seconds"
252
+ )
253
+ access_token, _ = self.new_access_token()
254
+ self.start_token_refresh_thread()
255
+ credentials = pika.PlainCredentials("", access_token)
256
+ else:
257
+ # Set up credentials
258
+ credentials = pika.PlainCredentials(
259
+ self.config.rc.credentials.username,
260
+ self.config.rc.credentials.password,
261
+ )
262
+
263
+ # Set up connection parameters
264
+ parameters = pika.ConnectionParameters(
265
+ host=self.config.rc.server_configuration.servers.rabbitmq.host,
266
+ virtual_host=self.config.rc.server_configuration.servers.rabbitmq.virtual_host,
267
+ port=self.config.rc.server_configuration.servers.rabbitmq.port,
268
+ credentials=credentials,
269
+ heartbeat=config.rc.server_configuration.servers.rabbitmq.heartbeat,
270
+ connection_attempts=config.rc.server_configuration.servers.rabbitmq.connection_attempts,
271
+ retry_delay=config.rc.server_configuration.servers.rabbitmq.retry_delay,
272
+ )
273
+
274
+ # Configure transport layer security (TLS) if needed
275
+ if self.config.rc.server_configuration.servers.rabbitmq.tls:
276
+ logger.info("Using TLS/SSL.")
277
+ parameters.ssl_options = pika.SSLOptions(ssl.SSLContext())
278
+
279
+ # Callback functions for connection
280
+ def on_connection_open(connection):
281
+ self.connection = connection
282
+ self.connection.channel(on_open_callback=self.on_channel_open)
283
+ logger.info("Connection established successfully.")
284
+
285
+ # Establish non-blocking connection to RabbitMQ
286
+ self.connection = pika.SelectConnection(
287
+ parameters=parameters,
288
+ on_open_callback=on_connection_open,
289
+ on_open_error_callback=self.on_connection_error,
290
+ on_close_callback=self.on_connection_closed,
291
+ )
292
+
293
+ # Start the I/O loop in a separate thread
294
+ self._io_thread = threading.Thread(target=self._start_io_loop)
295
+ self._io_thread.start()
296
+ self._is_connected.wait()
297
+
298
+ if self.config.rc.simulation_configuration.predefined_exchanges_queues:
299
+ # Get the unique exchanges and channel configurations
300
+ self.predefined_exchanges_queues = True
301
+ logger.debug(
302
+ "Exchanges and queues are predefined in the YAML configuration file."
303
+ )
304
+ self.unique_exchanges, self.channel_configs = (
305
+ self.config.rc.simulation_configuration.exchanges,
306
+ self.config.rc.simulation_configuration.queues,
307
+ )
308
+
309
+ else:
310
+ logger.debug(
311
+ "Exchanges and queues are NOT predefined in the YAML configuration file."
312
+ )
313
+
314
+ # Configure observers
315
+ self._create_time_status_publisher(self.time_status_step, self.time_status_init)
316
+ self._create_mode_status_observer()
317
+ if self.shut_down_when_terminated:
318
+ self._create_shut_down_observer()
319
+ logger.info(f"Application {self.app_name} successfully started up.")
320
+
321
+ def _start_io_loop(self):
322
+ """
323
+ Starts the I/O loop for the connection.
324
+ """
325
+ self.stop_event = threading.Event()
326
+ while not self.stop_event.is_set():
327
+ self.connection.ioloop.start()
328
+
329
+ def on_channel_open(self, channel):
330
+ """
331
+ Callback function for when the channel is opened.
332
+
333
+ Args:
334
+ channel (:obj:`pika.channel.Channel`): channel object
335
+ """
336
+ self.channel = channel
337
+ # Signal that connection is established
338
+ self._is_connected.set()
339
+
340
+ def on_connection_error(self, connection, error):
341
+ """
342
+ Callback function for when a connection error occurs.
343
+
344
+ Args:
345
+ connection (:obj:`pika.connection.Connection`): connection object
346
+ error (Exception): exception representing reason for loss of connection
347
+ """
348
+ logger.error(f"Connection error: {error}")
349
+ self._is_connected.clear()
350
+
351
+ def on_connection_closed(self, connection, reason):
352
+ """
353
+ This method is invoked by pika when the connection to RabbitMQ is
354
+ closed. The channel reference is cleared and, if the application is
355
+ shutting down, the I/O loop is stopped.
356
+
357
+ Args:
358
+ connection (:obj:`pika.connection.Connection`): closed connection object
359
+ reason (Exception): exception representing reason for loss of connection
360
+ """
361
+ self.channel = None
362
+ if self._closing:
363
+ self.connection.ioloop.stop()
364
+
365
+ def shut_down(self) -> None:
366
+ """
367
+ Shuts down the application by stopping the background event loop and disconnecting from the broker.
368
+ """
369
+ # self._should_stop.set()
370
+ if self._time_status_publisher is not None:
371
+ self.simulator.remove_observer(self._time_status_publisher)
372
+ self._time_status_publisher = None
373
+
374
+ if self.connection:
375
+ self.stop_application()
376
+ self._consuming = False
377
+ logger.info(f"Application {self.app_name} successfully shut down.")
378
+
379
+ def send_message(self, app_name, app_topics, payload: str) -> None:
380
+ """
381
+ Sends a message to the broker. The message is published to the exchange using a routing key built from the prefix, application name, and topic, with the message expiration, delivery mode, and content type taken from the RabbitMQ server configuration.
382
+
383
+ Args:
384
+ app_name (str): application name
385
+ app_topics (str or list): topic name or list of topic names
386
+ payload (str): message payload
387
+ """
388
+ if isinstance(app_topics, str):
389
+ app_topics = [app_topics]
390
+
391
+ for app_topic in app_topics:
392
+ routing_key = self.create_routing_key(app_name=app_name, topic=app_topic)
393
+ if not self.predefined_exchanges_queues:
394
+ routing_key, queue_name = self.yamless_declare_bind_queue(
395
+ routing_key=routing_key
396
+ )
397
+ self.channel.basic_publish(
398
+ exchange=self.prefix,
399
+ routing_key=routing_key,
400
+ body=payload,
401
+ properties=pika.BasicProperties(
402
+ expiration=self.config.rc.server_configuration.servers.rabbitmq.message_expiration,
403
+ delivery_mode=self.config.rc.server_configuration.servers.rabbitmq.delivery_mode,
404
+ content_type=self.config.rc.server_configuration.servers.rabbitmq.content_type,
405
+ app_id=self.app_name,
406
+ ),
407
+ )
408
+ logger.debug(
409
+ f"Successfully sent message '{payload}' to topic '{routing_key}'."
410
+ )
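Continuing the usage sketch above, a hedged example of send_message; the topic names and payload are illustrative, not defined by the package.

# Usage sketch: publish one payload to two topics. With prefix "test" and
# app_name "demo", the messages are routed as "test.demo.status.heartbeat"
# and "test.demo.state.position" on the exchange named after the prefix.
import json

app.send_message(
    app_name=app.app_name,
    app_topics=["status.heartbeat", "state.position"],
    payload=json.dumps({"alive": True}),
)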
411
+
412
+ def routing_key_matches_pattern(self, routing_key, pattern):
413
+ """
414
+ Check if a routing key matches a wildcard pattern.
415
+
416
+ Args:
417
+ routing_key (str): The actual routing key of the message
418
+ pattern (str): The pattern which may contain * or # wildcards
419
+
420
+ Returns:
421
+ bool: True if the routing key matches the pattern
422
+ """
423
+ # Split both keys into segments
424
+ route_parts = routing_key.split(".")
425
+ pattern_parts = pattern.split(".")
426
+
427
+ # If # isn't in pattern, both must have same number of parts
428
+ if "#" not in pattern_parts and len(route_parts) != len(pattern_parts):
429
+ return False
430
+
431
+ i = 0
432
+ while i < len(pattern_parts):
433
+ # Handle # wildcard (matches 0 or more segments)
434
+ if pattern_parts[i] == "#":
435
+ return True # # at the end matches everything remaining
436
+
437
+ # Handle * wildcard (matches exactly one segment)
438
+ elif pattern_parts[i] == "*":
439
+ # Ensure there's a segment to match
440
+ if i >= len(route_parts):
441
+ return False
442
+ # * matches any single segment, continue to next segment
443
+ i += 1
444
+ continue
445
+
446
+ # Handle exact match segment
447
+ else:
448
+ # If we've run out of route parts or segments don't match
449
+ if i >= len(route_parts) or pattern_parts[i] != route_parts[i]:
450
+ return False
451
+
452
+ i += 1
453
+
454
+ # If we've gone through all pattern parts, make sure we've used all route parts
455
+ return len(route_parts) <= i
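A worked illustration of the wildcard semantics implemented above ("*" matches exactly one dot-separated segment, "#" matches the remainder); the routing keys are hypothetical and the app instance comes from the usage sketch.

# Illustration of the matching rules above (hypothetical routing keys).
examples = [
    ("test.satellite.location", "test.*.location", True),
    ("test.satellite.location", "test.#", True),
    ("test.satellite.location.extra", "test.*.location", False),
    ("test.satellite", "test.satellite.location", False),
]
for key, pattern, expected in examples:
    assert app.routing_key_matches_pattern(key, pattern) == expected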
456
+
457
+ def add_message_callback(
458
+ self, app_name: str, app_topic: str, user_callback: Callable
459
+ ):
460
+ """
461
+ Add callback for a topic, supporting wildcards (* and #) in routing keys.
462
+
463
+ * matches exactly one word
464
+ # matches zero or more words
465
+ """
466
+ self.was_consuming = True
467
+ self._consuming = True
468
+
469
+ routing_key = self.create_routing_key(app_name=app_name, topic=app_topic)
470
+
471
+ # Check if this is the first callback for this routing key pattern
472
+ if routing_key not in self._callbacks_per_topic:
473
+ self._callbacks_per_topic[routing_key] = []
474
+
475
+ # Only set up the consumer once per topic
476
+ if not self.predefined_exchanges_queues:
477
+ # For wildcard subscriptions, use the app_name as queue suffix to ensure uniqueness
478
+ queue_suffix = self.app_name
479
+
480
+ # If using wildcards, bind to the wildcard pattern
481
+ if "*" in routing_key or "#" in routing_key:
482
+ # Create a unique queue name for this wildcard subscription
483
+ queue_name = f"{routing_key.replace('*', 'star').replace('#', 'hash')}.{queue_suffix}"
484
+
485
+ # Declare a new queue
486
+ self.channel.queue_declare(
487
+ queue=queue_name, durable=False, auto_delete=True
488
+ )
489
+
490
+ # Bind queue to the exchange with the wildcard pattern
491
+ self.channel.queue_bind(
492
+ exchange=self.prefix, queue=queue_name, routing_key=routing_key
493
+ )
494
+
495
+ # Track the declared queue
496
+ self.declared_queues.add(queue_name)
497
+ else:
498
+ # For non-wildcard keys, use the standard approach
499
+ routing_key, queue_name = self.yamless_declare_bind_queue(
500
+ routing_key=routing_key, app_specific_extender=queue_suffix
501
+ )
502
+
503
+ self.channel.basic_qos(prefetch_count=1)
504
+ self._consumer_tag = self.channel.basic_consume(
505
+ queue=queue_name,
506
+ on_message_callback=self._handle_message,
507
+ auto_ack=False,
508
+ )
509
+
510
+ # Add the callback to the list for this routing key
511
+ self._callbacks_per_topic[routing_key].append(user_callback)
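Continuing the usage sketch, a hedged example of registering a wildcard callback; the topic and handler names are illustrative. Callbacks receive the standard pika delivery arguments.

# Usage sketch: subscribe to the "location" topic of every application under
# the current prefix. The callback receives the usual pika arguments:
# channel, method, properties, and the message body (bytes).
def on_location(ch, method, properties, body):
    print(f"{method.routing_key}: {body.decode('utf-8')}")

app.add_message_callback(app_name="*", app_topic="location", user_callback=on_location)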
512
+
513
+ def _handle_message(self, ch, method, properties, body):
514
+ """
515
+ Callback for handling messages received from RabbitMQ.
516
+ Supports both direct routing key matches and wildcard patterns.
517
+ """
518
+ routing_key = method.routing_key
519
+ logger.debug(f"Received message with routing key: {routing_key}")
520
+
521
+ # First check for exact routing key match
522
+ direct_callbacks = self._callbacks_per_topic.get(routing_key, [])
523
+
524
+ # Then find any wildcard patterns that match this routing key
525
+ wildcard_callbacks = []
526
+ for pattern, callbacks in self._callbacks_per_topic.items():
527
+ # Skip exact matches (already handled) and patterns that don't match
528
+ if pattern == routing_key:
529
+ continue
530
+
531
+ if "*" in pattern or "#" in pattern:
532
+ if self.routing_key_matches_pattern(routing_key, pattern):
533
+ wildcard_callbacks.extend(callbacks)
534
+
535
+ # Combine all matching callbacks
536
+ all_callbacks = direct_callbacks + wildcard_callbacks
537
+
538
+ if all_callbacks:
539
+ logger.debug(
540
+ f"Found {len(all_callbacks)} callbacks for routing key: {routing_key}"
541
+ )
542
+ else:
543
+ logger.debug(f"No callbacks found for routing key: {routing_key}")
544
+ # Still acknowledge the message even if no callbacks matched
545
+ self.acknowledge_message(method.delivery_tag)
546
+ return
547
+
548
+ try:
549
+ # Execute all callbacks for this message
550
+ for callback in all_callbacks:
551
+ callback(ch, method, properties, body)
552
+
553
+ # Only acknowledge after all callbacks complete successfully
554
+ self.acknowledge_message(method.delivery_tag)
555
+ except Exception as e:
556
+ logger.error(f"Error processing message: {e}")
557
+ # Reject the message if any callback fails
558
+ if self.channel:
559
+ self.channel.basic_reject(
560
+ delivery_tag=method.delivery_tag, requeue=True
561
+ )
562
+
563
+ def acknowledge_message(self, delivery_tag):
564
+ """Acknowledge the message delivery from RabbitMQ by sending a
565
+ Basic.Ack RPC method for the delivery tag.
566
+
567
+ :param int delivery_tag: The delivery tag from the Basic.Deliver frame
568
+
569
+ """
570
+ try:
571
+ logger.debug(f"Acknowledging message {delivery_tag}")
572
+ self.channel.basic_ack(delivery_tag, True)
573
+ except Exception as e:
574
+ logger.error(f"Failed to acknowledge message {delivery_tag}: {e}")
575
+
576
+ def create_routing_key(self, app_name: str, topic: str):
577
+ """
578
+ Creates a routing key for the application. The routing key is used to bind the queue to the exchange.
579
+
580
+ Args:
581
+ app_name (str): application name
582
+ topic (str): topic name
583
+ """
584
+ routing_key = ".".join([self.prefix, app_name, topic])
585
+ return routing_key
586
+
587
+ def yamless_declare_bind_queue(
588
+ self, routing_key: str = None, app_specific_extender: str = None
589
+ ) -> tuple:
590
+ """
591
+ Declares a queue and binds it to the exchange using the given routing key.
592
+ Args:
593
+ routing_key (str): routing key used to bind the queue to the exchange
595
+ app_specific_extender (str): application specific extender, used to create a unique queue name for the application. If the app_specific_extender is not provided, the queue name is the same as the routing key.
596
+ """
597
+ try:
598
+ if app_specific_extender:
599
+ queue_name = ".".join([routing_key, app_specific_extender])
600
+ else:
601
+ queue_name = routing_key
602
+ self.channel.queue_declare(
603
+ queue=queue_name, durable=False, auto_delete=True
604
+ )
605
+ self.channel.queue_bind(
606
+ exchange=self.prefix, queue=queue_name, routing_key=routing_key
607
+ )
608
+ # Create list of declared queues and exchanges
609
+ self.declared_queues.add(queue_name.strip())
610
+ self.declared_queues.add(routing_key.strip())
611
+ self.declared_exchanges.add(self.prefix.strip())
612
+
613
+ logger.debug(f"Bound queue '{queue_name}' to topic '{routing_key}'.")
614
+
615
+ except Exception as e:
616
+ logger.error(f"Failed to declare or bind queue for routing key '{routing_key}': {e}")
617
+ routing_key = None
618
+ queue_name = None
619
+
620
+ return routing_key, queue_name
621
+
622
+ def delete_queue(self, configs, app_name):
623
+ """
624
+ Deletes the queues from RabbitMQ.
625
+
626
+ Args:
627
+ configs (list): list of channel configurations
628
+ app_name (str): application name
629
+ """
630
+ for config in configs:
631
+ if config["app"] == app_name:
632
+ logger.info(f"Deleting queue: {config['address']}")
633
+ self.channel.queue_delete(queue=config["address"])
634
+ logger.info("Successfully deleted queues.")
635
+
636
+ def delete_exchange(self, unique_exchanges):
637
+ """
638
+ Deletes the exchanges from RabbitMQ.
639
+
640
+ Args:
641
+ unique_exchanges (dict): dictionary of unique exchanges
642
+ """
643
+ for exchange_name, exchange_config in unique_exchanges.items():
644
+ self.channel.exchange_delete(exchange=exchange_name)
645
+ logger.info("Successfully deleted exchanges.")
646
+
647
+ def delete_all_queues_and_exchanges(self):
648
+ """
649
+ Deletes all declared queues and exchanges from RabbitMQ.
650
+ """
651
+ for queue_name in list(self.declared_queues):
652
+ try:
653
+ # self.channel.queue_purge(queue=queue_name)
654
+ self.channel.queue_delete(queue=queue_name)
655
+ logger.info(f"Deleted queue: {queue_name}")
656
+ except Exception as e:
657
+ logger.error(f"Failed to delete queue {queue_name}: {e}")
658
+
659
+ for exchange_name in list(self.declared_exchanges):
660
+ try:
661
+ self.channel.exchange_delete(exchange=exchange_name)
662
+ logger.info(f"Deleted exchange: {exchange_name}")
663
+ except Exception as e:
664
+ logger.error(f"Failed to delete exchange {exchange_name}: {e}")
665
+
666
+ def stop_consuming(self):
667
+ """Tell RabbitMQ that you would like to stop consuming by sending the
668
+ Basic.Cancel RPC command.
669
+ """
670
+ if self.channel:
671
+ logger.info("Sending a Basic.Cancel RPC command to RabbitMQ")
672
+ cb = functools.partial(self.on_cancelok, userdata=self._consumer_tag)
673
+ self.channel.basic_cancel(self._consumer_tag, cb)
674
+
675
+ def on_cancelok(self, _unused_frame, userdata):
676
+ """This method is invoked by pika when RabbitMQ acknowledges the
677
+ cancellation of a consumer. At this point we will close the channel.
678
+ This will invoke the on_channel_closed method once the channel has been
679
+ closed, which will in-turn close the connection.
680
+ :param pika.frame.Method _unused_frame: The Basic.CancelOk frame
681
+ :param str|unicode userdata: Extra user data (consumer tag)
682
+ """
683
+ self._consuming = False
684
+ logger.info(
685
+ "RabbitMQ acknowledged the cancellation of the consumer: %s", userdata
686
+ )
687
+ self.close_channel()
688
+ self.stop_loop()
689
+
690
+ def close_channel(self):
691
+ """Call to close the channel with RabbitMQ cleanly by issuing the
692
+ Channel.Close RPC command.
693
+ """
694
+ logger.info("Deleting queues and exchanges.")
695
+
696
+ if self.predefined_exchanges_queues:
697
+ self.delete_queue(self.channel_configs, self.app_name)
698
+ self.delete_exchange(self.unique_exchanges)
699
+ else:
700
+ self.delete_all_queues_and_exchanges()
701
+
702
+ logger.info("Closing channel")
703
+ self.channel.close()
704
+
705
+ def stop_loop(self):
706
+ """Stop the IO loop"""
707
+ self.connection.ioloop.stop()
708
+
709
+ def stop_application(self):
710
+ """Cleanly shutdown the connection to RabbitMQ by stopping the consumer
711
+ with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
712
+ will be invoked by pika, which will then close the channel and
713
+ connection. The IOLoop is started again because this method is invoked
714
+ when CTRL-C is pressed raising a KeyboardInterrupt exception. This
715
+ exception stops the IOLoop which needs to be running for pika to
716
+ communicate with RabbitMQ. All of the commands issued prior to starting
717
+ the IOLoop will be buffered but not processed.
718
+ """
719
+ if not self._closing:
720
+ self._closing = True
721
+ if self._consuming:
722
+ self.stop_consuming()
723
+ # Signal the thread to stop
724
+ if hasattr(self, "stop_event"):
725
+ self.stop_event.set()
726
+ if hasattr(self, "_should_stop"):
727
+ self._should_stop.set()
728
+ if hasattr(self, "io_thread"):
729
+ self._io_thread.join()
730
+ sys.exit()
731
+ else:
732
+ self.connection.ioloop.stop()
733
+
734
+ def set_wallclock_offset(
735
+ self, host="pool.ntp.org", retry_delay_s: int = 5, max_retry: int = 5
736
+ ) -> None:
737
+ """
738
+ Issues a Network Time Protocol (NTP) request to determine the system clock offset.
739
+
740
+ Args:
741
+ host (str): NTP host (default: 'pool.ntp.org')
742
+ retry_delay_s (int): number of seconds to wait before retrying
743
+ max_retry (int): maximum number of retries allowed
744
+ """
745
+ for i in range(max_retry):
746
+ try:
747
+ logger.info(f"Contacting {host} to retrieve wallclock offset.")
748
+ response = ntplib.NTPClient().request(host, version=3, timeout=2)
749
+ offset = timedelta(seconds=response.offset)
750
+ self.simulator.set_wallclock_offset(offset)
751
+ logger.info(f"Wallclock offset updated to {offset}.")
752
+ return
753
+ except ntplib.NTPException:
754
+ logger.warning(
755
+ f"Could not connect to {host}, attempt #{i+1}/{max_retry} in {retry_delay_s} s."
756
+ )
757
+ time.sleep(retry_delay_s)
758
+
759
+ def _create_time_status_publisher(
760
+ self, time_status_step: timedelta, time_status_init: datetime
761
+ ) -> None:
762
+ """
763
+ Creates a new time status publisher to publish the time status when it changes.
764
+
765
+ Args:
766
+ time_status_step (:obj:`timedelta`): scenario duration between time status messages
767
+ time_status_init (:obj:`datetime`): scenario time for first time status message
768
+ """
769
+ if time_status_step is not None:
770
+ if self._time_status_publisher is not None:
771
+ self.simulator.remove_observer(self._time_status_publisher)
772
+ self._time_status_publisher = TimeStatusPublisher(
773
+ self, time_status_step, time_status_init
774
+ )
775
+ self.simulator.add_observer(self._time_status_publisher)
776
+
777
+ def _create_mode_status_observer(self) -> None:
778
+ """
779
+ Creates a mode status observer to publish the mode status when it changes.
780
+ """
781
+ if self._mode_status_observer is not None:
782
+ self.simulator.remove_observer(self._mode_status_observer)
783
+ self._mode_status_observer = ModeStatusObserver(self)
784
+ self.simulator.add_observer(self._mode_status_observer)
785
+
786
+ def _create_shut_down_observer(self) -> None:
787
+ """
788
+ Creates an observer to shut down the application when the simulation is terminated.
789
+ """
790
+ if self._shut_down_observer is not None:
791
+ self.simulator.remove_observer(self._shut_down_observer)
792
+ self._shut_down_observer = ShutDownObserver(self)
793
+ self.simulator.add_observer(self._shut_down_observer)
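Taken together, a hedged end-to-end lifecycle sketch; the module paths, names, and configuration file below are assumptions, not values defined by this file.

# Lifecycle sketch: start_up() connects and starts the I/O loop, ready()
# announces readiness, and shut_down() cancels the consumer, deletes the
# declared queues and exchanges, and closes the channel and connection.
from nost_tools.application import Application
from nost_tools.configuration import ConnectionConfig

app = Application(app_name="demo")
app.start_up(prefix="test", config=ConnectionConfig("nost.yaml"))  # hypothetical YAML file
app.ready()
try:
    pass  # application-specific work, e.g. registering message callbacks
finally:
    app.shut_down()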