mongo-charms-single-kernel 1.8.6__py3-none-any.whl → 1.8.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (47)
  1. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.8.dist-info}/METADATA +2 -1
  2. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.8.dist-info}/RECORD +41 -40
  3. single_kernel_mongo/abstract_charm.py +8 -0
  4. single_kernel_mongo/config/literals.py +2 -23
  5. single_kernel_mongo/config/models.py +12 -0
  6. single_kernel_mongo/config/relations.py +0 -1
  7. single_kernel_mongo/config/statuses.py +10 -57
  8. single_kernel_mongo/core/abstract_upgrades_v3.py +149 -0
  9. single_kernel_mongo/core/k8s_workload.py +2 -2
  10. single_kernel_mongo/core/kubernetes_upgrades_v3.py +17 -0
  11. single_kernel_mongo/core/machine_upgrades_v3.py +54 -0
  12. single_kernel_mongo/core/operator.py +86 -5
  13. single_kernel_mongo/core/version_checker.py +7 -6
  14. single_kernel_mongo/core/vm_workload.py +30 -13
  15. single_kernel_mongo/core/workload.py +17 -19
  16. single_kernel_mongo/events/backups.py +3 -3
  17. single_kernel_mongo/events/cluster.py +1 -1
  18. single_kernel_mongo/events/database.py +1 -1
  19. single_kernel_mongo/events/lifecycle.py +5 -4
  20. single_kernel_mongo/events/tls.py +7 -4
  21. single_kernel_mongo/exceptions.py +4 -24
  22. single_kernel_mongo/lib/charms/operator_libs_linux/v1/systemd.py +288 -0
  23. single_kernel_mongo/managers/cluster.py +8 -8
  24. single_kernel_mongo/managers/config.py +5 -3
  25. single_kernel_mongo/managers/ldap.py +2 -1
  26. single_kernel_mongo/managers/mongo.py +48 -9
  27. single_kernel_mongo/managers/mongodb_operator.py +199 -96
  28. single_kernel_mongo/managers/mongos_operator.py +97 -35
  29. single_kernel_mongo/managers/sharding.py +4 -4
  30. single_kernel_mongo/managers/tls.py +54 -27
  31. single_kernel_mongo/managers/upgrade_v3.py +452 -0
  32. single_kernel_mongo/managers/upgrade_v3_status.py +133 -0
  33. single_kernel_mongo/state/app_peer_state.py +12 -2
  34. single_kernel_mongo/state/charm_state.py +31 -141
  35. single_kernel_mongo/state/config_server_state.py +0 -33
  36. single_kernel_mongo/state/unit_peer_state.py +10 -0
  37. single_kernel_mongo/templates/enable-transparent-huge-pages.service.j2 +14 -0
  38. single_kernel_mongo/utils/helpers.py +0 -6
  39. single_kernel_mongo/utils/mongo_config.py +32 -8
  40. single_kernel_mongo/core/abstract_upgrades.py +0 -890
  41. single_kernel_mongo/core/kubernetes_upgrades.py +0 -194
  42. single_kernel_mongo/core/machine_upgrades.py +0 -188
  43. single_kernel_mongo/events/upgrades.py +0 -157
  44. single_kernel_mongo/managers/upgrade.py +0 -334
  45. single_kernel_mongo/state/upgrade_state.py +0 -134
  46. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.8.dist-info}/WHEEL +0 -0
  47. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.8.dist-info}/licenses/LICENSE +0 -0
@@ -9,26 +9,23 @@ from __future__ import annotations
  import logging
  from typing import TYPE_CHECKING, final

+ import charm_refresh
  from data_platform_helpers.advanced_statuses.models import StatusObject
  from data_platform_helpers.advanced_statuses.protocol import ManagerStatusProtocol
  from data_platform_helpers.advanced_statuses.types import Scope as DPHScope
- from data_platform_helpers.version_check import (
-     CrossAppVersionChecker,
-     get_charm_revision,
- )
+ from data_platform_helpers.version_check import CrossAppVersionChecker, get_charm_revision
  from ops.framework import Object
  from ops.model import Container, ModelError, SecretNotFoundError, Unit
  from pymongo.errors import OperationFailure, PyMongoError, ServerSelectionTimeoutError
- from tenacity import Retrying, stop_after_attempt, wait_fixed
+ from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed
  from typing_extensions import override

  from single_kernel_mongo.config.literals import (
-     OS_REQUIREMENTS,
+     FEATURE_VERSION,
      CharmKind,
      MongoPorts,
      Scope,
      Substrates,
-     UnitState,
  )
  from single_kernel_mongo.config.models import (
      ROLES,
@@ -46,26 +43,21 @@ from single_kernel_mongo.config.statuses import (
      PasswordManagementStatuses,
      ShardStatuses,
  )
- from single_kernel_mongo.core.kubernetes_upgrades import KubernetesUpgrade
- from single_kernel_mongo.core.machine_upgrades import MachineUpgrade
+ from single_kernel_mongo.core.kubernetes_upgrades_v3 import KubernetesMongoDBRefresh
+ from single_kernel_mongo.core.machine_upgrades_v3 import MachineMongoDBRefresh
  from single_kernel_mongo.core.operator import OperatorProtocol
  from single_kernel_mongo.core.secrets import generate_secret_label
  from single_kernel_mongo.core.structured_config import MongoDBRoles
  from single_kernel_mongo.core.version_checker import VersionChecker
- from single_kernel_mongo.events.backups import (
-     BackupEventsHandler,
- )
+ from single_kernel_mongo.events.backups import BackupEventsHandler
  from single_kernel_mongo.events.cluster import ClusterConfigServerEventHandler
  from single_kernel_mongo.events.database import DatabaseEventsHandler
  from single_kernel_mongo.events.ldap import LDAPEventHandler
  from single_kernel_mongo.events.primary_action import PrimaryActionHandler
- from single_kernel_mongo.events.sharding import (
-     ConfigServerEventHandler,
-     ShardEventHandler,
- )
+ from single_kernel_mongo.events.sharding import ConfigServerEventHandler, ShardEventHandler
  from single_kernel_mongo.events.tls import TLSEventsHandler
- from single_kernel_mongo.events.upgrades import UpgradeEventHandler
  from single_kernel_mongo.exceptions import (
+     BalancerNotEnabledError,
      ContainerNotReadyError,
      DeferrableFailedHookChecksError,
      EarlyRemovalOfConfigServerError,
@@ -99,13 +91,10 @@ from single_kernel_mongo.managers.mongo import MongoManager
  from single_kernel_mongo.managers.observability import ObservabilityManager
  from single_kernel_mongo.managers.sharding import ConfigServerManager, ShardManager
  from single_kernel_mongo.managers.tls import TLSManager
- from single_kernel_mongo.managers.upgrade import MongoDBUpgradeManager
+ from single_kernel_mongo.managers.upgrade_v3 import MongoDBUpgradesManager
+ from single_kernel_mongo.managers.upgrade_v3_status import MongoDBUpgradesStatusManager
  from single_kernel_mongo.state.charm_state import CharmState
- from single_kernel_mongo.utils.helpers import (
-     is_valid_ldap_options,
-     is_valid_ldapusertodnmapping,
-     unit_number,
- )
+ from single_kernel_mongo.utils.helpers import is_valid_ldap_options, is_valid_ldapusertodnmapping
  from single_kernel_mongo.utils.mongo_connection import MongoConnection, NotReadyError
  from single_kernel_mongo.utils.mongodb_users import (
      BackupUser,
@@ -136,6 +125,7 @@ class MongoDBOperator(OperatorProtocol, Object):

      name = CharmKind.MONGOD.value
      workload: MongoDBWorkload
+     refresh: charm_refresh.Common | None

      def __init__(self, charm: AbstractMongoCharm):
          super(OperatorProtocol, self).__init__(charm, self.name)
@@ -160,7 +150,7 @@ class MongoDBOperator(OperatorProtocol, Object):
          self.cross_app_version_checker = CrossAppVersionChecker(
              self.charm,
              version=get_charm_revision(
-                 self.charm.unit, local_version=self.workload.get_internal_revision()
+                 self.charm.unit, local_version=self.workload.get_charm_revision()
              ),
              relations_to_check=[
                  RelationNames.SHARDING.value,
@@ -206,10 +196,6 @@ class MongoDBOperator(OperatorProtocol, Object):
          self.cluster_manager = ClusterProvider(
              self, self.state, self.substrate, RelationNames.CLUSTER
          )
-         upgrade_backend = MachineUpgrade if self.substrate == Substrates.VM else KubernetesUpgrade
-         self.upgrade_manager = MongoDBUpgradeManager(
-             self, upgrade_backend, key=RelationNames.UPGRADE_VERSION.value
-         )

          # LDAP Manager, which covers both send-ca-cert interface and ldap interface.
          self.ldap_manager = LDAPManager(
@@ -220,6 +206,41 @@ class MongoDBOperator(OperatorProtocol, Object):
              ExternalRequirerRelations.LDAP_CERT,
          )

+         # Upgrades
+         self.upgrades_manager = MongoDBUpgradesManager(self, self.state, self.workload)
+         if self.substrate == Substrates.VM:
+             upgrade_backend = MachineMongoDBRefresh(
+                 dependent=self,
+                 state=self.state,
+                 upgrades_manager=self.upgrades_manager,
+                 workload_name="MongoDB",
+                 charm_name=self.charm.name,
+             )
+             refresh_class = charm_refresh.Machines
+         else:
+             upgrade_backend = KubernetesMongoDBRefresh(
+                 dependent=self,
+                 state=self.state,
+                 upgrades_manager=self.upgrades_manager,
+                 workload_name="MongoDB",
+                 charm_name=self.charm.name,
+                 oci_resource_name="mongodb-image",
+             )
+             refresh_class = charm_refresh.Kubernetes
+
+         try:
+             self.refresh = refresh_class(upgrade_backend)  # type: ignore[argument-type]
+         except (charm_refresh.UnitTearingDown, charm_refresh.PeerRelationNotReady):
+             self.refresh = None
+         except charm_refresh.KubernetesJujuAppNotTrusted:
+             # As recommended, let the charm crash so that the user can trust
+             # the application and all events will resume afterwards.
+             raise
+
+         self.upgrades_status_manager = MongoDBUpgradesStatusManager(
+             state=self.state, workload=self.workload, refresh=self.refresh
+         )
+
          self.sysctl_config = sysctl.Config(name=self.charm.app.name)

          self.observability_manager = ObservabilityManager(self, self.state, self.substrate)
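
The charm_refresh wiring above replaces the old MachineUpgrade/KubernetesUpgrade backends and the MongoDBUpgradeManager. The rest of this diff gates behaviour on three flags of the resulting refresh object. A small hypothetical helper, not part of the package, showing how those flags are read; `refresh` is the object built above, or None when the peer relation is not ready or the unit is tearing down:

    def refresh_gates(refresh) -> dict[str, bool]:
        """Summarize the charm_refresh flags consulted throughout this diff (illustrative only)."""
        if refresh is None:
            # Construction failed with UnitTearingDown or PeerRelationNotReady:
            # no refresh is in progress and the workload is not cleared to start.
            return {
                "in_progress": False,
                "workload_allowed_to_start": False,
                "next_unit_allowed_to_refresh": False,
            }
        return {
            "in_progress": refresh.in_progress,
            "workload_allowed_to_start": refresh.workload_allowed_to_start,
            "next_unit_allowed_to_refresh": refresh.next_unit_allowed_to_refresh,
        }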
@@ -229,12 +250,100 @@ class MongoDBOperator(OperatorProtocol, Object):
          self.tls_events = TLSEventsHandler(self)
          self.primary_events = PrimaryActionHandler(self)
          self.client_events = DatabaseEventsHandler(self, RelationNames.DATABASE)
-         self.upgrade_events = UpgradeEventHandler(self)
          self.config_server_events = ConfigServerEventHandler(self)
          self.sharding_event_handlers = ShardEventHandler(self)
          self.cluster_event_handlers = ClusterConfigServerEventHandler(self)
          self.ldap_events = LDAPEventHandler(self)

+         if self.refresh is not None and not self.refresh.next_unit_allowed_to_refresh:
+             if self.refresh.in_progress:
+                 self._post_refresh(self.refresh)
+             else:
+                 self.refresh.next_unit_allowed_to_refresh = True
+
+         if self.refresh is not None and not self.refresh.in_progress:
+             self._handle_fcv_and_balancer()
+
+     def _handle_fcv_and_balancer(self):
+         """Checks the versions equality.
+
+         This may run on all events, so we bring all the safeguards possible so
+         that it runs only if all conditions are met.
+         """
+         if not self.charm.unit.is_leader():
+             return
+
+         if not self.refresh:
+             return
+
+         if self.state.app_peer_data.feature_compatibility_version == FEATURE_VERSION:
+             # We have already run all this logic before, no need to run it again.
+             return
+
+         # Update the version across all relations so that we can notify other units
+         self.cross_app_version_checker.set_version_across_all_relations()
+
+         if (
+             self.state.is_role(MongoDBRoles.CONFIG_SERVER)
+             and not self.cross_app_version_checker.are_related_apps_valid()
+         ):
+             # Early return if not all apps are valid.
+             return
+
+         try:
+             self.upgrades_manager.wait_for_cluster_healthy()  # type: ignore[attr-defined]
+         except RetryError:
+             logger.error(
+                 "Cluster is not healthy after refresh, will retry next juju event.", exc_info=True
+             )
+             return
+
+         if not self.upgrades_manager.is_cluster_able_to_read_write():  # type: ignore[attr-defined]
+             logger.error(
+                 "Cluster is not healthy after refresh, writes not propagated throughout cluster. Deferring post refresh check.",
+             )
+             return
+
+         try:
+             with MongoConnection(self.state.mongos_config) as mongos:
+                 mongos.start_and_wait_for_balancer()
+         except BalancerNotEnabledError:
+             logger.error(
+                 "Need more time to enable the balancer after finishing the refresh. Deferring event."
+             )
+             return
+
+         self.mongo_manager.set_feature_compatibility_version(FEATURE_VERSION)
+         self.state.app_peer_data.feature_compatibility_version = FEATURE_VERSION
+
+     def _post_refresh(self, refresh: charm_refresh.Common):  # noqa: C901
+         """Post refresh checks and actions.
+
+         Checks if unit is healthy and allow the next unit to update.
+         """
+         if not self.state.db_initialised:
+             return
+
+         if not refresh.workload_allowed_to_start:
+             return
+         logger.info("Restarting workloads")
+         # always apply the current charm revision's config
+         self._configure_workloads()
+         self.start_charm_services()
+
+         self.state.unit_peer_data.current_revision = self.cross_app_version_checker.version
+
+         if self.name == CharmKind.MONGOD:
+             self._restart_related_services()
+
+         if self.mongo_manager.mongod_ready():
+             try:
+                 self.upgrades_manager.wait_for_cluster_healthy()
+                 refresh.next_unit_allowed_to_refresh = True
+             except RetryError as err:
+                 logger.info("Cluster is not healthy after restart: %s", err)
+                 return
+
      @property
      def config(self):
          """Returns the actual config."""
@@ -288,7 +397,7 @@ class MongoDBOperator(OperatorProtocol, Object):
              self.config_server_manager,
              self.backup_manager,
              self.ldap_manager,
-             self.upgrade_manager,
+             self.upgrades_status_manager,
          )

      # BEGIN: Handlers.
@@ -302,8 +411,6 @@ class MongoDBOperator(OperatorProtocol, Object):
          if self.substrate == Substrates.VM:
              self._set_os_config()

-         self.charm.unit.set_workload_version(self.workload.get_version())
-
          # Truncate the file.
          self.workload.write(self.workload.paths.config_file, "")

@@ -320,6 +427,12 @@ class MongoDBOperator(OperatorProtocol, Object):
              logger.debug("Storages not attached yet.")
              raise ContainerNotReadyError("Missing storage")

+         if not self.refresh:
+             raise ContainerNotReadyError("Workload not allowed to start yet.")
+
+         # Store application revision for cross cluster checks
+         self.state.unit_peer_data.current_revision = self.cross_app_version_checker.version
+
          if self.state.is_role(MongoDBRoles.UNKNOWN):
              raise InvalidConfigRoleError()

@@ -338,6 +451,10 @@ class MongoDBOperator(OperatorProtocol, Object):
              )
              raise

+         if self.refresh.in_progress:  # type: ignore[union-attr]
+             # Bypass the regular start if refresh is in progress
+             return
+
          if self.charm.unit.is_leader():
              self.state.statuses.clear(scope="app", component=self.name)

@@ -360,12 +477,6 @@ class MongoDBOperator(OperatorProtocol, Object):
          self.start_charm_services()
          self.open_ports()

-         # This seems unnecessary
-         # if self.substrate == Substrates.K8S:
-         #     if not self.workload.exists(self.workload.paths.socket_path):
-         #         logger.debug("The mongod socket is not ready yet.")
-         #         raise WorkloadNotReadyError
-
          if not self.mongo_manager.mongod_ready():
              raise WorkloadNotReadyError

@@ -380,6 +491,10 @@ class MongoDBOperator(OperatorProtocol, Object):
              )
              raise

+         if self.charm.unit.is_leader():
+             self.mongo_manager.set_feature_compatibility_version(FEATURE_VERSION)
+             self.state.app_peer_data.feature_compatibility_version = FEATURE_VERSION
+
          try:
              self._restart_related_services()
          except WorkloadServiceError:
@@ -388,11 +503,6 @@ class MongoDBOperator(OperatorProtocol, Object):

          self.state.statuses.set(CharmStatuses.ACTIVE_IDLE.value, scope="unit", component=self.name)

-         if self.substrate == Substrates.K8S:
-             # K8S upgrades result in the start hook getting fired following this pattern
-             # https://juju.is/docs/sdk/upgrade-charm-event#heading--emission-sequence
-             self.upgrade_manager._reconcile_upgrade()
-
      @override
      def prepare_for_shutdown(self) -> None:  # pragma: nocover
          """Handler for the stop event.
@@ -407,32 +517,16 @@ class MongoDBOperator(OperatorProtocol, Object):
          Note that with how Juju currently operates, we only have at most 30
          seconds until SIGTERM command, so we are by no means guaranteed to have
          stepped down before the pod is removed.
-         Upon restart, the upgrade will still resume because all hooks run the
-         `_reconcile_upgrade` handler.
          """
          if self.substrate == Substrates.VM:
              return

-         # Raise partition to prevent other units from restarting if an upgrade is in progress.
-         # If an upgrade is not in progress, the leader unit will reset the partition to 0.
-         current_unit_number = unit_number(self.state.unit_upgrade_peer_data)
-         if self.state.k8s_manager.get_partition() < current_unit_number:
-             self.state.k8s_manager.set_partition(value=current_unit_number)
-             logger.debug(f"Partition set to {current_unit_number} during stop event")
-
-         if not self.upgrade_manager._upgrade:
-             logger.debug("Upgrade Peer relation missing during stop event")
-             return
-
-         # We update the state to set up the unit as restarting
-         self.upgrade_manager._upgrade.unit_state = UnitState.RESTARTING
-
          # According to the MongoDB documentation, before upgrading the primary, we must ensure a
          # safe primary re-election.
          try:
              if self.charm.unit.name == self.primary_unit_name:
                  logger.debug("Stepping down current primary, before upgrading service...")
-                 self.upgrade_manager.step_down_primary_and_wait_reelection()
+                 self.mongo_manager.step_down_primary_and_wait_reelection()
          except FailedToElectNewPrimaryError:
              logger.error("Failed to reelect primary before upgrading unit.")
              return
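
Stepping down the primary before a unit's mongod is restarted now goes through the mongo manager instead of the upgrade manager. The underlying operation is MongoDB's `replSetStepDown` admin command; a minimal pymongo sketch with a placeholder URI (the real helper presumably also waits until a new primary is elected):

    from pymongo import MongoClient
    from pymongo.errors import ConnectionFailure

    def step_down_primary(uri: str, step_down_secs: int = 60) -> None:
        """Ask the current primary to step down so that another member can be elected."""
        client = MongoClient(uri, directConnection=True)
        try:
            client.admin.command("replSetStepDown", step_down_secs)
        except ConnectionFailure:
            # Older MongoDB versions drop client connections during step-down;
            # a network error here still means the step-down was initiated.
            pass
        finally:
            client.close()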
@@ -463,7 +557,7 @@ class MongoDBOperator(OperatorProtocol, Object):
                  "Invalid LDAP Query template, please update your config."
              )

-         if self.state.upgrade_in_progress:
+         if self.refresh_in_progress:
              logger.warning(
                  "Changing config options is not permitted during an upgrade. The charm may be in a broken, unrecoverable state."
              )
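
From here on, every check of `self.state.upgrade_in_progress` becomes `self.refresh_in_progress`. That property is defined outside this excerpt (the reworked `core/operator.py` is the likely home); a plausible sketch, assuming it only wraps the optional refresh object:

    @property
    def refresh_in_progress(self) -> bool:
        """Hypothetical: True while a charm refresh is rolling through the units."""
        return self.refresh is not None and self.refresh.in_progress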
@@ -524,6 +618,8 @@ class MongoDBOperator(OperatorProtocol, Object):
              case PasswordManagementState.NEED_PASSWORD_UPDATE:
                  self.rotate_internal_passwords(context)
                  self.clear_password_management_statuses()
+             case _:
+                 pass

      def rotate_internal_passwords(self, context: PasswordManagementContext) -> None:
          """Rotate passwords for the internal users defined in the given context.
@@ -604,14 +700,15 @@ class MongoDBOperator(OperatorProtocol, Object):
          application in upgrade ?). Then we proceed to call the relation changed
          handler and update the list of related hosts.
          """
-         if not self.charm.unit.is_leader():
-             return
-         if self.state.upgrade_in_progress:
+         if self.refresh_in_progress:
              logger.warning(
                  "Adding replicas during an upgrade is not supported. The charm may be in a broken, unrecoverable state"
              )
              raise UpgradeInProgressError

+         if not self.charm.unit.is_leader():
+             return
+
          self.peer_changed()
          self.update_related_hosts()

@@ -620,10 +717,6 @@ class MongoDBOperator(OperatorProtocol, Object):

          Adds the unit as a replica to the MongoDB replica set.
          """
-         if self.substrate == Substrates.K8S:
-             # K8S Upgrades requires to reconcile the upgrade on lifecycle event.
-             self.upgrade_manager._reconcile_upgrade()
-
          # Changing the monitor or the backup password will lead to non-leader
          # units receiving a relation changed event. We must update the monitor
          # and pbm URI if the password changes so that COS/pbm can continue to
@@ -637,7 +730,7 @@ class MongoDBOperator(OperatorProtocol, Object):
          if not self.charm.unit.is_leader() or not self.state.db_initialised:
              return

-         if self.state.upgrade_in_progress:
+         if self.refresh_in_progress:
              logger.warning(
                  "Adding replicas during an upgrade is not supported. The charm may be in a broken, unrecoverable state"
              )
@@ -696,7 +789,7 @@ class MongoDBOperator(OperatorProtocol, Object):
          """Handles the relation departed events."""
          if not self.charm.unit.is_leader() or departing_unit == self.charm.unit:
              return
-         if self.state.upgrade_in_progress:
+         if self.refresh_in_progress:
              # do not defer or return here, if a user removes a unit, the config will be incorrect
              # and lead to MongoDB reporting that the replica set is unhealthy, we should make an
              # attempt to fix the replica set configuration even if an upgrade is occurring.
@@ -731,7 +824,7 @@ class MongoDBOperator(OperatorProtocol, Object):
          If the removing unit is primary also allow it to step down and elect another unit as
          primary while it still has access to its storage.
          """
-         if self.state.upgrade_in_progress:
+         if self.refresh_in_progress:
              # We cannot defer and prevent a user from removing a unit, log a warning instead.
              logger.warning(
                  "Removing replicas during an upgrade is not supported. The charm may be in a broken, unrecoverable state"
@@ -792,6 +885,10 @@ class MongoDBOperator(OperatorProtocol, Object):
              logger.info("Early return invalid statuses.")
              return

+         if self.cluster_version_checker.get_cluster_mismatched_revision_status():
+             logger.info("Early return, cluster mismatch version.")
+             return
+
          if self.state.is_role(MongoDBRoles.SHARD):
              shard_has_tls, config_server_has_tls = self.shard_manager.tls_status()
              if config_server_has_tls and not shard_has_tls:
@@ -802,13 +899,10 @@ class MongoDBOperator(OperatorProtocol, Object):
              logger.info("Mongod not ready.")
              return

-         if self.substrate == Substrates.K8S:
-             self.upgrade_manager._reconcile_upgrade()
-
          # It's useless to try to perform self healing if upgrade is in progress
          # as the handlers would raise an UpgradeInProgressError anyway so
          # better skip it when possible.
-         if not self.state.upgrade_in_progress:
+         if not self.refresh_in_progress:
              try:
                  self.perform_self_healing()
              except (ServerSelectionTimeoutError, OperationFailure) as e:
@@ -907,19 +1001,6 @@ class MongoDBOperator(OperatorProtocol, Object):
              logger.exception(f"Failed to open port: {e}")
              raise

-     def _set_os_config(self) -> None:
-         """Sets sysctl config for mongodb."""
-         try:
-             self.sysctl_config.configure(OS_REQUIREMENTS)
-         except (sysctl.ApplyError, sysctl.ValidationError, sysctl.CommandError) as e:
-             # we allow events to continue in the case that we are not able to correctly configure
-             # sysctl config, since we can still run the workload with wrong sysctl parameters
-             # even if it is not optimal.
-             logger.error(f"Error setting values on sysctl: {e.message}")
-             # containers share the kernel with the host system, and some sysctl parameters are
-             # set at kernel level.
-             logger.warning("sysctl params cannot be set. Is the machine running on a container?")
-
      @property
      def primary_unit_name(self) -> str | None:
          """Retrieves the primary unit with the primary replica."""
@@ -941,9 +1022,13 @@ class MongoDBOperator(OperatorProtocol, Object):

          If we are running as config-server, we should start both mongod and mongos.
          """
-         self.workload.start()
-         if self.state.is_role(MongoDBRoles.CONFIG_SERVER):
-             self.mongos_workload.start()
+         if not self.refresh or not self.refresh.workload_allowed_to_start:
+             raise WorkloadServiceError("Workload not allowed to start")
+
+         if self.refresh.workload_allowed_to_start:
+             self.workload.start()
+             if self.state.is_role(MongoDBRoles.CONFIG_SERVER):
+                 self.mongos_workload.start()

      @override
      def stop_charm_services(self):
@@ -961,6 +1046,8 @@ class MongoDBOperator(OperatorProtocol, Object):

          If we are running as config-server, we should update both mongod and mongos environments.
          """
+         if not self.refresh or not self.refresh.workload_allowed_to_start:
+             raise WorkloadServiceError("Workload not allowed to start")
          try:
              self.config_manager.configure_and_restart(force=force)
              if self.state.is_role(MongoDBRoles.CONFIG_SERVER):
@@ -1045,6 +1132,9 @@ class MongoDBOperator(OperatorProtocol, Object):
          # Instantiate the keyfile
          self.instantiate_keyfile()

+         # Instantiate the local directory for k8s
+         self.build_local_tls_directory()
+
          # Push TLS files if necessary
          self.tls_manager.push_tls_files_to_workload()
          self.ldap_manager.save_certificates(self.state.ldap.chain)
@@ -1119,17 +1209,30 @@ class MongoDBOperator(OperatorProtocol, Object):
              # don't bother checking revision mismatch on sharding interface if replica
              return statuses

-         if rev_status := self.cluster_version_checker.get_cluster_mismatched_revision_status():
-             statuses.append(rev_status)
-
          return statuses

+     def _cluster_mismatch_status(self, scope: DPHScope) -> list[StatusObject]:
+         """Returns a list with at most a single status.
+
+         This status is recomputed on every hook:
+         It's cheap, easy to recompute and we don't want to store it.
+         We compute it on every hook EXCEPT if we should recompute.
+         This way it does not get stored in the databag and stays as a purely dynamic status.
+         """
+         if scope == "unit":
+             return []
+         if rev_status := self.cluster_version_checker.get_cluster_mismatched_revision_status():
+             return [rev_status]
+         return []
+
      def get_statuses(self, scope: DPHScope, recompute: bool = False) -> list[StatusObject]:  # noqa: C901  # We know, this function is complex.
          """Returns the statuses of the charm manager."""
          charm_statuses: list[StatusObject] = []

          if not recompute:
-             return self.state.statuses.get(scope=scope, component=self.name).root
+             return self.state.statuses.get(
+                 scope=scope, component=self.name
+             ).root + self._cluster_mismatch_status(scope)

          if scope == "unit" and not self.workload.workload_present:
              return [CharmStatuses.MONGODB_NOT_INSTALLED.value]
@@ -1182,7 +1285,7 @@ class MongoDBOperator(OperatorProtocol, Object):
          if not self.model.unit.is_leader():
              return PasswordManagementContext(PasswordManagementState.NOT_LEADER)

-         if self.state.upgrade_in_progress:
+         if self.refresh_in_progress:
              return PasswordManagementContext(
                  PasswordManagementState.UPGRADE_RUNNING,
                  "Cannot update passwords while an upgrade is in progress.",