mongo-charms-single-kernel 1.8.6-py3-none-any.whl → 1.8.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of mongo-charms-single-kernel was flagged as a potentially problematic release.

Files changed (44)
  1. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/METADATA +2 -1
  2. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/RECORD +38 -39
  3. single_kernel_mongo/abstract_charm.py +8 -0
  4. single_kernel_mongo/config/literals.py +1 -20
  5. single_kernel_mongo/config/relations.py +0 -1
  6. single_kernel_mongo/config/statuses.py +10 -57
  7. single_kernel_mongo/core/abstract_upgrades_v3.py +149 -0
  8. single_kernel_mongo/core/k8s_workload.py +2 -2
  9. single_kernel_mongo/core/kubernetes_upgrades_v3.py +17 -0
  10. single_kernel_mongo/core/machine_upgrades_v3.py +54 -0
  11. single_kernel_mongo/core/operator.py +25 -4
  12. single_kernel_mongo/core/version_checker.py +7 -6
  13. single_kernel_mongo/core/vm_workload.py +30 -13
  14. single_kernel_mongo/core/workload.py +17 -19
  15. single_kernel_mongo/events/backups.py +3 -3
  16. single_kernel_mongo/events/cluster.py +1 -1
  17. single_kernel_mongo/events/database.py +1 -1
  18. single_kernel_mongo/events/lifecycle.py +5 -4
  19. single_kernel_mongo/events/tls.py +7 -4
  20. single_kernel_mongo/exceptions.py +4 -24
  21. single_kernel_mongo/managers/cluster.py +8 -8
  22. single_kernel_mongo/managers/config.py +5 -3
  23. single_kernel_mongo/managers/ldap.py +2 -1
  24. single_kernel_mongo/managers/mongo.py +48 -9
  25. single_kernel_mongo/managers/mongodb_operator.py +195 -67
  26. single_kernel_mongo/managers/mongos_operator.py +95 -35
  27. single_kernel_mongo/managers/sharding.py +4 -4
  28. single_kernel_mongo/managers/tls.py +54 -27
  29. single_kernel_mongo/managers/upgrade_v3.py +452 -0
  30. single_kernel_mongo/managers/upgrade_v3_status.py +133 -0
  31. single_kernel_mongo/state/app_peer_state.py +12 -2
  32. single_kernel_mongo/state/charm_state.py +31 -141
  33. single_kernel_mongo/state/config_server_state.py +0 -33
  34. single_kernel_mongo/state/unit_peer_state.py +10 -0
  35. single_kernel_mongo/utils/helpers.py +0 -6
  36. single_kernel_mongo/utils/mongo_config.py +32 -8
  37. single_kernel_mongo/core/abstract_upgrades.py +0 -890
  38. single_kernel_mongo/core/kubernetes_upgrades.py +0 -194
  39. single_kernel_mongo/core/machine_upgrades.py +0 -188
  40. single_kernel_mongo/events/upgrades.py +0 -157
  41. single_kernel_mongo/managers/upgrade.py +0 -334
  42. single_kernel_mongo/state/upgrade_state.py +0 -134
  43. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/WHEEL +0 -0
  44. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/licenses/LICENSE +0 -0

single_kernel_mongo/core/vm_workload.py
@@ -9,18 +9,18 @@ from collections.abc import Mapping
 from itertools import chain
 from logging import getLogger
 from pathlib import Path
+from platform import machine
 from shutil import copyfile

 from ops import Container
-from tenacity import retry, retry_if_result, stop_after_attempt, wait_fixed
+from tenacity import retry, retry_if_exception_type, retry_if_result, stop_after_attempt, wait_fixed
 from typing_extensions import override

 from single_kernel_mongo.config.literals import (
     CRON_FILE,
-    SNAP,
     VmUser,
 )
-from single_kernel_mongo.config.models import CharmSpec
+from single_kernel_mongo.config.models import SNAP_NAME, CharmSpec
 from single_kernel_mongo.core.workload import WorkloadBase
 from single_kernel_mongo.exceptions import (
     WorkloadExecError,
@@ -41,8 +41,7 @@ class VMWorkload(WorkloadBase):

     def __init__(self, role: CharmSpec, container: Container | None) -> None:
         super().__init__(role, container)
-        self.snap = SNAP
-        self.mongod_snap = snap.SnapCache()[self.snap.name]
+        self.mongod_snap = snap.SnapCache()[SNAP_NAME]

     @property
     @override
@@ -186,23 +185,41 @@ class VMWorkload(WorkloadBase):
         stop=stop_after_attempt(20),
         wait=wait_fixed(1),
         reraise=True,
+        retry=retry_if_exception_type(WorkloadServiceError),
     )
-    def install(self) -> None:
-        """Loads the MongoDB snap from LP.
+    def install(self, revision: str | None = None, retry_and_raise: bool = True) -> bool:
+        """Install the charmed-mongodb snap from the snap store.
+
+        Args:
+            revision (str | None): the snap revision to install. Will be loaded from the
+                `refresh_versions.toml` file if None.
+            retry_and_raise (bool): whether to retry in case of errors. Will raise if the error
+                persists.

         Returns:
-            True if successfully installed. False otherwise.
+            True if successfully installed, False if errors occur and `retry_and_raise` is False.
         """
         try:
+            if not revision:
+                versions = self.load_toml_file(Path("refresh_versions.toml"))
+                revision = versions["snap"]["revisions"][machine()]
+
             self.mongod_snap.ensure(
-                snap.SnapState.Latest,
-                channel=self.snap.channel,
-                revision=self.snap.revision,
+                snap.SnapState.Present,
+                revision=revision,
             )
             self.mongod_snap.hold()
+            return True
         except snap.SnapError as err:
-            logger.error(f"Failed to install {self.snap.name}. Reason: {err}.")
-            raise WorkloadNotReadyError("Failed to install mongodb")
+            logger.error(f"Failed to install {SNAP_NAME}. Reason: {err}.")
+            if retry_and_raise:
+                raise WorkloadNotReadyError("Failed to install mongodb")
+            return False
+
+    @override
+    def snap_revision(self) -> str:
+        """The currently installed snap_revision."""
+        return self.mongod_snap.revision

     @override
     def setup_cron(self, lines: list[str]) -> None:  # pragma: nocover
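
The new install path resolves the snap revision per CPU architecture from a refresh_versions.toml shipped alongside the charm. The file's exact contents are not part of this diff; the sketch below only mirrors the two lookups visible in the hunks (versions["charm"] and versions["snap"]["revisions"][machine()]), with placeholder version and revision values.

    # Sketch only: illustrates the TOML shape implied by the lookups in this diff.
    # The keys mirror the code; the charm version and snap revisions are placeholders.
    import tomllib
    from platform import machine

    SAMPLE = """
    charm = "1.8.7"                  # read by WorkloadBase.get_charm_revision()

    [snap.revisions]                 # read by VMWorkload.install() when revision is None
    x86_64 = "130"
    aarch64 = "131"
    """

    versions = tomllib.loads(SAMPLE)
    # The charm indexes directly with versions["snap"]["revisions"][machine()];
    # .get() is used here only so the sketch runs on any architecture.
    revision = versions["snap"]["revisions"].get(machine(), "unknown")
    print(versions["charm"], revision)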

single_kernel_mongo/core/workload.py
@@ -9,12 +9,13 @@ import string
 from abc import ABC, abstractmethod
 from itertools import chain
 from pathlib import Path
-from typing import ClassVar
+from typing import Any, ClassVar

+import tomllib
 from ops import Container
 from ops.pebble import Layer

-from single_kernel_mongo.config.literals import WorkloadUser
+from single_kernel_mongo.config.literals import VERSIONS_FILE, WorkloadUser
 from single_kernel_mongo.config.models import CharmSpec


@@ -149,7 +150,7 @@ class WorkloadBase(ABC):  # pragma: nocover
         self.role = role

     @abstractmethod
-    def install(self) -> None:
+    def install(self, revision: str | None = None, retry_and_raise: bool = True) -> bool:
         """Installs the workload snap or raises an error.

         VM-only: on k8s, just returns None.
@@ -283,6 +284,17 @@ class WorkloadBase(ABC):  # pragma: nocover
         """
         ...

+    def snap_revision(self) -> str:
+        """The currently installed snap_revision."""
+        return ""
+
+    def load_toml_file(self, file: Path) -> dict[str, Any]:
+        """Loads a TOML file to a dictionary."""
+        if not file.exists():
+            return {}
+
+        return tomllib.loads(file.read_text())
+
     def get_version(self) -> str:
         """Get the workload version.

@@ -295,21 +307,6 @@
             version = ""
         return version

-    def get_internal_revision(self) -> str:
-        """Get the internal revision.
-
-        Note: This should be removed soon because we're moving away from `charm
-        version` + `internal revision` to `charm_version+git hash`.
-
-        Returns:
-            String of charm internal revision
-        """
-        try:
-            version = Path("charm_version").read_text().strip()
-        except:  # noqa: E722
-            version = ""
-        return version
-
     def get_charm_revision(self) -> str:
         """Get the charm revision.

@@ -317,7 +314,8 @@
             String of charm revision
         """
         try:
-            version = Path("charm_version").read_text().strip()
+            versions = self.load_toml_file(VERSIONS_FILE)
+            version = versions["charm"]
         except:  # noqa: E722
             version = ""
         return version

single_kernel_mongo/events/backups.py
@@ -79,7 +79,7 @@ class BackupEventsHandler(Object):

     def _on_s3_relation_joined(self, event: RelationJoinedEvent) -> None:
         """Checks for valid integration for s3-integrations."""
-        if self.dependent.state.upgrade_in_progress:
+        if self.dependent.refresh_in_progress:
             logger.warning(
                 "Adding s3-relations is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
             )
@@ -97,7 +97,7 @@ class BackupEventsHandler(Object):

     def _on_s3_credential_changed(self, event: CredentialsChangedEvent) -> None:  # noqa: C901
         action = "configure-pbm"
-        if self.dependent.state.upgrade_in_progress:
+        if self.dependent.refresh_in_progress:
             logger.warning(
                 "Changing s3-credentials is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
             )
@@ -285,7 +285,7 @@ class BackupEventsHandler(Object):
            )
            return

-        if self.dependent.state.upgrade_in_progress:
+        if self.dependent.refresh_in_progress:
            fail_action_with_error_log(
                logger,
                event,

single_kernel_mongo/events/cluster.py
@@ -72,7 +72,7 @@ class ClusterConfigServerEventHandler(Object):
         Calls the manager to share the secrets with mongos charm.
         """
         try:
-            self.manager.share_secret_to_mongos(event.relation)
+            self.manager.share_secret_to_mongos(event.relation, initial_event=True)
         except DeferrableFailedHookChecksError as e:
             logger.info("Skipping database requested event: hook checks did not pass.")
             defer_event_with_info_log(logger, event, str(type(event)), str(e))

single_kernel_mongo/events/database.py
@@ -138,7 +138,7 @@ class DatabaseEventsHandler(Object):
         if not self.charm.unit.is_leader():
             return False

-        if self.dependent.state.upgrade_in_progress:
+        if self.dependent.refresh_in_progress:
             logger.warning(
                 "Adding relations is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
             )

single_kernel_mongo/events/lifecycle.py
@@ -130,9 +130,10 @@ class LifecycleEventsHandler(Object):
         """Start event."""
         try:
             self.dependent.prepare_for_startup()
-        except (ContainerNotReadyError, WorkloadServiceError):
-            logger.info("Not ready to start.")
-            event.defer()
+        except (ContainerNotReadyError, WorkloadServiceError) as e:
+            defer_event_with_info_log(
+                logger, event, "start", f"Not ready to start: {e.__class__.__name__}({e})"
+            )
             return
         except InvalidConfigRoleError:
             logger.info("Missing a valid role.")
@@ -222,7 +223,7 @@ class LifecycleEventsHandler(Object):
                 secret_id=event.secret.id or "",
             )
         except (WorkloadServiceError, ChangeError) as err:
-            logger.info("Failed to restart services", err, exc_info=True)
+            logger.info("Failed to restart services: %s", err, exc_info=True)
             self.dependent.state.statuses.add(
                 CharmStatuses.FAILED_SERVICES_START.value,
                 scope="unit",

single_kernel_mongo/events/tls.py
@@ -77,7 +77,7 @@ class TLSEventsHandler(Object):
            )
            event.fail("Mongos cannot set TLS keys until integrated to config-server.")
            return
-        if self.manager.state.upgrade_in_progress:
+        if self.dependent.refresh_in_progress:
            fail_action_with_error_log(
                logger,
                event,
@@ -106,7 +106,8 @@ class TLSEventsHandler(Object):
            )
            event.defer()
            return
-        if self.manager.state.upgrade_in_progress:
+
+        if self.dependent.refresh_in_progress:
            logger.warning(
                "Enabling TLS is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
            )
@@ -131,7 +132,7 @@ class TLSEventsHandler(Object):
            event.defer()
            return

-        if self.manager.state.upgrade_in_progress:
+        if self.dependent.refresh_in_progress:
            logger.warning(
                "Disabling TLS is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
            )
@@ -162,7 +163,9 @@ class TLSEventsHandler(Object):
            logger.info(f"Deferring {str(type(event))}: db is not initialised")
            event.defer()
            return
-        if self.manager.state.upgrade_in_progress:
+        # Check if refresh is in progress and this is the initial integration, delay.
+        # Otherwise it's a rotation and we're safe to continue.
+        if self.dependent.refresh_in_progress and self.manager.initial_integration():
            logger.warning(
                "Enabling TLS is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
            )

single_kernel_mongo/exceptions.py
@@ -4,10 +4,6 @@

 """All general exceptions."""

-from data_platform_helpers.advanced_statuses.models import StatusObject
-
-from single_kernel_mongo.config.statuses import UpgradeStatuses
-

 class InvalidCharmKindError(Exception):
     """Raised when calling a function on the wrong charm kind."""
@@ -122,6 +118,10 @@ class UpgradeInProgressError(Exception):
     """Raised when an upgrade is in progress."""


+class MongoDBUpgradeError(Exception):
+    """Raised when the snap upgrade fails."""
+
+
 class OpenPortFailedError(Exception):
     """Raised when we fail to open ports."""

@@ -209,22 +209,6 @@ class EarlyRemovalOfConfigServerError(Exception):
     """Raised when we try to remove config server while it still has shards."""


-class StatusError(Exception):
-    """Exception with ops status."""
-
-    def __init__(self, status: StatusObject) -> None:
-        super().__init__(status.message)
-        self.status = status
-
-
-class PrecheckFailedError(StatusError):
-    """App is not ready to upgrade."""
-
-    def __init__(self, message: str):
-        self.message = message
-        super().__init__(UpgradeStatuses.REFRESH_IN_PROGRESS.value)
-
-
 class FailedToElectNewPrimaryError(Exception):
     """Raised when a new primary isn't elected after stepping down."""

@@ -249,10 +233,6 @@ class ActionFailedError(Exception):
     """Raised when we failed an action."""


-class UnhealthyUpgradeError(Exception):
-    """Raised when the upgrade is unhealthy during an post upgrade check."""
-
-
 class WaitingForLdapDataError(DeferrableError):
     """Raised when the charm hasn't received data from ldap yet."""


single_kernel_mongo/managers/cluster.py
@@ -59,7 +59,7 @@ class ClusterProvider(Object):
         self.relation_name = relation_name
         self.data_interface = self.state.cluster_provider_data_interface

-    def assert_pass_hook_checks(self) -> None:
+    def assert_pass_hook_checks(self, initial_event: bool = False) -> None:
         """Runs the pre hook checks, raises if it fails."""
         if not self.state.db_initialised:
             raise DeferrableFailedHookChecksError("DB is not initialised")
@@ -77,7 +77,7 @@ class ClusterProvider(Object):
         if not self.charm.unit.is_leader():
             raise NonDeferrableFailedHookChecksError("Not leader")

-        if self.state.upgrade_in_progress:
+        if self.dependent.refresh_in_progress and initial_event:
             raise DeferrableFailedHookChecksError(
                 "Processing mongos applications is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
             )
@@ -88,12 +88,12 @@ class ClusterProvider(Object):
         # we don't have any cluster relation.
         return self.state.is_role(MongoDBRoles.CONFIG_SERVER) or not self.state.cluster_relations

-    def share_secret_to_mongos(self, relation: Relation) -> None:
+    def share_secret_to_mongos(self, relation: Relation, initial_event: bool = False) -> None:
         """Handles the database requested event.

         The first time secrets are written to relations should be on this event.
         """
-        self.assert_pass_hook_checks()
+        self.assert_pass_hook_checks(initial_event=initial_event)

         config_server_db = self.state.generate_config_server_db()
         self.dependent.mongo_manager.reconcile_mongo_users_and_dbs(relation)
@@ -133,7 +133,7 @@ class ClusterProvider(Object):
         If it has departed, we run some checks and if we are a VM charm, we
         proceed to reconcile the users and DB and cleanup mongoDB.
         """
-        if self.state.upgrade_in_progress:
+        if self.dependent.refresh_in_progress:
             logger.warning(
                 "Removing integration to mongos is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
             )
@@ -269,8 +269,8 @@ class ClusterRequirer(Object):
             raise DeferrableFailedHookChecksError(
                 "Mongos was waiting for config-server to enable TLS. Wait for TLS to be enabled until starting mongos."
             )
-        if self.state.upgrade_in_progress:
-            raise DeferrableFailedHookChecksError(
+        if self.dependent.refresh_in_progress:
+            logger.warning(
                 "Processing client applications is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
             )

@@ -290,7 +290,7 @@
         """
         if not username or not password:
             raise WaitingForSecretsError
-        if self.state.upgrade_in_progress:
+        if self.dependent.refresh_in_progress:
             logger.warning(
                 "Processing client applications is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
             )

single_kernel_mongo/managers/config.py
@@ -18,7 +18,6 @@ from typing_extensions import override
 from yaml import safe_dump, safe_load

 from single_kernel_mongo.config.literals import (
-    LOCALHOST,
     PBM_RESTART_DELAY,
     CharmKind,
     MongoPorts,
@@ -343,6 +342,9 @@ class MongoConfigManager(FileBasedConfigManager, ABC):
                    "allowInvalidCertificates": True,
                    "clusterCAFile": f"{self.workload.paths.int_ca_file}",
                    "clusterFile": f"{self.workload.paths.int_pem_file}",
+                    "clusterAuthX509": {
+                        "attributes": f"O={self.state.get_subject_name()}",
+                    },
                }
            },
        },
@@ -366,7 +368,7 @@ class MongoConfigManager(FileBasedConfigManager, ABC):
            "tls": {
                "CAFile": f"{self.workload.paths.ext_ca_file}",
                "certificateKeyFile": f"{self.workload.paths.ext_pem_file}",
-                "mode": "preferTLS",
+                "mode": "requireTLS",
                "disabledProtocols": "TLS1_0,TLS1_1",
            }
        },
@@ -494,7 +496,7 @@ class MongosConfigManager(MongoConfigManager):
            return {"sharding": {"configDB": uri}}
        return {
            "sharding": {
-                "configDB": f"{self.state.app_peer_data.replica_set}/{LOCALHOST}:{MongoPorts.MONGODB_PORT.value}"
+                "configDB": f"{self.state.app_peer_data.replica_set}/{self.state.unit_peer_data.internal_address}:{MongoPorts.MONGODB_PORT.value}"
            }
        }

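MongoConfigManager is file-based and already imports safe_dump, so the dict fragments above end up as mongod YAML options. A minimal sketch of how the changed keys (net.tls.mode and the new clusterAuthX509.attributes) could render follows; the file paths, the organisation value, and the exact nesting and merging done by the charm are placeholders, not taken from this diff.

    # Sketch only: possible YAML rendering of the changed TLS options.
    # Paths and the subject organisation are placeholders.
    from yaml import safe_dump

    tls_section = {
        "net": {
            "tls": {
                "CAFile": "/placeholder/external-ca.crt",
                "certificateKeyFile": "/placeholder/external.pem",
                "mode": "requireTLS",  # 1.8.6 used preferTLS here
                "disabledProtocols": "TLS1_0,TLS1_1",
                "clusterAuthX509": {
                    "attributes": "O=placeholder-subject",  # new in 1.8.7
                },
            }
        }
    }
    print(safe_dump(tls_section, sort_keys=False))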

single_kernel_mongo/managers/ldap.py
@@ -76,7 +76,8 @@ class LDAPManager(Object, ManagerStatusProtocol):
             raise DeferrableFailedHookChecksError("DB is not initialised")
         if self.state.is_role(MongoDBRoles.SHARD):
             raise InvalidLdapWithShardError("Cannot integrate LDAP with shard.")
-        if self.state.upgrade_in_progress:
+        # Defer upon regular integration, but let's continue on an update.
+        if self.dependent.refresh_in_progress and not self.state.ldap.is_ready():
             raise DeferrableFailedHookChecksError(
                 "Adding LDAP is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
             )

single_kernel_mongo/managers/mongo.py
@@ -14,6 +14,7 @@ from __future__ import annotations
 import json
 import logging
 from typing import TYPE_CHECKING
+from urllib.parse import urlencode

 from dacite import from_dict
 from data_platform_helpers.advanced_statuses.models import StatusObject
@@ -27,6 +28,7 @@ from pymongo.errors import (
     PyMongoError,
     ServerSelectionTimeoutError,
 )
+from tenacity import Retrying, stop_after_attempt, wait_fixed

 from single_kernel_mongo.config.literals import MongoPorts, Substrates
 from single_kernel_mongo.config.statuses import CharmStatuses, MongodStatuses
@@ -34,6 +36,7 @@ from single_kernel_mongo.core.structured_config import MongoDBRoles
 from single_kernel_mongo.exceptions import (
     DatabaseRequestedHasNotRunYetError,
     DeployedWithoutTrustError,
+    FailedToElectNewPrimaryError,
     MissingCredentialsError,
     SetPasswordError,
 )
@@ -97,9 +100,15 @@ class MongoManager(Object, ManagerStatusProtocol):
         Pass direct=True, when checking if a *single replica* is ready.
         Pass direct=False, when checking if the entire replica set is ready
         """
-        if not uri and self.state.is_role(MongoDBRoles.MONGOS):
-            uri = f"localhost:{MongoPorts.MONGOS_PORT.value}"
-        actual_uri = uri or "localhost"
+        port = (
+            MongoPorts.MONGOS_PORT.value
+            if self.state.is_role(MongoDBRoles.MONGOS)
+            else MongoPorts.MONGODB_PORT.value
+        )
+        params = self.state.operator_config.tls_config
+
+        actual_uri = uri or f"mongodb://localhost:{port}"
+        actual_uri = f"{actual_uri}/?{urlencode(params)}"
         with MongoConnection(EMPTY_CONFIGURATION, actual_uri, direct=direct) as direct_mongo:
             return direct_mongo.is_ready

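mongod_ready() now appends connection options to the readiness URI via urlencode. The real options come from self.state.operator_config.tls_config, whose contents are not shown in this diff; the snippet below only illustrates the encoding step, using typical MongoDB URI options as placeholders.

    # Sketch only: how urlencode() turns a mapping of options into the URI query string.
    # The keys and values below are placeholders, not the charm's tls_config.
    from urllib.parse import urlencode

    params = {"tls": "true", "tlsCAFile": "/path/to/ca.pem"}
    uri = f"mongodb://localhost:27017/?{urlencode(params)}"
    print(uri)  # mongodb://localhost:27017/?tls=true&tlsCAFile=%2Fpath%2Fto%2Fca.pem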

@@ -265,7 +274,7 @@ class MongoManager(Object, ManagerStatusProtocol):
             return

         data_interface.set_endpoints(relation.id, ",".join(sorted(config.hosts)))
-        data_interface.set_uris(relation.id, config.uri)
+        data_interface.set_uris(relation.id, config.uri_without_tls)

         if not self.state.is_role(MongoDBRoles.MONGOS):
             data_interface.set_replset(
@@ -390,10 +399,10 @@ class MongoManager(Object, ManagerStatusProtocol):
                 relation.id,
                 ",".join(sorted(config.hosts)),
             )
-        if config.uri != uris:
+        if config.uri_without_tls != uris:
             data_interface.set_uris(
                 relation.id,
-                config.uri,
+                config.uri_without_tls,
             )
         if config.database != database:
             data_interface.set_database(
@@ -421,8 +430,7 @@ class MongoManager(Object, ManagerStatusProtocol):
             "password": password,
             "hosts": self.state.app_hosts,
             "roles": set(roles.split(",")),
-            "tls_external": False,
-            "tls_internal": False,
+            "tls_enabled": False,
             "port": self.state.host_port,
         }
         if not self.state.is_role(MongoDBRoles.MONGOS):
@@ -466,7 +474,7 @@ class MongoManager(Object, ManagerStatusProtocol):

         for member in config_hosts - replset_members:
             logger.debug("Adding %s to replica set", member)
-            if not self.mongod_ready(uri=member):
+            if not self.mongod_ready(uri=f"mongodb://{member}"):
                 logger.debug("not reconfiguring: %s is not ready yet.", member)
                 raise NotReadyError
             mongo.add_replset_member(member)
@@ -555,3 +563,34 @@ class MongoManager(Object, ManagerStatusProtocol):
             return [MongodStatuses.WAITING_RECONFIG.value]

         return charm_statuses
+
+    def set_feature_compatibility_version(self, feature_version: str) -> None:
+        """Sets the mongos feature compatibility version."""
+        if self.state.is_role(MongoDBRoles.REPLICATION):
+            config = self.state.mongo_config
+        elif self.state.is_role(MongoDBRoles.CONFIG_SERVER):
+            config = self.state.mongos_config
+        else:
+            return
+        with MongoConnection(config) as mongos:
+            mongos.client.admin.command(
+                "setFeatureCompatibilityVersion", value=feature_version, confirm=True
+            )
+
+    def step_down_primary_and_wait_reelection(self):
+        """Steps down the current primary and waits for a new one to be elected."""
+        if len(self.state.internal_hosts) < 2:
+            logger.warning(
+                "No secondaries to become primary - upgrading primary without electing a new one, expect downtime."
+            )
+            return
+
+        old_primary = self.dependent.primary_unit_name  # type: ignore
+        with MongoConnection(self.state.mongo_config) as mongod:
+            mongod.step_down_primary()
+
+        for attempt in Retrying(stop=stop_after_attempt(30), wait=wait_fixed(1), reraise=True):
+            with attempt:
+                new_primary = self.dependent.primary_unit_name  # type: ignore
+                if new_primary == old_primary:
+                    raise FailedToElectNewPrimaryError()