mongo-charms-single-kernel 1.8.6__py3-none-any.whl → 1.8.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mongo-charms-single-kernel might be problematic.
- {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/METADATA +2 -1
- {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/RECORD +38 -39
- single_kernel_mongo/abstract_charm.py +8 -0
- single_kernel_mongo/config/literals.py +1 -20
- single_kernel_mongo/config/relations.py +0 -1
- single_kernel_mongo/config/statuses.py +10 -57
- single_kernel_mongo/core/abstract_upgrades_v3.py +149 -0
- single_kernel_mongo/core/k8s_workload.py +2 -2
- single_kernel_mongo/core/kubernetes_upgrades_v3.py +17 -0
- single_kernel_mongo/core/machine_upgrades_v3.py +54 -0
- single_kernel_mongo/core/operator.py +25 -4
- single_kernel_mongo/core/version_checker.py +7 -6
- single_kernel_mongo/core/vm_workload.py +30 -13
- single_kernel_mongo/core/workload.py +17 -19
- single_kernel_mongo/events/backups.py +3 -3
- single_kernel_mongo/events/cluster.py +1 -1
- single_kernel_mongo/events/database.py +1 -1
- single_kernel_mongo/events/lifecycle.py +5 -4
- single_kernel_mongo/events/tls.py +7 -4
- single_kernel_mongo/exceptions.py +4 -24
- single_kernel_mongo/managers/cluster.py +8 -8
- single_kernel_mongo/managers/config.py +5 -3
- single_kernel_mongo/managers/ldap.py +2 -1
- single_kernel_mongo/managers/mongo.py +48 -9
- single_kernel_mongo/managers/mongodb_operator.py +195 -67
- single_kernel_mongo/managers/mongos_operator.py +95 -35
- single_kernel_mongo/managers/sharding.py +4 -4
- single_kernel_mongo/managers/tls.py +54 -27
- single_kernel_mongo/managers/upgrade_v3.py +452 -0
- single_kernel_mongo/managers/upgrade_v3_status.py +133 -0
- single_kernel_mongo/state/app_peer_state.py +12 -2
- single_kernel_mongo/state/charm_state.py +31 -141
- single_kernel_mongo/state/config_server_state.py +0 -33
- single_kernel_mongo/state/unit_peer_state.py +10 -0
- single_kernel_mongo/utils/helpers.py +0 -6
- single_kernel_mongo/utils/mongo_config.py +32 -8
- single_kernel_mongo/core/abstract_upgrades.py +0 -890
- single_kernel_mongo/core/kubernetes_upgrades.py +0 -194
- single_kernel_mongo/core/machine_upgrades.py +0 -188
- single_kernel_mongo/events/upgrades.py +0 -157
- single_kernel_mongo/managers/upgrade.py +0 -334
- single_kernel_mongo/state/upgrade_state.py +0 -134
- {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/WHEEL +0 -0
- {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/licenses/LICENSE +0 -0
single_kernel_mongo/managers/mongodb_operator.py

@@ -9,6 +9,7 @@ from __future__ import annotations
 import logging
 from typing import TYPE_CHECKING, final
 
+import charm_refresh
 from data_platform_helpers.advanced_statuses.models import StatusObject
 from data_platform_helpers.advanced_statuses.protocol import ManagerStatusProtocol
 from data_platform_helpers.advanced_statuses.types import Scope as DPHScope
@@ -19,16 +20,16 @@ from data_platform_helpers.version_check import (
 from ops.framework import Object
 from ops.model import Container, ModelError, SecretNotFoundError, Unit
 from pymongo.errors import OperationFailure, PyMongoError, ServerSelectionTimeoutError
-from tenacity import Retrying, stop_after_attempt, wait_fixed
+from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed
 from typing_extensions import override
 
 from single_kernel_mongo.config.literals import (
+    FEATURE_VERSION,
     OS_REQUIREMENTS,
     CharmKind,
     MongoPorts,
     Scope,
     Substrates,
-    UnitState,
 )
 from single_kernel_mongo.config.models import (
     ROLES,
@@ -46,8 +47,8 @@ from single_kernel_mongo.config.statuses import (
     PasswordManagementStatuses,
     ShardStatuses,
 )
-from single_kernel_mongo.core.
-from single_kernel_mongo.core.
+from single_kernel_mongo.core.kubernetes_upgrades_v3 import KubernetesMongoDBRefresh
+from single_kernel_mongo.core.machine_upgrades_v3 import MachineMongoDBRefresh
 from single_kernel_mongo.core.operator import OperatorProtocol
 from single_kernel_mongo.core.secrets import generate_secret_label
 from single_kernel_mongo.core.structured_config import MongoDBRoles
@@ -64,8 +65,8 @@ from single_kernel_mongo.events.sharding import (
     ShardEventHandler,
 )
 from single_kernel_mongo.events.tls import TLSEventsHandler
-from single_kernel_mongo.events.upgrades import UpgradeEventHandler
 from single_kernel_mongo.exceptions import (
+    BalancerNotEnabledError,
     ContainerNotReadyError,
     DeferrableFailedHookChecksError,
     EarlyRemovalOfConfigServerError,
@@ -99,12 +100,12 @@ from single_kernel_mongo.managers.mongo import MongoManager
 from single_kernel_mongo.managers.observability import ObservabilityManager
 from single_kernel_mongo.managers.sharding import ConfigServerManager, ShardManager
 from single_kernel_mongo.managers.tls import TLSManager
-from single_kernel_mongo.managers.
+from single_kernel_mongo.managers.upgrade_v3 import MongoDBUpgradesManager
+from single_kernel_mongo.managers.upgrade_v3_status import MongoDBUpgradesStatusManager
 from single_kernel_mongo.state.charm_state import CharmState
 from single_kernel_mongo.utils.helpers import (
     is_valid_ldap_options,
     is_valid_ldapusertodnmapping,
-    unit_number,
 )
 from single_kernel_mongo.utils.mongo_connection import MongoConnection, NotReadyError
 from single_kernel_mongo.utils.mongodb_users import (
@@ -136,6 +137,7 @@ class MongoDBOperator(OperatorProtocol, Object):
 
     name = CharmKind.MONGOD.value
     workload: MongoDBWorkload
+    refresh: charm_refresh.Common | None
 
     def __init__(self, charm: AbstractMongoCharm):
         super(OperatorProtocol, self).__init__(charm, self.name)
@@ -160,7 +162,7 @@ class MongoDBOperator(OperatorProtocol, Object):
         self.cross_app_version_checker = CrossAppVersionChecker(
             self.charm,
             version=get_charm_revision(
-                self.charm.unit, local_version=self.workload.
+                self.charm.unit, local_version=self.workload.get_charm_revision()
             ),
             relations_to_check=[
                 RelationNames.SHARDING.value,
@@ -206,10 +208,6 @@ class MongoDBOperator(OperatorProtocol, Object):
         self.cluster_manager = ClusterProvider(
             self, self.state, self.substrate, RelationNames.CLUSTER
         )
-        upgrade_backend = MachineUpgrade if self.substrate == Substrates.VM else KubernetesUpgrade
-        self.upgrade_manager = MongoDBUpgradeManager(
-            self, upgrade_backend, key=RelationNames.UPGRADE_VERSION.value
-        )
 
         # LDAP Manager, which covers both send-ca-cert interface and ldap interface.
         self.ldap_manager = LDAPManager(
@@ -220,6 +218,41 @@ class MongoDBOperator(OperatorProtocol, Object):
             ExternalRequirerRelations.LDAP_CERT,
         )
 
+        # Upgrades
+        self.upgrades_manager = MongoDBUpgradesManager(self, self.state, self.workload)
+        if self.substrate == Substrates.VM:
+            upgrade_backend = MachineMongoDBRefresh(
+                dependent=self,
+                state=self.state,
+                upgrades_manager=self.upgrades_manager,
+                workload_name="MongoDB",
+                charm_name=self.charm.name,
+            )
+            refresh_class = charm_refresh.Machines
+        else:
+            upgrade_backend = KubernetesMongoDBRefresh(
+                dependent=self,
+                state=self.state,
+                upgrades_manager=self.upgrades_manager,
+                workload_name="MongoDB",
+                charm_name=self.charm.name,
+                oci_resource_name="mongodb-image",
+            )
+            refresh_class = charm_refresh.Kubernetes
+
+        try:
+            self.refresh = refresh_class(upgrade_backend)  # type: ignore[argument-type]
+        except (charm_refresh.UnitTearingDown, charm_refresh.PeerRelationNotReady):
+            self.refresh = None
+        except charm_refresh.KubernetesJujuAppNotTrusted:
+            # As recommended, let the charm crash so that the user can trust
+            # the application and all events will resume afterwards.
+            raise
+
+        self.upgrades_status_manager = MongoDBUpgradesStatusManager(
+            state=self.state, workload=self.workload, refresh=self.refresh
+        )
+
         self.sysctl_config = sysctl.Config(name=self.charm.app.name)
 
         self.observability_manager = ObservabilityManager(self, self.state, self.substrate)
@@ -229,12 +262,100 @@ class MongoDBOperator(OperatorProtocol, Object):
         self.tls_events = TLSEventsHandler(self)
         self.primary_events = PrimaryActionHandler(self)
         self.client_events = DatabaseEventsHandler(self, RelationNames.DATABASE)
-        self.upgrade_events = UpgradeEventHandler(self)
         self.config_server_events = ConfigServerEventHandler(self)
         self.sharding_event_handlers = ShardEventHandler(self)
         self.cluster_event_handlers = ClusterConfigServerEventHandler(self)
         self.ldap_events = LDAPEventHandler(self)
 
+        if self.refresh is not None and not self.refresh.next_unit_allowed_to_refresh:
+            if self.refresh.in_progress:
+                self._post_refresh(self.refresh)
+            else:
+                self.refresh.next_unit_allowed_to_refresh = True
+
+        if self.refresh is not None and not self.refresh.in_progress:
+            self._handle_fcv_and_balancer()
+
+    def _handle_fcv_and_balancer(self):
+        """Checks the versions equality.
+
+        This may run on all events, so we bring all the safeguards possible so
+        that it runs only if all conditions are met.
+        """
+        if not self.charm.unit.is_leader():
+            return
+
+        if not self.refresh:
+            return
+
+        if self.state.app_peer_data.feature_compatibility_version == FEATURE_VERSION:
+            # We have already run all this logic before, no need to run it again.
+            return
+
+        # Update the version across all relations so that we can notify other units
+        self.cross_app_version_checker.set_version_across_all_relations()
+
+        if (
+            self.state.is_role(MongoDBRoles.CONFIG_SERVER)
+            and not self.cross_app_version_checker.are_related_apps_valid()
+        ):
+            # Early return if not all apps are valid.
+            return
+
+        try:
+            self.upgrades_manager.wait_for_cluster_healthy()  # type: ignore[attr-defined]
+        except RetryError:
+            logger.error(
+                "Cluster is not healthy after refresh, will retry next juju event.", exc_info=True
+            )
+            return
+
+        if not self.upgrades_manager.is_cluster_able_to_read_write():  # type: ignore[attr-defined]
+            logger.error(
+                "Cluster is not healthy after refresh, writes not propagated throughout cluster. Deferring post refresh check.",
+            )
+            return
+
+        try:
+            with MongoConnection(self.state.mongos_config) as mongos:
+                mongos.start_and_wait_for_balancer()
+        except BalancerNotEnabledError:
+            logger.error(
+                "Need more time to enable the balancer after finishing the refresh. Deferring event."
+            )
+            return
+
+        self.mongo_manager.set_feature_compatibility_version(FEATURE_VERSION)
+        self.state.app_peer_data.feature_compatibility_version = FEATURE_VERSION
+
+    def _post_refresh(self, refresh: charm_refresh.Common):  # noqa: C901
+        """Post refresh checks and actions.
+
+        Checks if unit is healthy and allow the next unit to update.
+        """
+        if not self.state.db_initialised:
+            return
+
+        if not refresh.workload_allowed_to_start:
+            return
+        logger.info("Restarting workloads")
+        # always apply the current charm revision's config
+        self.dependent._configure_workloads()
+        self.dependent.start_charm_services()
+
+        self.state.unit_peer_data.current_revision = self.cross_app_version_checker.version
+
+        if self.dependent.name == CharmKind.MONGOD:
+            self.dependent._restart_related_services()
+
+        if self.dependent.mongo_manager.mongod_ready():
+            try:
+                self.upgrades_manager.wait_for_cluster_healthy()
+                refresh.next_unit_allowed_to_refresh = True
+            except RetryError as err:
+                logger.info("Cluster is not healthy after restart: %s", err)
+                return
+
     @property
     def config(self):
         """Returns the actual config."""
@@ -288,7 +409,7 @@ class MongoDBOperator(OperatorProtocol, Object):
             self.config_server_manager,
             self.backup_manager,
             self.ldap_manager,
-            self.
+            self.upgrades_status_manager,
         )
 
         # BEGIN: Handlers.
@@ -302,8 +423,6 @@ class MongoDBOperator(OperatorProtocol, Object):
         if self.substrate == Substrates.VM:
             self._set_os_config()
 
-        self.charm.unit.set_workload_version(self.workload.get_version())
-
         # Truncate the file.
         self.workload.write(self.workload.paths.config_file, "")
 
@@ -320,6 +439,12 @@ class MongoDBOperator(OperatorProtocol, Object):
             logger.debug("Storages not attached yet.")
             raise ContainerNotReadyError("Missing storage")
 
+        if not self.refresh:
+            raise ContainerNotReadyError("Workload not allowed to start yet.")
+
+        # Store application revision for cross cluster checks
+        self.state.unit_peer_data.current_revision = self.cross_app_version_checker.version
+
         if self.state.is_role(MongoDBRoles.UNKNOWN):
             raise InvalidConfigRoleError()
 
@@ -338,6 +463,10 @@ class MongoDBOperator(OperatorProtocol, Object):
             )
             raise
 
+        if self.refresh.in_progress:  # type: ignore[union-attr]
+            # Bypass the regular start if refresh is in progress
+            return
+
         if self.charm.unit.is_leader():
             self.state.statuses.clear(scope="app", component=self.name)
 
@@ -360,12 +489,6 @@ class MongoDBOperator(OperatorProtocol, Object):
         self.start_charm_services()
         self.open_ports()
 
-        # This seems unnecessary
-        # if self.substrate == Substrates.K8S:
-        #     if not self.workload.exists(self.workload.paths.socket_path):
-        #         logger.debug("The mongod socket is not ready yet.")
-        #         raise WorkloadNotReadyError
-
         if not self.mongo_manager.mongod_ready():
             raise WorkloadNotReadyError
 
@@ -380,6 +503,10 @@ class MongoDBOperator(OperatorProtocol, Object):
             )
             raise
 
+        if self.charm.unit.is_leader():
+            self.mongo_manager.set_feature_compatibility_version(FEATURE_VERSION)
+            self.state.app_peer_data.feature_compatibility_version = FEATURE_VERSION
+
         try:
             self._restart_related_services()
         except WorkloadServiceError:
@@ -388,11 +515,6 @@ class MongoDBOperator(OperatorProtocol, Object):
 
         self.state.statuses.set(CharmStatuses.ACTIVE_IDLE.value, scope="unit", component=self.name)
 
-        if self.substrate == Substrates.K8S:
-            # K8S upgrades result in the start hook getting fired following this pattern
-            # https://juju.is/docs/sdk/upgrade-charm-event#heading--emission-sequence
-            self.upgrade_manager._reconcile_upgrade()
-
     @override
     def prepare_for_shutdown(self) -> None:  # pragma: nocover
         """Handler for the stop event.
@@ -407,32 +529,16 @@ class MongoDBOperator(OperatorProtocol, Object):
         Note that with how Juju currently operates, we only have at most 30
         seconds until SIGTERM command, so we are by no means guaranteed to have
         stepped down before the pod is removed.
-        Upon restart, the upgrade will still resume because all hooks run the
-        `_reconcile_upgrade` handler.
         """
         if self.substrate == Substrates.VM:
             return
 
-        # Raise partition to prevent other units from restarting if an upgrade is in progress.
-        # If an upgrade is not in progress, the leader unit will reset the partition to 0.
-        current_unit_number = unit_number(self.state.unit_upgrade_peer_data)
-        if self.state.k8s_manager.get_partition() < current_unit_number:
-            self.state.k8s_manager.set_partition(value=current_unit_number)
-            logger.debug(f"Partition set to {current_unit_number} during stop event")
-
-        if not self.upgrade_manager._upgrade:
-            logger.debug("Upgrade Peer relation missing during stop event")
-            return
-
-        # We update the state to set up the unit as restarting
-        self.upgrade_manager._upgrade.unit_state = UnitState.RESTARTING
-
         # According to the MongoDB documentation, before upgrading the primary, we must ensure a
         # safe primary re-election.
         try:
             if self.charm.unit.name == self.primary_unit_name:
                 logger.debug("Stepping down current primary, before upgrading service...")
-                self.
+                self.mongo_manager.step_down_primary_and_wait_reelection()
         except FailedToElectNewPrimaryError:
             logger.error("Failed to reelect primary before upgrading unit.")
             return
@@ -463,7 +569,7 @@ class MongoDBOperator(OperatorProtocol, Object):
                 "Invalid LDAP Query template, please update your config."
             )
 
-        if self.
+        if self.refresh_in_progress:
             logger.warning(
                 "Changing config options is not permitted during an upgrade. The charm may be in a broken, unrecoverable state."
             )
@@ -524,6 +630,8 @@ class MongoDBOperator(OperatorProtocol, Object):
             case PasswordManagementState.NEED_PASSWORD_UPDATE:
                 self.rotate_internal_passwords(context)
                 self.clear_password_management_statuses()
+            case _:
+                pass
 
     def rotate_internal_passwords(self, context: PasswordManagementContext) -> None:
         """Rotate passwords for the internal users defined in the given context.
@@ -604,14 +712,15 @@ class MongoDBOperator(OperatorProtocol, Object):
         application in upgrade ?). Then we proceed to call the relation changed
         handler and update the list of related hosts.
         """
-        if
-            return
-        if self.state.upgrade_in_progress:
+        if self.refresh_in_progress:
             logger.warning(
                 "Adding replicas during an upgrade is not supported. The charm may be in a broken, unrecoverable state"
             )
             raise UpgradeInProgressError
 
+        if not self.charm.unit.is_leader():
+            return
+
         self.peer_changed()
         self.update_related_hosts()
 
@@ -620,10 +729,6 @@ class MongoDBOperator(OperatorProtocol, Object):
 
         Adds the unit as a replica to the MongoDB replica set.
         """
-        if self.substrate == Substrates.K8S:
-            # K8S Upgrades requires to reconcile the upgrade on lifecycle event.
-            self.upgrade_manager._reconcile_upgrade()
-
         # Changing the monitor or the backup password will lead to non-leader
         # units receiving a relation changed event. We must update the monitor
         # and pbm URI if the password changes so that COS/pbm can continue to
@@ -637,7 +742,7 @@ class MongoDBOperator(OperatorProtocol, Object):
         if not self.charm.unit.is_leader() or not self.state.db_initialised:
             return
 
-        if self.
+        if self.refresh_in_progress:
             logger.warning(
                 "Adding replicas during an upgrade is not supported. The charm may be in a broken, unrecoverable state"
             )
@@ -696,7 +801,7 @@ class MongoDBOperator(OperatorProtocol, Object):
         """Handles the relation departed events."""
         if not self.charm.unit.is_leader() or departing_unit == self.charm.unit:
             return
-        if self.
+        if self.refresh_in_progress:
             # do not defer or return here, if a user removes a unit, the config will be incorrect
             # and lead to MongoDB reporting that the replica set is unhealthy, we should make an
             # attempt to fix the replica set configuration even if an upgrade is occurring.
@@ -731,7 +836,7 @@ class MongoDBOperator(OperatorProtocol, Object):
         If the removing unit is primary also allow it to step down and elect another unit as
         primary while it still has access to its storage.
         """
-        if self.
+        if self.refresh_in_progress:
             # We cannot defer and prevent a user from removing a unit, log a warning instead.
             logger.warning(
                 "Removing replicas during an upgrade is not supported. The charm may be in a broken, unrecoverable state"
@@ -792,6 +897,10 @@ class MongoDBOperator(OperatorProtocol, Object):
             logger.info("Early return invalid statuses.")
             return
 
+        if self.cluster_version_checker.get_cluster_mismatched_revision_status():
+            logger.info("Early return, cluster mismatch version.")
+            return
+
         if self.state.is_role(MongoDBRoles.SHARD):
             shard_has_tls, config_server_has_tls = self.shard_manager.tls_status()
             if config_server_has_tls and not shard_has_tls:
@@ -802,13 +911,10 @@ class MongoDBOperator(OperatorProtocol, Object):
             logger.info("Mongod not ready.")
             return
 
-        if self.substrate == Substrates.K8S:
-            self.upgrade_manager._reconcile_upgrade()
-
         # It's useless to try to perform self healing if upgrade is in progress
         # as the handlers would raise an UpgradeInProgressError anyway so
         # better skip it when possible.
-        if not self.
+        if not self.refresh_in_progress:
             try:
                 self.perform_self_healing()
             except (ServerSelectionTimeoutError, OperationFailure) as e:
@@ -941,9 +1047,13 @@ class MongoDBOperator(OperatorProtocol, Object):
 
         If we are running as config-server, we should start both mongod and mongos.
         """
-        self.
-
-
+        if not self.refresh or not self.refresh.workload_allowed_to_start:
+            raise WorkloadServiceError("Workload not allowed to start")
+
+        if self.refresh.workload_allowed_to_start:
+            self.workload.start()
+            if self.state.is_role(MongoDBRoles.CONFIG_SERVER):
+                self.mongos_workload.start()
 
     @override
     def stop_charm_services(self):
@@ -961,6 +1071,8 @@ class MongoDBOperator(OperatorProtocol, Object):
 
         If we are running as config-server, we should update both mongod and mongos environments.
         """
+        if not self.refresh or not self.refresh.workload_allowed_to_start:
+            raise WorkloadServiceError("Workload not allowed to start")
         try:
             self.config_manager.configure_and_restart(force=force)
             if self.state.is_role(MongoDBRoles.CONFIG_SERVER):
@@ -1045,6 +1157,9 @@ class MongoDBOperator(OperatorProtocol, Object):
         # Instantiate the keyfile
         self.instantiate_keyfile()
 
+        # Instantiate the local directory for k8s
+        self.build_local_tls_directory()
+
         # Push TLS files if necessary
         self.tls_manager.push_tls_files_to_workload()
         self.ldap_manager.save_certificates(self.state.ldap.chain)
@@ -1119,17 +1234,30 @@ class MongoDBOperator(OperatorProtocol, Object):
             # don't bother checking revision mismatch on sharding interface if replica
             return statuses
 
-        if rev_status := self.cluster_version_checker.get_cluster_mismatched_revision_status():
-            statuses.append(rev_status)
-
         return statuses
 
+    def _cluster_mismatch_status(self, scope: DPHScope) -> list[StatusObject]:
+        """Returns a list with at most a single status.
+
+        This status is recomputed on every hook:
+        It's cheap, easy to recompute and we don't want to store it.
+        We compute it on every hook EXCEPT if we should recompute.
+        This way it does not get stored in the databag and stays as a purely dynamic status.
+        """
+        if scope == "unit":
+            return []
+        if rev_status := self.cluster_version_checker.get_cluster_mismatched_revision_status():
+            return [rev_status]
+        return []
+
     def get_statuses(self, scope: DPHScope, recompute: bool = False) -> list[StatusObject]:  # noqa: C901  # We know, this function is complex.
         """Returns the statuses of the charm manager."""
        charm_statuses: list[StatusObject] = []
 
         if not recompute:
-            return self.state.statuses.get(
+            return self.state.statuses.get(
+                scope=scope, component=self.name
+            ).root + self._cluster_mismatch_status(scope)
 
         if scope == "unit" and not self.workload.workload_present:
             return [CharmStatuses.MONGODB_NOT_INSTALLED.value]
@@ -1182,7 +1310,7 @@ class MongoDBOperator(OperatorProtocol, Object):
         if not self.model.unit.is_leader():
             return PasswordManagementContext(PasswordManagementState.NOT_LEADER)
 
-        if self.
+        if self.refresh_in_progress:
             return PasswordManagementContext(
                 PasswordManagementState.UPGRADE_RUNNING,
                 "Cannot update passwords while an upgrade is in progress.",