mongo-charms-single-kernel 1.8.6-py3-none-any.whl → 1.8.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mongo-charms-single-kernel has been flagged as potentially problematic.

Files changed (47)
  1. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.8.dist-info}/METADATA +2 -1
  2. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.8.dist-info}/RECORD +41 -40
  3. single_kernel_mongo/abstract_charm.py +8 -0
  4. single_kernel_mongo/config/literals.py +2 -23
  5. single_kernel_mongo/config/models.py +12 -0
  6. single_kernel_mongo/config/relations.py +0 -1
  7. single_kernel_mongo/config/statuses.py +10 -57
  8. single_kernel_mongo/core/abstract_upgrades_v3.py +149 -0
  9. single_kernel_mongo/core/k8s_workload.py +2 -2
  10. single_kernel_mongo/core/kubernetes_upgrades_v3.py +17 -0
  11. single_kernel_mongo/core/machine_upgrades_v3.py +54 -0
  12. single_kernel_mongo/core/operator.py +86 -5
  13. single_kernel_mongo/core/version_checker.py +7 -6
  14. single_kernel_mongo/core/vm_workload.py +30 -13
  15. single_kernel_mongo/core/workload.py +17 -19
  16. single_kernel_mongo/events/backups.py +3 -3
  17. single_kernel_mongo/events/cluster.py +1 -1
  18. single_kernel_mongo/events/database.py +1 -1
  19. single_kernel_mongo/events/lifecycle.py +5 -4
  20. single_kernel_mongo/events/tls.py +7 -4
  21. single_kernel_mongo/exceptions.py +4 -24
  22. single_kernel_mongo/lib/charms/operator_libs_linux/v1/systemd.py +288 -0
  23. single_kernel_mongo/managers/cluster.py +8 -8
  24. single_kernel_mongo/managers/config.py +5 -3
  25. single_kernel_mongo/managers/ldap.py +2 -1
  26. single_kernel_mongo/managers/mongo.py +48 -9
  27. single_kernel_mongo/managers/mongodb_operator.py +199 -96
  28. single_kernel_mongo/managers/mongos_operator.py +97 -35
  29. single_kernel_mongo/managers/sharding.py +4 -4
  30. single_kernel_mongo/managers/tls.py +54 -27
  31. single_kernel_mongo/managers/upgrade_v3.py +452 -0
  32. single_kernel_mongo/managers/upgrade_v3_status.py +133 -0
  33. single_kernel_mongo/state/app_peer_state.py +12 -2
  34. single_kernel_mongo/state/charm_state.py +31 -141
  35. single_kernel_mongo/state/config_server_state.py +0 -33
  36. single_kernel_mongo/state/unit_peer_state.py +10 -0
  37. single_kernel_mongo/templates/enable-transparent-huge-pages.service.j2 +14 -0
  38. single_kernel_mongo/utils/helpers.py +0 -6
  39. single_kernel_mongo/utils/mongo_config.py +32 -8
  40. single_kernel_mongo/core/abstract_upgrades.py +0 -890
  41. single_kernel_mongo/core/kubernetes_upgrades.py +0 -194
  42. single_kernel_mongo/core/machine_upgrades.py +0 -188
  43. single_kernel_mongo/events/upgrades.py +0 -157
  44. single_kernel_mongo/managers/upgrade.py +0 -334
  45. single_kernel_mongo/state/upgrade_state.py +0 -134
  46. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.8.dist-info}/WHEEL +0 -0
  47. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.8.dist-info}/licenses/LICENSE +0 -0
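
Both wheels are public, so a comparison like this can be reproduced locally. The sketch below is illustrative only: it assumes the two wheels have already been downloaded (for example with `pip download mongo-charms-single-kernel==1.8.6 --no-deps`, and likewise for 1.8.8, provided the package is fetchable from the registry), and the file names at the bottom are placeholders.

# Illustrative sketch: compare two already-downloaded wheels member by member.
# Renamed members (e.g. the versioned .dist-info directories) show up as a
# full removal plus a full addition rather than as a rename.
import difflib
import zipfile


def diff_wheels(old_whl: str, new_whl: str) -> None:
    with zipfile.ZipFile(old_whl) as old, zipfile.ZipFile(new_whl) as new:
        old_names, new_names = set(old.namelist()), set(new.namelist())
        for name in sorted(old_names | new_names):
            old_text = old.read(name).decode(errors="replace").splitlines() if name in old_names else []
            new_text = new.read(name).decode(errors="replace").splitlines() if name in new_names else []
            for line in difflib.unified_diff(old_text, new_text, fromfile=name, tofile=name, lineterm=""):
                print(line)


# Placeholder file names; adjust to wherever the wheels were downloaded.
diff_wheels(
    "mongo_charms_single_kernel-1.8.6-py3-none-any.whl",
    "mongo_charms_single_kernel-1.8.8-py3-none-any.whl",
)

The expanded hunks below correspond to the removed pre-v3 upgrade modules (items 41–43 in the list above).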
single_kernel_mongo/core/kubernetes_upgrades.py (removed)
@@ -1,194 +0,0 @@
- #!/usr/bin/env python3
- # Copyright 2024 Canonical Ltd.
- # See LICENSE file for licensing details.
- """Kubernetes Upgrade Code.
-
- This code is slightly different from the code which was written originally.
- It is required to deploy the application with `--trust` for this code to work
- as it has to interact with the Kubernetes StatefulSet.
- """
-
- from __future__ import annotations
-
- from logging import getLogger
- from typing import TYPE_CHECKING
-
- from data_platform_helpers.advanced_statuses.models import StatusObject
- from lightkube.core.exceptions import ApiError
- from overrides import override
-
- from single_kernel_mongo.config.literals import CharmKind, UnitState
- from single_kernel_mongo.config.statuses import UpgradeStatuses
- from single_kernel_mongo.core.abstract_upgrades import (
-     AbstractUpgrade,
- )
- from single_kernel_mongo.exceptions import ActionFailedError, DeployedWithoutTrustError
- from single_kernel_mongo.state.upgrade_state import UnitUpgradePeerData
- from single_kernel_mongo.utils.helpers import unit_number
-
- if TYPE_CHECKING:
-     from single_kernel_mongo.core.operator import OperatorProtocol
-
- logger = getLogger()
-
-
- class KubernetesUpgrade(AbstractUpgrade):
-     """Code for Kubernetes Upgrade.
-
-     This is the implementation of Kubernetes Upgrade methods.
-     """
-
-     def __init__(self, dependent: OperatorProtocol, *args, **kwargs):
-         super().__init__(dependent, *args, **kwargs)
-
-         self.k8s_manager = self.state.k8s_manager
-         try:
-             self.k8s_manager.get_partition()
-         except ApiError as err:
-             if err.status.code == 403:
-                 raise DeployedWithoutTrustError(app_name=dependent.charm.app.name)
-             raise
-
-     @override
-     def _get_unit_healthy_status(self) -> StatusObject:
-         version = self.state.unit_workload_container_version
-         if version == self.state.app_workload_container_version:
-             return UpgradeStatuses.k8s_active_upgrade(
-                 self._current_versions["workload"], self._current_versions["charm"]
-             )
-
-         return UpgradeStatuses.k8s_active_upgrade(
-             self._current_versions["workload"],
-             self._current_versions["charm"],
-             outdated=True,
-         )
-
-     @property
-     def app_status(self) -> StatusObject | None:
-         """App upgrade status."""
-         if not self.is_compatible:
-             logger.info(
-                 "Refresh incompatible. Rollback with `juju refresh`. "
-                 "If you accept potential *data loss* and *downtime*, you can continue by running `force-refresh-start`"
-                 "action on each remaining unit"
-             )
-             return UpgradeStatuses.INCOMPATIBLE_UPGRADE.value
-         return super().app_status
-
-     @property
-     def partition(self) -> int:
-         """Specifies which units should upgrade.
-
-         Unit numbers >= partition should upgrade
-         Unit numbers < partition should not upgrade
-
-         https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
-
-         For Kubernetes, unit numbers are guaranteed to be sequential.
-         """
-         return self.k8s_manager.get_partition()
-
-     @partition.setter
-     def partition(self, value: int) -> None:
-         """Sets the partition number."""
-         self.k8s_manager.set_partition(value)
-
-     @property
-     def upgrade_resumed(self) -> bool:
-         """Whether user has resumed upgrade with Juju action."""
-         return self.partition < unit_number(self.state.units_upgrade_peer_data[0])
-
-     def _determine_partition(
-         self, units: list[UnitUpgradePeerData], from_event: bool, force: bool
-     ) -> int:
-         """Determine the new partition to use.
-
-         We get the current state of each unit, and according to `action_event`,
-         `force` and the state, we decide the new value of the partition.
-         A specific case:
-         * If we don't have action event and the upgrade_order_index is 1, we
-         return because it means we're waiting for the resume-refresh/force-refresh event to run.
-         """
-         if not self.state.upgrade_in_progress:
-             return 0
-         logger.debug(f"{self.state.unit_upgrade_peer_data.relation_data=}")
-         for upgrade_order_index, unit in enumerate(units):
-             # Note: upgrade_order_index != unit number
-             state = unit.unit_state
-             if (
-                 not force and state is not UnitState.HEALTHY
-             ) or self.state.unit_workload_container_versions[
-                 unit.name
-             ] != self.state.app_workload_container_version:
-                 if self.dependent.name == CharmKind.MONGOD:
-                     if not from_event and upgrade_order_index == 1:
-                         # User confirmation needed to resume upgrade (i.e. upgrade second unit)
-                         return unit_number(units[0])
-                 return unit_number(unit)
-         return 0
-
-     def reconcile_partition(self, *, from_event: bool = False, force: bool = False) -> str | None:  # noqa: C901
-         """If ready, lower partition to upgrade next unit.
-
-         If upgrade is not in progress, set partition to 0. (If a unit receives a stop event, it may
-         raise the partition even if an upgrade is not in progress.)
-
-         Automatically upgrades next unit if all upgraded units are healthy—except if only one unit
-         has upgraded (need manual user confirmation [via Juju action] to upgrade next unit)
-
-         Handle Juju action to:
-         - confirm first upgraded unit is healthy and resume upgrade
-         - force upgrade of next unit if 1 or more upgraded units are unhealthy
-         """
-         message: str | None = None
-         if self.dependent.name == CharmKind.MONGOD:
-             force = from_event and force
-         else:
-             force = from_event
-
-         units = self.state.units_upgrade_peer_data
-
-         partition_ = self._determine_partition(
-             units,
-             from_event,
-             force,
-         )
-         logger.debug(f"{self.partition=}, {partition_=}")
-         # Only lower the partition—do not raise it.
-         # If this method is called during the action event and then called during another event a
-         # few seconds later, `determine_partition()` could return a lower number during the action
-         # and then a higher number a few seconds later.
-         # This can cause the unit to hang.
-         # Example: If partition is lowered to 1, unit 1 begins to upgrade, and partition is set to
-         # 2 right away, the unit/Juju agent will hang
-         # Details: https://chat.charmhub.io/charmhub/pl/on8rd538ufn4idgod139skkbfr
-         # This does not address the situation where another unit > 1 restarts and sets the
-         # partition during the `stop` event, but that is unlikely to occur in the small time window
-         # that causes the unit to hang.
-         if from_event:
-             assert len(units) >= 2
-             if partition_ > unit_number(units[1]):
-                 message = "Highest number unit is unhealthy. Refresh will not resume."
-                 raise ActionFailedError(message)
-             if force:
-                 # If a unit was unhealthy and the upgrade was forced, only
-                 # the next unit will upgrade. As long as 1 or more units
-                 # are unhealthy, the upgrade will need to be forced for
-                 # each unit.
-
-                 # Include "Attempting to" because (on Kubernetes) we only
-                 # control the partition, not which units upgrade.
-                 # Kubernetes may not upgrade a unit even if the partition
-                 # allows it (e.g. if the charm container of a higher unit
-                 # is not ready). This is also applicable `if not force`,
-                 # but is unlikely to happen since all units are healthy `if
-                 # not force`.
-                 message = f"Attempting to refresh unit {partition_}."
-             else:
-                 message = f"Refresh resumed. Unit {partition_} is refreshing next."
-         if partition_ < self.partition:
-             self.partition = partition_
-             logger.debug(
-                 f"Lowered partition to {partition_} {from_event=} {force=} {self.state.upgrade_in_progress=}"
-             )
-         return message
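
The removed KubernetesUpgrade class delegates partition handling to `self.state.k8s_manager`, whose implementation is not part of this diff. As a rough, hedged sketch of the StatefulSet partition mechanic described in the docstring above (unit ordinals >= partition may upgrade; a 403 from the Kubernetes API is what raises DeployedWithoutTrustError), something along these lines could be done with lightkube. The function names and arguments below are illustrative, not the charm's actual API.

# Hedged sketch only: the charm's real logic lives in state.k8s_manager.
from lightkube import Client
from lightkube.resources.apps_v1 import StatefulSet


def get_partition(client: Client, app_name: str, namespace: str) -> int:
    """Read the StatefulSet rollingUpdate partition (0 if unset)."""
    sts = client.get(StatefulSet, name=app_name, namespace=namespace)
    strategy = sts.spec.updateStrategy
    rolling = strategy.rollingUpdate if strategy else None
    return (rolling.partition or 0) if rolling else 0


def set_partition(client: Client, app_name: str, namespace: str, value: int) -> None:
    """Change the partition; pods with ordinal >= value may be updated."""
    patch = {"spec": {"updateStrategy": {"rollingUpdate": {"partition": value}}}}
    # Patching the StatefulSet needs RBAC permissions; in a Juju charm this is
    # why the application has to be deployed with --trust.
    client.patch(StatefulSet, name=app_name, obj=patch, namespace=namespace)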
single_kernel_mongo/core/machine_upgrades.py (removed)
@@ -1,188 +0,0 @@
- # Copyright 2024 Canonical Ltd.
- # See LICENSE file for licensing details.
-
- """In-place upgrades on machines.
-
- Derived from specification: DA058 - In-Place Upgrades - Kubernetes v2
- (https://docs.google.com/document/d/1tLjknwHudjcHs42nzPVBNkHs98XxAOT2BXGGpP7NyEU/)
- """
-
- from __future__ import annotations
-
- import logging
- from typing import TYPE_CHECKING
-
- from data_platform_helpers.advanced_statuses.models import StatusObject
-
- from single_kernel_mongo.config.literals import SNAP, CharmKind, UnitState
- from single_kernel_mongo.config.statuses import UpgradeStatuses
- from single_kernel_mongo.core.abstract_upgrades import (
-     AbstractUpgrade,
- )
- from single_kernel_mongo.exceptions import FailedToElectNewPrimaryError
-
- if TYPE_CHECKING:
-     from single_kernel_mongo.core.operator import OperatorProtocol
-
- logger = logging.getLogger(__name__)
-
-
- class MachineUpgrade(AbstractUpgrade):
-     """In-place upgrades on machines."""
-
-     @property
-     def unit_state(self) -> UnitState | None:
-         """Returns the unit state."""
-         if (
-             self.state.unit_workload_container_version is not None
-             and self.state.unit_workload_container_version
-             != self.state.app_workload_container_version
-         ):
-             logger.debug("Unit refresh state: outdated")
-             return UnitState.OUTDATED
-         return super().unit_state
-
-     @unit_state.setter
-     def unit_state(self, value: UnitState) -> None:
-         # Super call
-         AbstractUpgrade.unit_state.fset(self, value)  # type: ignore[attr-defined]
-
-     def _get_unit_healthy_status(self) -> StatusObject:
-         if self.state.unit_workload_container_version == self.state.app_workload_container_version:
-             return UpgradeStatuses.vm_active_upgrade(
-                 self._unit_workload_version,
-                 self.state.unit_workload_container_version,
-                 self._current_versions["charm"],
-             )
-
-         return UpgradeStatuses.vm_active_upgrade(
-             self._unit_workload_version,
-             self.state.unit_workload_container_version,
-             self._current_versions["charm"],
-             outdated=True,
-         )
-
-     @property
-     def app_status(self) -> StatusObject | None:
-         """App upgrade status."""
-         if not self.is_compatible:
-             logger.info(
-                 "Refresh incompatible. Rollback with `juju refresh`. "
-                 "If you accept potential *data loss* and *downtime*, you can continue by running `force-refresh-start`"
-                 "action on each remaining unit"
-             )
-             return UpgradeStatuses.INCOMPATIBLE_UPGRADE.value
-         return super().app_status
-
-     @property
-     def _unit_workload_version(self) -> str | None:
-         """Installed MongoDB version for this unit."""
-         return self._current_versions["workload"]
-
-     def reconcile_partition(self, *, from_event: bool = False, force: bool = False) -> str | None:
-         """Handle Juju action to confirm first upgraded unit is healthy and resume upgrade."""
-         if from_event:
-             self.upgrade_resumed = True
-             return "Refresh resumed."
-         return None
-
-     @property
-     def upgrade_resumed(self) -> bool:
-         """Whether user has resumed upgrade with Juju action.
-
-         Reset to `False` after each `juju refresh`
-         VM-only.
-         """
-         return self.state.app_upgrade_peer_data.upgrade_resumed
-
-     @upgrade_resumed.setter
-     def upgrade_resumed(self, value: bool):
-         # Trigger peer relation_changed event even if value does not change
-         # (Needed when leader sets value to False during `ops.UpgradeCharmEvent`)
-         self.state.app_upgrade_peer_data.upgrade_resumed = value
-
-     @property
-     def authorized(self) -> bool:
-         """Whether this unit is authorized to upgrade.
-
-         Only applies to machine charm.
-
-         Raises:
-             PrecheckFailed: App is not ready to upgrade
-         """
-         assert (
-             self.state.unit_workload_container_version != self.state.app_workload_container_version
-         )
-         assert self.state.app_upgrade_peer_data.versions
-         for index, unit in enumerate(self.state.units_upgrade_peer_data):
-             # Higher number units have already upgraded
-             if unit.name == self.unit_name:
-                 if index == 0:
-                     if (
-                         self.state.app_upgrade_peer_data.versions["charm"]
-                         == self._current_versions["charm"]
-                     ):
-                         # Assumes charm version uniquely identifies charm revision
-                         logger.debug("Rollback detected. Skipping pre-refresh check")
-                     else:
-                         # Run pre-upgrade check
-                         # (in case user forgot to run pre-upgrade-check action)
-                         self.pre_upgrade_check()
-                         logger.debug("Pre-refresh check after `juju refresh` successful")
-                 elif index == 1 and self.dependent.name == CharmKind.MONGOD:
-                     # User confirmation needed to resume upgrade (i.e. upgrade second unit)
-                     logger.debug(f"Second unit authorized to refresh if {self.upgrade_resumed=}")
-                     return self.upgrade_resumed
-                 return True
-             state = unit.unit_state
-             if (
-                 self.state.unit_workload_container_versions.get(unit.name)
-                 != self.state.app_workload_container_version
-                 or state is not UnitState.HEALTHY
-             ):
-                 # Waiting for higher number units to upgrade
-                 return False
-         return False
-
-     def upgrade_unit(self, *, dependent: OperatorProtocol) -> None:
-         """Runs the upgrade procedure.
-
-         Only applies to machine charm.
-         """
-         if dependent.name == CharmKind.MONGOD:
-             # According to the MongoDB documentation, before upgrading the
-             # primary, we must ensure a safe primary re-election.
-             try:
-                 if self.unit_name == dependent.primary_unit_name:  # type: ignore
-                     logger.debug("Stepping down current primary, before upgrading service...")
-                     dependent.upgrade_manager.step_down_primary_and_wait_reelection()
-             except FailedToElectNewPrimaryError:
-                 # by not setting the snap revision and immediately returning, this function will be
-                 # called again, and an empty re-elect a primary will occur again.
-                 logger.error("Failed to reelect primary before upgrading unit.")
-                 return
-
-         logger.debug(f"Upgrading {self.unit_name=}")
-         self.unit_state = UnitState.UPGRADING
-         dependent.workload.install()
-         # Start charm services if they were not running after refresh
-         dependent._configure_workloads()
-         dependent.start_charm_services()
-         if dependent.name == CharmKind.MONGOD:
-             dependent._restart_related_services()  # type: ignore[attr-defined]
-
-         self.state.unit_upgrade_peer_data.snap_revision = SNAP.revision
-         logger.debug(f"Saved {SNAP.revision} in unit databag after refresh")
-
-         self.charm.unit.set_workload_version(self.workload.get_version())
-         if dependent.name == CharmKind.MONGOD:
-             self.state.unit_upgrade_peer_data.current_revision = (
-                 dependent.cross_app_version_checker.version  # type: ignore
-             )
-
-         # post upgrade check should be retried in case of failure, for this it is necessary to
-         # emit a separate event.
-         dependent.upgrade_events.post_app_upgrade_event.emit()
-
-     def save_snap_revision_after_first_install(self):
-         """Set snap revision on first install."""
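
Before restarting the service on a primary, the removed MachineUpgrade.upgrade_unit asks `upgrade_manager.step_down_primary_and_wait_reelection()` to force a safe re-election; that helper is not shown in this diff. A minimal, hedged sketch of the same idea with pymongo follows (the connection URI and step-down window are illustrative).

# Hedged sketch: not the charm's helper, just the underlying MongoDB command.
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure


def step_down_primary(uri: str, step_down_secs: int = 60) -> None:
    """Ask the current primary to relinquish its role before it is upgraded."""
    client = MongoClient(uri, directConnection=True)
    try:
        # replSetStepDown makes the primary step down so another member can be
        # elected while this unit's mongod is restarted.
        client.admin.command("replSetStepDown", step_down_secs)
    except ConnectionFailure:
        # The primary closes client connections while stepping down; pymongo
        # surfaces that as a connection error, which is expected here.
        pass
    finally:
        client.close()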
single_kernel_mongo/events/upgrades.py (removed)
@@ -1,157 +0,0 @@
- #!/usr/bin/env python3
- # Copyright 2024 Canonical Ltd.
- # See LICENSE file for licensing details.
-
- """Event handler for upgrades."""
-
- from __future__ import annotations
-
- from logging import getLogger
- from typing import TYPE_CHECKING
-
- from ops.charm import ActionEvent, RelationCreatedEvent, UpgradeCharmEvent
- from ops.framework import EventBase, EventSource, Object
- from ops.model import ModelError
-
- from single_kernel_mongo.config.literals import CharmKind
- from single_kernel_mongo.config.relations import RelationNames
- from single_kernel_mongo.config.statuses import UpgradeStatuses
- from single_kernel_mongo.core.abstract_upgrades import UpgradeActions
- from single_kernel_mongo.exceptions import (
-     ActionFailedError,
-     DeferrableError,
-     UnhealthyUpgradeError,
- )
- from single_kernel_mongo.managers.upgrade import ROLLBACK_INSTRUCTIONS
- from single_kernel_mongo.utils.event_helpers import defer_event_with_info_log
-
- if TYPE_CHECKING:
-     from single_kernel_mongo.abstract_charm import AbstractMongoCharm
-     from single_kernel_mongo.core.operator import OperatorProtocol
-
-
- logger = getLogger(__name__)
-
-
- class _PostUpgradeCheckMongoDB(EventBase):
-     """Run post upgrade check on MongoDB to verify that the cluster is healhty."""
-
-
- class UpgradeEventHandler(Object):
-     """Handler for upgrade related events."""
-
-     post_app_upgrade_event = EventSource(_PostUpgradeCheckMongoDB)
-     post_cluster_upgrade_event = EventSource(_PostUpgradeCheckMongoDB)
-
-     def __init__(self, dependent: OperatorProtocol):
-         self.dependent = dependent
-         self.manager = self.dependent.upgrade_manager
-         self.charm: AbstractMongoCharm = dependent.charm
-         self.relation_name = RelationNames.UPGRADE_VERSION.value
-         super().__init__(parent=dependent, key=self.relation_name)
-
-         self.framework.observe(
-             self.charm.on[UpgradeActions.PRECHECK_ACTION_NAME].action,
-             self._on_pre_upgrade_check_action,
-         )
-
-         self.framework.observe(
-             self.charm.on[self.relation_name].relation_created,
-             self._on_upgrade_peer_relation_created,
-         )
-         self.framework.observe(
-             self.charm.on[self.relation_name].relation_changed, self._reconcile_upgrade
-         )
-         self.framework.observe(self.charm.on.upgrade_charm, self._on_upgrade_charm)
-         self.framework.observe(
-             self.charm.on[UpgradeActions.FORCE_REFRESH_START].action,
-             self._on_force_upgrade_action,
-         )
-         self.framework.observe(self.post_app_upgrade_event, self._run_post_app_upgrade_task)
-
-         if self.dependent.name == CharmKind.MONGOD:
-             self.framework.observe(
-                 self.charm.on[UpgradeActions.RESUME_ACTION_NAME].action,
-                 self._on_resume_upgrade_action,
-             )
-             self.framework.observe(
-                 self.post_cluster_upgrade_event, self._run_post_cluster_upgrade_task
-             )
-
-     def _on_pre_upgrade_check_action(self, event: ActionEvent):
-         try:
-             self.manager.run_pre_refresh_checks()
-             event.set_results({"result": "Charm is ready for refresh."})
-         except ActionFailedError as e:
-             logger.debug(f"Pre-refresh check failed: {e}")
-             event.fail(str(e))
-
-     def _on_upgrade_peer_relation_created(self, event: RelationCreatedEvent) -> None:
-         # We have to catch a possible ModelError here.
-         # TODO: remove try/catch when https://bugs.launchpad.net/juju/+bug/2093129 is fixed.
-         try:
-             self.manager.store_initial_revisions()
-         except ModelError as err:
-             logger.info(f"Deferring because of model error: {err}")
-             event.defer()
-
-     def _reconcile_upgrade(self, _) -> None:
-         self.manager._reconcile_upgrade(during_upgrade=True)
-
-     def _on_upgrade_charm(self, event: UpgradeCharmEvent) -> None:
-         try:
-             self.manager.upgrade_charm()
-         except DeferrableError as err:
-             logger.info(f"Deferring upgrade because of {err}")
-             event.defer()
-
-     def _on_resume_upgrade_action(self, event: ActionEvent) -> None:
-         try:
-             force: bool = event.params.get("force", False)
-             message = self.manager.resume_upgrade(force=force)
-             event.set_results({"result": message})
-         except ActionFailedError as e:
-             logger.debug(f"Resume refresh failed: {e}")
-             event.fail(str(e))
-
-     def _on_force_upgrade_action(self, event: ActionEvent) -> None:
-         try:
-             message = self.manager.force_upgrade(event)
-             event.set_results({"result": message})
-         except ActionFailedError as e:
-             logger.debug(f"Resume refresh failed: {e}")
-             event.fail(str(e))
-
-     def _run_post_app_upgrade_task(self, event: _PostUpgradeCheckMongoDB) -> None:
-         try:
-             self.manager.run_post_app_upgrade_task()
-         except DeferrableError as e:
-             logger.info(ROLLBACK_INSTRUCTIONS)
-             defer_event_with_info_log(logger, event, "post cluster upgrade checks", str(e))
-         except UnhealthyUpgradeError:
-             logger.info(ROLLBACK_INSTRUCTIONS)
-             self.manager.state.statuses.add(
-                 UpgradeStatuses.UNHEALTHY_UPGRADE.value,
-                 scope="unit",
-                 component=self.manager.name,
-             )
-             event.defer()
-
-     def _run_post_cluster_upgrade_task(self, event: _PostUpgradeCheckMongoDB) -> None:
-         """Runs after a sharded cluster has been upgraded.
-
-         It is necessary to check that the entire cluster is healthy.
-         """
-         try:
-             self.manager.run_post_cluster_upgrade_task()
-         except DeferrableError as e:
-             logger.info(ROLLBACK_INSTRUCTIONS)
-             defer_event_with_info_log(logger, event, "post cluster upgrade checks", str(e))
-         except UnhealthyUpgradeError:
-             logger.info(ROLLBACK_INSTRUCTIONS)
-             self.manager.state.statuses.add(
-                 UpgradeStatuses.UNHEALTHY_UPGRADE.value,
-                 scope="unit",
-                 component=self.manager.name,
-             )
-             event.defer()
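
The removed UpgradeEventHandler declares charm-local events via `EventSource(_PostUpgradeCheckMongoDB)` so that post-upgrade health checks can be deferred and retried independently of the Juju event that triggered them. A minimal, hedged sketch of that pattern with the ops framework, using illustrative names rather than this package's classes:

# Illustrative sketch of the custom-event / defer-and-retry pattern.
from ops.charm import CharmBase, CharmEvents
from ops.framework import EventBase, EventSource
from ops.main import main


class _PostUpgradeCheck(EventBase):
    """Request a (re)run of post-upgrade health checks."""


class ExampleCharmEvents(CharmEvents):
    post_upgrade_check = EventSource(_PostUpgradeCheck)


class ExampleCharm(CharmBase):
    on = ExampleCharmEvents()

    def __init__(self, framework):
        super().__init__(framework)
        framework.observe(self.on.upgrade_charm, self._on_upgrade_charm)
        framework.observe(self.on.post_upgrade_check, self._on_post_upgrade_check)

    def _on_upgrade_charm(self, event) -> None:
        # Hand the health check off to a dedicated event so it can be
        # deferred and retried without re-running the refresh handler.
        self.on.post_upgrade_check.emit()

    def _on_post_upgrade_check(self, event: _PostUpgradeCheck) -> None:
        if not self._cluster_healthy():
            event.defer()  # re-delivered on a later dispatch

    def _cluster_healthy(self) -> bool:
        return True  # placeholder for a real health probe


if __name__ == "__main__":
    main(ExampleCharm)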