mongo-charms-single-kernel 1.8.6__py3-none-any.whl → 1.8.7__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of mongo-charms-single-kernel might be problematic.
- {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/METADATA +2 -1
- {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/RECORD +38 -39
- single_kernel_mongo/abstract_charm.py +8 -0
- single_kernel_mongo/config/literals.py +1 -20
- single_kernel_mongo/config/relations.py +0 -1
- single_kernel_mongo/config/statuses.py +10 -57
- single_kernel_mongo/core/abstract_upgrades_v3.py +149 -0
- single_kernel_mongo/core/k8s_workload.py +2 -2
- single_kernel_mongo/core/kubernetes_upgrades_v3.py +17 -0
- single_kernel_mongo/core/machine_upgrades_v3.py +54 -0
- single_kernel_mongo/core/operator.py +25 -4
- single_kernel_mongo/core/version_checker.py +7 -6
- single_kernel_mongo/core/vm_workload.py +30 -13
- single_kernel_mongo/core/workload.py +17 -19
- single_kernel_mongo/events/backups.py +3 -3
- single_kernel_mongo/events/cluster.py +1 -1
- single_kernel_mongo/events/database.py +1 -1
- single_kernel_mongo/events/lifecycle.py +5 -4
- single_kernel_mongo/events/tls.py +7 -4
- single_kernel_mongo/exceptions.py +4 -24
- single_kernel_mongo/managers/cluster.py +8 -8
- single_kernel_mongo/managers/config.py +5 -3
- single_kernel_mongo/managers/ldap.py +2 -1
- single_kernel_mongo/managers/mongo.py +48 -9
- single_kernel_mongo/managers/mongodb_operator.py +195 -67
- single_kernel_mongo/managers/mongos_operator.py +95 -35
- single_kernel_mongo/managers/sharding.py +4 -4
- single_kernel_mongo/managers/tls.py +54 -27
- single_kernel_mongo/managers/upgrade_v3.py +452 -0
- single_kernel_mongo/managers/upgrade_v3_status.py +133 -0
- single_kernel_mongo/state/app_peer_state.py +12 -2
- single_kernel_mongo/state/charm_state.py +31 -141
- single_kernel_mongo/state/config_server_state.py +0 -33
- single_kernel_mongo/state/unit_peer_state.py +10 -0
- single_kernel_mongo/utils/helpers.py +0 -6
- single_kernel_mongo/utils/mongo_config.py +32 -8
- single_kernel_mongo/core/abstract_upgrades.py +0 -890
- single_kernel_mongo/core/kubernetes_upgrades.py +0 -194
- single_kernel_mongo/core/machine_upgrades.py +0 -188
- single_kernel_mongo/events/upgrades.py +0 -157
- single_kernel_mongo/managers/upgrade.py +0 -334
- single_kernel_mongo/state/upgrade_state.py +0 -134
- {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/WHEEL +0 -0
- {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/licenses/LICENSE +0 -0
single_kernel_mongo/managers/upgrade.py
DELETED
@@ -1,334 +0,0 @@
-# Copyright 2024 Canonical Ltd.
-# See LICENSE file for licensing details.
-
-"""Manager for handling MongoDB in-place upgrades."""
-
-from __future__ import annotations
-
-import logging
-from typing import Generic, TypeVar
-
-from ops import ActionEvent
-from tenacity import RetryError
-
-from single_kernel_mongo.config.literals import (
-    FEATURE_VERSION,
-    CharmKind,
-    Substrates,
-    UnitState,
-)
-from single_kernel_mongo.config.statuses import UpgradeStatuses
-from single_kernel_mongo.core.abstract_upgrades import (
-    GenericMongoDBUpgradeManager,
-    UpgradeActions,
-)
-from single_kernel_mongo.core.operator import OperatorProtocol
-from single_kernel_mongo.core.structured_config import MongoDBRoles
-from single_kernel_mongo.exceptions import (
-    ActionFailedError,
-    BalancerNotEnabledError,
-    ContainerNotReadyError,
-    DeferrableError,
-    PrecheckFailedError,
-    UnhealthyUpgradeError,
-)
-from single_kernel_mongo.utils.mongo_connection import MongoConnection
-from single_kernel_mongo.utils.mongodb_users import LogRotateUser
-
-T = TypeVar("T", bound=OperatorProtocol)
-
-logger = logging.getLogger()
-ROLLBACK_INSTRUCTIONS = "To rollback, `juju refresh` to the previous revision"
-
-
-class MongoUpgradeManager(Generic[T], GenericMongoDBUpgradeManager[T]):
-    """Upgrade manager for Mongo upgrades."""
-
-    def upgrade_charm(self):
-        """Upgrade event handler.
-
-        On K8S, during an upgrade event, it will set the version in all relations,
-        replan the container and process the upgrade statuses. If the upgrade
-        is compatible, it will end up emitting a post upgrade event that
-        verifies the health of the cluster.
-        On VM, during an upgrade event, it will call the reconcile upgrade
-        after setting the version across all relations.
-        """
-        if self.dependent.substrate == Substrates.VM:
-            self._vm_upgrade()
-        else:
-            self._kubernetes_upgrade()
-
-    def _kubernetes_upgrade(self) -> None:
-        assert self._upgrade
-        if self.charm.unit.is_leader() and self.dependent.name == CharmKind.MONGOD:
-            self.dependent.cross_app_version_checker.set_version_across_all_relations()  # type: ignore
-
-            # If the user was not existing yet, create it.
-            # This user was added after the first stable release so we have to
-            # create it on upgrade if necessary.
-            if not self.state.get_user_password(LogRotateUser):
-                self.state.set_user_password(
-                    LogRotateUser, self.dependent.workload.generate_password()
-                )
-                self.dependent.mongo_manager.initialise_user(LogRotateUser)
-        try:
-            # Start services.
-            self.dependent.install_workloads()
-            self.dependent._configure_workloads()
-            if self.dependent.name == CharmKind.MONGOS:
-                if keyfile := self.state.cluster.keyfile:
-                    self.dependent.update_keyfile(keyfile)  # type: ignore
-                    self.dependent.start_charm_services()
-            else:
-                self.dependent.start_charm_services()
-            self.state.unit_upgrade_peer_data.current_revision = (
-                self.dependent.cross_app_version_checker.version  # type: ignore
-            )
-        except ContainerNotReadyError:
-            self.state.statuses.add(
-                UpgradeStatuses.UNHEALTHY_UPGRADE.value, scope="unit", component=self.name
-            )
-            self._reconcile_upgrade(during_upgrade=True)
-            raise DeferrableError("Container not ready")
-
-        self.state.statuses.add(
-            UpgradeStatuses.WAITING_POST_UPGRADE_STATUS.value, scope="unit", component=self.name
-        )
-
-        self._reconcile_upgrade(during_upgrade=True)
-
-        if self._upgrade.is_compatible:
-            # Post upgrade event verifies the success of the upgrade.
-            self.dependent.upgrade_events.post_app_upgrade_event.emit()
-
-    def _vm_upgrade(self):
-        if not self.state.upgrade_in_progress and self.dependent.name == CharmKind.MONGOD:
-            self.state.unit_upgrade_peer_data.current_revision = (
-                self.dependent.cross_app_version_checker.version  # type: ignore
-            )
-        if self.charm.unit.is_leader() and not self.state.upgrade_in_progress:
-            logger.info("Charm refreshed. MongoDB version unchanged")
-
-        if self.dependent.name == CharmKind.MONGOD and self.charm.unit.is_leader():
-            # If the user was not existing yet, create it.
-            # This user was added after the first stable release so we have to
-            # create it on upgrade if necessary.
-            if not self.state.get_user_password(LogRotateUser):
-                self.state.set_user_password(
-                    LogRotateUser, self.dependent.workload.generate_password()
-                )
-                self.dependent.mongo_manager.initialise_user(LogRotateUser)
-            self.dependent.logrotate_config_manager.configure_and_restart()
-            self.state.app_upgrade_peer_data.upgrade_resumed = False
-            self.dependent.cross_app_version_checker.set_version_across_all_relations()  # type: ignore
-            # MONGODB: Only call `_reconcile_upgrade` on leader unit to
-            # avoid race conditions with `upgrade_resumed`
-            self._reconcile_upgrade()
-        elif self.dependent.name == CharmKind.MONGOS:
-            # All units call it on mongos
-            self._reconcile_upgrade()
-
-    def run_pre_refresh_checks(self) -> None:
-        """Pre upgrade checks."""
-        if not self.charm.unit.is_leader():
-            message = f"Must run action on leader unit. (e.g. `juju run {self.charm.app.name}/leader {UpgradeActions.PRECHECK_ACTION_NAME.value}`)"
-            raise ActionFailedError(message)
-        if not self._upgrade:
-            message = "No upgrade relation found."
-            raise ActionFailedError(message)
-        if not self._upgrade or self.state.upgrade_in_progress:
-            message = "Refresh already in progress"
-            raise ActionFailedError(message)
-        try:
-            self._upgrade.pre_upgrade_check()
-        except PrecheckFailedError as exception:
-            message = (
-                f"Charm is not ready for refresh. Pre-refresh check failed: {exception.message}"
-            )
-            raise ActionFailedError(message)
-
-    def resume_upgrade(self, force: bool = False) -> str | None:
-        """Resume upgrade action handler."""
-        if not self.charm.unit.is_leader():
-            message = f"Must run action on leader unit. (e.g. `juju run {self.charm.app.name}/leader {UpgradeActions.RESUME_ACTION_NAME.value}`)"
-            raise ActionFailedError(message)
-        if not self._upgrade or not self.state.upgrade_in_progress:
-            message = "No refresh in progress"
-            raise ActionFailedError(message)
-        return self._upgrade.reconcile_partition(from_event=True, force=force)
-
-    def force_upgrade(self: MongoUpgradeManager[T], event: ActionEvent) -> str:
-        """Force upgrade action handler."""
-        if not self._upgrade or not self.state.upgrade_in_progress:
-            message = "No refresh in progress"
-            raise ActionFailedError(message)
-
-        if self.substrate == Substrates.VM and self._upgrade.unit_state != UnitState.OUTDATED:
-            message = "Unit already refreshed"
-            raise ActionFailedError(message)
-
-        if self.substrate == Substrates.K8S and not self.charm.unit.is_leader():
-            message = f"Must run action on leader unit. (e.g. `juju run {self.charm.app.name}/leader force-refresh-start`)"
-            raise ActionFailedError(message)
-
-        if self.dependent.name == CharmKind.MONGOD and not self._upgrade.upgrade_resumed:
-            message = f"Run `juju run {self.charm.app.name}/leader {UpgradeActions.RESUME_ACTION_NAME.value}` before trying to force refresh"
-            raise ActionFailedError(message)
-
-        logger.debug("Forcing refresh")
-        event.log(f"Forcefully refreshing {self.charm.unit.name}")
-        if self.substrate == Substrates.VM:
-            self._upgrade.upgrade_unit(dependent=self.dependent)  # type: ignore
-        else:
-            self._upgrade.reconcile_partition(from_event=True, force=True)
-        logger.debug("Forced refresh")
-        return f"Forcefully refreshed {self.charm.unit.name}"
-
-
-class MongoDBUpgradeManager(MongoUpgradeManager[T]):
-    """MongoDB specific upgrade mechanism."""
-
-    def run_post_app_upgrade_task(self):
-        """Runs the post upgrade check to verify that the cluster is healthy.
-
-        By deferring before setting unit state to HEALTHY, the user will either:
-        1. have to wait for the unit to resolve itself.
-        2. have to run the force-refresh-start action (to upgrade the next unit).
-        """
-        self.state.statuses.delete(
-            UpgradeStatuses.WAITING_POST_UPGRADE_STATUS.value, scope="unit", component=self.name
-        )
-        logger.debug("Running post refresh checks to verify cluster is not broken after refresh")
-        self.run_post_upgrade_checks(finished_whole_cluster=False)
-
-        if self._upgrade.unit_state != UnitState.HEALTHY:
-            return
-
-        logger.debug("Cluster is healthy after refreshing unit %s", self.charm.unit.name)
-
-        if self.charm.unit.is_leader() and not self.state.upgrade_in_progress:
-            self.state.statuses.set(
-                status=UpgradeStatuses.ACTIVE_IDLE.value, scope="app", component=self.name
-            )
-
-        # Leader of config-server must wait for all shards to be upgraded before finalising the
-        # upgrade.
-        if not self.charm.unit.is_leader() or not self.state.is_role(MongoDBRoles.CONFIG_SERVER):
-            return
-
-        self.dependent.upgrade_events.post_cluster_upgrade_event.emit()
-
-    def run_post_cluster_upgrade_task(self) -> None:
-        """Waits for entire cluster to be upgraded before enabling the balancer."""
-        # Leader of config-server must wait for all shards to be upgraded before finalising the
-        # upgrade.
-        if not self.charm.unit.is_leader() or not self.state.is_role(MongoDBRoles.CONFIG_SERVER):
-            return
-
-        # We can because we now we are a config server.
-        if not self.dependent.cross_app_version_checker.are_related_apps_valid():  # type: ignore
-            raise DeferrableError("Waiting to finalise refresh, one or more shards need refresh.")
-
-        logger.debug(
-            "Entire cluster has been refreshed, checking health of the cluster and enabling balancer."
-        )
-        self.run_post_upgrade_checks(finished_whole_cluster=True)
-
-        try:
-            with MongoConnection(self.state.mongos_config) as mongos:
-                mongos.start_and_wait_for_balancer()
-        except BalancerNotEnabledError:
-            raise DeferrableError(
-                "Need more time to enable the balancer after finishing the refresh. Deferring event."
-            )
-
-        self.set_mongos_feature_compatibilty_version(FEATURE_VERSION)
-
-    # END: Event handlers
-
-    # BEGIN: Helpers
-    def run_post_upgrade_checks(self, finished_whole_cluster: bool = False) -> None:
-        """Runs post-upgrade checks for after a shard/config-server/replset/cluster upgrade."""
-        assert self._upgrade
-        upgrade_type = "unit." if not finished_whole_cluster else "sharded cluster"
-        try:
-            self.wait_for_cluster_healthy()  # type: ignore
-        except RetryError:
-            logger.error(
-                "Cluster is not healthy after refreshing %s. Will retry next juju event.",
-                upgrade_type,
-            )
-            raise UnhealthyUpgradeError
-
-        if not self.is_cluster_able_to_read_write():  # type: ignore
-            logger.error(
-                "Cluster is not healthy after refreshing %s, writes not propagated throughout cluster. Deferring post refresh check.",
-                upgrade_type,
-            )
-            raise UnhealthyUpgradeError
-
-        # TODO this will be addressed in the Advanced Status Handling, when we have the
-        # functionality to clear a status.
-        if self.charm.unit.status == UpgradeStatuses.UNHEALTHY_UPGRADE.value:
-            self.state.statuses.delete(
-                UpgradeStatuses.UNHEALTHY_UPGRADE.value, scope="unit", component=self.name
-            )
-
-        self._upgrade.unit_state = UnitState.HEALTHY
-
-        # Clear the statuses and set the new upgrade status.
-        self.state.statuses.clear(scope="unit", component=self.name)
-        self._set_upgrade_status()
-
-
-class MongosUpgradeManager(MongoUpgradeManager[T]):
-    """Mongos specific upgrade mechanism."""
-
-    def run_post_app_upgrade_task(self):
-        """Runs the post upgrade check to verify that the mongos router is healthy."""
-        logger.debug("Running post refresh checks to verify monogs is not broken after refresh")
-        if not self.state.db_initialised:
-            self._upgrade.unit_state = UnitState.HEALTHY
-            return
-
-        self.run_post_upgrade_checks()
-
-        if self._upgrade.unit_state != UnitState.HEALTHY:
-            return
-
-        logger.debug("Cluster is healthy after refreshing unit %s", self.charm.unit.name)
-
-        # Leader of config-server must wait for all shards to be upgraded before finalising the
-        # upgrade.
-        if not self.charm.unit.is_leader() or not self.state.is_role(MongoDBRoles.CONFIG_SERVER):
-            return
-
-        self.dependent.upgrade_events.post_cluster_upgrade_event.emit()
-
-    # Unused parameter only present for typing.
-    def run_post_upgrade_checks(self, finished_whole_cluster: bool = False) -> None:
-        """Runs post-upgrade checks for after a shard/config-server/replset/cluster upgrade."""
-        assert self._upgrade
-        if not self.dependent.is_mongos_running():  # type: ignore
-            raise DeferrableError(
-                "Waiting for mongos router to be ready before finalising refresh."
-            )
-
-        if not self.is_mongos_able_to_read_write():  # type: ignore
-            self.state.statuses.set(
-                UpgradeStatuses.UNHEALTHY_UPGRADE.value, scope="unit", component=self.name
-            )
-            logger.info(ROLLBACK_INSTRUCTIONS)
-            raise DeferrableError("mongos is not able to read/write after refresh.")
-
-        if self.charm.unit.status == UpgradeStatuses.UNHEALTHY_UPGRADE.value:
-            self.state.statuses.delete(
-                UpgradeStatuses.UNHEALTHY_UPGRADE.value, scope="unit", component=self.name
-            )
-
-        logger.debug("refresh of unit succeeded.")
-        self._upgrade.unit_state = UnitState.HEALTHY
-        self.state.statuses.set(
-            UpgradeStatuses.ACTIVE_IDLE.value, scope="unit", component=self.name
-        )
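Together with `core/abstract_upgrades.py`, `core/kubernetes_upgrades.py`, `core/machine_upgrades.py`, and `events/upgrades.py`, this manager is removed in 1.8.7 in favour of the new v3 upgrade stack added above (`core/abstract_upgrades_v3.py`, `managers/upgrade_v3.py`, `managers/upgrade_v3_status.py`). Throughout the removed code, conditions that should be retried on a later Juju event are signalled by raising `DeferrableError`. A minimal self-contained sketch of that pattern follows; the `catch_deferrable` decorator is hypothetical and not part of this package:

```python
import logging

import ops

logger = logging.getLogger(__name__)


class DeferrableError(Exception):
    """Raised when a handler cannot make progress yet and should run again later."""


def catch_deferrable(handler):
    """Hypothetical decorator turning DeferrableError into `event.defer()`."""

    def wrapper(charm: ops.CharmBase, event: ops.EventBase) -> None:
        try:
            handler(charm, event)
        except DeferrableError as err:
            # `event.defer()` re-queues the event so it is re-emitted at the
            # start of the next hook, giving the condition time to clear.
            logger.info("Deferring %s: %s", type(event).__name__, err)
            event.defer()

    return wrapper
```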
single_kernel_mongo/state/upgrade_state.py
DELETED
@@ -1,134 +0,0 @@
-# Copyright 2024 Canonical Ltd.
-# See LICENSE file for licensing details.
-"""The upgrade peer unit relation databag."""
-
-import json
-import time
-from enum import Enum
-from logging import getLogger
-
-from ops.model import Application, Relation, Unit
-
-from single_kernel_mongo.config.literals import Substrates, UnitState
-from single_kernel_mongo.lib.charms.data_platform_libs.v0.data_interfaces import (  # type: ignore
-    DataPeerData,
-    DataPeerUnitData,
-)
-from single_kernel_mongo.state.abstract_state import AbstractRelationState
-
-logger = getLogger(__name__)
-
-
-class UnitUpgradeRelationKeys(str, Enum):
-    """The unit upgrade peer relation model."""
-
-    STATE = "state"
-    SNAP_REVISION = "snap_revision"
-    CURRENT_REVISION = "current_revision"
-
-
-class AppUpgradeRelationKeys(str, Enum):
-    """The app upgrade peer relation model."""
-
-    VERSIONS = "versions"
-    UPGRADE_RESUMED = "upgrade-resumed"
-    UNUSED_TIMESTAMP = "-unused-timestamp-upgrade-resume-last-updated"
-
-
-class UnitUpgradePeerData(AbstractRelationState[DataPeerUnitData]):
-    """State collection for unit data."""
-
-    component: Unit
-
-    def __init__(
-        self,
-        relation: Relation | None,
-        data_interface: DataPeerUnitData,
-        component: Unit,
-        substrate: Substrates,
-    ):
-        super().__init__(relation, data_interface, component, None)
-        self.data_interface = data_interface
-        self.substrate = substrate
-        self.unit = component
-
-    @property
-    def unit_state(self) -> UnitState | None:
-        """Unit upgrade state."""
-        if state := self.relation_data.get(UnitUpgradeRelationKeys.STATE.value):
-            return UnitState(state)
-        return None
-
-    @unit_state.setter
-    def unit_state(self, value: UnitState) -> None:
-        self.update({UnitUpgradeRelationKeys.STATE.value: value.value})
-
-    @property
-    def snap_revision(self) -> str | None:
-        """Installed snap revision for this unit."""
-        return self.relation_data.get(UnitUpgradeRelationKeys.SNAP_REVISION.value)
-
-    @snap_revision.setter
-    def snap_revision(self, value: str):
-        self.update({UnitUpgradeRelationKeys.SNAP_REVISION.value: value})
-
-    @property
-    def current_revision(self) -> str:
-        """The revision of the charm that's running before the upgrade."""
-        return self.relation_data.get(UnitUpgradeRelationKeys.CURRENT_REVISION, "-1")
-
-    @current_revision.setter
-    def current_revision(self, value: str):
-        self.update({UnitUpgradeRelationKeys.CURRENT_REVISION.value: value})
-
-
-class AppUpgradePeerData(AbstractRelationState[DataPeerData]):
-    """State collection for unit data."""
-
-    component: Application
-
-    def __init__(
-        self,
-        relation: Relation | None,
-        data_interface: DataPeerData,
-        component: Application,
-        substrate: Substrates,
-    ):
-        super().__init__(relation, data_interface, component, None)
-        self.data_interface = data_interface
-        self.substrate = substrate
-        self.unit = component
-
-    @property
-    def versions(self) -> dict[str, str] | None:
-        """Unit upgrade state."""
-        if state := self.relation_data.get(AppUpgradeRelationKeys.VERSIONS.value):
-            return json.loads(state)
-        return None
-
-    @versions.setter
-    def versions(self, value: dict[str, str]) -> None:
-        self.update({AppUpgradeRelationKeys.VERSIONS.value: json.dumps(value)})
-
-    @property
-    def upgrade_resumed(self) -> bool:
-        """Whether user has resumed upgrade with Juju action.
-
-        Reset to `False` after each `juju refresh`
-        VM-Only.
-        """
-        return json.loads(
-            self.relation_data.get(AppUpgradeRelationKeys.UPGRADE_RESUMED.value, "false")
-        )
-
-    @upgrade_resumed.setter
-    def upgrade_resumed(self, value: bool):
-        # Trigger peer relation_changed event even if value does not change
-        # (Needed when leader sets value to False during `ops.UpgradeCharmEvent`)
-        self.update(
-            {
-                AppUpgradeRelationKeys.UPGRADE_RESUMED.value: json.dumps(value),
-                AppUpgradeRelationKeys.UNUSED_TIMESTAMP.value: str(time.time()),
-            }
-        )
-        logger.debug(f"Set upgrade-resumed to {value=}")
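The removed `AppUpgradePeerData` illustrates a common charm-state pattern: Juju relation databags only hold strings, so structured values are JSON-encoded on write and decoded on read, and the `upgrade_resumed` setter also rewrites an otherwise unused timestamp key so that a `relation-changed` event fires even when the flag keeps its previous value. A minimal standalone sketch of that pattern, with a plain dict standing in for the peer databag:

```python
import json
import time

# A plain dict stands in for the peer-relation databag, which (like Juju's
# real databags) maps string keys to string values.
databag: dict[str, str] = {}


def set_upgrade_resumed(value: bool) -> None:
    # Store the flag as JSON; bump a throwaway timestamp key so the write
    # always changes the databag and therefore always triggers relation-changed.
    databag["upgrade-resumed"] = json.dumps(value)
    databag["-unused-timestamp-upgrade-resume-last-updated"] = str(time.time())


def get_upgrade_resumed() -> bool:
    # Decode the flag, defaulting to False when it was never set.
    return json.loads(databag.get("upgrade-resumed", "false"))


set_upgrade_resumed(True)
assert get_upgrade_resumed() is True
```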
{mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/WHEEL
RENAMED
File without changes

{mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/licenses/LICENSE
RENAMED
File without changes