mongo-charms-single-kernel 1.8.6__py3-none-any.whl → 1.8.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mongo-charms-single-kernel might be problematic.

Files changed (44)
  1. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/METADATA +2 -1
  2. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/RECORD +38 -39
  3. single_kernel_mongo/abstract_charm.py +8 -0
  4. single_kernel_mongo/config/literals.py +1 -20
  5. single_kernel_mongo/config/relations.py +0 -1
  6. single_kernel_mongo/config/statuses.py +10 -57
  7. single_kernel_mongo/core/abstract_upgrades_v3.py +149 -0
  8. single_kernel_mongo/core/k8s_workload.py +2 -2
  9. single_kernel_mongo/core/kubernetes_upgrades_v3.py +17 -0
  10. single_kernel_mongo/core/machine_upgrades_v3.py +54 -0
  11. single_kernel_mongo/core/operator.py +25 -4
  12. single_kernel_mongo/core/version_checker.py +7 -6
  13. single_kernel_mongo/core/vm_workload.py +30 -13
  14. single_kernel_mongo/core/workload.py +17 -19
  15. single_kernel_mongo/events/backups.py +3 -3
  16. single_kernel_mongo/events/cluster.py +1 -1
  17. single_kernel_mongo/events/database.py +1 -1
  18. single_kernel_mongo/events/lifecycle.py +5 -4
  19. single_kernel_mongo/events/tls.py +7 -4
  20. single_kernel_mongo/exceptions.py +4 -24
  21. single_kernel_mongo/managers/cluster.py +8 -8
  22. single_kernel_mongo/managers/config.py +5 -3
  23. single_kernel_mongo/managers/ldap.py +2 -1
  24. single_kernel_mongo/managers/mongo.py +48 -9
  25. single_kernel_mongo/managers/mongodb_operator.py +195 -67
  26. single_kernel_mongo/managers/mongos_operator.py +95 -35
  27. single_kernel_mongo/managers/sharding.py +4 -4
  28. single_kernel_mongo/managers/tls.py +54 -27
  29. single_kernel_mongo/managers/upgrade_v3.py +452 -0
  30. single_kernel_mongo/managers/upgrade_v3_status.py +133 -0
  31. single_kernel_mongo/state/app_peer_state.py +12 -2
  32. single_kernel_mongo/state/charm_state.py +31 -141
  33. single_kernel_mongo/state/config_server_state.py +0 -33
  34. single_kernel_mongo/state/unit_peer_state.py +10 -0
  35. single_kernel_mongo/utils/helpers.py +0 -6
  36. single_kernel_mongo/utils/mongo_config.py +32 -8
  37. single_kernel_mongo/core/abstract_upgrades.py +0 -890
  38. single_kernel_mongo/core/kubernetes_upgrades.py +0 -194
  39. single_kernel_mongo/core/machine_upgrades.py +0 -188
  40. single_kernel_mongo/events/upgrades.py +0 -157
  41. single_kernel_mongo/managers/upgrade.py +0 -334
  42. single_kernel_mongo/state/upgrade_state.py +0 -134
  43. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/WHEEL +0 -0
  44. {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.7.dist-info}/licenses/LICENSE +0 -0

single_kernel_mongo/managers/upgrade_v3.py

@@ -0,0 +1,452 @@
+"""Manager for upgrades, to run pre and post upgrades checks."""
+
+import logging
+from uuid import uuid4
+
+import charm_ as charm_api
+from pymongo.cursor import copy
+from pymongo.errors import OperationFailure, PyMongoError, ServerSelectionTimeoutError
+from tenacity import Retrying, retry, stop_after_attempt, wait_fixed
+
+from single_kernel_mongo.core.operator import OperatorProtocol
+from single_kernel_mongo.core.structured_config import MongoDBRoles
+from single_kernel_mongo.core.workload import WorkloadBase
+from single_kernel_mongo.exceptions import (
+    BalancerStillRunningError,
+    ClusterNotHealthyError,
+)
+from single_kernel_mongo.state.charm_state import CharmState
+from single_kernel_mongo.utils.mongo_config import MongoConfiguration
+from single_kernel_mongo.utils.mongo_connection import MongoConnection
+from single_kernel_mongo.utils.mongodb_users import OperatorUser
+
+logger = logging.getLogger()
+
+WRITE_KEY = "write_value"
+SHARD_NAME_INDEX = "_id"
+
+
+class MongoDBUpgradesManager:
+    """Upgrades manager to run pre and post upgrade checks."""
+
+    def __init__(
+        self, dependent: OperatorProtocol, state: CharmState, workload: WorkloadBase
+    ) -> None:
+        self.dependent = dependent
+        self.state = state
+        self.workload = workload
+
+    def wait_for_cluster_healthy(self) -> None:
+        """Waits until the cluster is healthy after upgrading.
+
+        After a unit restarts it can take some time for the cluster to settle.
+
+        Raises:
+            ClusterNotHealthyError.
+        """
+        for attempt in Retrying(stop=stop_after_attempt(10), wait=wait_fixed(1)):
+            with attempt:
+                if not self.is_cluster_healthy():
+                    raise ClusterNotHealthyError()
+
+    def is_cluster_healthy(self) -> bool:
+        """Returns True if all nodes in the replica set / cluster are healthy."""
+        try:
+            return self.are_nodes_healthy()
+        except (PyMongoError, OperationFailure, ServerSelectionTimeoutError) as e:
+            logger.error(
+                "Cannot proceed with refresh. Failed to check cluster health, error: %s",
+                e,
+            )
+            return False
+
+    def are_nodes_healthy(self) -> bool:
+        """Returns true if all nodes in the MongoDB deployment are healthy."""
+        if self.state.is_sharding_component and not self.state.has_sharding_integration:
+            return True
+        if self.state.is_role(MongoDBRoles.REPLICATION):
+            return self.are_replica_set_nodes_healthy(self.state.mongo_config)
+
+        mongos_config = self.get_cluster_mongos()
+        if not self.are_shards_healthy(mongos_config):
+            logger.info(
+                "One or more individual shards are not healthy - do not proceed with refresh."
+            )
+            return False
+
+        if not self.are_replicas_in_sharded_cluster_healthy(mongos_config):
+            logger.info("One or more nodes are not healthy - do not proceed with refresh.")
+            return False
+
+        return True
+
+    def are_replica_set_nodes_healthy(self, mongodb_config: MongoConfiguration) -> bool:
+        """Returns true if all nodes in the MongoDB replica set are healthy."""
+        with MongoConnection(mongodb_config) as mongod:
+            rs_status = mongod.get_replset_status()
+            rs_status = mongod.client.admin.command("replSetGetStatus")
+            return not mongod.is_any_sync(rs_status)
+
+    def get_cluster_mongos(self) -> MongoConfiguration:
+        """Return a mongos configuration for the sharded cluster."""
+        return (
+            self.state.mongos_config
+            if self.state.is_role(MongoDBRoles.CONFIG_SERVER)
+            else self.state.mongos_config_for_user(
+                OperatorUser, hosts=set(self.state.shard_state.mongos_hosts)
+            )
+        )
+
+    def are_shards_healthy(self, mongos_config: MongoConfiguration) -> bool:
+        """Returns True if all shards in the cluster are healthy."""
+        with MongoConnection(mongos_config) as mongos:
+            if mongos.is_any_shard_draining():
+                logger.info("Cluster is draining a shard, do not proceed with refresh.")
+                return False
+
+            if not mongos.are_all_shards_aware():
+                logger.info("Not all shards are shard aware, do not proceed with refresh.")
+                return False
+
+            # Config-Server has access to all the related shard applications.
+            if self.state.is_role(MongoDBRoles.CONFIG_SERVER):
+                relation_shards = {
+                    relation.app.name for relation in self.state.config_server_relation
+                }
+                cluster_shards = mongos.get_shard_members()
+                if len(relation_shards - cluster_shards):
+                    logger.info(
+                        "Not all shards have been added/drained, do not proceed with refresh."
+                    )
+                    return False
+
+        return True
+
+    def are_replicas_in_sharded_cluster_healthy(self, mongos_config: MongoConfiguration) -> bool:
+        """Returns True if all replicas in the sharded cluster are healthy."""
+        # dictionary of all replica sets in the sharded cluster
+        for mongodb_config in self.get_all_replica_set_configs_in_cluster(mongos_config):
+            if not self.are_replica_set_nodes_healthy(mongodb_config):
+                logger.info(f"Replica set: {mongodb_config.replset} contains unhealthy nodes.")
+                return False
+
+        return True
+
+    def get_all_replica_set_configs_in_cluster(
+        self, mongos_config: MongoConfiguration
+    ) -> list[MongoConfiguration]:
+        """Returns a list of all the mongodb_configurations for each application in the cluster."""
+        mongodb_configurations = []
+        if self.state.is_role(MongoDBRoles.SHARD):
+            # the hosts of the integrated mongos application are also the config-server hosts
+            config_server_hosts = self.state.app_peer_data.mongos_hosts
+            mongodb_configurations = [
+                self.state.mongodb_config_for_user(
+                    OperatorUser,
+                    hosts=set(config_server_hosts),
+                    replset=self.state.config_server_name,
+                )
+            ]
+        elif self.state.is_role(MongoDBRoles.CONFIG_SERVER):
+            mongodb_configurations = [self.state.mongo_config]
+
+        with MongoConnection(mongos_config) as mongos:
+            sc_status = mongos.client.admin.command("listShards")
+            for shard in sc_status["shards"]:
+                mongodb_configurations.append(self.get_mongodb_config_from_shard_entry(shard))
+
+        return mongodb_configurations
+
+    def get_mongodb_config_from_shard_entry(self, shard_entry: dict) -> MongoConfiguration:
+        """Returns a replica set MongoConfiguration based on a shard entry from ListShards."""
+        # field hosts is of the form shard01/host1:27018,host2:27018,host3:27018
+        shard_hosts = shard_entry["host"].split("/")[1]
+        parsed_ips = {host.split(":")[0] for host in shard_hosts.split(",")}
+        return self.state.mongodb_config_for_user(
+            OperatorUser, parsed_ips, replset=shard_entry[SHARD_NAME_INDEX]
+        )
+
+    def get_random_write_and_collection(self) -> tuple[str, str, str]:
+        """Returns a tuple for a random collection name and a unique write to add to it."""
+        collection_name = f"collection-{uuid4()}"
+        write_value = f"unique-write-{uuid4()}"
+        db_name = f"db-name-{uuid4()}"
+        return (db_name, collection_name, write_value)
+
+    def add_write_to_sharded_cluster(
+        self, mongos_config: MongoConfiguration, db_name, collection_name, write_value
+    ) -> None:
+        """Adds the provided write to the provided database with the provided collection."""
+        with MongoConnection(mongos_config) as mongod:
+            db = mongod.client[db_name]
+            test_collection = db[collection_name]
+            write = {WRITE_KEY: write_value}
+            test_collection.insert_one(write)
+
+    def get_primary_for_database(
+        self, config: MongoConfiguration, shard_name: str, db_name: str
+    ) -> bool:
+        """Gets the primary for a database to ensure that it was correctly moved."""
+        with MongoConnection(config) as mongos:
+            db = mongos.client["config"]
+            collection = db["databases"]
+            result = collection.find_one({"_id": db_name})
+            if not result:
+                return False
+            if result.get("primary", "") != shard_name:
+                return False
+            return True
+
+    @retry(
+        stop=stop_after_attempt(10),
+        wait=wait_fixed(1),
+        reraise=True,
+    )
+    def confirm_expected_write_cluster(
+        self,
+        config: MongoConfiguration,
+        collection_name: str,
+        expected_write_value: str,
+        db_name: str | None = None,
+    ) -> bool:
+        """Returns True if the replica contains the expected write in the provided collection."""
+        database = db_name or config.database
+        with MongoConnection(config) as mongos:
+            db = mongos.client[database]
+            test_collection = db[collection_name]
+            query = test_collection.find({}, {WRITE_KEY: 1})
+            if query[0][WRITE_KEY] != expected_write_value:
+                return False
+
+            return True
+
+    def add_write_to_replica_set(
+        self, mongodb_config: MongoConfiguration, collection_name, write_value
+    ) -> None:
+        """Adds the provided write to the admin database with the provided collection."""
+        with MongoConnection(mongodb_config) as mongod:
+            db = mongod.client["admin"]
+            test_collection = db[collection_name]
+            write = {WRITE_KEY: write_value}
+            test_collection.insert_one(write)
+
+    def is_write_on_secondaries(
+        self,
+        mongodb_config: MongoConfiguration,
+        collection_name,
+        expected_write_value,
+        db_name: str = "admin",
+    ) -> bool:
+        """Returns true if the expected write is on secondaries."""
+        # Generate a new object so we don't corrupt the initial config object
+        secondary_config = copy.deepcopy(mongodb_config)
+
+        for replica_ip in mongodb_config.hosts:
+            secondary_config.hosts = {replica_ip}
+            with MongoConnection(secondary_config, direct=True) as direct_secondary:
+                db = direct_secondary.client[db_name]
+                test_collection = db[collection_name]
+                query = test_collection.find({}, {WRITE_KEY: 1})
+                if query[0][WRITE_KEY] != expected_write_value:
+                    logger.info(
+                        "Secondary with IP %s does not contain the expected write.", replica_ip
+                    )
+                    return False
+
+        return True
+
+    def clear_tmp_collection(self, mongo_config: MongoConfiguration, collection_name: str) -> None:
+        """Clears the temporary collection."""
+        with MongoConnection(mongo_config) as mongo:
+            db = mongo.client[mongo_config.database]
+            db.drop_collection(collection_name)
+
+    def clear_db_collection(self, mongos_config: MongoConfiguration, db_name: str) -> None:
+        """Drops the temporary database."""
+        with MongoConnection(mongos_config) as mongos:
+            mongos.client.drop_database(db_name)
+
+    def is_replica_set_able_read_write(self) -> bool:
+        """Returns True if it is possible to write to the primary and read from replicas."""
+        _, collection_name, write_value = self.get_random_write_and_collection()
+        mongodb_config = self.state.mongo_config
+        self.add_write_to_replica_set(mongodb_config, collection_name, write_value)
+        write_replicated = self.is_write_on_secondaries(
+            mongodb_config, collection_name, write_value
+        )
+        self.clear_tmp_collection(mongodb_config, collection_name)
+        return write_replicated
+
+    def is_mongos_able_to_read_write(self) -> bool:
+        """Returns True if read and write is feasible from mongos."""
+        _, collection_name, write_value = self.get_random_write_and_collection()
+        config = self.state.mongos_config
+        self.add_write_to_sharded_cluster(config, config.database, collection_name, write_value)
+
+        write_replicated = self.confirm_expected_write_cluster(
+            config,
+            collection_name,
+            write_value,
+        )
+        self.clear_tmp_collection(config, collection_name)
+
+        if not write_replicated:
+            logger.info("Test read/write to cluster failed.")
+            return False
+
+        return True
+
+    def move_primary_to_last_upgrade_unit(self) -> None:
+        """Moves the primary to the last unit that gets upgraded (the unit with the lowest id).
+
+        Raises FailedToMovePrimaryError
+        """
+        # no need to move primary in the scenario of one unit
+        if len(self.state.units) < 2:
+            return
+
+        with MongoConnection(self.state.mongo_config) as mongod:
+            unit_with_lowest_id = self.state.reverse_order_peer_units[-1]
+            unit_host = self.state.peer_unit_data(unit_with_lowest_id).internal_address
+            if mongod.primary() == unit_host:
+                logger.info(
+                    "Not moving Primary before refresh, primary is already on the last unit to refresh."
+                )
+                return
+
+            logger.info("Moving primary to unit: %s", unit_with_lowest_id)
+            mongod.move_primary(new_primary_ip=unit_host)
+
+    def is_sharded_cluster_able_to_read_write(self) -> bool:
+        """Returns True if it is possible to write to all cluster shards and read from all replicas."""
+        mongos_config = self.get_cluster_mongos()
+        with MongoConnection(mongos_config) as mongos:
+            sc_status = mongos.client.admin.command("listShards")
+            for shard in sc_status["shards"]:
+                shard_name = shard[SHARD_NAME_INDEX]
+                # force a write to a specific shard to ensure the primary on that shard can
+                # receive writes
+                db_name, collection_name, write_value = self.get_random_write_and_collection()
+                self.add_write_to_sharded_cluster(
+                    mongos_config, db_name, collection_name, write_value
+                )
+
+                # Can't move if there are not at least 2 shards
+                if len(sc_status["shards"]) > 1:
+                    mongos.client.admin.command("movePrimary", db_name, to=shard_name)
+
+                has_correct_primary = self.get_primary_for_database(
+                    mongos_config, shard_name, db_name
+                )
+
+                write_replicated = self.confirm_expected_write_cluster(
+                    mongos_config,
+                    collection_name,
+                    write_value,
+                    db_name=db_name,
+                )
+
+                self.clear_db_collection(mongos_config, db_name)
+                if not (write_replicated and has_correct_primary):
+                    logger.info(f"Test read/write to shard {shard_name} failed.")
+                    return False
+
+        return True
+
+    def is_cluster_able_to_read_write(self) -> bool:
+        """Returns True if read and write is feasible for cluster."""
+        try:
+            if self.state.is_role(MongoDBRoles.REPLICATION):
+                return self.is_replica_set_able_read_write()
+            return self.is_sharded_cluster_able_to_read_write()
+        except (ServerSelectionTimeoutError, OperationFailure):
+            logger.warning("Impossible to select server, will try again later")
+            return False
+
+    def are_pre_upgrade_operations_config_server_successful(self) -> bool:
+        """Runs pre-upgrade operations for config-server and returns True if successful."""
+        if not self.state.is_role(MongoDBRoles.CONFIG_SERVER):
+            return True
+
+        if (
+            isinstance(charm_api.event, charm_api.ActionEvent)
+            and charm_api.event.action == "pre-refresh-check"
+        ):
+            return True
+
+        try:
+            self.turn_off_and_wait_for_balancer()
+        except BalancerStillRunningError:
+            logger.info("Balancer is still running. Please try the pre-refresh check later.")
+            return False
+
+        return True
+
+    def is_feature_compatibility_version(self, expected_feature_version: str) -> bool:
+        """Returns True if all nodes in the cluster have the expected FCV.
+
+        Args:
+            expected_feature_version: The version all nodes should have.
+        """
+        if self.state.is_role(MongoDBRoles.CONFIG_SERVER):
+            return self._is_mongos_feature_compatibility_version(expected_feature_version)
+        return self._is_rs_feature_compatibility_version(expected_feature_version)
+
+    def _is_mongos_feature_compatibility_version(self, expected_feature_version: str) -> bool:
+        """Returns True if all nodes in the sharded cluster have the expected_feature_version.
+
+        Note it is NOT sufficient to check only mongos or the individual shards. It is necessary to
+        check each node according to MongoDB upgrade docs.
+        """
+        mongos_config = self.get_cluster_mongos()
+        for replica_set_config in self.get_all_replica_set_configs_in_cluster(mongos_config):
+            for single_host in replica_set_config.hosts:
+                single_replica_config = self.state.mongodb_config_for_user(
+                    OperatorUser,
+                    hosts={single_host},
+                    replset=replica_set_config.replset,
+                    standalone=True,
+                )
+                with MongoConnection(single_replica_config) as mongod:
+                    version = mongod.client.admin.command(
+                        {"getParameter": 1, "featureCompatibilityVersion": 1}
+                    )
+                    if (
+                        version["featureCompatibilityVersion"]["version"]
+                        != expected_feature_version
+                    ):
+                        return False
+
+        return True
+
+    def _is_rs_feature_compatibility_version(self, expected_feature_version: str) -> bool:
+        """Returns True if all nodes in the replica set have the expected_feature_version.
+
+        Note it is NOT sufficient to check only mongos or the individual shards. It is necessary to
+        check each node according to MongoDB upgrade docs.
+        """
+        config = self.state.mongo_config
+        for host in config.hosts:
+            single_unit_config = self.state.mongodb_config_for_user(
+                OperatorUser, hosts={host}, replset=config.replset, standalone=True
+            )
+            with MongoConnection(single_unit_config) as mongod:
+                version = mongod.client.admin.command(
+                    {"getParameter": 1, "featureCompatibilityVersion": 1}
+                )
+                if version["featureCompatibilityVersion"]["version"] != expected_feature_version:
+                    return False
+        return True
+
+    @retry(
+        stop=stop_after_attempt(10),
+        wait=wait_fixed(1),
+        reraise=True,
+    )
+    def turn_off_and_wait_for_balancer(self) -> None:
+        """Sends the stop command to the balancer and waits for it to stop running."""
+        with MongoConnection(self.state.mongos_config) as mongos:
+            mongos.client.admin.command("balancerStop")
+            balancer_state = mongos.client.admin.command("balancerStatus")
+            if balancer_state["mode"] != "off":
+                raise BalancerStillRunningError("balancer is still Running.")
+
+    # END: helpers
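
For orientation, here is a minimal sketch of how the checks above might be chained before a refresh is allowed to proceed. The helper `run_pre_refresh_checks` and its wiring are hypothetical; only the `MongoDBUpgradesManager` methods and `FEATURE_VERSION` come from this release.

```python
# Illustrative sketch only; not part of the wheel.
from single_kernel_mongo.config.literals import FEATURE_VERSION
from single_kernel_mongo.core.operator import OperatorProtocol
from single_kernel_mongo.core.workload import WorkloadBase
from single_kernel_mongo.managers.upgrade_v3 import MongoDBUpgradesManager
from single_kernel_mongo.state.charm_state import CharmState


def run_pre_refresh_checks(
    operator: OperatorProtocol, state: CharmState, workload: WorkloadBase
) -> bool:
    """Return True when it looks safe to let the next unit refresh (hypothetical helper)."""
    upgrades = MongoDBUpgradesManager(dependent=operator, state=state, workload=workload)

    # The cluster must be healthy and serving reads/writes before a refresh starts.
    if not upgrades.is_cluster_healthy() or not upgrades.is_cluster_able_to_read_write():
        return False

    # Every node should already report the expected feature compatibility version.
    if not upgrades.is_feature_compatibility_version(FEATURE_VERSION):
        return False

    # Config-server specific preparation, e.g. stopping the balancer.
    if not upgrades.are_pre_upgrade_operations_config_server_successful():
        return False

    # Keep the primary on the unit that refreshes last to limit elections.
    upgrades.move_primary_to_last_upgrade_unit()
    return True
```
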
single_kernel_mongo/managers/upgrade_v3_status.py

@@ -0,0 +1,133 @@
+"""Manager for upgrades, only to convert statuses to advanced statuses."""
+
+import logging
+
+import charm_refresh
+from data_platform_helpers.advanced_statuses.models import StatusObject
+from data_platform_helpers.advanced_statuses.protocol import ManagerStatusProtocol
+from data_platform_helpers.advanced_statuses.types import Scope
+from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus, StatusBase, WaitingStatus
+
+from single_kernel_mongo.config.literals import Substrates
+from single_kernel_mongo.config.statuses import CharmStatuses, UpgradeStatuses
+from single_kernel_mongo.core.workload import WorkloadBase
+from single_kernel_mongo.exceptions import DeployedWithoutTrustError
+from single_kernel_mongo.state.charm_state import CharmState
+
+logger = logging.getLogger()
+
+
+class MongoDBUpgradesStatusManager(ManagerStatusProtocol):
+    """Manage upgrades statuses but nothing else."""
+
+    name: str = "upgrades"
+
+    def __init__(
+        self, state: CharmState, workload: WorkloadBase, refresh: charm_refresh.Common | None
+    ) -> None:
+        self.state = state
+        self.workload = workload
+        self.refresh = refresh
+
+        if self.state.substrate == Substrates.K8S:
+            try:
+                self.state.k8s_manager.get_pod()
+            except DeployedWithoutTrustError:
+                self.state.statuses.add(
+                    CharmStatuses.DEPLOYED_WITHOUT_TRUST.value, scope="unit", component=self.name
+                )
+                if self.charm.unit.is_leader():
+                    self.state.statuses.add(
+                        CharmStatuses.DEPLOYED_WITHOUT_TRUST.value, scope="app", component=self.name
+                    )
+
+    def get_statuses(self, scope: Scope, recompute: bool = False) -> list[StatusObject]:
+        """Compute the upgrades-relevant statuses.
+
+        Advanced Statuses defines the status priority order per component. It is not possible to
+        make some statuses of a component more important and other statuses of the same
+        component less important.
+
+        While `refresh.[app|unit]_status_higher_priority` must be of higher priority than any other
+        status, `refresh.unit_status_lower_priority()` should only be set if there is no other
+        status at all. We achieve this by setting the field `approved_critical_component` to True
+        if the refresh_status is of higher priority than any other status.
+
+        For more information: see https://canonical-charm-refresh.readthedocs-hosted.com/latest/add-to-charm/status/
+        """
+        status_list: list[StatusObject] = []
+
+        # Check if Juju app was deployed with `--trust` (needed to patch StatefulSet partition)
+        if self.state.substrate == Substrates.K8S:
+            try:
+                self.state.k8s_manager.get_pod()
+            except DeployedWithoutTrustError:
+                return [CharmStatuses.DEPLOYED_WITHOUT_TRUST.value]
+
+        if not self.refresh:
+            return [UpgradeStatuses.ACTIVE_IDLE.value]
+
+        if scope == "app" and (refresh_app_status := self.refresh.app_status_higher_priority):
+            app_status = self._convert_ops_status_to_advanced_status(refresh_app_status)
+            status_list.append(app_status)
+            return status_list
+
+        if self.refresh.in_progress and not self.refresh.next_unit_allowed_to_refresh:
+            if not self.dependent.mongo_manager.mongod_ready():
+                status_list.append(UpgradeStatuses.HEALTH_CHECK_FAILED.value)
+            else:
+                status_list.append(UpgradeStatuses.CLUSTER_CHECK_FAILED.value)
+
+        if refresh_unit_status := self.refresh.unit_status_higher_priority:
+            unit_status = self._convert_ops_status_to_advanced_status(refresh_unit_status)
+            status_list.append(unit_status)
+
+        if refresh_lower_unit_status := self.refresh.unit_status_lower_priority(
+            workload_is_running=self.workload.active()
+        ):
+            lower_unit_status = self._convert_ops_status_to_advanced_status(
+                refresh_lower_unit_status, critical=False
+            )
+            status_list.append(lower_unit_status)
+
+        return status_list if status_list else [CharmStatuses.ACTIVE_IDLE.value]
+
+    @staticmethod
+    def _convert_ops_status_to_advanced_status(
+        ops_status: StatusBase, critical: bool = True
+    ) -> StatusObject:
+        """Convert an ops status into an advanced statuses StatusObject.
+
+        Args:
+            ops_status (ops.StatusBase): the status to convert into an advanced status
+            critical (bool): whether the returned StatusObject should have the field
+                `approved_critical_component` set to True or False
+        """
+        # this code may not be very concise, focus is on readability
+        match ops_status:
+            case BlockedStatus():
+                return StatusObject(
+                    status="blocked",
+                    message=ops_status.message,
+                    approved_critical_component=critical,
+                )
+
+            case MaintenanceStatus():
+                return StatusObject(
+                    status="maintenance",
+                    message=ops_status.message,
+                    approved_critical_component=critical,
+                )
+
+            case WaitingStatus():
+                return StatusObject(
+                    status="waiting",
+                    message=ops_status.message,
+                    approved_critical_component=critical,
+                )
+
+            case ActiveStatus():
+                return StatusObject(status="active", message=ops_status.message)
+
+            case _:
+                raise ValueError(f"Unknown status type: {ops_status.name}: {ops_status.message}")
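
The conversion helper above maps `ops` statuses onto `StatusObject` instances one-to-one. A small illustrative snippet follows; it assumes `StatusObject` exposes the `status` and `approved_critical_component` fields it is constructed with, which is not confirmed by this diff.

```python
# Illustrative sketch only; not part of the wheel.
from ops.model import ActiveStatus, MaintenanceStatus

from single_kernel_mongo.managers.upgrade_v3_status import MongoDBUpgradesStatusManager

convert = MongoDBUpgradesStatusManager._convert_ops_status_to_advanced_status

# A refresh-in-progress status keeps its high priority via the critical flag.
refreshing = convert(MaintenanceStatus("refreshing unit 2 of 3"))
assert refreshing.status == "maintenance"
assert refreshing.approved_critical_component is True

# The lower-priority "everything is fine" status is converted without the critical flag.
idle = convert(ActiveStatus(), critical=False)
assert idle.status == "active"
```
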
single_kernel_mongo/state/app_peer_state.py

@@ -8,7 +8,7 @@ from enum import Enum
 from ops.model import Application, Model, Relation
 from typing_extensions import override
 
-from single_kernel_mongo.config.literals import SECRETS_APP, Substrates
+from single_kernel_mongo.config.literals import FEATURE_VERSION, SECRETS_APP, Substrates
 from single_kernel_mongo.core.structured_config import ExposeExternal, MongoDBRoles
 from single_kernel_mongo.lib.charms.data_platform_libs.v0.data_interfaces import (  # type: ignore
     DataPeerData,
@@ -28,6 +28,7 @@ class AppPeerDataKeys(str, Enum):
 
     # Shared
     ROLE = "role"
+    FCV = "feature-compatibility-version"
 
     # Mongos
     DATABASE = "database"
@@ -184,7 +185,7 @@ class AppPeerReplicaSet(AbstractRelationState[DataPeerData]):
         return set(
             self.relation_data.get(
                 AppPeerDataKeys.EXTRA_USER_ROLES.value,
-                "default",
+                "",
            ).split(",")
         )
 
@@ -204,3 +205,12 @@ class AppPeerReplicaSet(AbstractRelationState[DataPeerData]):
     @expose_external.setter
     def expose_external(self, value: ExposeExternal):
         self.update({AppPeerDataKeys.EXPOSE_EXTERNAL.value: f"{value}"})
+
+    @property
+    def feature_compatibility_version(self) -> str:
+        """The value of the feature-compatibility-version."""
+        return self.relation_data.get(AppPeerDataKeys.FCV.value, FEATURE_VERSION)
+
+    @feature_compatibility_version.setter
+    def feature_compatibility_version(self, value: str):
+        self.update({AppPeerDataKeys.FCV.value: value})
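
The new `feature-compatibility-version` peer-data key backs the FCV checks in `upgrade_v3.py`. A small sketch of how it might be read and pinned; the `pin_feature_compatibility_version` helper is hypothetical, and writes to app peer data only succeed on the leader unit.

```python
# Illustrative sketch only; not part of the wheel.
from single_kernel_mongo.config.literals import FEATURE_VERSION
from single_kernel_mongo.state.charm_state import CharmState


def pin_feature_compatibility_version(state: CharmState) -> str:
    """Read the stored FCV and pin it in app peer data (hypothetical helper)."""
    # The property falls back to FEATURE_VERSION when the databag has no value yet.
    current = state.app_peer_data.feature_compatibility_version
    # Leader-only write: persists the FCV so all units agree on it across a refresh.
    state.app_peer_data.feature_compatibility_version = current
    return current
```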