qontract-reconcile 0.10.2.dev174__py3-none-any.whl → 0.10.2.dev175__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: qontract-reconcile
-Version: 0.10.2.dev174
+Version: 0.10.2.dev175
 Summary: Collection of tools to reconcile services with their desired state as defined in the app-interface DB.
 Project-URL: homepage, https://github.com/app-sre/qontract-reconcile
 Project-URL: repository, https://github.com/app-sre/qontract-reconcile
@@ -123,7 +123,7 @@ reconcile/vpc_peerings_validator.py,sha256=aESqrhm1tpkc2iqSL1UV5to_HjNgjRSffD0cr
 reconcile/aus/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 reconcile/aus/advanced_upgrade_service.py,sha256=lt684trHbKvVDLwwuNVz3Wu_MnytFSbS_7MZTIITh9k,23969
 reconcile/aus/aus_label_source.py,sha256=o0S2f0qwcII_8nzhHZhRQ83gEZ1DrSXyO4xzSwLebuU,4382
-reconcile/aus/base.py,sha256=K-CLPUDpaFSfFsf_UgcvaUPCgqTLTGkx_aoUH0VWhv8,50431
+reconcile/aus/base.py,sha256=rx2OuShoFRP7O6Kov9rRjEkhCPpPavfDF81tieB6XFg,50747
 reconcile/aus/cluster_version_data.py,sha256=VZWbUEIbrDKO-sroMpQtiWCTqDraTMd8tssKV0HyTQ0,7140
 reconcile/aus/healthchecks.py,sha256=jR9c-syh9impnkV0fd6XW3Bnk7iRN5zv8oCRYM-yIRY,2700
 reconcile/aus/metrics.py,sha256=nKT4m2zGT-QOMR0c-z-npVNKWsNMubzdffpU_f9n4II,3927
@@ -807,7 +807,7 @@ tools/saas_promotion_state/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
 tools/saas_promotion_state/saas_promotion_state.py,sha256=UfwwRLS5Ya4_Nh1w5n1dvoYtchQvYE9yj1VANt2IKqI,3925
 tools/sre_checkpoints/__init__.py,sha256=CDaDaywJnmRCLyl_NCcvxi-Zc0hTi_3OdwKiFOyS39I,145
 tools/sre_checkpoints/util.py,sha256=zEDbGr18ZeHNQwW8pUsr2JRjuXIPz--WAGJxZo9sv_Y,894
-qontract_reconcile-0.10.2.dev174.dist-info/METADATA,sha256=BxdoN9UJDbN5C4PogvVgSSyKg_ruSMt9Ee0gMjCnZB8,24627
-qontract_reconcile-0.10.2.dev174.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-qontract_reconcile-0.10.2.dev174.dist-info/entry_points.txt,sha256=5i9l54La3vQrDLAdwDKQWC0iG4sV9RRfOb1BpvzOWLc,698
-qontract_reconcile-0.10.2.dev174.dist-info/RECORD,,
+qontract_reconcile-0.10.2.dev175.dist-info/METADATA,sha256=UCbZxjmW8UpEZhxqFSoasTNEH2w_UT9s0PtMCZj4kfU,24627
+qontract_reconcile-0.10.2.dev175.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+qontract_reconcile-0.10.2.dev175.dist-info/entry_points.txt,sha256=5i9l54La3vQrDLAdwDKQWC0iG4sV9RRfOb1BpvzOWLc,698
+qontract_reconcile-0.10.2.dev175.dist-info/RECORD,,
reconcile/aus/base.py CHANGED
@@ -970,56 +970,53 @@ def verify_schedule_should_skip(
     return next_schedule.strftime("%Y-%m-%dT%H:%M:%SZ")
 
 
-def verify_lock_should_skip(
-    desired: ClusterUpgradeSpec, locked: dict[str, str]
-) -> bool:
-    mutexes = desired.effective_mutexes
-    if any(lock in locked for lock in mutexes):
-        locking = {lock: locked[lock] for lock in mutexes if lock in locked}
-        logging.debug(
-            f"[{desired.org.org_id}/{desired.org.name}/{desired.cluster.name}] skipping cluster: locked out by {locking}"
-        )
-        return True
-    return False
-
-
 def verify_max_upgrades_should_skip(
     desired: ClusterUpgradeSpec,
-    sector_upgrades: dict[str, set[str]],
+    locked: dict[str, str],
+    sector_mutex_upgrades: dict[tuple[str, str], set[str]],
     sector: Sector | None,
 ) -> bool:
-    if sector is None:
-        return False
+    mutexes = desired.effective_mutexes
 
-    current_upgrades = sector_upgrades[sector.name]
-    # Allow at least one upgrade
-    if len(current_upgrades) == 0:
+    # if sector.max_parallel_upgrades is not set, we allow 1 upgrade per mutex, across the whole org
+    if sector is None or sector.max_parallel_upgrades is None:
+        if any(lock in locked for lock in mutexes):
+            locking = {lock: locked[lock] for lock in mutexes if lock in locked}
+            logging.debug(
+                f"[{desired.org.org_id}/{desired.org.name}/{desired.cluster.name}] skipping cluster: locked out by {locking}"
+            )
+            return True
         return False
 
-    # if sector.max_parallel_upgrades is not set, we allow all upgrades
-    if sector.max_parallel_upgrades is None:
-        return False
+    current_upgrades_count_per_mutex = {
+        mutex: len(sector_mutex_upgrades[sector.name, mutex]) for mutex in mutexes
+    }
 
-    sector_cluster_count = len(sector.specs)
+    current_upgrades_total_count = sum(current_upgrades_count_per_mutex.values())
+    if current_upgrades_total_count == 0:
+        return False
 
-    if sector.max_parallel_upgrades.endswith("%"):
-        max_parallel_upgrades_percent = int(sector.max_parallel_upgrades[:-1])
-        max_parallel_upgrades = round(
-            sector_cluster_count * max_parallel_upgrades_percent / 100
-        )
-    else:
-        max_parallel_upgrades = int(sector.max_parallel_upgrades)
+    for mutex in mutexes:
+        cluster_count = len([s for s in sector.specs if mutex in s.effective_mutexes])
+        if sector.max_parallel_upgrades.endswith("%"):
+            max_parallel_upgrades_percent = int(sector.max_parallel_upgrades[:-1])
+            max_parallel_upgrades = round(
+                cluster_count * max_parallel_upgrades_percent / 100
+            )
+        else:
+            max_parallel_upgrades = int(sector.max_parallel_upgrades)
 
-    # we allow at least one upgrade
-    if max_parallel_upgrades == 0:
-        max_parallel_upgrades = 1
+        # we allow at least one upgrade
+        if max_parallel_upgrades == 0:
+            max_parallel_upgrades = 1
 
-    if len(current_upgrades) >= max_parallel_upgrades:
-        logging.debug(
-            f"[{desired.org.org_id}/{desired.org.name}/{desired.cluster.name}] skipping cluster: "
-            f"sector '{sector.name}' has reached max parallel upgrades {sector.max_parallel_upgrades}"
-        )
-        return True
+        if current_upgrades_count_per_mutex.get(mutex, 0) >= max_parallel_upgrades:
+            logging.debug(
+                f"[{desired.org.org_id}/{desired.org.name}/{desired.cluster.name}] skipping cluster: "
+                f"sector '{sector.name}' has reached max parallel upgrades {sector.max_parallel_upgrades} "
+                f"for mutex '{mutex}'"
+            )
+            return True
 
     return False
 
@@ -1085,15 +1082,15 @@ def calculate_diff(
     """
 
     locked: dict[str, str] = {}
-    sector_upgrades: dict[str, set[str]] = defaultdict(set)
+    sector_mutex_upgrades: dict[tuple[str, str], set[str]] = defaultdict(set)
 
     def set_upgrading(
-        cluster_id: str, mutexes: set[str] | None, sector_name: str | None
+        cluster_id: str, mutexes: set[str], sector_name: str | None
    ) -> None:
-        for mutex in mutexes or set():
+        for mutex in mutexes:
             locked[mutex] = cluster_id
-        if sector_name:
-            sector_upgrades[sector_name].add(cluster_id)
+            if sector_name:
+                sector_mutex_upgrades[sector_name, mutex].add(cluster_id)
 
     diffs: list[UpgradePolicyHandler] = []
 
@@ -1113,7 +1110,9 @@ def calculate_diff(
         # Upgrading node pools, only required for Hypershift clusters
         # do this in the same loop, to skip cluster on node pool upgrade
         if spec.cluster.is_rosa_hypershift():
-            if verify_lock_should_skip(spec, locked):
+            if verify_max_upgrades_should_skip(
+                spec, locked, sector_mutex_upgrades, sector
+            ):
                 continue
 
             node_pool_update = _calculate_node_pool_diffs(spec, now)
@@ -1135,10 +1134,7 @@ def calculate_diff(
         if not next_schedule:
             continue
 
-        if verify_lock_should_skip(spec, locked):
-            continue
-
-        if verify_max_upgrades_should_skip(spec, sector_upgrades, sector):
+        if verify_max_upgrades_should_skip(spec, locked, sector_mutex_upgrades, sector):
            continue
 
         version = upgradeable_version(spec, version_data, sector)
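
The net effect of the base.py change is that the `max_parallel_upgrades` cap is now enforced per mutex within a sector (upgrades are tracked in `sector_mutex_upgrades`, keyed by `(sector_name, mutex)`), and the former `verify_lock_should_skip` check is folded into `verify_max_upgrades_should_skip`, which now also receives the `locked` map. The sketch below is illustrative only and not code from the package: `Spec` and `Sector` are simplified, hypothetical stand-ins for `ClusterUpgradeSpec` and `Sector`, but the cap calculation mirrors the per-mutex loop added in the diff above.

# Illustrative sketch (not from the package): how the new per-mutex cap behaves.
# "Spec" and "Sector" are simplified stand-ins for ClusterUpgradeSpec / Sector.
from dataclasses import dataclass, field


@dataclass
class Spec:
    name: str
    effective_mutexes: set[str] = field(default_factory=set)


@dataclass
class Sector:
    name: str
    specs: list[Spec] = field(default_factory=list)
    max_parallel_upgrades: str | None = None  # e.g. "2" or "20%"


def max_upgrades_for_mutex(sector: Sector, mutex: str) -> int:
    """Per-mutex cap, mirroring the loop body added in verify_max_upgrades_should_skip."""
    # only the sector's clusters that hold this mutex count toward a percentage cap
    cluster_count = len([s for s in sector.specs if mutex in s.effective_mutexes])
    assert sector.max_parallel_upgrades is not None
    if sector.max_parallel_upgrades.endswith("%"):
        percent = int(sector.max_parallel_upgrades[:-1])
        cap = round(cluster_count * percent / 100)
    else:
        cap = int(sector.max_parallel_upgrades)
    # at least one upgrade is always allowed
    return max(cap, 1)


if __name__ == "__main__":
    specs = [Spec(f"cluster-{i}", {"ocm-prod"}) for i in range(10)]
    sector = Sector("prod", specs, max_parallel_upgrades="20%")
    # 10 clusters share "ocm-prod"; 20% of 10 -> at most 2 concurrent upgrades for that mutex
    print(max_upgrades_for_mutex(sector, "ocm-prod"))  # -> 2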