qontract-reconcile 0.10.2.dev256__py3-none-any.whl → 0.10.2.dev257__py3-none-any.whl

This diff compares the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (94)
  1. {qontract_reconcile-0.10.2.dev256.dist-info → qontract_reconcile-0.10.2.dev257.dist-info}/METADATA +1 -1
  2. {qontract_reconcile-0.10.2.dev256.dist-info → qontract_reconcile-0.10.2.dev257.dist-info}/RECORD +94 -94
  3. reconcile/aus/advanced_upgrade_service.py +1 -1
  4. reconcile/aus/base.py +2 -2
  5. reconcile/aus/version_gates/sts_version_gate_handler.py +2 -2
  6. reconcile/aws_account_manager/reconciler.py +22 -20
  7. reconcile/aws_iam_keys.py +5 -5
  8. reconcile/aws_iam_password_reset.py +5 -5
  9. reconcile/aws_saml_roles/integration.py +5 -5
  10. reconcile/aws_version_sync/integration.py +4 -3
  11. reconcile/cli.py +5 -5
  12. reconcile/closedbox_endpoint_monitoring_base.py +1 -0
  13. reconcile/database_access_manager.py +4 -4
  14. reconcile/dynatrace_token_provider/integration.py +2 -2
  15. reconcile/external_resources/manager.py +2 -2
  16. reconcile/external_resources/model.py +1 -1
  17. reconcile/external_resources/secrets_sync.py +2 -2
  18. reconcile/gabi_authorized_users.py +3 -3
  19. reconcile/github_org.py +2 -2
  20. reconcile/gitlab_housekeeping.py +1 -1
  21. reconcile/gitlab_mr_sqs_consumer.py +1 -1
  22. reconcile/glitchtip/integration.py +2 -2
  23. reconcile/jenkins_worker_fleets.py +5 -5
  24. reconcile/ldap_groups/integration.py +3 -3
  25. reconcile/ocm_clusters.py +2 -2
  26. reconcile/ocm_internal_notifications/integration.py +2 -2
  27. reconcile/ocm_labels/integration.py +3 -2
  28. reconcile/openshift_base.py +12 -11
  29. reconcile/openshift_cluster_bots.py +2 -2
  30. reconcile/openshift_resources_base.py +3 -3
  31. reconcile/openshift_rhcs_certs.py +2 -2
  32. reconcile/openshift_saas_deploy.py +1 -1
  33. reconcile/quay_membership.py +4 -4
  34. reconcile/rhidp/common.py +3 -2
  35. reconcile/run_integration.py +7 -4
  36. reconcile/skupper_network/integration.py +3 -3
  37. reconcile/slack_usergroups.py +4 -4
  38. reconcile/status_board.py +3 -3
  39. reconcile/terraform_cloudflare_dns.py +5 -5
  40. reconcile/terraform_cloudflare_users.py +15 -17
  41. reconcile/terraform_resources.py +6 -6
  42. reconcile/terraform_vpc_peerings.py +9 -9
  43. reconcile/unleash_feature_toggles/integration.py +1 -1
  44. reconcile/utils/aggregated_list.py +2 -2
  45. reconcile/utils/aws_api_typed/iam.py +2 -2
  46. reconcile/utils/aws_api_typed/organization.py +4 -4
  47. reconcile/utils/aws_api_typed/service_quotas.py +4 -4
  48. reconcile/utils/aws_api_typed/support.py +9 -9
  49. reconcile/utils/aws_helper.py +1 -1
  50. reconcile/utils/config.py +8 -4
  51. reconcile/utils/deadmanssnitch_api.py +2 -4
  52. reconcile/utils/glitchtip/models.py +18 -12
  53. reconcile/utils/gql.py +4 -4
  54. reconcile/utils/internal_groups/client.py +2 -2
  55. reconcile/utils/jinja2/utils.py +7 -3
  56. reconcile/utils/jjb_client.py +2 -2
  57. reconcile/utils/models.py +2 -1
  58. reconcile/utils/mr/__init__.py +3 -3
  59. reconcile/utils/mr/app_interface_reporter.py +2 -2
  60. reconcile/utils/mr/aws_access.py +5 -2
  61. reconcile/utils/mr/base.py +3 -3
  62. reconcile/utils/mr/user_maintenance.py +1 -1
  63. reconcile/utils/oc.py +11 -11
  64. reconcile/utils/oc_connection_parameters.py +4 -4
  65. reconcile/utils/ocm/base.py +3 -3
  66. reconcile/utils/ocm/products.py +8 -8
  67. reconcile/utils/ocm/search_filters.py +2 -2
  68. reconcile/utils/openshift_resource.py +21 -18
  69. reconcile/utils/pagerduty_api.py +5 -5
  70. reconcile/utils/quay_api.py +2 -2
  71. reconcile/utils/rosa/rosa_cli.py +1 -1
  72. reconcile/utils/rosa/session.py +2 -2
  73. reconcile/utils/runtime/desired_state_diff.py +7 -7
  74. reconcile/utils/saasherder/interfaces.py +1 -0
  75. reconcile/utils/saasherder/models.py +1 -1
  76. reconcile/utils/saasherder/saasherder.py +1 -1
  77. reconcile/utils/secret_reader.py +20 -20
  78. reconcile/utils/slack_api.py +5 -5
  79. reconcile/utils/slo_document_manager.py +6 -6
  80. reconcile/utils/state.py +8 -8
  81. reconcile/utils/terraform_client.py +3 -3
  82. reconcile/utils/terrascript/cloudflare_client.py +2 -2
  83. reconcile/utils/terrascript/cloudflare_resources.py +1 -0
  84. reconcile/utils/terrascript_aws_client.py +12 -11
  85. reconcile/utils/vault.py +22 -22
  86. reconcile/vault_replication.py +15 -15
  87. tools/cli_commands/erv2.py +3 -2
  88. tools/cli_commands/gpg_encrypt.py +9 -9
  89. tools/cli_commands/systems_and_tools.py +1 -1
  90. tools/qontract_cli.py +13 -14
  91. tools/saas_promotion_state/saas_promotion_state.py +4 -4
  92. tools/template_validation.py +5 -5
  93. {qontract_reconcile-0.10.2.dev256.dist-info → qontract_reconcile-0.10.2.dev257.dist-info}/WHEEL +0 -0
  94. {qontract_reconcile-0.10.2.dev256.dist-info → qontract_reconcile-0.10.2.dev257.dist-info}/entry_points.txt +0 -0
@@ -39,7 +39,7 @@ from reconcile.utils.oc import (
     OCLogMsg,
     PrimaryClusterIPCanNotBeUnsetError,
     RequestEntityTooLargeError,
-    StatefulSetUpdateForbidden,
+    StatefulSetUpdateForbiddenError,
     StatusCodeError,
     UnsupportedMediaTypeError,
 )
@@ -52,13 +52,19 @@ from reconcile.utils.three_way_diff_strategy import three_way_diff_using_hash
 
 ACTION_APPLIED = "applied"
 ACTION_DELETED = "deleted"
+AUTH_METHOD_USER_KEY = {
+    "github-org": "github_username",
+    "github-org-team": "github_username",
+    "oidc": "org_username",
+    "rhidp": "org_username",
+}
 
 
 class ValidationError(Exception):
     pass
 
 
-class ValidationErrorJobFailed(Exception):
+class ValidationErrorJobFailedError(Exception):
     pass
 
 
@@ -498,7 +504,7 @@ def apply(
 
         oc.delete(namespace=namespace, kind=resource_type, name=resource.name)
         oc.apply(namespace=namespace, resource=annotated)
-    except StatefulSetUpdateForbidden:
+    except StatefulSetUpdateForbiddenError:
         if resource_type != "StatefulSet":
             raise
 
@@ -1228,7 +1234,7 @@ def validate_realized_data(actions: Iterable[dict[str, str]], oc_map: ClusterMap
                 for c in conditions:
                     if c.get("type") == "Failed":
                         msg = f"{name}: {c.get('reason')}"
-                        raise ValidationErrorJobFailed(msg)
+                        raise ValidationErrorJobFailedError(msg)
             raise ValidationError(name)
         elif kind == "ClowdApp":
             deployments = status.get("deployments")
@@ -1256,7 +1262,7 @@ def validate_realized_data(actions: Iterable[dict[str, str]], oc_map: ClusterMap
                     if job_state == "Failed":
                         failed_jobs.append(job_name)
             if failed_jobs:
-                raise ValidationErrorJobFailed(
+                raise ValidationErrorJobFailedError(
                     f"CJI {name} failed jobs: {failed_jobs}"
                 )
         else:
@@ -1413,12 +1419,7 @@ def determine_user_keys_for_access(
     enforced_user_keys: list[str] | None = None,
 ) -> list[str]:
     """Return user keys based on enabled cluster authentication methods."""
-    AUTH_METHOD_USER_KEY = {
-        "github-org": "github_username",
-        "github-org-team": "github_username",
-        "oidc": "org_username",
-        "rhidp": "org_username",
-    }
+
     user_keys: list[str] = []
 
     if enforced_user_keys:
@@ -111,7 +111,7 @@ def sa_secret_name(sa: str) -> str:
     return f"{sa}-token"
 
 
-class TokenNotReadyException(Exception):
+class TokenNotReadyError(Exception):
     pass
 
 
@@ -120,7 +120,7 @@ class TokenNotReadyException(Exception):
 def retrieve_token(kubeconfig: str, namespace: str, sa: str) -> str:
     secret = oc(kubeconfig, namespace, ["get", "secret", sa_secret_name(sa)])
     if not secret or "token" not in secret.get("data", {}):
-        raise TokenNotReadyException()
+        raise TokenNotReadyError()
     b64_token = secret["data"]["token"]
     return base64.b64decode(b64_token).decode()
 
@@ -66,8 +66,8 @@ from reconcile.utils.secret_reader import SecretReader, SecretReaderBase
 from reconcile.utils.semver_helper import make_semver
 from reconcile.utils.sharding import is_in_shard
 from reconcile.utils.vault import (
-    SecretVersionIsNone,
-    SecretVersionNotFound,
+    SecretVersionIsNoneError,
+    SecretVersionNotFoundError,
 )
 
 # +-----------------------+-------------------------+-------------+
@@ -602,7 +602,7 @@ def fetch_openshift_resource(
                 alertmanager_config_key=alertmanager_config_key,
                 settings=settings,
             )
-        except (SecretVersionNotFound, SecretVersionIsNone) as e:
+        except (SecretVersionNotFoundError, SecretVersionIsNoneError) as e:
             raise FetchSecretError(e) from None
     elif provider == "route":
         path = resource["resource"]["path"]
@@ -35,7 +35,7 @@ from reconcile.utils.rhcsv2_certs import RhcsV2Cert, generate_cert
 from reconcile.utils.runtime.integration import DesiredStateShardConfig
 from reconcile.utils.secret_reader import create_secret_reader
 from reconcile.utils.semver_helper import make_semver
-from reconcile.utils.vault import SecretNotFound, VaultClient
+from reconcile.utils.vault import SecretNotFoundError, VaultClient
 
 QONTRACT_INTEGRATION = "openshift-rhcs-certs"
 QONTRACT_INTEGRATION_VERSION = make_semver(1, 9, 3)
@@ -124,7 +124,7 @@ def get_vault_cert_secret(
         vault_cert_secret = vault.read_all({ # type: ignore[attr-defined]
             "path": f"{vault_base_path}/{ns.cluster.name}/{ns.name}/{cert_resource.secret_name}"
         })
-    except SecretNotFound:
+    except SecretNotFoundError:
         logging.info(
             f"No existing cert found for cluster='{ns.cluster.name}', namespace='{ns.name}', secret='{cert_resource.secret_name}''"
         )
@@ -345,4 +345,4 @@ def run(
     if image_auth.auth_server:
         json_file = os.path.join(io_dir, "dockerconfigjson")
         with open(json_file, "w", encoding="locale") as f:
-            f.write(json.dumps(image_auth.getDockerConfigJson(), indent=2))
+            f.write(json.dumps(image_auth.get_docker_config_json(), indent=2))
@@ -18,9 +18,9 @@ from reconcile.utils import (
 from reconcile.utils.aggregated_list import (
     AggregatedDiffRunner,
     AggregatedList,
-    RunnerException,
+    RunnerError,
 )
-from reconcile.utils.quay_api import QuayTeamNotFoundException
+from reconcile.utils.quay_api import QuayTeamNotFoundError
 
 QONTRACT_INTEGRATION = "quay-membership"
 
@@ -63,7 +63,7 @@ def fetch_current_state(quay_api_store):
         for team in teams:
             try:
                 members = quay_api.list_team_members(team)
-            except QuayTeamNotFoundException:
+            except QuayTeamNotFoundError:
                 logging.warning(
                     "Attempted to list members for team %s in "
                     "org %s/%s, but it doesn't exist",
@@ -149,7 +149,7 @@ class RunnerAction:
             # Ensure all quay org/teams are declared as dependencies in a
             # `/dependencies/quay-org-1.yml` datafile.
             if team not in self.quay_api_store[org]["teams"]:
-                raise RunnerException(
+                raise RunnerError(
                     f"Quay team {team} is not defined as a "
                     f"managedTeam in the {org} org."
                 )
reconcile/rhidp/common.py CHANGED
@@ -61,8 +61,9 @@ class ClusterAuth(BaseModel):
     status: str
 
     @root_validator
-    def name_no_spaces( # pylint: disable=no-self-argument
-        cls, values: MutableMapping[str, Any]
+    def name_no_spaces(
+        cls, # noqa: N805
+        values: MutableMapping[str, Any],
     ) -> MutableMapping[str, Any]:
         values["name"] = values["name"].replace(" ", "-")
         return values
@@ -65,10 +65,13 @@ HANDLERS = [STREAM_HANDLER]
 # Messages to the log file
 if LOG_FILE is not None:
     FILE_HANDLER = logging.FileHandler(LOG_FILE)
-    logFileFormat = "%(message)s"
-    if PREFIX_LOG_LEVEL == "true":
-        logFileFormat = "[%(levelname)s] %(message)s"
-    FILE_HANDLER.setFormatter(logging.Formatter(fmt=logFileFormat))
+    FILE_HANDLER.setFormatter(
+        logging.Formatter(
+            fmt="[%(levelname)s] %(message)s"
+            if PREFIX_LOG_LEVEL == "true"
+            else "%(message)s"
+        )
+    )
     HANDLERS.append(FILE_HANDLER) # type: ignore
 
 # Setting up the root logger
@@ -41,7 +41,7 @@ SITE_CONTROLLER_LABELS = {
 CONFIG_NAME = "skupper-site"
 
 
-class SkupperNetworkExcpetion(Exception):
+class SkupperNetworkError(Exception):
     """Base exception for Skupper Network integration."""
 
 
@@ -55,7 +55,7 @@ def load_site_controller_template(
             resource["content"], undefined=jinja2.StrictUndefined
         ).render(variables)
     except jinja2.exceptions.UndefinedError as e:
-        raise SkupperNetworkExcpetion(
+        raise SkupperNetworkError(
             f"Failed to render template {path}: {e.message}"
         ) from None
     return yaml.safe_load(body)
@@ -113,7 +113,7 @@ def compile_skupper_sites(
     # we don't support skupper installations with just one site
     for site in network_sites:
         if site.is_island(network_sites):
-            raise SkupperNetworkExcpetion(
+            raise SkupperNetworkError(
                 f"{site}: Site is not connected to any other skupper site in the network."
             )
 
@@ -52,7 +52,7 @@ from reconcile.utils.extended_early_exit import (
 from reconcile.utils.github_api import GithubRepositoryApi
 from reconcile.utils.gitlab_api import GitLabApi
 from reconcile.utils.pagerduty_api import (
-    PagerDutyApiException,
+    PagerDutyApiError,
     PagerDutyMap,
     get_pagerduty_map,
 )
@@ -64,7 +64,7 @@ from reconcile.utils.secret_reader import (
 from reconcile.utils.slack_api import (
     SlackApi,
     SlackApiError,
-    UsergroupNotFoundException,
+    UsergroupNotFoundError,
 )
 from reconcile.utils.vcs import VCS
 
@@ -204,7 +204,7 @@ def get_current_state(
             continue
         try:
             users, channels, description = spec.slack.describe_usergroup(ug)
-        except UsergroupNotFoundException:
+        except UsergroupNotFoundError:
             continue
         current_state.setdefault(workspace, {})[ug] = State(
             workspace=workspace,
@@ -249,7 +249,7 @@ def get_usernames_from_pagerduty(
         pd = pagerduty_map.get(pagerduty.instance.name)
         try:
             pagerduty_names = pd.get_pagerduty_users(pd_resource_type, pd_resource_id)
-        except PagerDutyApiException as e:
+        except PagerDutyApiError as e:
             logging.error(
                 f"[{usergroup}] PagerDuty API error: {e} "
                 "(hint: check that pagerduty schedule_id/escalation_policy_id is correct)"
reconcile/status_board.py CHANGED
@@ -97,7 +97,7 @@ class Product(AbstractStatusBoard):
     def update(self, ocm: OCMBaseClient) -> None:
         err_msg = "Called update on StatusBoardHandler that doesn't have update method"
         logging.error(err_msg)
-        raise UpdateNotSupported(err_msg)
+        raise UpdateNotSupportedError(err_msg)
 
     def delete(self, ocm: OCMBaseClient) -> None:
         if not self.id:
@@ -133,7 +133,7 @@ class Application(AbstractStatusBoard):
     def update(self, ocm: OCMBaseClient) -> None:
         err_msg = "Called update on StatusBoardHandler that doesn't have update method"
         logging.error(err_msg)
-        raise UpdateNotSupported(err_msg)
+        raise UpdateNotSupportedError(err_msg)
 
     def delete(self, ocm: OCMBaseClient) -> None:
         if not self.id:
@@ -219,7 +219,7 @@ Application.update_forward_refs()
 Service.update_forward_refs()
 
 
-class UpdateNotSupported(Exception):
+class UpdateNotSupportedError(Exception):
     pass
 
 
@@ -44,8 +44,8 @@ from reconcile.utils.terraform_client import TerraformClient
 from reconcile.utils.terrascript.cloudflare_client import (
     DEFAULT_PROVIDER_RPS,
     DNSZoneShardingStrategy,
-    IntegrationUndefined,
-    InvalidTerraformState,
+    IntegrationUndefinedError,
+    InvalidTerraformStateError,
     TerrascriptCloudflareClientFactory,
 )
 from reconcile.utils.terrascript.models import (
@@ -312,9 +312,9 @@ def build_cloudflare_terraform_config_collection(
     integrations = tf_state.integrations
 
     if not bucket:
-        raise InvalidTerraformState("Terraform state must have bucket defined")
+        raise InvalidTerraformStateError("Terraform state must have bucket defined")
     if not region:
-        raise InvalidTerraformState("Terraform state must have region defined")
+        raise InvalidTerraformStateError("Terraform state must have region defined")
 
     integration = None
     for i in integrations:
@@ -323,7 +323,7 @@ def build_cloudflare_terraform_config_collection(
             break
 
     if not integration:
-        raise IntegrationUndefined(
+        raise IntegrationUndefinedError(
             f"Must declare integration name under Terraform state in {zone.account.terraform_state_account.name} AWS account for {cf_account.name} Cloudflare account in app-interface"
         )
 
@@ -35,15 +35,15 @@ from reconcile.utils.terraform.config_client import (
     TerraformConfigClientCollection,
 )
 from reconcile.utils.terraform_client import (
-    TerraformApplyFailed,
+    TerraformApplyFailedError,
     TerraformClient,
-    TerraformDeletionDetected,
-    TerraformPlanFailed,
+    TerraformDeletionDetectedError,
+    TerraformPlanFailedError,
 )
 from reconcile.utils.terrascript.cloudflare_client import (
     AccountShardingStrategy,
-    IntegrationUndefined,
-    InvalidTerraformState,
+    IntegrationUndefinedError,
+    InvalidTerraformStateError,
     TerrascriptCloudflareClientFactory,
 )
 from reconcile.utils.terrascript.models import (
@@ -153,9 +153,6 @@ class TerraformCloudflareUsers(
         ]
 
         self._run_terraform(
-            QONTRACT_INTEGRATION,
-            QONTRACT_INTEGRATION_VERSION,
-            QONTRACT_TF_PREFIX,
             dry_run,
             enable_deletion,
             thread_pool_size,
@@ -165,9 +162,6 @@ class TerraformCloudflareUsers(
 
     def _run_terraform(
         self,
-        QONTRACT_INTEGRATION: str,
-        QONTRACT_INTEGRATION_VERSION: str,
-        QONTRACT_TF_PREFIX: str,
         dry_run: bool,
         enable_deletion: bool,
         thread_pool_size: int,
@@ -186,11 +180,11 @@ class TerraformCloudflareUsers(
         try:
             disabled_deletions_detected, err = tf.plan(enable_deletion)
             if err:
-                raise TerraformPlanFailed(
+                raise TerraformPlanFailedError(
                     f"Failed to run terraform plan for integration {QONTRACT_INTEGRATION}"
                 )
             if disabled_deletions_detected:
-                raise TerraformDeletionDetected(
+                raise TerraformDeletionDetectedError(
                     "Deletions detected but they are disabled"
                 )
 
@@ -199,7 +193,7 @@ class TerraformCloudflareUsers(
 
             err = tf.apply()
             if err:
-                raise TerraformApplyFailed(
+                raise TerraformApplyFailedError(
                     f"Failed to run terraform apply for integration {QONTRACT_INTEGRATION}"
                 )
         finally:
@@ -235,9 +229,13 @@ class TerraformCloudflareUsers(
             integrations = tf_state.integrations
 
             if not bucket:
-                raise InvalidTerraformState("Terraform state must have bucket defined")
+                raise InvalidTerraformStateError(
+                    "Terraform state must have bucket defined"
+                )
             if not region:
-                raise InvalidTerraformState("Terraform state must have region defined")
+                raise InvalidTerraformStateError(
+                    "Terraform state must have region defined"
+                )
 
             integration = None
             for i in integrations:
@@ -246,7 +244,7 @@ class TerraformCloudflareUsers(
                     break
 
             if not integration:
-                raise IntegrationUndefined(
+                raise IntegrationUndefinedError(
                     "Must declare integration name under Terraform state in app-interface"
                 )
 
@@ -200,20 +200,20 @@ def get_aws_accounts(
     if exclude_accounts and not dry_run:
         message = "--exclude-accounts is only supported in dry-run mode"
         logging.error(message)
-        raise ExcludeAccountsAndDryRunException(message)
+        raise ExcludeAccountsAndDryRunError(message)
 
     if (exclude_accounts and include_accounts) and any(
         a in exclude_accounts for a in include_accounts
     ):
         message = "Using --exclude-accounts and --account-name with the same account is not allowed"
         logging.error(message)
-        raise ExcludeAccountsAndAccountNameException(message)
+        raise ExcludeAccountsAndAccountNameError(message)
 
     # If we are not running in dry run we don't want to run with more than one account
     if include_accounts and len(include_accounts) > 1 and not dry_run:
         message = "Running with multiple accounts is only supported in dry-run mode"
         logging.error(message)
-        raise MultipleAccountNamesInDryRunException(message)
+        raise MultipleAccountNamesInDryRunError(message)
 
     accounts = queries.get_aws_accounts(terraform_state=True)
 
@@ -345,15 +345,15 @@ def populate_desired_state(
     )
 
 
-class ExcludeAccountsAndDryRunException(Exception):
+class ExcludeAccountsAndDryRunError(Exception):
     pass
 
 
-class ExcludeAccountsAndAccountNameException(Exception):
+class ExcludeAccountsAndAccountNameError(Exception):
     pass
 
 
-class MultipleAccountNamesInDryRunException(Exception):
+class MultipleAccountNamesInDryRunError(Exception):
     pass
 
 
@@ -28,7 +28,7 @@ QONTRACT_INTEGRATION = "terraform_vpc_peerings"
 QONTRACT_INTEGRATION_VERSION = make_semver(0, 1, 0)
 
 
-class BadTerraformPeeringState(Exception):
+class BadTerraformPeeringStateError(Exception):
     pass
 
 
@@ -122,7 +122,7 @@ def aws_assume_roles_for_cluster_vpc_peering(
     # accepters peering connection
     infra_account = accepter_connection["awsInfrastructureManagementAccount"]
     if infra_account and infra_account["name"] not in allowed_accounts:
-        raise BadTerraformPeeringState(
+        raise BadTerraformPeeringStateError(
             "[account_not_allowed] "
             f"account {infra_account['name']} used on the peering accepter of "
             f"cluster {accepter_cluster['name']} is not listed as a "
@@ -135,7 +135,7 @@ def aws_assume_roles_for_cluster_vpc_peering(
         infra_account = _get_default_management_account(accepter_cluster)
 
     if not infra_account:
-        raise BadTerraformPeeringState(
+        raise BadTerraformPeeringStateError(
             f"[no_account_available] unable to find infra account "
             f"for {accepter_cluster['name']} to manage the VPC peering "
             f"with {requester_cluster['name']}"
@@ -147,7 +147,7 @@ def aws_assume_roles_for_cluster_vpc_peering(
         infra_account, requester_cluster, ocm, requester_connection.get("assumeRole")
     )
     if req_aws is None:
-        raise BadTerraformPeeringState(
+        raise BadTerraformPeeringStateError(
             f"[assume_role_not_found] unable to find assume role "
             f"on cluster-vpc-requester for account {infra_account['name']} and "
             f"cluster {requester_cluster['name']} "
@@ -156,7 +156,7 @@
         infra_account, accepter_cluster, ocm, accepter_connection.get("assumeRole")
     )
     if acc_aws is None:
-        raise BadTerraformPeeringState(
+        raise BadTerraformPeeringStateError(
             f"[assume_role_not_found] unable to find assume role "
             f"on cluster-vpc-accepter for account {infra_account['name']} and "
             f"cluster {accepter_cluster['name']} "
@@ -192,7 +192,7 @@ def build_desired_state_single_cluster(
         cluster_info, peer_cluster, "cluster-vpc-accepter"
     )
     if not peer_info:
-        raise BadTerraformPeeringState(
+        raise BadTerraformPeeringStateError(
             "[no_matching_peering] could not find a matching peering "
             f"connection for cluster {cluster_name}, connection "
             f"{peer_connection_name}"
@@ -297,7 +297,7 @@ def build_desired_state_all_clusters(
                 cluster_info, ocm, awsapi, account_filter
             )
             desired_state.extend(items)
-        except (KeyError, BadTerraformPeeringState, aws_api.MissingARNError):
+        except (KeyError, BadTerraformPeeringStateError, aws_api.MissingARNError):
             logging.exception(f"Failed to get desired state for {cluster}")
             error = True
 
@@ -421,7 +421,7 @@ def build_desired_state_vpc_mesh(
                 cluster_info, ocm, awsapi, account_filter
             )
             desired_state.extend(items)
-        except (KeyError, BadTerraformPeeringState, aws_api.MissingARNError):
+        except (KeyError, BadTerraformPeeringStateError, aws_api.MissingARNError):
             logging.exception(f"Unable to create VPC mesh for cluster {cluster}")
             error = True
 
@@ -554,7 +554,7 @@ def build_desired_state_vpc(
                 cluster_info, ocm, awsapi, account_filter
             )
             desired_state.extend(items)
-        except (KeyError, BadTerraformPeeringState, aws_api.MissingARNError):
+        except (KeyError, BadTerraformPeeringStateError, aws_api.MissingARNError):
             logging.exception(f"Unable to process {cluster_info['name']}")
             error = True
 
@@ -44,7 +44,7 @@ def feature_toggle_equal(c: FeatureToggle, d: FeatureToggleUnleashV1) -> bool:
     )
 
 
-class UnleashFeatureToggleException(Exception):
+class UnleashFeatureToggleError(Exception):
     """Raised when a feature toggle is manually created."""
 
 
@@ -7,7 +7,7 @@ Action = Callable[[Any, list[Any]], bool]
 Cond = Callable[[Any], bool]
 
 
-class RunnerException(Exception):
+class RunnerError(Exception):
     pass
 
 
@@ -88,7 +88,7 @@ class AggregatedList:
     def dump(self) -> list[AggregatedItem]:
         return list(self._dict.values())
 
-    def toJSON(self) -> str:
+    def to_json(self) -> str:
         return json.dumps(self.dump(), indent=4)
 
     @staticmethod
@@ -20,7 +20,7 @@ class AWSUser(BaseModel):
     path: str = Field(..., alias="Path")
 
 
-class AWSEntityAlreadyExistsException(Exception):
+class AWSEntityAlreadyExistsError(Exception):
     """Raised when the user already exists in IAM."""
 
 
@@ -41,7 +41,7 @@ class AWSApiIam:
             user = self.client.create_user(UserName=user_name)
             return AWSUser(**user["User"])
         except self.client.exceptions.EntityAlreadyExistsException:
-            raise AWSEntityAlreadyExistsException(
+            raise AWSEntityAlreadyExistsError(
                 f"User {user_name} already exists"
             ) from None
 
@@ -61,11 +61,11 @@ class AWSAccount(BaseModel):
     state: str = Field(..., alias="Status")
 
 
-class AWSAccountCreationException(Exception):
+class AWSAccountCreationError(Exception):
     """Exception raised when account creation failed."""
 
 
-class AWSAccountNotFoundException(Exception):
+class AWSAccountNotFoundError(Exception):
     """Exception raised when the account cannot be found in the specified OU."""
 
 
@@ -102,7 +102,7 @@ class AWSApiOrganizations:
         )
         status = AWSAccountStatus(**resp["CreateAccountStatus"])
         if status.state == "FAILED":
-            raise AWSAccountCreationException(
+            raise AWSAccountCreationError(
                 f"Account creation failed: {status.failure_reason}"
             )
         return status
@@ -122,7 +122,7 @@
         for p in resp.get("Parents", []):
             if p["Type"] in {"ORGANIZATIONAL_UNIT", "ROOT"}:
                 return p["Id"]
-        raise AWSAccountNotFoundException(f"Account {uid} not found!")
+        raise AWSAccountNotFoundError(f"Account {uid} not found!")
 
     def move_account(self, uid: str, destination_parent_id: str) -> None:
         """Move an account to a different organizational unit."""
@@ -31,11 +31,11 @@ class AWSQuota(BaseModel):
         return str(self)
 
 
-class AWSNoSuchResourceException(Exception):
+class AWSNoSuchResourceError(Exception):
     """Raised when a resource is not found in a service quotas API call."""
 
 
-class AWSResourceAlreadyExistsException(Exception):
+class AWSResourceAlreadyExistsError(Exception):
     """Raised when quota increase request already exists."""
 
 
@@ -62,7 +62,7 @@ class AWSApiServiceQuotas:
             )
             return AWSRequestedServiceQuotaChange(**req["RequestedQuota"])
         except self.client.exceptions.ResourceAlreadyExistsException:
-            raise AWSResourceAlreadyExistsException(
+            raise AWSResourceAlreadyExistsError(
                 f"Service quota increase request {service_code=}, {quota_code=} already exists."
            ) from None
 
@@ -74,6 +74,6 @@ class AWSApiServiceQuotas:
             )
             return AWSQuota(**quota["Quota"])
         except self.client.exceptions.NoSuchResourceException:
-            raise AWSNoSuchResourceException(
+            raise AWSNoSuchResourceError(
                 f"Service quota {service_code=}, {quota_code=} not found."
             ) from None