qontract-reconcile 0.10.1rc460__py3-none-any.whl → 0.10.1rc462__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. {qontract_reconcile-0.10.1rc460.dist-info → qontract_reconcile-0.10.1rc462.dist-info}/METADATA +1 -1
  2. {qontract_reconcile-0.10.1rc460.dist-info → qontract_reconcile-0.10.1rc462.dist-info}/RECORD +88 -88
  3. reconcile/aus/base.py +3 -3
  4. reconcile/aws_iam_keys.py +1 -1
  5. reconcile/aws_support_cases_sos.py +1 -1
  6. reconcile/change_owners/change_owners.py +2 -3
  7. reconcile/change_owners/diff.py +1 -1
  8. reconcile/change_owners/tester.py +3 -3
  9. reconcile/checkpoint.py +1 -1
  10. reconcile/cli.py +2 -1
  11. reconcile/closedbox_endpoint_monitoring_base.py +1 -1
  12. reconcile/cna/state.py +2 -2
  13. reconcile/dashdotdb_base.py +3 -3
  14. reconcile/dynatrace_token_provider.py +7 -8
  15. reconcile/gcr_mirror.py +2 -2
  16. reconcile/github_org.py +2 -2
  17. reconcile/github_owners.py +1 -1
  18. reconcile/gitlab_housekeeping.py +3 -3
  19. reconcile/gitlab_labeler.py +4 -5
  20. reconcile/glitchtip/reconciler.py +3 -3
  21. reconcile/glitchtip_project_alerts/integration.py +3 -3
  22. reconcile/gql_definitions/common/clusters.py +0 -2
  23. reconcile/gql_definitions/common/clusters_minimal.py +0 -2
  24. reconcile/ocm_clusters.py +9 -9
  25. reconcile/ocm_github_idp.py +1 -1
  26. reconcile/ocm_groups.py +1 -1
  27. reconcile/openshift_base.py +6 -6
  28. reconcile/openshift_clusterrolebindings.py +1 -1
  29. reconcile/openshift_groups.py +1 -1
  30. reconcile/openshift_namespace_labels.py +12 -12
  31. reconcile/openshift_resources_base.py +3 -3
  32. reconcile/openshift_rolebindings.py +1 -1
  33. reconcile/openshift_saas_deploy.py +1 -1
  34. reconcile/quay_mirror.py +2 -2
  35. reconcile/queries.py +0 -2
  36. reconcile/rhidp/common.py +2 -2
  37. reconcile/saas_auto_promotions_manager/merge_request_manager/merge_request_manager.py +9 -9
  38. reconcile/slack_usergroups.py +9 -9
  39. reconcile/sql_query.py +3 -4
  40. reconcile/terraform_aws_route53.py +1 -1
  41. reconcile/terraform_cloudflare_users.py +7 -7
  42. reconcile/terraform_repo.py +3 -1
  43. reconcile/terraform_vpc_peerings.py +10 -10
  44. reconcile/test/fixtures.py +1 -1
  45. reconcile/test/saas_auto_promotions_manager/merge_request_manager/renderer/conftest.py +2 -2
  46. reconcile/test/test_jump_host.py +2 -2
  47. reconcile/test/test_quay_mirror.py +3 -1
  48. reconcile/test/test_quay_mirror_org.py +3 -1
  49. reconcile/test/test_terraform_repo.py +2 -2
  50. reconcile/typed_queries/saas_files.py +5 -5
  51. reconcile/utils/amtool.py +2 -2
  52. reconcile/utils/aws_api.py +5 -29
  53. reconcile/utils/config.py +1 -2
  54. reconcile/utils/environ.py +1 -1
  55. reconcile/utils/git.py +7 -3
  56. reconcile/utils/git_secrets.py +2 -2
  57. reconcile/utils/helm.py +1 -1
  58. reconcile/utils/jjb_client.py +7 -7
  59. reconcile/utils/jump_host.py +2 -2
  60. reconcile/utils/metrics.py +3 -3
  61. reconcile/utils/models.py +47 -51
  62. reconcile/utils/mr/aws_access.py +1 -1
  63. reconcile/utils/mr/base.py +1 -1
  64. reconcile/utils/mr/user_maintenance.py +1 -1
  65. reconcile/utils/oc.py +8 -8
  66. reconcile/utils/oc_connection_parameters.py +12 -13
  67. reconcile/utils/ocm/base.py +1 -1
  68. reconcile/utils/ocm/ocm.py +9 -9
  69. reconcile/utils/openshift_resource.py +8 -9
  70. reconcile/utils/parse_dhms_duration.py +1 -1
  71. reconcile/utils/runtime/sharding.py +1 -1
  72. reconcile/utils/saasherder/saasherder.py +5 -5
  73. reconcile/utils/slack_api.py +2 -2
  74. reconcile/utils/terraform/config_client.py +1 -1
  75. reconcile/utils/terraform_client.py +5 -5
  76. reconcile/utils/terrascript/cloudflare_client.py +3 -1
  77. reconcile/utils/terrascript_aws_client.py +40 -40
  78. reconcile/utils/three_way_diff_strategy.py +2 -2
  79. reconcile/utils/unleash.py +1 -1
  80. reconcile/utils/vault.py +1 -1
  81. reconcile/vpc_peerings_validator.py +6 -6
  82. release/version.py +7 -2
  83. tools/app_interface_reporter.py +3 -3
  84. tools/cli_commands/gpg_encrypt.py +2 -2
  85. tools/qontract_cli.py +7 -6
  86. {qontract_reconcile-0.10.1rc460.dist-info → qontract_reconcile-0.10.1rc462.dist-info}/WHEEL +0 -0
  87. {qontract_reconcile-0.10.1rc460.dist-info → qontract_reconcile-0.10.1rc462.dist-info}/entry_points.txt +0 -0
  88. {qontract_reconcile-0.10.1rc460.dist-info → qontract_reconcile-0.10.1rc462.dist-info}/top_level.txt +0 -0
reconcile/dashdotdb_base.py CHANGED
@@ -149,9 +149,9 @@ class DashdotdbBase:
         if token:
             headers["Authorization"] = f"Bearer {token}"
         elif username and password:
-            headers[
-                "Authorization"
-            ] = f"Basic {b64encode(f'{username}:{password}'.encode()).decode('utf-8')}"
+            headers["Authorization"] = (
+                f"Basic {b64encode(f'{username}:{password}'.encode()).decode('utf-8')}"
+            )
         response = requests.get(
             url, params=params, headers=headers, verify=ssl_verify, timeout=(5, 120)
         )
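The hunk above only re-wraps the assignment, but it is a useful reminder of how HTTP Basic auth headers are built. A minimal standalone sketch of the same encoding (credential values are illustrative, not from the package):

    from base64 import b64encode

    username, password = "dashdotdb", "hunter2"  # illustrative only
    # RFC 7617: the Basic scheme is "Basic " + base64("<user>:<password>").
    token = b64encode(f"{username}:{password}".encode()).decode("utf-8")
    headers = {"Authorization": f"Basic {token}"}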
reconcile/dynatrace_token_provider.py CHANGED
@@ -169,9 +169,9 @@ class DynatraceTokenProviderIntegration(
                     continue

                 if tenant_id not in existing_dtp_tokens:
-                    existing_dtp_tokens[
-                        tenant_id
-                    ] = self.get_all_dtp_tokens(dt_clients[tenant_id])
+                    existing_dtp_tokens[tenant_id] = (
+                        self.get_all_dtp_tokens(dt_clients[tenant_id])
+                    )

                 self.process_cluster(
                     dry_run,
@@ -285,11 +285,10 @@ class DynatraceTokenProviderIntegration(
                     logging.info(
                         f"Operator token created in Dynatrace for cluster {cluster.ocm_cluster.external_id}."
                     )
-                else:
-                    if token_name == DYNATRACE_INGESTION_TOKEN_NAME:
-                        ingestion_token = ApiTokenCreated(raw_element=token)
-                    elif token_name == DYNATRACE_OPERATOR_TOKEN_NAME:
-                        operator_token = ApiTokenCreated(raw_element=token)
+                elif token_name == DYNATRACE_INGESTION_TOKEN_NAME:
+                    ingestion_token = ApiTokenCreated(raw_element=token)
+                elif token_name == DYNATRACE_OPERATOR_TOKEN_NAME:
+                    operator_token = ApiTokenCreated(raw_element=token)
         if need_patching:
             if not dry_run:
                 patch_syncset_payload = self.construct_base_syncset(
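The second dynatrace_token_provider hunk flattens an `else:` that immediately wrapped an `if`/`elif` into a single `elif` chain; the same rewrite recurs in gitlab_labeler, openshift_namespace_labels, and sql_query below. Since `elif` is by definition `else:` followed by an immediate `if`, the transformation cannot change behavior. A tiny self-contained illustration:

    def pick(x: int) -> str:
        # `elif` == `else:` + nested `if`, so hoisting nested branches
        # into the outer chain preserves behavior exactly.
        if x < 0:
            return "negative"
        elif x == 0:
            return "zero"
        elif x < 10:
            return "small"
        return "large"

    assert pick(3) == "small"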
reconcile/gcr_mirror.py CHANGED
@@ -221,7 +221,7 @@ class QuayMirror:
         control_file_name = "qontract-reconcile-gcr-mirror.timestamp"
         control_file_path = os.path.join(tempfile.gettempdir(), control_file_name)
         try:
-            with open(control_file_path, "r") as file_obj:
+            with open(control_file_path, "r", encoding="locale") as file_obj:
                 last_deep_sync = float(file_obj.read())
         except FileNotFoundError:
             self._record_timestamp(control_file_path)
@@ -236,7 +236,7 @@ class QuayMirror:

     @staticmethod
     def _record_timestamp(path):
-        with open(path, "w") as file_object:
+        with open(path, "w", encoding="locale") as file_object:
             file_object.write(str(time.time()))

     def _get_push_creds(self):
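The only change in these two hunks is `encoding="locale"` on `open()`, a pattern repeated in openshift_saas_deploy, quay_mirror, and terraform_repo below. Assuming these modules target Python 3.10+, this is the PEP 597 spelling for "whatever the platform default would have been": it reads and writes the same bytes as a bare `open()`, but makes the choice explicit and silences `EncodingWarning` under `python -X warn_default_encoding`. A sketch:

    import os
    import sys
    import tempfile

    assert sys.version_info >= (3, 10), "encoding='locale' needs Python 3.10+"

    path = os.path.join(tempfile.gettempdir(), "example.timestamp")
    # Identical output to open(path, "w"); only the intent is now explicit.
    with open(path, "w", encoding="locale") as fh:
        fh.write("1700000000.0")
    with open(path, encoding="locale") as fh:
        print(float(fh.read()))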
reconcile/github_org.py CHANGED
@@ -198,7 +198,7 @@ def fetch_desired_state(infer_clusters=True):
     for role in roles:
         permissions = list(
             filter(
-                lambda p: p.get("service") in ["github-org", "github-org-team"],
+                lambda p: p.get("service") in {"github-org", "github-org-team"},
                 role["permissions"],
             )
         )
@@ -238,7 +238,7 @@ def fetch_desired_state(infer_clusters=True):
     )
     for cluster in clusters:
         for auth in cluster["auth"]:
-            if auth["service"] not in ["github-org", "github-org-team"]:
+            if auth["service"] not in {"github-org", "github-org-team"}:
                 continue

             cluster_name = cluster["name"]
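These `in [...]` → `in {...}` rewrites recur through most of the files below. Membership tests against a set literal of constants let CPython fold the literal into a `frozenset` constant, so the test becomes an average O(1) hash lookup instead of an O(n) scan, and linters flag the list form. A small demonstration:

    def is_github_service(service: str) -> bool:
        # Set-literal membership: hashed lookup instead of a linear scan.
        return service in {"github-org", "github-org-team"}

    assert is_github_service("github-org")
    assert not is_github_service("quay")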
reconcile/github_owners.py CHANGED
@@ -48,7 +48,7 @@ def fetch_desired_state():
         permissions = [
             p
             for p in role["permissions"]
-            if p.get("service") in ["github-org", "github-org-team"]
+            if p.get("service") in {"github-org", "github-org-team"}
             and p.get("role") == "owner"
         ]
         if not permissions:
reconcile/gitlab_housekeeping.py CHANGED
@@ -129,7 +129,7 @@ def get_timed_out_pipelines(
 ) -> list[dict]:
     now = datetime.utcnow()

-    pending_pipelines = [p for p in pipelines if p["status"] in ["pending", "running"]]
+    pending_pipelines = [p for p in pipelines if p["status"] in {"pending", "running"}]

     if not pending_pipelines:
         return []
@@ -299,10 +299,10 @@ def preprocess_merge_requests(
 ) -> list[dict[str, Any]]:
     results = []
     for mr in project_merge_requests:
-        if mr.merge_status in [
+        if mr.merge_status in {
             MRStatus.CANNOT_BE_MERGED,
             MRStatus.CANNOT_BE_MERGED_RECHECK,
-        ]:
+        }:
             continue
         if mr.work_in_progress:
             continue
reconcile/gitlab_labeler.py CHANGED
@@ -68,12 +68,11 @@ def guess_onboarding_status(
             else:
                 app = apps[app_name]
                 labels.add(app["onboardingStatus"])
+        elif app_name in apps:
+            app = apps[app_name]
+            labels.add(app["onboardingStatus"])
         else:
-            if app_name in apps:
-                app = apps[app_name]
-                labels.add(app["onboardingStatus"])
-            else:
-                logging.debug("Error getting app name " + path)
+            logging.debug("Error getting app name " + path)

     if len(labels) == 1:
         return labels.pop()
reconcile/glitchtip/reconciler.py CHANGED
@@ -81,9 +81,9 @@ class GlitchtipReconciler:
                         name=project.name,
                         platform=project.platform,
                     )
-                    organization_projects[
-                        organization_projects.index(project)
-                    ] = updated_project
+                    organization_projects[organization_projects.index(project)] = (
+                        updated_project
+                    )

         for desired_project in desired_projects:
             current_project = organization_projects[
reconcile/glitchtip_project_alerts/integration.py CHANGED
@@ -236,9 +236,9 @@ class GlitchtipProjectAlertsIntegration(
         glitchtip_instances = glitchtip_instance_query(
             query_func=gqlapi.query
         ).instances
-        glitchtip_projects_by_instance: dict[
-            str, list[GlitchtipProjectsV1]
-        ] = defaultdict(list)
+        glitchtip_projects_by_instance: dict[str, list[GlitchtipProjectsV1]] = (
+            defaultdict(list)
+        )
         for glitchtip_project in self.get_projects(query_func=gqlapi.query):
             glitchtip_projects_by_instance[
                 glitchtip_project.organization.instance.name
reconcile/gql_definitions/common/clusters.py CHANGED
@@ -104,7 +104,6 @@ query Clusters($name: String) {
     name
     serverUrl
     consoleUrl
-    kibanaUrl
     elbFQDN
     prometheusUrl
     managedGroups
@@ -645,7 +644,6 @@ class ClusterV1(ConfiguredBaseModel):
     name: str = Field(..., alias="name")
     server_url: str = Field(..., alias="serverUrl")
     console_url: str = Field(..., alias="consoleUrl")
-    kibana_url: str = Field(..., alias="kibanaUrl")
     elb_fqdn: str = Field(..., alias="elbFQDN")
     prometheus_url: str = Field(..., alias="prometheusUrl")
     managed_groups: Optional[list[str]] = Field(..., alias="managedGroups")
reconcile/gql_definitions/common/clusters_minimal.py CHANGED
@@ -45,7 +45,6 @@ query ClustersMinimal($name: String) {
     name
     serverUrl
     consoleUrl
-    kibanaUrl
     prometheusUrl
     insecureSkipTLSVerify
     jumpHost {
@@ -126,7 +125,6 @@ class ClusterV1(ConfiguredBaseModel):
     name: str = Field(..., alias="name")
     server_url: str = Field(..., alias="serverUrl")
     console_url: str = Field(..., alias="consoleUrl")
-    kibana_url: str = Field(..., alias="kibanaUrl")
     prometheus_url: str = Field(..., alias="prometheusUrl")
     insecure_skip_tls_verify: Optional[bool] = Field(..., alias="insecureSkipTLSVerify")
     jump_host: Optional[CommonJumphostFields] = Field(..., alias="jumpHost")
reconcile/ocm_clusters.py CHANGED
@@ -163,17 +163,17 @@ def get_app_interface_spec_updates(
         desired_spec.spec.disable_user_workload_monitoring is None
         and current_spec.spec.disable_user_workload_monitoring
     ):
-        ocm_spec_updates[
-            ocmmod.SPEC_ATTR_DISABLE_UWM
-        ] = current_spec.spec.disable_user_workload_monitoring
+        ocm_spec_updates[ocmmod.SPEC_ATTR_DISABLE_UWM] = (
+            current_spec.spec.disable_user_workload_monitoring
+        )

     if (
         current_spec.spec.provision_shard_id is not None
         and desired_spec.spec.provision_shard_id != current_spec.spec.provision_shard_id
     ):
-        ocm_spec_updates[
-            ocmmod.SPEC_ATTR_PROVISION_SHARD_ID
-        ] = current_spec.spec.provision_shard_id
+        ocm_spec_updates[ocmmod.SPEC_ATTR_PROVISION_SHARD_ID] = (
+            current_spec.spec.provision_shard_id
+        )

     if isinstance(current_spec.spec, ROSAClusterSpec) and isinstance(
         desired_spec.spec, ROSAClusterSpec
@@ -183,9 +183,9 @@ def get_app_interface_spec_updates(
         and desired_spec.spec.oidc_endpoint_url
         != current_spec.spec.oidc_endpoint_url
     ):
-        ocm_spec_updates[
-            ocmmod.SPEC_ATTR_OIDC_ENDPONT_URL
-        ] = current_spec.spec.oidc_endpoint_url
+        ocm_spec_updates[ocmmod.SPEC_ATTR_OIDC_ENDPONT_URL] = (
+            current_spec.spec.oidc_endpoint_url
+        )

     if current_spec.server_url and desired_spec.server_url != current_spec.server_url:
         root_updates[ocmmod.SPEC_ATTR_SERVER_URL] = current_spec.server_url
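The most common mechanical change in this release, seen three times in this file alone, re-wraps long subscript assignments: the line now breaks inside parentheses around the right-hand side rather than inside the square brackets. The diff does not say which formatter produced this, but the shape matches Black's 2024 stable style for splitting assignments. A placeholder sketch of the before/after (names are illustrative, not the `ocmmod.SPEC_ATTR_*` constants):

    updates: dict[str, bool] = {}
    spec_attr = "disable_user_workload_monitoring"  # placeholder key
    current_value = True                            # placeholder value

    # Old layout, split inside the subscript:
    #     updates[
    #         spec_attr
    #     ] = current_value
    # New layout, right-hand side parenthesized, subscript left intact:
    updates[spec_attr] = (
        current_value
    )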
reconcile/ocm_github_idp.py CHANGED
@@ -63,7 +63,7 @@ def fetch_desired_state(clusters, vault_input_path, settings):


 def sanitize(state):
-    return {k: v for k, v in state.items() if k not in ["client_secret", "id"]}
+    return {k: v for k, v in state.items() if k not in {"client_secret", "id"}}


 def act(dry_run, ocm_map, current_state, desired_state):
reconcile/ocm_groups.py CHANGED
@@ -85,7 +85,7 @@ def run(dry_run, thread_pool_size=10):

     for diff in diffs:
         # we do not need to create/delete groups in OCM
-        if diff["action"] in ["create_group", "delete_group"]:
+        if diff["action"] in {"create_group", "delete_group"}:
             continue
         logging.info(list(diff.values()))

reconcile/openshift_base.py CHANGED
@@ -416,7 +416,7 @@ def apply(
         except FieldIsImmutableError:
             # Add more resources types to the list when you're
             # sure they're safe.
-            if resource_type not in ["Route", "Service", "Secret"]:
+            if resource_type not in {"Route", "Service", "Secret"}:
                 raise
             oc.delete(namespace=namespace, kind=resource_type, name=resource.name)
             oc.apply(namespace=namespace, resource=annotated)
@@ -470,7 +470,7 @@ def apply(
                 obsolete_rs["metadata"]["ownerReferences"] = owner_references
                 oc.apply(namespace=namespace, resource=OR(obsolete_rs, "", ""))
         except (MayNotChangeOnceSetError, PrimaryClusterIPCanNotBeUnsetError):
-            if resource_type not in ["Service"]:
+            if resource_type not in {"Service"}:
                 raise

             oc.delete(namespace=namespace, kind=resource_type, name=resource.name)
@@ -1075,10 +1075,10 @@ def _validate_resources_used_exist(
             )
             # we found one! does it's value (secret name) match the
             # using resource's?
-            if used_name in (
+            if used_name in {
                 serving_cert_alpha_secret_name,
                 serving_cert_beta_secret_name,
-            ):
+            }:
                 # found a match. we assume the serving cert secret will
                 # be present at some point soon after the Service is deployed
                 resource = service
@@ -1115,7 +1115,7 @@ def validate_planned_data(ri: ResourceInventory, oc_map: ClusterMap) -> None:
         oc = oc_map.get_cluster(cluster)

         for name, d_item in data["desired"].items():
-            if kind in ("Deployment", "DeploymentConfig"):
+            if kind in {"Deployment", "DeploymentConfig"}:
                 spec = d_item.body["spec"]["template"]["spec"]
                 _validate_resources_used_exist(
                     ri, oc, spec, cluster, namespace, kind, name, "Secret"
@@ -1162,7 +1162,7 @@ def validate_realized_data(actions: Iterable[dict[str, str]], oc_map: ClusterMap
         if not status:
             raise ValidationError("status")
         # add elif to validate additional resource kinds
-        if kind in ["Deployment", "DeploymentConfig", "StatefulSet"]:
+        if kind in {"Deployment", "DeploymentConfig", "StatefulSet"}:
             desired_replicas = resource["spec"]["replicas"]
             if desired_replicas == 0:
                 continue
reconcile/openshift_clusterrolebindings.py CHANGED
@@ -89,7 +89,7 @@ def fetch_desired_state(ri, oc_map):
         permissions = [
             {"cluster": a["cluster"], "cluster_role": a["clusterRole"]}
             for a in role["access"] or []
-            if None not in [a["cluster"], a["clusterRole"]]
+            if None not in {a["cluster"], a["clusterRole"]}
         ]
         if not permissions:
             continue
reconcile/openshift_groups.py CHANGED
@@ -233,7 +233,7 @@ def validate_diffs(diffs: Iterable[Mapping[str, Optional[str]]]) -> None:


 def sort_diffs(diff: Mapping[str, Optional[str]]) -> int:
-    if diff["action"] in ["create_group", "del_user_from_group"]:
+    if diff["action"] in {"create_group", "del_user_from_group"}:
         return 1
     return 2

reconcile/openshift_namespace_labels.py CHANGED
@@ -182,18 +182,18 @@ class LabelInventory:
                 if k not in managed:
                     self.update_managed_keys(cluster, ns, k)
                 changed[k] = v
-            else:  # k in current:
-                if k not in managed:  # conflicting labels
-                    self.add_error(
-                        cluster,
-                        ns,
-                        "Label conflict:"
-                        + f"desired {k}={v} vs "
-                        + f"current {k}={current[k]}",
-                    )
-                else:
-                    if v != current[k]:
-                        changed[k] = v
+
+            elif k not in managed:  # conflicting labels
+                self.add_error(
+                    cluster,
+                    ns,
+                    "Label conflict:"
+                    + f"desired {k}={v} vs "
+                    + f"current {k}={current[k]}",
+                )
+
+            elif v != current[k]:
+                changed[k] = v

         # remove old labels
         for k, v in current.items():
reconcile/openshift_resources_base.py CHANGED
@@ -539,9 +539,9 @@ def fetch_provider_resource(
                 if not annotations:
                     continue
                 # TODO(mafriedm): make this better
-                rule["annotations"][
-                    "html_url"
-                ] = f"{APP_INT_BASE_URL}/blob/master/resources{path}"
+                rule["annotations"]["html_url"] = (
+                    f"{APP_INT_BASE_URL}/blob/master/resources{path}"
+                )
     except Exception:
         logging.warning(
             "could not add html_url annotation to" + body["metadata"]["name"]
reconcile/openshift_rolebindings.py CHANGED
@@ -97,7 +97,7 @@ def fetch_desired_state(ri, oc_map, enforced_user_keys=None):
                 "role": a["role"],
             }
             for a in role["access"] or []
-            if None not in [a["namespace"], a["role"]]
+            if None not in {a["namespace"], a["role"]}
             and a["namespace"].get("managedRoles")
             and not ob.is_namespace_deleted(a["namespace"])
         ]
reconcile/openshift_saas_deploy.py CHANGED
@@ -316,7 +316,7 @@ def run(
         emails = " ".join([o.email for o in owners])
         file, url = saasherder.get_archive_info(saas_file, trigger_reason)
         sast_file = os.path.join(io_dir, "sast")
-        with open(sast_file, "w") as f:
+        with open(sast_file, "w", encoding="locale") as f:
             f.write(file + "\n")
             f.write(url + "\n")
             f.write(emails + "\n")
reconcile/quay_mirror.py CHANGED
@@ -349,7 +349,7 @@ class QuayMirror:
     @staticmethod
     def check_compare_tags_elapsed_time(path, interval) -> bool:
         try:
-            with open(path, "r") as file_obj:
+            with open(path, "r", encoding="locale") as file_obj:
                 last_compare_tags = float(file_obj.read())
         except FileNotFoundError:
             return True
@@ -362,7 +362,7 @@ class QuayMirror:

     @staticmethod
     def record_timestamp(path) -> None:
-        with open(path, "w") as file_object:
+        with open(path, "w", encoding="locale") as file_object:
             file_object.write(str(time.time()))

     def _get_push_creds(self):
reconcile/queries.py CHANGED
@@ -651,7 +651,6 @@ CLUSTERS_QUERY = """
     name
     serverUrl
     consoleUrl
-    kibanaUrl
     elbFQDN
     prometheusUrl
     managedGroups
@@ -968,7 +967,6 @@ CLUSTERS_MINIMAL_QUERY = """
     name
     serverUrl
     consoleUrl
-    kibanaUrl
     prometheusUrl
     insecureSkipTLSVerify
     jumpHost {
reconcile/rhidp/common.py CHANGED
@@ -66,10 +66,10 @@ class ClusterAuth(BaseModel):

     @property
     def oidc_enabled(self) -> bool:
-        return self.status not in (
+        return self.status not in {
             StatusValue.DISABLED.value,
             StatusValue.RHIDP_ONLY.value,
-        )
+        }

     @property
     def enforced(self) -> bool:
reconcile/saas_auto_promotions_manager/merge_request_manager/merge_request_manager.py CHANGED
@@ -237,10 +237,10 @@ class MergeRequestManager:
         for sub in subs:
             if sub.target_file_path not in content_by_path:
                 try:
-                    content_by_path[
-                        sub.target_file_path
-                    ] = self._vcs.get_file_content_from_app_interface_master(
-                        file_path=sub.target_file_path
+                    content_by_path[sub.target_file_path] = (
+                        self._vcs.get_file_content_from_app_interface_master(
+                            file_path=sub.target_file_path
+                        )
                     )
                 except GitlabGetError as e:
                     if e.response_code == 404:
@@ -251,11 +251,11 @@ class MergeRequestManager:
                         has_error = True
                         break
                     raise e
-            content_by_path[
-                sub.target_file_path
-            ] = self._renderer.render_merge_request_content(
-                subscriber=sub,
-                current_content=content_by_path[sub.target_file_path],
+            content_by_path[sub.target_file_path] = (
+                self._renderer.render_merge_request_content(
+                    subscriber=sub,
+                    current_content=content_by_path[sub.target_file_path],
+                )
             )
             if has_error:
                 continue
reconcile/slack_usergroups.py CHANGED
@@ -114,7 +114,7 @@ class State(BaseModel):
     usergroup_id: Optional[str] = None

     def __bool__(self) -> bool:
-        return self.workspace != ""
+        return self.workspace != ""  # noqa: PLC1901


 SlackState = dict[str, dict[str, State]]
@@ -235,7 +235,7 @@ def get_usernames_from_pagerduty(
     pagerduty_map: PagerDutyMap,
 ) -> list[str]:
     """Return list of usernames from all pagerduties."""
-    global error_occurred
+    global error_occurred  # noqa: PLW0603
     all_output_usernames = []
     all_pagerduty_names = [get_pagerduty_name(u) for u in users]
     for pagerduty in pagerduties:
@@ -477,9 +477,9 @@ def get_desired_state_cluster_usergroups(
 ) -> SlackState:
     """Get the desired state of Slack usergroups."""
     desired_state: SlackState = {}
-    openshift_users_desired_state: list[
-        dict[str, str]
-    ] = openshift_users.fetch_desired_state(oc_map=None)
+    openshift_users_desired_state: list[dict[str, str]] = (
+        openshift_users.fetch_desired_state(oc_map=None)
+    )
     for cluster in clusters:
         if not integration_is_enabled(QONTRACT_INTEGRATION, cluster):
             logging.debug(
@@ -544,7 +544,7 @@ def _create_usergroups(
     dry_run: bool = True,
 ) -> None:
     """Create Slack usergroups."""
-    global error_occurred
+    global error_occurred  # noqa: PLW0603
     if current_ug_state:
         logging.debug(
             f"[{desired_ug_state.workspace}] Usergroup exists and will not be created {desired_ug_state.usergroup}"
@@ -572,7 +572,7 @@ def _update_usergroup_users_from_state(
     dry_run: bool = True,
 ) -> None:
     """Update the users in a Slack usergroup."""
-    global error_occurred
+    global error_occurred  # noqa: PLW0603
     if current_ug_state.users == desired_ug_state.users:
         logging.debug(
             f"No usergroup user changes detected for {desired_ug_state.usergroup}"
@@ -622,7 +622,7 @@ def _update_usergroup_from_state(
     dry_run: bool = True,
 ) -> None:
     """Update a Slack usergroup."""
-    global error_occurred
+    global error_occurred  # noqa: PLW0603
     if (
         current_ug_state.channels == desired_ug_state.channels
         and current_ug_state.description == desired_ug_state.description
@@ -735,7 +735,7 @@ def run(
     workspace_name: Optional[str] = None,
     usergroup_name: Optional[str] = None,
 ) -> None:
-    global error_occurred
+    global error_occurred  # noqa: PLW0603
     error_occurred = False

     gqlapi = gql.get_api()
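Every `global error_occurred` in this module now carries `# noqa: PLW0603` (the pylint/ruff global-statement rule), and `__bool__` keeps its explicit empty-string comparison under `# noqa: PLC1901` (compare-to-empty-string). In both cases the flagged construct is intentional, so the lint is suppressed inline instead of the code being rewritten. The module-level-flag pattern in miniature:

    error_occurred = False  # module-level flag shared by several helpers


    def mark_error() -> None:
        # Rebinding a module global requires `global`; PLW0603 flags it,
        # so the deliberate use is annotated rather than refactored away.
        global error_occurred  # noqa: PLW0603
        error_occurred = True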
reconcile/sql_query.py CHANGED
@@ -356,7 +356,7 @@ def make_mysql_command(sqlqueries_file: str) -> str:


 def make_output_cmd(output: str, recipient: str) -> str:
-    if output in ("filesystem", "encrypted"):
+    if output in {"filesystem", "encrypted"}:
         command = filesystem_redir_stdout()
     else:
         # stdout
@@ -681,10 +681,9 @@ def _get_query_status(
     # CronJob
     if query.get("delete"):
         return QueryStatus.PENDING_DELETION
-    else:
+    elif time.time() >= query_state + JOB_TTL:
         # Job
-        if time.time() >= query_state + JOB_TTL:
-            return QueryStatus.PENDING_DELETION
+        return QueryStatus.PENDING_DELETION
     return QueryStatus.ACTIVE

reconcile/terraform_aws_route53.py CHANGED
@@ -87,7 +87,7 @@ def build_desired_state(
     # get_a_record is used here to validate the record and reused later
     target_cluster_elb_value = dnsutils.get_a_records(target_cluster_elb)

-    if target_cluster_elb is None or target_cluster_elb == "":
+    if not target_cluster_elb:
         msg = (
             f"{zone_name}: field `_target_cluster` for record "
             f"{record_name} of type {record_type} points to a "
reconcile/terraform_cloudflare_users.py CHANGED
@@ -304,13 +304,13 @@ def get_cloudflare_users(
                     user.cloudflare_user
                 ].roles.update(set(cf_role.roles))
             else:
-                users[cf_role.account.name][
-                    user.cloudflare_user
-                ] = CloudflareUser(
-                    user.cloudflare_user,
-                    cf_role.account.name,
-                    user.org_username,
-                    set(cf_role.roles),
+                users[cf_role.account.name][user.cloudflare_user] = (
+                    CloudflareUser(
+                        user.cloudflare_user,
+                        cf_role.account.name,
+                        user.org_username,
+                        set(cf_role.roles),
+                    )
                 )

         else:
reconcile/terraform_repo.py CHANGED
@@ -166,7 +166,9 @@ class TerraformRepoIntegration(

         if self.params.output_file:
             try:
-                with open(self.params.output_file, "w") as output_file:
+                with open(
+                    self.params.output_file, "w", encoding="locale"
+                ) as output_file:
                     yaml.safe_dump(
                         data=output.dict(),
                         stream=output_file,
reconcile/terraform_vpc_peerings.py CHANGED
@@ -298,10 +298,10 @@ def build_desired_state_vpc_mesh_single_cluster(
         if provided_assume_role:
             account["assume_role"] = provided_assume_role
         elif ocm is not None:
-            account[
-                "assume_role"
-            ] = ocm.get_aws_infrastructure_access_terraform_assume_role(
-                cluster, account["uid"], account["terraformUsername"]
+            account["assume_role"] = (
+                ocm.get_aws_infrastructure_access_terraform_assume_role(
+                    cluster, account["uid"], account["terraformUsername"]
+                )
             )
         account["assume_region"] = requester["region"]
         account["assume_cidr"] = requester["cidr_block"]
@@ -418,12 +418,12 @@ def build_desired_state_vpc_single_cluster(
         if provided_assume_role:
             account["assume_role"] = provided_assume_role
         elif ocm is not None:
-            account[
-                "assume_role"
-            ] = ocm.get_aws_infrastructure_access_terraform_assume_role(
-                cluster,
-                peer_vpc["account"]["uid"],
-                peer_vpc["account"]["terraformUsername"],
+            account["assume_role"] = (
+                ocm.get_aws_infrastructure_access_terraform_assume_role(
+                    cluster,
+                    peer_vpc["account"]["uid"],
+                    peer_vpc["account"]["terraformUsername"],
+                )
             )
         else:
             raise KeyError(