qontract-reconcile 0.10.2.dev427__py3-none-any.whl → 0.10.2.dev456__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {qontract_reconcile-0.10.2.dev427.dist-info → qontract_reconcile-0.10.2.dev456.dist-info}/METADATA +1 -1
  2. {qontract_reconcile-0.10.2.dev427.dist-info → qontract_reconcile-0.10.2.dev456.dist-info}/RECORD +35 -34
  3. {qontract_reconcile-0.10.2.dev427.dist-info → qontract_reconcile-0.10.2.dev456.dist-info}/WHEEL +1 -1
  4. reconcile/aus/aus_sts_gate_handler.py +59 -0
  5. reconcile/aus/base.py +9 -5
  6. reconcile/aus/version_gate_approver.py +1 -16
  7. reconcile/aus/version_gates/sts_version_gate_handler.py +5 -125
  8. reconcile/aws_ecr_image_pull_secrets.py +1 -1
  9. reconcile/change_owners/change_owners.py +100 -34
  10. reconcile/cli.py +1 -1
  11. reconcile/external_resources/secrets_sync.py +2 -3
  12. reconcile/gql_definitions/common/aws_vpc_requests.py +3 -0
  13. reconcile/gql_definitions/common/clusters.py +2 -0
  14. reconcile/gql_definitions/external_resources/external_resources_namespaces.py +3 -1
  15. reconcile/gql_definitions/fragments/aws_vpc_request.py +5 -0
  16. reconcile/gql_definitions/introspection.json +48 -0
  17. reconcile/gql_definitions/rhcs/certs.py +1 -0
  18. reconcile/gql_definitions/rhcs/openshift_resource_rhcs_cert.py +1 -0
  19. reconcile/gql_definitions/terraform_resources/terraform_resources_namespaces.py +5 -1
  20. reconcile/gql_definitions/vpc_peerings_validator/vpc_peerings_validator.py +3 -0
  21. reconcile/gql_definitions/vpc_peerings_validator/vpc_peerings_validator_peered_cluster_fragment.py +1 -0
  22. reconcile/openshift_namespaces.py +3 -4
  23. reconcile/openshift_rhcs_certs.py +51 -12
  24. reconcile/templates/rosa-classic-cluster-creation.sh.j2 +1 -1
  25. reconcile/templates/rosa-hcp-cluster-creation.sh.j2 +1 -1
  26. reconcile/terraform_vpc_resources/integration.py +10 -7
  27. reconcile/typed_queries/saas_files.py +9 -4
  28. reconcile/utils/environ.py +5 -0
  29. reconcile/utils/gitlab_api.py +12 -0
  30. reconcile/utils/jjb_client.py +19 -3
  31. reconcile/utils/oc.py +8 -2
  32. reconcile/utils/rhcsv2_certs.py +87 -21
  33. reconcile/utils/terrascript_aws_client.py +140 -50
  34. reconcile/vpc_peerings_validator.py +13 -0
  35. {qontract_reconcile-0.10.2.dev427.dist-info → qontract_reconcile-0.10.2.dev456.dist-info}/entry_points.txt +0 -0
reconcile/typed_queries/saas_files.py CHANGED
@@ -42,6 +42,7 @@ from reconcile.gql_definitions.fragments.saas_target_namespace import (
     SaasTargetNamespace,
 )
 from reconcile.utils import gql
+from reconcile.utils.environ import used_for_security_is_enabled
 from reconcile.utils.exceptions import (
     AppInterfaceSettingsError,
     ParameterError,
@@ -78,10 +79,14 @@ class SaasResourceTemplateTarget(
         self, parent_saas_file_name: str, parent_resource_template_name: str
     ) -> str:
         """Returns a unique identifier for a target."""
-        return hashlib.blake2s(
-            f"{parent_saas_file_name}:{parent_resource_template_name}:{self.name or 'default'}:{self.namespace.cluster.name}:{self.namespace.name}".encode(),
-            digest_size=20,
-        ).hexdigest()
+        data = f"{parent_saas_file_name}:{parent_resource_template_name}:{self.name or 'default'}:{self.namespace.cluster.name}:{self.namespace.name}".encode()
+        if used_for_security_is_enabled():
+            # When USED_FOR_SECURITY is enabled, use blake2s without digest_size and truncate to 20 bytes
+            # This is needed for FIPS compliance where digest_size parameter is not supported
+            return hashlib.blake2s(data).digest()[:20].hex()
+        else:
+            # Default behavior: use blake2s with digest_size=20
+            return hashlib.blake2s(data, digest_size=20).hexdigest()


 class SaasResourceTemplate(ConfiguredBaseModel, validate_by_alias=True):
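Note: the two branches above are not drop-in equivalents. BLAKE2 mixes the requested digest length into its parameter block, so truncating a full 32-byte blake2s digest to 20 bytes yields a different value than a native digest_size=20 digest; toggling USED_FOR_SECURITY therefore changes the generated target UIDs. A minimal stdlib-only sketch (the input key is hypothetical):

    import hashlib

    data = b"saas-file:resource-template:default:cluster:namespace"

    fips_path = hashlib.blake2s(data).digest()[:20].hex()             # truncated 32-byte digest
    default_path = hashlib.blake2s(data, digest_size=20).hexdigest()  # native 20-byte digest

    assert len(fips_path) == len(default_path) == 40
    assert fips_path != default_path  # same length, different values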
reconcile/utils/environ.py CHANGED
@@ -4,6 +4,11 @@ from functools import wraps
 from typing import Any


+def used_for_security_is_enabled() -> bool:
+    used_for_security_env = os.getenv("USED_FOR_SECURITY", "false")
+    return used_for_security_env.lower() == "true"
+
+
 def environ(variables: Iterable[str] | None = None) -> Callable:
     """Check that environment variables are set before execution."""
     if variables is None:
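A quick usage sketch of the new helper; the value is compared case-insensitively and an unset variable defaults to "false":

    import os

    os.environ["USED_FOR_SECURITY"] = "True"
    assert used_for_security_is_enabled()

    os.environ.pop("USED_FOR_SECURITY")
    assert not used_for_security_is_enabled()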
reconcile/utils/gitlab_api.py CHANGED
@@ -444,6 +444,8 @@ class GitLabApi:
     def get_merge_request_comments(
         merge_request: ProjectMergeRequest,
         include_description: bool = False,
+        include_approvals: bool = False,
+        approval_body: str = "",
     ) -> list[Comment]:
         comments = []
         if include_description:
@@ -455,6 +457,16 @@ class GitLabApi:
                     created_at=merge_request.created_at,
                 )
             )
+        if include_approvals:
+            comments.extend(
+                Comment(
+                    id=approval["user"]["id"],
+                    username=approval["user"]["username"],
+                    body=approval_body,
+                    created_at=approval["approved_at"],
+                )
+                for approval in merge_request.approvals.get().approved_by
+            )
         comments.extend(
             Comment(
                 id=note.id,
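A hedged usage sketch of the extended helper, assuming `mr` is an authenticated python-gitlab ProjectMergeRequest; each approver is surfaced as a synthetic Comment whose body is the caller-supplied approval_body:

    comments = GitLabApi.get_merge_request_comments(
        mr,
        include_description=True,
        include_approvals=True,
        approval_body="/approved",  # hypothetical marker text
    )
    approver_names = [c.username for c in comments]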
reconcile/utils/jjb_client.py CHANGED
@@ -33,6 +33,10 @@ from reconcile.utils.vcs import GITHUB_BASE_URL
 JJB_INI = "[jenkins]\nurl = https://JENKINS_URL"


+class MissingJobUrlError(Exception):
+    pass
+
+
 class JJB:
     """Wrapper around Jenkins Jobs"""

@@ -335,7 +339,7 @@ class JJB:
             job_name = job["name"]
             try:
                 repos.add(self.get_repo_url(job))
-            except KeyError:
+            except MissingJobUrlError:
                 logging.debug(f"missing github url: {job_name}")
         return repos

@@ -355,7 +359,19 @@ class JJB:

     @staticmethod
     def get_repo_url(job: Mapping[str, Any]) -> str:
-        repo_url_raw = job["properties"][0]["github"]["url"]
+        repo_url_raw = job.get("properties", [{}])[0].get("github", {}).get("url")
+
+        # we may be in a Github Branch Source type of job
+        if not repo_url_raw:
+            gh_org = job.get("scm", [{}])[0].get("github", {}).get("repo-owner")
+            gh_repo = job.get("scm", [{}])[0].get("github", {}).get("repo")
+            if gh_org and gh_repo:
+                repo_url_raw = f"https://github.com/{gh_org}/{gh_repo}/"
+            else:
+                raise MissingJobUrlError(
+                    f"Cannot find job url for {job['display-name']}"
+                )
+
         return repo_url_raw.strip("/").replace(".git", "")

     @staticmethod
@@ -404,7 +420,7 @@ class JJB:
             try:
                 if self.get_repo_url(job).lower() == repo_url.rstrip("/").lower():
                     return job
-            except KeyError:
+            except MissingJobUrlError:
                 # something wrong here. ignore this job
                 pass
         raise ValueError(f"job with {job_type=} and {repo_url=} not found")
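Two hypothetical job definitions showing the shapes get_repo_url now handles — the original properties-based layout and a GitHub Branch Source job that only carries scm metadata:

    freestyle_job = {
        "display-name": "app-build",
        "properties": [{"github": {"url": "https://github.com/org/app.git/"}}],
    }
    branch_source_job = {
        "display-name": "app-multibranch",
        "scm": [{"github": {"repo-owner": "org", "repo": "app"}}],
    }

    assert JJB.get_repo_url(freestyle_job) == "https://github.com/org/app"
    assert JJB.get_repo_url(branch_source_job) == "https://github.com/org/app"
    # a job with neither shape now raises MissingJobUrlError instead of KeyError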
reconcile/utils/oc.py CHANGED
@@ -651,9 +651,15 @@ class OCCli:
                 raise e
         return True

+    def _use_oc_project(self, namespace: str) -> bool:
+        # Note that openshift-* namespaces cannot be created via new-project
+        return self.is_kind_supported(PROJECT_KIND) and not namespace.startswith(
+            "openshift-"
+        )
+
     @OCDecorators.process_reconcile_time
     def new_project(self, namespace: str) -> OCProcessReconcileTimeDecoratorMsg:
-        if self.is_kind_supported(PROJECT_KIND):
+        if self._use_oc_project(namespace=namespace):
             cmd = ["new-project", namespace]
         else:
             cmd = ["create", "namespace", namespace]
@@ -669,7 +675,7 @@ class OCCli:

     @OCDecorators.process_reconcile_time
     def delete_project(self, namespace: str) -> OCProcessReconcileTimeDecoratorMsg:
-        if self.is_kind_supported(PROJECT_KIND):
+        if self._use_oc_project(namespace=namespace):
             cmd = ["delete", "project", namespace]
         else:
             cmd = ["delete", "namespace", namespace]
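A standalone restatement of the new predicate (a hypothetical free function, for illustration only): `oc new-project` cannot create openshift-* namespaces, so those fall back to `oc create namespace` even when the Project kind is supported:

    def use_oc_project(project_kind_supported: bool, namespace: str) -> bool:
        # mirrors OCCli._use_oc_project
        return project_kind_supported and not namespace.startswith("openshift-")

    assert use_oc_project(True, "my-app")                    # -> oc new-project
    assert not use_oc_project(True, "openshift-monitoring")  # -> oc create namespace
    assert not use_oc_project(False, "my-app")               # Project kind unsupported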
reconcile/utils/rhcsv2_certs.py CHANGED
@@ -1,21 +1,35 @@
+import base64
 import re
 from datetime import UTC
+from enum import StrEnum

 import requests
 from cryptography import x509
 from cryptography.hazmat.primitives import hashes, serialization
 from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.primitives.serialization import pkcs12
 from cryptography.x509.oid import NameOID
 from pydantic import BaseModel, Field


-class RhcsV2Cert(BaseModel, validate_by_name=True, validate_by_alias=True):
+class CertificateFormat(StrEnum):
+    PEM = "PEM"
+    PKCS12 = "PKCS12"
+
+
+class RhcsV2CertPem(BaseModel, validate_by_name=True, validate_by_alias=True):
     certificate: str = Field(alias="tls.crt")
     private_key: str = Field(alias="tls.key")
     ca_cert: str = Field(alias="ca.crt")
     expiration_timestamp: int


+class RhcsV2CertPkcs12(BaseModel, validate_by_name=True, validate_by_alias=True):
+    pkcs12_keystore: str = Field(alias="keystore.pkcs12.b64")
+    pkcs12_truststore: str = Field(alias="truststore.pkcs12.b64")
+    expiration_timestamp: int
+
+
 def extract_cert(text: str) -> re.Match:
     # The CA webform returns an HTML page with inline JS that builds an array of “outputList”
     # objects. Each object looks roughly like:
@@ -67,7 +81,66 @@ def get_cert_expiry_timestamp(js_escaped_pem: str) -> int:
     return int(dt_expiry.timestamp())


-def generate_cert(issuer_url: str, uid: str, pwd: str, ca_url: str) -> RhcsV2Cert:
+def _format_pem(
+    private_key: rsa.RSAPrivateKey,
+    cert_pem: str,
+    ca_pem: str,
+    cert_expiry_timestamp: int,
+) -> RhcsV2CertPem:
+    """Generate RhcsV2CertPem with PEM components."""
+    private_key_pem = private_key.private_bytes(
+        encoding=serialization.Encoding.PEM,
+        format=serialization.PrivateFormat.TraditionalOpenSSL,
+        encryption_algorithm=serialization.NoEncryption(),
+    ).decode()
+    return RhcsV2CertPem(
+        private_key=private_key_pem,
+        certificate=cert_pem.encode().decode("unicode_escape").replace("\\/", "/"),
+        ca_cert=ca_pem,
+        expiration_timestamp=cert_expiry_timestamp,
+    )
+
+
+def _format_pkcs12(
+    private_key: rsa.RSAPrivateKey,
+    cert_pem: str,
+    ca_pem: str,
+    uid: str,
+    pwd: str,
+    cert_expiry_timestamp: int,
+) -> RhcsV2CertPkcs12:
+    """Generate PKCS#12 keystore and truststore components, returns base64-encoded strings."""
+    clean_cert_pem = cert_pem.encode().decode("unicode_escape").replace("\\/", "/")
+    cert_obj = x509.load_pem_x509_certificate(clean_cert_pem.encode())
+    ca_obj = x509.load_pem_x509_certificate(ca_pem.encode())
+    keystore_p12 = pkcs12.serialize_key_and_certificates(
+        name=uid.encode("utf-8"),
+        key=private_key,
+        cert=cert_obj,
+        cas=[ca_obj],
+        encryption_algorithm=serialization.BestAvailableEncryption(pwd.encode("utf-8")),
+    )
+    truststore_p12 = pkcs12.serialize_key_and_certificates(
+        name=b"ca-trust",
+        key=None,
+        cert=None,
+        cas=[ca_obj],
+        encryption_algorithm=serialization.NoEncryption(),
+    )
+    return RhcsV2CertPkcs12(
+        pkcs12_keystore=base64.b64encode(keystore_p12).decode("utf-8"),
+        pkcs12_truststore=base64.b64encode(truststore_p12).decode("utf-8"),
+        expiration_timestamp=cert_expiry_timestamp,
+    )
+
+
+def generate_cert(
+    issuer_url: str,
+    uid: str,
+    pwd: str,
+    ca_url: str,
+    cert_format: CertificateFormat = CertificateFormat.PEM,
+) -> RhcsV2CertPem | RhcsV2CertPkcs12:
     private_key = rsa.generate_private_key(65537, 4096)
     csr = (
         x509.CertificateSigningRequestBuilder()
@@ -78,6 +151,7 @@ def generate_cert(issuer_url: str, uid: str, pwd: str, ca_url: str) -> RhcsV2Cert:
         )
         .sign(private_key, hashes.SHA256())
     )
+
     data = {
         "uid": uid,
         "pwd": pwd,
@@ -87,27 +161,19 @@ def generate_cert(issuer_url: str, uid: str, pwd: str, ca_url: str) -> RhcsV2Cert:
         "renewal": "false",
         "xmlOutput": "false",
     }
-    response = requests.post(issuer_url, data=data)
+    response = requests.post(issuer_url, data=data, timeout=30)
     response.raise_for_status()
+    cert_pem = extract_cert(response.text).group(1)
+    cert_expiry_timestamp = get_cert_expiry_timestamp(cert_pem)

-    cert_pem = extract_cert(response.text)
-    cert_expiry_timestamp = get_cert_expiry_timestamp(cert_pem.group(1))
-    private_key_pem = private_key.private_bytes(
-        encoding=serialization.Encoding.PEM,
-        format=serialization.PrivateFormat.TraditionalOpenSSL,
-        encryption_algorithm=serialization.NoEncryption(),
-    ).decode()
-
-    response = requests.get(ca_url)
+    response = requests.get(ca_url, timeout=30)
     response.raise_for_status()
     ca_pem = response.text

-    return RhcsV2Cert(
-        private_key=private_key_pem,
-        certificate=cert_pem.group(1)
-        .encode()
-        .decode("unicode_escape")
-        .replace("\\/", "/"),
-        ca_cert=ca_pem,
-        expiration_timestamp=cert_expiry_timestamp,
-    )
+    match cert_format:
+        case CertificateFormat.PKCS12:
+            return _format_pkcs12(
+                private_key, cert_pem, ca_pem, uid, pwd, cert_expiry_timestamp
+            )
+        case CertificateFormat.PEM:
+            return _format_pem(private_key, cert_pem, ca_pem, cert_expiry_timestamp)
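A hedged call sketch; the endpoint URLs are hypothetical RHCS CA endpoints, and the returned model depends on cert_format:

    cert = generate_cert(
        issuer_url="https://ca.example.com/ca/ee/ca/profileSubmit",  # hypothetical
        uid="service-account",
        pwd="s3cret",
        ca_url="https://ca.example.com/ca/ee/ca/getCAChain",  # hypothetical
        cert_format=CertificateFormat.PKCS12,
    )
    # cert is a RhcsV2CertPkcs12; both stores are base64 strings, so they can be
    # written into a Kubernetes Secret and decoded back to binary PKCS#12 files.
    assert isinstance(cert, RhcsV2CertPkcs12)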
reconcile/utils/terrascript_aws_client.py CHANGED
@@ -272,6 +272,7 @@ VARIABLE_KEYS = [
     "lifecycle",
     "max_session_duration",
     "secret_format",
+    "policy",
 ]

 EMAIL_REGEX = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$")
@@ -373,6 +374,10 @@ class aws_s3_bucket_logging(Resource):
     pass


+class aws_kinesis_resource_policy(Resource):
+    pass
+
+
 class aws_cloudfront_log_delivery_canonical_user_id(Data):
     pass

@@ -2249,14 +2254,22 @@ class TerrascriptClient:

         return lifecycle_rules

-    def populate_tf_resource_s3(self, spec: ExternalResourceSpec) -> aws_s3_bucket:
+    def _populate_tf_resource_s3_bucket(
+        self,
+        spec: ExternalResourceSpec,
+        common_values: dict[str, Any],
+    ) -> tuple[aws_s3_bucket, list[TFResource]]:
+        """Create S3 bucket with configuration and notifications.
+
+        Creates aws_s3_bucket with versioning, encryption, lifecycle rules,
+        CORS, logging, and replication. Also creates aws_s3_bucket_notification
+        for SQS/SNS event notifications if configured.
+        """
         account = spec.provisioner_name
         identifier = spec.identifier
-        common_values = self.init_values(spec)
         output_prefix = spec.output_prefix

         tf_resources: list[TFResource] = []
-        self.init_common_outputs(tf_resources, spec)

         # s3 bucket
         # Terraform resource reference:
@@ -2433,8 +2446,7 @@ class TerrascriptClient:
         output_name = output_prefix + "__endpoint"
         tf_resources.append(Output(output_name, value=endpoint))

-        sqs_identifier = common_values.get("sqs_identifier", None)
-        if sqs_identifier is not None:
+        if sqs_identifier := common_values.get("sqs_identifier"):
             sqs_values = {"name": sqs_identifier}
             sqs_provider = values.get("provider")
             if sqs_provider:
@@ -2453,11 +2465,9 @@
                     }
                 ],
             }
-            filter_prefix = common_values.get("filter_prefix", None)
-            if filter_prefix is not None:
+            if filter_prefix := common_values.get("filter_prefix"):
                 notification_values["queue"][0]["filter_prefix"] = filter_prefix
-            filter_suffix = common_values.get("filter_suffix", None)
-            if filter_suffix is not None:
+            if filter_suffix := common_values.get("filter_suffix"):
                 notification_values["queue"][0]["filter_suffix"] = filter_suffix

             notification_tf_resource = aws_s3_bucket_notification(
@@ -2537,21 +2547,48 @@ class TerrascriptClient:
             )
             tf_resources.append(notification_tf_resource)

-        bucket_policy = common_values.get("bucket_policy")
-        if bucket_policy:
-            values = {
-                "bucket": identifier,
-                "policy": bucket_policy,
-                "depends_on": self.get_dependencies([bucket_tf_resource]),
-            }
-            if self._multiregion_account(account):
-                values["provider"] = "aws." + region
-            bucket_policy_tf_resource = aws_s3_bucket_policy(identifier, **values)
-            tf_resources.append(bucket_policy_tf_resource)
+        return bucket_tf_resource, tf_resources

-        # iam resources
-        # Terraform resource reference:
-        # https://www.terraform.io/docs/providers/aws/r/iam_access_key.html
+    def _populate_tf_resource_s3_bucket_policy(
+        self,
+        spec: ExternalResourceSpec,
+        bucket_tf_resource: aws_s3_bucket,
+        policy: str,
+        common_values: dict[str, Any],
+    ) -> list[TFResource]:
+        """Create S3 bucket policy resource.
+
+        Creates aws_s3_bucket_policy with the provided policy document.
+        """
+        account = spec.provisioner_name
+        identifier = spec.identifier
+        region = common_values.get("region") or self.default_regions.get(account)
+        assert region  # make mypy happy
+
+        values: dict[str, Any] = {
+            "bucket": identifier,
+            "policy": policy,
+            "depends_on": self.get_dependencies([bucket_tf_resource]),
+        }
+        if self._multiregion_account(account):
+            values["provider"] = "aws." + region
+        bucket_policy_tf_resource = aws_s3_bucket_policy(identifier, **values)
+        return [bucket_policy_tf_resource]
+
+    def _populate_tf_resource_s3_iam(
+        self,
+        spec: ExternalResourceSpec,
+        bucket_tf_resource: aws_s3_bucket,
+        common_values: dict[str, Any],
+    ) -> list[TFResource]:
+        """Create IAM resources for S3 bucket access.
+
+        Creates aws_iam_user, aws_iam_access_key, aws_iam_policy,
+        and aws_iam_user_policy_attachment for bucket access.
+        """
+        identifier = spec.identifier
+        output_prefix = spec.output_prefix
+        tf_resources: list[TFResource] = []

         # iam user for bucket
         values = {
@@ -2609,6 +2646,32 @@ class TerrascriptClient:
         )
         tf_resources.append(tf_user_policy_attachment)

+        return tf_resources
+
+    def populate_tf_resource_s3(self, spec: ExternalResourceSpec) -> aws_s3_bucket:
+        account = spec.provisioner_name
+        common_values = self.init_values(spec)
+
+        tf_resources: list[TFResource] = []
+        self.init_common_outputs(tf_resources, spec)
+
+        bucket_tf_resource, bucket_resources = self._populate_tf_resource_s3_bucket(
+            spec, common_values
+        )
+        tf_resources.extend(bucket_resources)
+
+        bucket_policy = common_values.get("bucket_policy")
+        if bucket_policy:
+            tf_resources.extend(
+                self._populate_tf_resource_s3_bucket_policy(
+                    spec, bucket_tf_resource, bucket_policy, common_values
+                )
+            )
+
+        tf_resources.extend(
+            self._populate_tf_resource_s3_iam(spec, bucket_tf_resource, common_values)
+        )
+
         self.add_resources(account, tf_resources)

         return bucket_tf_resource
@@ -3383,42 +3446,53 @@ class TerrascriptClient:
         common_values = self.init_values(spec)
         output_prefix = spec.output_prefix

-        bucket_tf_resource = self.populate_tf_resource_s3(spec)
-
         tf_resources: list[TFResource] = []
+        self.init_common_outputs(tf_resources, spec)
+
+        bucket_tf_resource, bucket_resources = self._populate_tf_resource_s3_bucket(
+            spec, common_values
+        )
+        tf_resources.extend(bucket_resources)
+
+        tf_resources.extend(
+            self._populate_tf_resource_s3_iam(spec, bucket_tf_resource, common_values)
+        )

         # cloudfront origin access identity
         values = {"comment": f"{identifier}-cf-identity"}
         cf_oai_tf_resource = aws_cloudfront_origin_access_identity(identifier, **values)
         tf_resources.append(cf_oai_tf_resource)

-        # bucket policy for cloudfront
-        values_policy: dict[str, Any] = {"bucket": identifier}
-        policy = {
-            "Version": "2012-10-17",
-            "Statement": [
-                {
-                    "Sid": "Grant access to CloudFront Origin Identity",
-                    "Effect": "Allow",
-                    "Principal": {"AWS": "${" + cf_oai_tf_resource.iam_arn + "}"},
-                    "Action": "s3:GetObject",
-                    "Resource": [
-                        f"arn:aws:s3:::{identifier}/{enable_dir}/*"
-                        for enable_dir in common_values.get(
-                            "get_object_enable_dirs", []
-                        )
-                    ],
-                }
+        # bucket policy for cloudfront - merge custom policy with CloudFront access statement
+        cf_statement = {
+            "Sid": "Grant access to CloudFront Origin Identity",
+            "Effect": "Allow",
+            "Principal": {"AWS": "${" + cf_oai_tf_resource.iam_arn + "}"},
+            "Action": "s3:GetObject",
+            "Resource": [
+                f"arn:aws:s3:::{identifier}/{enable_dir}/*"
+                for enable_dir in common_values.get("get_object_enable_dirs", [])
             ],
         }
-        values_policy["policy"] = json_dumps(policy)
-        values_policy["depends_on"] = self.get_dependencies([bucket_tf_resource])
-        region = common_values.get("region") or self.default_regions.get(account)
-        assert region  # make mypy happy
-        if self._multiregion_account(account):
-            values_policy["provider"] = "aws." + region
-        bucket_policy_tf_resource = aws_s3_bucket_policy(identifier, **values_policy)
-        tf_resources.append(bucket_policy_tf_resource)
+
+        custom_bucket_policy = common_values.get("bucket_policy")
+        if custom_bucket_policy:
+            # if the user specifies a custom bucket policy then we merge their statements with the cloudfront origin identity policy
+            if isinstance(custom_bucket_policy, str):
+                custom_bucket_policy = json.loads(custom_bucket_policy)
+            custom_bucket_policy.setdefault("Statement", []).append(cf_statement)
+            policy = custom_bucket_policy
+        else:
+            policy = {
+                "Version": "2012-10-17",
+                "Statement": [cf_statement],
+            }
+
+        tf_resources.extend(
+            self._populate_tf_resource_s3_bucket_policy(
+                spec, bucket_tf_resource, json_dumps(policy), common_values
+            )
+        )

         distribution_config = common_values.get("distribution_config", {})
         # aws_s3_bucket_acl
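A minimal sketch of the merge semantics above, using plain dicts (no Terraform involved): the CloudFront statement is appended to whatever Statement list the user's custom policy already carries:

    import json

    custom = json.loads('{"Version": "2012-10-17", "Statement": [{"Sid": "Custom"}]}')
    cf_statement = {"Sid": "Grant access to CloudFront Origin Identity"}

    custom.setdefault("Statement", []).append(cf_statement)
    assert [s["Sid"] for s in custom["Statement"]] == [
        "Custom",
        "Grant access to CloudFront Origin Identity",
    ]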
@@ -4019,6 +4093,22 @@ class TerrascriptClient:
         kinesis_tf_resource = aws_kinesis_stream(identifier, **kinesis_values)
         tf_resources.append(kinesis_tf_resource)

+        # kinesis resource policy (optional)
+        # Terraform resource reference:
+        # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kinesis_resource_policy
+        if policy := common_values.get("policy"):
+            policy_identifier = f"{identifier}-policy"
+            policy_values: dict[str, Any] = {
+                "resource_arn": "${" + kinesis_tf_resource.arn + "}",
+                "policy": policy,
+            }
+            if provider:
+                policy_values["provider"] = provider
+            kinesis_policy_tf_resource = aws_kinesis_resource_policy(
+                policy_identifier, **policy_values
+            )
+            tf_resources.append(kinesis_policy_tf_resource)
+
         es_identifier = common_values.get("es_identifier", None)
         if es_identifier:
             es_resource = self._find_resource_spec(
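For context, a hypothetical "policy" value as it might flow through common_values into aws_kinesis_resource_policy — a cross-account read grant (account ID, region, and stream name are placeholders):

    policy = """{
        "Version": "2012-10-17",
        "Statement": [{
            "Sid": "AllowCrossAccountRead",
            "Effect": "Allow",
            "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
            "Action": ["kinesis:DescribeStream", "kinesis:GetRecords"],
            "Resource": "arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"
        }]
    }"""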
reconcile/vpc_peerings_validator.py CHANGED
@@ -159,6 +159,19 @@ def validate_no_public_to_public_peerings(
         if peer.internal or (peer.spec and peer.spec.private):
             continue

+        # If both sides are allowed to override this check, then we can
+        # allow the peering.
+        if (
+            cluster.allowed_to_bypass_public_peering_restriction
+            and peer.allowed_to_bypass_public_peering_restriction
+        ):
+            logging.debug(
+                f"{cluster.name} and {peer.name} are both allowed to skip \
+                the 'no peering with public clusters' check, so their \
+                peering is allowed"
+            )
+            continue
+
         valid = False
         pair = {cluster.name, peer.name}
         if pair in found_pairs: