pulumi-gcp 9.0.0a1__py3-none-any.whl → 9.0.0a1758219982__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (209)
  1. pulumi_gcp/__init__.py +48 -0
  2. pulumi_gcp/activedirectory/peering.py +16 -0
  3. pulumi_gcp/apigateway/api_config_iam_binding.py +4 -4
  4. pulumi_gcp/apigateway/api_config_iam_member.py +4 -4
  5. pulumi_gcp/apigateway/api_config_iam_policy.py +4 -4
  6. pulumi_gcp/apigateway/api_iam_binding.py +4 -4
  7. pulumi_gcp/apigateway/api_iam_member.py +4 -4
  8. pulumi_gcp/apigateway/api_iam_policy.py +4 -4
  9. pulumi_gcp/apigateway/gateway_iam_binding.py +4 -4
  10. pulumi_gcp/apigateway/gateway_iam_member.py +4 -4
  11. pulumi_gcp/apigateway/gateway_iam_policy.py +4 -4
  12. pulumi_gcp/artifactregistry/__init__.py +1 -0
  13. pulumi_gcp/artifactregistry/get_python_package.py +237 -0
  14. pulumi_gcp/artifactregistry/get_repository.py +12 -1
  15. pulumi_gcp/artifactregistry/repository.py +28 -0
  16. pulumi_gcp/backupdisasterrecovery/__init__.py +1 -0
  17. pulumi_gcp/backupdisasterrecovery/backup_plan.py +4 -4
  18. pulumi_gcp/backupdisasterrecovery/get_backup.py +12 -1
  19. pulumi_gcp/backupdisasterrecovery/get_backup_plan_association.py +4 -0
  20. pulumi_gcp/backupdisasterrecovery/get_data_source_references.py +135 -0
  21. pulumi_gcp/backupdisasterrecovery/get_management_server.py +4 -0
  22. pulumi_gcp/backupdisasterrecovery/outputs.py +103 -0
  23. pulumi_gcp/bigquery/app_profile.py +14 -49
  24. pulumi_gcp/bigquery/dataset_access.py +4 -8
  25. pulumi_gcp/bigquery/table.py +7 -21
  26. pulumi_gcp/bigqueryanalyticshub/data_exchange_subscription.py +20 -0
  27. pulumi_gcp/bigtable/app_profile.py +14 -49
  28. pulumi_gcp/bigtable/table.py +42 -42
  29. pulumi_gcp/certificatemanager/certificate_map_entry.py +7 -7
  30. pulumi_gcp/chronicle/_inputs.py +6 -5
  31. pulumi_gcp/chronicle/outputs.py +4 -3
  32. pulumi_gcp/chronicle/reference_list.py +53 -5
  33. pulumi_gcp/cloudasset/get_resources_search_all.py +8 -0
  34. pulumi_gcp/cloudbuild/_inputs.py +20 -0
  35. pulumi_gcp/cloudbuild/outputs.py +14 -0
  36. pulumi_gcp/cloudbuild/trigger.py +4 -2
  37. pulumi_gcp/cloudbuild/worker_pool.py +28 -21
  38. pulumi_gcp/cloudfunctions/function.py +4 -8
  39. pulumi_gcp/cloudquota/s_quota_adjuster_settings.py +16 -0
  40. pulumi_gcp/cloudrunv2/_inputs.py +57 -0
  41. pulumi_gcp/cloudrunv2/get_service.py +12 -1
  42. pulumi_gcp/cloudrunv2/outputs.py +81 -0
  43. pulumi_gcp/cloudrunv2/service.py +54 -0
  44. pulumi_gcp/cloudrunv2/worker_pool.py +2 -2
  45. pulumi_gcp/composer/environment.py +28 -49
  46. pulumi_gcp/compute/_inputs.py +804 -24
  47. pulumi_gcp/compute/backend_bucket.py +14 -0
  48. pulumi_gcp/compute/backend_bucket_iam_binding.py +236 -0
  49. pulumi_gcp/compute/backend_bucket_iam_member.py +236 -0
  50. pulumi_gcp/compute/backend_bucket_iam_policy.py +236 -0
  51. pulumi_gcp/compute/backend_service_iam_binding.py +464 -0
  52. pulumi_gcp/compute/backend_service_iam_member.py +464 -0
  53. pulumi_gcp/compute/backend_service_iam_policy.py +464 -0
  54. pulumi_gcp/compute/cross_site_network.py +16 -0
  55. pulumi_gcp/compute/future_reservation.py +32 -0
  56. pulumi_gcp/compute/get_region_backend_service.py +12 -1
  57. pulumi_gcp/compute/get_resource_policy.py +2 -20
  58. pulumi_gcp/compute/instance_from_machine_image.py +75 -110
  59. pulumi_gcp/compute/instance_from_template.py +75 -110
  60. pulumi_gcp/compute/instance_group_manager.py +7 -14
  61. pulumi_gcp/compute/instance_template.py +7 -14
  62. pulumi_gcp/compute/interconnect.py +4 -8
  63. pulumi_gcp/compute/machine_image_iam_binding.py +8 -4
  64. pulumi_gcp/compute/machine_image_iam_member.py +8 -4
  65. pulumi_gcp/compute/machine_image_iam_policy.py +8 -4
  66. pulumi_gcp/compute/network_edge_security_service.py +16 -0
  67. pulumi_gcp/compute/network_firewall_policy_packet_mirroring_rule.py +12 -0
  68. pulumi_gcp/compute/outputs.py +671 -16
  69. pulumi_gcp/compute/preview_feature.py +16 -0
  70. pulumi_gcp/compute/public_delegated_prefix.py +42 -0
  71. pulumi_gcp/compute/region_backend_service.py +54 -0
  72. pulumi_gcp/compute/region_backend_service_iam_binding.py +488 -0
  73. pulumi_gcp/compute/region_backend_service_iam_member.py +488 -0
  74. pulumi_gcp/compute/region_backend_service_iam_policy.py +488 -0
  75. pulumi_gcp/compute/region_instance_group_manager.py +7 -14
  76. pulumi_gcp/compute/region_network_endpoint_group.py +1 -3
  77. pulumi_gcp/compute/region_resize_request.py +24 -0
  78. pulumi_gcp/compute/region_url_map.py +75 -0
  79. pulumi_gcp/compute/wire_group.py +16 -0
  80. pulumi_gcp/container/_inputs.py +446 -18
  81. pulumi_gcp/container/aws_cluster.py +28 -42
  82. pulumi_gcp/container/aws_node_pool.py +28 -42
  83. pulumi_gcp/container/azure_cluster.py +35 -70
  84. pulumi_gcp/container/azure_node_pool.py +28 -42
  85. pulumi_gcp/container/cluster.py +16 -7
  86. pulumi_gcp/container/outputs.py +423 -15
  87. pulumi_gcp/dataflow/flex_template_job.py +10 -0
  88. pulumi_gcp/dataform/repository.py +16 -0
  89. pulumi_gcp/dataform/repository_release_config.py +16 -0
  90. pulumi_gcp/dataform/repository_workflow_config.py +16 -0
  91. pulumi_gcp/dataplex/asset.py +28 -21
  92. pulumi_gcp/dataplex/zone.py +28 -21
  93. pulumi_gcp/dataproc/cluster.py +21 -14
  94. pulumi_gcp/dataproc/workflow_template.py +42 -105
  95. pulumi_gcp/discoveryengine/__init__.py +1 -0
  96. pulumi_gcp/discoveryengine/_inputs.py +94 -0
  97. pulumi_gcp/discoveryengine/acl_config.py +393 -0
  98. pulumi_gcp/discoveryengine/outputs.py +92 -0
  99. pulumi_gcp/firebase/android_app.py +16 -0
  100. pulumi_gcp/firebase/apple_app.py +16 -0
  101. pulumi_gcp/firebase/database_instance.py +16 -0
  102. pulumi_gcp/firebase/extensions_instance.py +12 -0
  103. pulumi_gcp/firebase/get_android_app.py +4 -2
  104. pulumi_gcp/firebase/get_apple_app.py +4 -2
  105. pulumi_gcp/firebase/get_apple_app_config.py +16 -2
  106. pulumi_gcp/firebase/get_hosting_channel.py +4 -2
  107. pulumi_gcp/firebase/hosting_channel.py +20 -0
  108. pulumi_gcp/firebase/hosting_custom_domain.py +20 -0
  109. pulumi_gcp/firebase/hosting_release.py +16 -0
  110. pulumi_gcp/firebase/hosting_site.py +16 -0
  111. pulumi_gcp/firebase/hosting_version.py +16 -0
  112. pulumi_gcp/firebase/storage_bucket.py +18 -0
  113. pulumi_gcp/firestore/index.py +118 -3
  114. pulumi_gcp/folder/service_identity.py +26 -0
  115. pulumi_gcp/gkehub/membership_rbac_role_binding.py +16 -0
  116. pulumi_gcp/healthcare/_inputs.py +205 -0
  117. pulumi_gcp/healthcare/fhir_store.py +128 -0
  118. pulumi_gcp/healthcare/outputs.py +163 -0
  119. pulumi_gcp/iam/workload_identity_pool_iam_binding.py +464 -0
  120. pulumi_gcp/iam/workload_identity_pool_iam_member.py +464 -0
  121. pulumi_gcp/iam/workload_identity_pool_iam_policy.py +464 -0
  122. pulumi_gcp/iam/workload_identity_pool_managed_identity.py +18 -0
  123. pulumi_gcp/iam/workload_identity_pool_namespace.py +20 -0
  124. pulumi_gcp/iap/__init__.py +4 -0
  125. pulumi_gcp/iap/_inputs.py +130 -0
  126. pulumi_gcp/iap/get_web_forwarding_rule_service_iam_policy.py +159 -0
  127. pulumi_gcp/iap/outputs.py +76 -0
  128. pulumi_gcp/iap/web_forwarding_rule_service_iam_binding.py +1002 -0
  129. pulumi_gcp/iap/web_forwarding_rule_service_iam_member.py +1002 -0
  130. pulumi_gcp/iap/web_forwarding_rule_service_iam_policy.py +821 -0
  131. pulumi_gcp/kms/get_kms_secret_asymmetric.py +22 -0
  132. pulumi_gcp/logging/billing_account_bucket_config.py +7 -21
  133. pulumi_gcp/logging/folder_bucket_config.py +7 -21
  134. pulumi_gcp/logging/organization_bucket_config.py +7 -21
  135. pulumi_gcp/logging/organization_sink.py +7 -7
  136. pulumi_gcp/managedkafka/connect_cluster.py +4 -0
  137. pulumi_gcp/managedkafka/connector.py +4 -0
  138. pulumi_gcp/memorystore/instance.py +14 -14
  139. pulumi_gcp/monitoring/dashboard.py +7 -14
  140. pulumi_gcp/netapp/storage_pool.py +91 -2
  141. pulumi_gcp/netapp/volume.py +47 -0
  142. pulumi_gcp/networkmanagement/__init__.py +1 -0
  143. pulumi_gcp/networkmanagement/organization_vpc_flow_logs_config.py +1028 -0
  144. pulumi_gcp/networksecurity/authorization_policy.py +12 -0
  145. pulumi_gcp/networkservices/gateway.py +7 -7
  146. pulumi_gcp/networkservices/service_lb_policies.py +12 -0
  147. pulumi_gcp/oracledatabase/autonomous_database.py +2 -2
  148. pulumi_gcp/organizations/project.py +7 -21
  149. pulumi_gcp/projects/service.py +0 -7
  150. pulumi_gcp/pulumi-plugin.json +1 -1
  151. pulumi_gcp/resourcemanager/capability.py +16 -0
  152. pulumi_gcp/runtimeconfig/config.py +16 -0
  153. pulumi_gcp/runtimeconfig/config_iam_binding.py +236 -0
  154. pulumi_gcp/runtimeconfig/config_iam_member.py +236 -0
  155. pulumi_gcp/runtimeconfig/config_iam_policy.py +236 -0
  156. pulumi_gcp/runtimeconfig/variable.py +10 -0
  157. pulumi_gcp/saasruntime/__init__.py +1 -0
  158. pulumi_gcp/saasruntime/_inputs.py +458 -0
  159. pulumi_gcp/saasruntime/outputs.py +392 -0
  160. pulumi_gcp/saasruntime/saa_s.py +4 -0
  161. pulumi_gcp/saasruntime/unit_kind.py +1024 -0
  162. pulumi_gcp/securesourcemanager/instance.py +7 -7
  163. pulumi_gcp/securesourcemanager/repository.py +7 -7
  164. pulumi_gcp/servicedirectory/namespace_iam_binding.py +4 -4
  165. pulumi_gcp/servicedirectory/namespace_iam_member.py +4 -4
  166. pulumi_gcp/servicedirectory/namespace_iam_policy.py +4 -4
  167. pulumi_gcp/servicedirectory/service_iam_binding.py +4 -4
  168. pulumi_gcp/servicedirectory/service_iam_member.py +4 -4
  169. pulumi_gcp/servicedirectory/service_iam_policy.py +4 -4
  170. pulumi_gcp/sql/_inputs.py +135 -1
  171. pulumi_gcp/sql/database_instance.py +54 -14
  172. pulumi_gcp/sql/get_database_instance.py +12 -1
  173. pulumi_gcp/sql/outputs.py +265 -3
  174. pulumi_gcp/storage/_inputs.py +246 -0
  175. pulumi_gcp/storage/bucket.py +7 -14
  176. pulumi_gcp/storage/bucket_object.py +0 -9
  177. pulumi_gcp/storage/outputs.py +191 -1
  178. pulumi_gcp/storage/transfer_job.py +47 -0
  179. pulumi_gcp/tpu/v2_queued_resource.py +16 -0
  180. pulumi_gcp/tpu/v2_vm.py +16 -0
  181. pulumi_gcp/vertex/ai_feature_group_iam_binding.py +236 -0
  182. pulumi_gcp/vertex/ai_feature_group_iam_member.py +236 -0
  183. pulumi_gcp/vertex/ai_feature_group_iam_policy.py +236 -0
  184. pulumi_gcp/vertex/ai_feature_online_store_featureview_iam_binding.py +248 -0
  185. pulumi_gcp/vertex/ai_feature_online_store_featureview_iam_member.py +248 -0
  186. pulumi_gcp/vertex/ai_feature_online_store_featureview_iam_policy.py +248 -0
  187. pulumi_gcp/vertex/ai_feature_online_store_iam_binding.py +236 -0
  188. pulumi_gcp/vertex/ai_feature_online_store_iam_member.py +236 -0
  189. pulumi_gcp/vertex/ai_feature_online_store_iam_policy.py +236 -0
  190. pulumi_gcp/vertex/ai_feature_store_entity_type_iam_binding.py +238 -0
  191. pulumi_gcp/vertex/ai_feature_store_entity_type_iam_member.py +238 -0
  192. pulumi_gcp/vertex/ai_feature_store_entity_type_iam_policy.py +238 -0
  193. pulumi_gcp/vertex/ai_feature_store_iam_binding.py +248 -0
  194. pulumi_gcp/vertex/ai_feature_store_iam_member.py +248 -0
  195. pulumi_gcp/vertex/ai_feature_store_iam_policy.py +248 -0
  196. pulumi_gcp/vertex/ai_metadata_store.py +16 -0
  197. pulumi_gcp/workstations/workstation.py +16 -0
  198. pulumi_gcp/workstations/workstation_cluster.py +16 -0
  199. pulumi_gcp/workstations/workstation_config.py +16 -0
  200. pulumi_gcp/workstations/workstation_config_iam_binding.py +260 -0
  201. pulumi_gcp/workstations/workstation_config_iam_member.py +260 -0
  202. pulumi_gcp/workstations/workstation_config_iam_policy.py +260 -0
  203. pulumi_gcp/workstations/workstation_iam_binding.py +272 -0
  204. pulumi_gcp/workstations/workstation_iam_member.py +272 -0
  205. pulumi_gcp/workstations/workstation_iam_policy.py +272 -0
  206. {pulumi_gcp-9.0.0a1.dist-info → pulumi_gcp-9.0.0a1758219982.dist-info}/METADATA +1 -1
  207. {pulumi_gcp-9.0.0a1.dist-info → pulumi_gcp-9.0.0a1758219982.dist-info}/RECORD +209 -200
  208. {pulumi_gcp-9.0.0a1.dist-info → pulumi_gcp-9.0.0a1758219982.dist-info}/WHEEL +0 -0
  209. {pulumi_gcp-9.0.0a1.dist-info → pulumi_gcp-9.0.0a1758219982.dist-info}/top_level.txt +0 -0
@@ -174,6 +174,8 @@ __all__ = [
  'ClusterNodeConfigKubeletConfigEvictionMinimumReclaim',
  'ClusterNodeConfigKubeletConfigEvictionSoft',
  'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod',
+ 'ClusterNodeConfigKubeletConfigMemoryManager',
+ 'ClusterNodeConfigKubeletConfigTopologyManager',
  'ClusterNodeConfigLinuxNodeConfig',
  'ClusterNodeConfigLinuxNodeConfigHugepagesConfig',
  'ClusterNodeConfigLocalNvmeSsdBlockConfig',
@@ -227,6 +229,8 @@ __all__ = [
  'ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim',
  'ClusterNodePoolNodeConfigKubeletConfigEvictionSoft',
  'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod',
+ 'ClusterNodePoolNodeConfigKubeletConfigMemoryManager',
+ 'ClusterNodePoolNodeConfigKubeletConfigTopologyManager',
  'ClusterNodePoolNodeConfigLinuxNodeConfig',
  'ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig',
  'ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfig',
@@ -295,6 +299,8 @@ __all__ = [
  'NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim',
  'NodePoolNodeConfigKubeletConfigEvictionSoft',
  'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod',
+ 'NodePoolNodeConfigKubeletConfigMemoryManager',
+ 'NodePoolNodeConfigKubeletConfigTopologyManager',
  'NodePoolNodeConfigLinuxNodeConfig',
  'NodePoolNodeConfigLinuxNodeConfigHugepagesConfig',
  'NodePoolNodeConfigLocalNvmeSsdBlockConfig',
@@ -401,6 +407,8 @@ __all__ = [
  'GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult',
  'GetClusterNodeConfigKubeletConfigEvictionSoftResult',
  'GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult',
+ 'GetClusterNodeConfigKubeletConfigMemoryManagerResult',
+ 'GetClusterNodeConfigKubeletConfigTopologyManagerResult',
  'GetClusterNodeConfigLinuxNodeConfigResult',
  'GetClusterNodeConfigLinuxNodeConfigHugepagesConfigResult',
  'GetClusterNodeConfigLocalNvmeSsdBlockConfigResult',
@@ -454,6 +462,8 @@ __all__ = [
  'GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult',
  'GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult',
  'GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult',
+ 'GetClusterNodePoolNodeConfigKubeletConfigMemoryManagerResult',
+ 'GetClusterNodePoolNodeConfigKubeletConfigTopologyManagerResult',
  'GetClusterNodePoolNodeConfigLinuxNodeConfigResult',
  'GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigResult',
  'GetClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigResult',
@@ -5899,7 +5909,7 @@ class ClusterEnterpriseConfig(dict):
  desired_tier: Optional[_builtins.str] = None):
  """
  :param _builtins.str cluster_tier: The effective tier of the cluster.
- :param _builtins.str desired_tier: Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`.
+ :param _builtins.str desired_tier: (DEPRECATED) Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`. Deprecated as GKE Enterprise features are now available without an Enterprise tier. See https://cloud.google.com/blog/products/containers-kubernetes/gke-gets-new-pricing-and-capabilities-on-10th-birthday for the announcement of this change.
  """
  if cluster_tier is not None:
  pulumi.set(__self__, "cluster_tier", cluster_tier)
@@ -5908,6 +5918,7 @@ class ClusterEnterpriseConfig(dict):

  @_builtins.property
  @pulumi.getter(name="clusterTier")
+ @_utilities.deprecated("""GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""")
  def cluster_tier(self) -> Optional[_builtins.str]:
  """
  The effective tier of the cluster.
@@ -5916,9 +5927,10 @@ class ClusterEnterpriseConfig(dict):

  @_builtins.property
  @pulumi.getter(name="desiredTier")
+ @_utilities.deprecated("""GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""")
  def desired_tier(self) -> Optional[_builtins.str]:
  """
- Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`.
+ (DEPRECATED) Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`. Deprecated as GKE Enterprise features are now available without an Enterprise tier. See https://cloud.google.com/blog/products/containers-kubernetes/gke-gets-new-pricing-and-capabilities-on-10th-birthday for the announcement of this change.
  """
  return pulumi.get(self, "desired_tier")

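Editor's note on the change above, as a minimal sketch that is not part of the diff: with the Enterprise tier deprecation, `enterprise_config.desired_tier` can simply be left unset. The cluster name and location below are hypothetical.

import pulumi_gcp as gcp

# Hedged sketch: GKE Enterprise features no longer require an Enterprise tier,
# so desired_tier is omitted rather than set.
cluster = gcp.container.Cluster(
    "example-cluster",            # hypothetical resource name
    location="us-central1",       # hypothetical location
    initial_node_count=1,
    # enterprise_config={"desired_tier": "ENTERPRISE"},  # deprecated per the docstring above; omit going forward
)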
@@ -7428,7 +7440,7 @@ class ClusterNodeConfig(dict):
  in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
  Prefer configuring `boot_disk`.
  :param _builtins.str disk_type: Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  :param Sequence['ClusterNodeConfigEffectiveTaintArgs'] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
  :param _builtins.bool enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
  :param 'ClusterNodeConfigEphemeralStorageConfigArgs' ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -7676,7 +7688,7 @@ class ClusterNodeConfig(dict):
  def disk_type(self) -> Optional[_builtins.str]:
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  """
  return pulumi.get(self, "disk_type")

@@ -8146,7 +8158,7 @@ class ClusterNodeConfigBootDisk(dict):
  size_gb: Optional[_builtins.int] = None):
  """
  :param _builtins.str disk_type: Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
  :param _builtins.int provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
  :param _builtins.int provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
  :param _builtins.int size_gb: Size of the disk attached to each node, specified
@@ -8166,7 +8178,7 @@ class ClusterNodeConfigBootDisk(dict):
  def disk_type(self) -> Optional[_builtins.str]:
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
  """
  return pulumi.get(self, "disk_type")

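Because the documented default above can now resolve to `hyperdisk-balanced` on machine types that do not support `pd-balanced`, here is a hedged sketch (not part of the diff) of pinning the boot disk type explicitly through the newer `boot_disk` block; the resource name, location, machine type, and size are illustrative.

import pulumi_gcp as gcp

# Hedged sketch: pin the disk type instead of relying on the machine-type-dependent default.
cluster = gcp.container.Cluster(
    "example-cluster",
    location="us-central1",
    initial_node_count=1,
    node_config={
        "machine_type": "n2-standard-4",   # illustrative machine type
        "boot_disk": {                     # preferred over node_config.disk_type per the migration note above
            "disk_type": "pd-balanced",
            "size_gb": 100,
        },
    },
)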
@@ -8849,10 +8861,14 @@ class ClusterNodeConfigKubeletConfig(dict):
  suggest = "insecure_kubelet_readonly_port_enabled"
  elif key == "maxParallelImagePulls":
  suggest = "max_parallel_image_pulls"
+ elif key == "memoryManager":
+ suggest = "memory_manager"
  elif key == "podPidsLimit":
  suggest = "pod_pids_limit"
  elif key == "singleProcessOomKill":
  suggest = "single_process_oom_kill"
+ elif key == "topologyManager":
+ suggest = "topology_manager"

  if suggest:
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfig. Access the value via the '{suggest}' property getter instead.")
@@ -8882,8 +8898,10 @@ class ClusterNodeConfigKubeletConfig(dict):
  image_minimum_gc_age: Optional[_builtins.str] = None,
  insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
  max_parallel_image_pulls: Optional[_builtins.int] = None,
+ memory_manager: Optional['outputs.ClusterNodeConfigKubeletConfigMemoryManager'] = None,
  pod_pids_limit: Optional[_builtins.int] = None,
- single_process_oom_kill: Optional[_builtins.bool] = None):
+ single_process_oom_kill: Optional[_builtins.bool] = None,
+ topology_manager: Optional['outputs.ClusterNodeConfigKubeletConfigTopologyManager'] = None):
  """
  :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
  :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -8913,8 +8931,12 @@ class ClusterNodeConfigKubeletConfig(dict):
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
  :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
+ :param 'ClusterNodeConfigKubeletConfigMemoryManagerArgs' memory_manager: Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+ The memory manager optimizes memory and hugepages allocation for pods, especially
+ those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
  :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
+ :param 'ClusterNodeConfigKubeletConfigTopologyManagerArgs' topology_manager: These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
  """
  if allowed_unsafe_sysctls is not None:
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -8948,10 +8970,14 @@ class ClusterNodeConfigKubeletConfig(dict):
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
  if max_parallel_image_pulls is not None:
  pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+ if memory_manager is not None:
+ pulumi.set(__self__, "memory_manager", memory_manager)
  if pod_pids_limit is not None:
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
  if single_process_oom_kill is not None:
  pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+ if topology_manager is not None:
+ pulumi.set(__self__, "topology_manager", topology_manager)

  @_builtins.property
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -9093,6 +9119,16 @@ class ClusterNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "max_parallel_image_pulls")

+ @_builtins.property
+ @pulumi.getter(name="memoryManager")
+ def memory_manager(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigMemoryManager']:
+ """
+ Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+ The memory manager optimizes memory and hugepages allocation for pods, especially
+ those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
+ """
+ return pulumi.get(self, "memory_manager")
+
  @_builtins.property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> Optional[_builtins.int]:
@@ -9109,6 +9145,14 @@ class ClusterNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "single_process_oom_kill")

+ @_builtins.property
+ @pulumi.getter(name="topologyManager")
+ def topology_manager(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigTopologyManager']:
+ """
+ These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
+ """
+ return pulumi.get(self, "topology_manager")
+

  @pulumi.output_type
  class ClusterNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
@@ -9428,6 +9472,64 @@ class ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
  return pulumi.get(self, "pid_available")


+ @pulumi.output_type
+ class ClusterNodeConfigKubeletConfigMemoryManager(dict):
+ def __init__(__self__, *,
+ policy: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str policy: The [Memory
+ Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+ policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+ """
+ if policy is not None:
+ pulumi.set(__self__, "policy", policy)
+
+ @_builtins.property
+ @pulumi.getter
+ def policy(self) -> Optional[_builtins.str]:
+ """
+ The [Memory
+ Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+ policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+ """
+ return pulumi.get(self, "policy")
+
+
+ @pulumi.output_type
+ class ClusterNodeConfigKubeletConfigTopologyManager(dict):
+ def __init__(__self__, *,
+ policy: Optional[_builtins.str] = None,
+ scope: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str policy: The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+ :param _builtins.str scope: The Topology Manager scope, defining the granularity at which
+ policy decisions are applied. Valid values are "container" (resources are aligned
+ per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+ """
+ if policy is not None:
+ pulumi.set(__self__, "policy", policy)
+ if scope is not None:
+ pulumi.set(__self__, "scope", scope)
+
+ @_builtins.property
+ @pulumi.getter
+ def policy(self) -> Optional[_builtins.str]:
+ """
+ The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+ """
+ return pulumi.get(self, "policy")
+
+ @_builtins.property
+ @pulumi.getter
+ def scope(self) -> Optional[_builtins.str]:
+ """
+ The Topology Manager scope, defining the granularity at which
+ policy decisions are applied. Valid values are "container" (resources are aligned
+ per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+ """
+ return pulumi.get(self, "scope")
+
+
  @pulumi.output_type
  class ClusterNodeConfigLinuxNodeConfig(dict):
  @staticmethod
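A hedged usage sketch (not part of the diff) for the two new kubelet blocks documented above, wired through `node_config.kubelet_config` on `gcp.container.Cluster`; the cluster name, location, machine type, and chosen policies are assumptions.

import pulumi_gcp as gcp

# Hedged sketch: enable the kubelet Memory Manager and Topology Manager on the default node pool.
cluster = gcp.container.Cluster(
    "numa-aware-cluster",                      # hypothetical resource name
    location="us-central1",                    # hypothetical location
    initial_node_count=1,
    node_config={
        "machine_type": "c3-standard-22",      # assumption: a machine type where NUMA alignment matters
        "kubelet_config": {
            "memory_manager": {"policy": "Static"},                        # None (default) or Static, per the docstring above
            "topology_manager": {"policy": "restricted", "scope": "pod"},  # policy: none/best-effort/restricted/single-numa-node
        },
    },
)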
@@ -11370,7 +11472,7 @@ class ClusterNodePoolNodeConfig(dict):
  in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
  Prefer configuring `boot_disk`.
  :param _builtins.str disk_type: Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  :param Sequence['ClusterNodePoolNodeConfigEffectiveTaintArgs'] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
  :param _builtins.bool enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
  :param 'ClusterNodePoolNodeConfigEphemeralStorageConfigArgs' ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -11618,7 +11720,7 @@ class ClusterNodePoolNodeConfig(dict):
  def disk_type(self) -> Optional[_builtins.str]:
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  """
  return pulumi.get(self, "disk_type")

@@ -12088,7 +12190,7 @@ class ClusterNodePoolNodeConfigBootDisk(dict):
  size_gb: Optional[_builtins.int] = None):
  """
  :param _builtins.str disk_type: Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
  :param _builtins.int provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
  :param _builtins.int provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
  :param _builtins.int size_gb: Size of the disk attached to each node, specified
@@ -12108,7 +12210,7 @@ class ClusterNodePoolNodeConfigBootDisk(dict):
  def disk_type(self) -> Optional[_builtins.str]:
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
  """
  return pulumi.get(self, "disk_type")

@@ -12791,10 +12893,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  suggest = "insecure_kubelet_readonly_port_enabled"
  elif key == "maxParallelImagePulls":
  suggest = "max_parallel_image_pulls"
+ elif key == "memoryManager":
+ suggest = "memory_manager"
  elif key == "podPidsLimit":
  suggest = "pod_pids_limit"
  elif key == "singleProcessOomKill":
  suggest = "single_process_oom_kill"
+ elif key == "topologyManager":
+ suggest = "topology_manager"

  if suggest:
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfig. Access the value via the '{suggest}' property getter instead.")
@@ -12824,8 +12930,10 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  image_minimum_gc_age: Optional[_builtins.str] = None,
  insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
  max_parallel_image_pulls: Optional[_builtins.int] = None,
+ memory_manager: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigMemoryManager'] = None,
  pod_pids_limit: Optional[_builtins.int] = None,
- single_process_oom_kill: Optional[_builtins.bool] = None):
+ single_process_oom_kill: Optional[_builtins.bool] = None,
+ topology_manager: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigTopologyManager'] = None):
  """
  :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
  :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -12855,8 +12963,12 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
  :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
+ :param 'ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs' memory_manager: Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+ The memory manager optimizes memory and hugepages allocation for pods, especially
+ those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
  :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
+ :param 'ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs' topology_manager: These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
  """
  if allowed_unsafe_sysctls is not None:
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -12890,10 +13002,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
  if max_parallel_image_pulls is not None:
  pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+ if memory_manager is not None:
+ pulumi.set(__self__, "memory_manager", memory_manager)
  if pod_pids_limit is not None:
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
  if single_process_oom_kill is not None:
  pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+ if topology_manager is not None:
+ pulumi.set(__self__, "topology_manager", topology_manager)

  @_builtins.property
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -13035,6 +13151,16 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "max_parallel_image_pulls")

+ @_builtins.property
+ @pulumi.getter(name="memoryManager")
+ def memory_manager(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigMemoryManager']:
+ """
+ Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+ The memory manager optimizes memory and hugepages allocation for pods, especially
+ those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
+ """
+ return pulumi.get(self, "memory_manager")
+
  @_builtins.property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> Optional[_builtins.int]:
@@ -13051,6 +13177,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "single_process_oom_kill")

+ @_builtins.property
+ @pulumi.getter(name="topologyManager")
+ def topology_manager(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigTopologyManager']:
+ """
+ These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
+ """
+ return pulumi.get(self, "topology_manager")
+

  @pulumi.output_type
  class ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
@@ -13370,6 +13504,64 @@ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
  return pulumi.get(self, "pid_available")


+ @pulumi.output_type
+ class ClusterNodePoolNodeConfigKubeletConfigMemoryManager(dict):
+ def __init__(__self__, *,
+ policy: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str policy: The [Memory
+ Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+ policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+ """
+ if policy is not None:
+ pulumi.set(__self__, "policy", policy)
+
+ @_builtins.property
+ @pulumi.getter
+ def policy(self) -> Optional[_builtins.str]:
+ """
+ The [Memory
+ Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+ policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+ """
+ return pulumi.get(self, "policy")
+
+
+ @pulumi.output_type
+ class ClusterNodePoolNodeConfigKubeletConfigTopologyManager(dict):
+ def __init__(__self__, *,
+ policy: Optional[_builtins.str] = None,
+ scope: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str policy: The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+ :param _builtins.str scope: The Topology Manager scope, defining the granularity at which
+ policy decisions are applied. Valid values are "container" (resources are aligned
+ per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+ """
+ if policy is not None:
+ pulumi.set(__self__, "policy", policy)
+ if scope is not None:
+ pulumi.set(__self__, "scope", scope)
+
+ @_builtins.property
+ @pulumi.getter
+ def policy(self) -> Optional[_builtins.str]:
+ """
+ The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+ """
+ return pulumi.get(self, "policy")
+
+ @_builtins.property
+ @pulumi.getter
+ def scope(self) -> Optional[_builtins.str]:
+ """
+ The Topology Manager scope, defining the granularity at which
+ policy decisions are applied. Valid values are "container" (resources are aligned
+ per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+ """
+ return pulumi.get(self, "scope")
+
+
  @pulumi.output_type
  class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
  @staticmethod
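The same two blocks are mirrored for node pools defined inline on the cluster (the `ClusterNodePoolNodeConfig*` types above); a hedged sketch that is not part of the diff, with hypothetical cluster and pool names:

import pulumi_gcp as gcp

# Hedged sketch: kubelet Memory/Topology Manager on an inline node pool of a cluster.
cluster = gcp.container.Cluster(
    "example-cluster",
    location="us-central1",
    node_pools=[{
        "name": "numa-pool",                   # hypothetical pool name
        "node_count": 1,
        "node_config": {
            "machine_type": "c3-standard-22",  # illustrative machine type
            "kubelet_config": {
                "memory_manager": {"policy": "Static"},
                "topology_manager": {"policy": "single-numa-node", "scope": "container"},
            },
        },
    }],
)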
@@ -17175,10 +17367,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
  suggest = "insecure_kubelet_readonly_port_enabled"
  elif key == "maxParallelImagePulls":
  suggest = "max_parallel_image_pulls"
+ elif key == "memoryManager":
+ suggest = "memory_manager"
  elif key == "podPidsLimit":
  suggest = "pod_pids_limit"
  elif key == "singleProcessOomKill":
  suggest = "single_process_oom_kill"
+ elif key == "topologyManager":
+ suggest = "topology_manager"

  if suggest:
  pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfig. Access the value via the '{suggest}' property getter instead.")
@@ -17208,8 +17404,10 @@ class NodePoolNodeConfigKubeletConfig(dict):
  image_minimum_gc_age: Optional[_builtins.str] = None,
  insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
  max_parallel_image_pulls: Optional[_builtins.int] = None,
+ memory_manager: Optional['outputs.NodePoolNodeConfigKubeletConfigMemoryManager'] = None,
  pod_pids_limit: Optional[_builtins.int] = None,
- single_process_oom_kill: Optional[_builtins.bool] = None):
+ single_process_oom_kill: Optional[_builtins.bool] = None,
+ topology_manager: Optional['outputs.NodePoolNodeConfigKubeletConfigTopologyManager'] = None):
  """
  :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
  :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -17227,8 +17425,10 @@ class NodePoolNodeConfigKubeletConfig(dict):
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
  :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
+ :param 'NodePoolNodeConfigKubeletConfigMemoryManagerArgs' memory_manager: Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
  :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer.
+ :param 'NodePoolNodeConfigKubeletConfigTopologyManagerArgs' topology_manager: Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
  """
  if allowed_unsafe_sysctls is not None:
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -17262,10 +17462,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
  if max_parallel_image_pulls is not None:
  pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+ if memory_manager is not None:
+ pulumi.set(__self__, "memory_manager", memory_manager)
  if pod_pids_limit is not None:
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
  if single_process_oom_kill is not None:
  pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+ if topology_manager is not None:
+ pulumi.set(__self__, "topology_manager", topology_manager)

  @_builtins.property
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -17395,6 +17599,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "max_parallel_image_pulls")

+ @_builtins.property
+ @pulumi.getter(name="memoryManager")
+ def memory_manager(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigMemoryManager']:
+ """
+ Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
+ """
+ return pulumi.get(self, "memory_manager")
+
  @_builtins.property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> Optional[_builtins.int]:
@@ -17411,6 +17623,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "single_process_oom_kill")

+ @_builtins.property
+ @pulumi.getter(name="topologyManager")
+ def topology_manager(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigTopologyManager']:
+ """
+ Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
+ """
+ return pulumi.get(self, "topology_manager")
+

  @pulumi.output_type
  class NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
@@ -17730,6 +17950,56 @@ class NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
  return pulumi.get(self, "pid_available")


+ @pulumi.output_type
+ class NodePoolNodeConfigKubeletConfigMemoryManager(dict):
+ def __init__(__self__, *,
+ policy: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str policy: The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+ """
+ if policy is not None:
+ pulumi.set(__self__, "policy", policy)
+
+ @_builtins.property
+ @pulumi.getter
+ def policy(self) -> Optional[_builtins.str]:
+ """
+ The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+ """
+ return pulumi.get(self, "policy")
+
+
+ @pulumi.output_type
+ class NodePoolNodeConfigKubeletConfigTopologyManager(dict):
+ def __init__(__self__, *,
+ policy: Optional[_builtins.str] = None,
+ scope: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str policy: The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+ :param _builtins.str scope: The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+ """
+ if policy is not None:
+ pulumi.set(__self__, "policy", policy)
+ if scope is not None:
+ pulumi.set(__self__, "scope", scope)
+
+ @_builtins.property
+ @pulumi.getter
+ def policy(self) -> Optional[_builtins.str]:
+ """
+ The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+ """
+ return pulumi.get(self, "policy")
+
+ @_builtins.property
+ @pulumi.getter
+ def scope(self) -> Optional[_builtins.str]:
+ """
+ The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+ """
+ return pulumi.get(self, "scope")
+
+
  @pulumi.output_type
  class NodePoolNodeConfigLinuxNodeConfig(dict):
  @staticmethod
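The standalone `gcp.container.NodePool` resource picks up the same fields (the `NodePoolNodeConfigKubeletConfig*` types above); a hedged sketch that is not part of the diff, assuming an existing GKE cluster named `example-cluster`:

import pulumi_gcp as gcp

# Hedged sketch: the same kubelet managers on a standalone node pool resource.
pool = gcp.container.NodePool(
    "numa-aware-pool",                         # hypothetical resource name
    cluster="example-cluster",                 # assumption: an existing GKE cluster
    location="us-central1",
    node_count=1,
    node_config={
        "machine_type": "c3-standard-22",      # illustrative machine type
        "kubelet_config": {
            "memory_manager": {"policy": "Static"},
            "topology_manager": {"policy": "best-effort", "scope": "container"},
        },
    },
)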
@@ -21612,8 +21882,10 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
                  image_minimum_gc_age: _builtins.str,
                  insecure_kubelet_readonly_port_enabled: _builtins.str,
                  max_parallel_image_pulls: _builtins.int,
+                 memory_managers: Sequence['outputs.GetClusterNodeConfigKubeletConfigMemoryManagerResult'],
                  pod_pids_limit: _builtins.int,
-                 single_process_oom_kill: _builtins.bool):
+                 single_process_oom_kill: _builtins.bool,
+                 topology_managers: Sequence['outputs.GetClusterNodeConfigKubeletConfigTopologyManagerResult']):
         """
         :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
         :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -21631,8 +21903,10 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
         :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
         :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
         :param _builtins.int max_parallel_image_pulls: Sets the maximum number of image pulls in parallel.
+        :param Sequence['GetClusterNodeConfigKubeletConfigMemoryManagerArgs'] memory_managers: Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
         :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
         :param _builtins.bool single_process_oom_kill: Defines whether to enable the single-process OOM killer.
+        :param Sequence['GetClusterNodeConfigKubeletConfigTopologyManagerArgs'] topology_managers: Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
         """
         pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
         pulumi.set(__self__, "container_log_max_files", container_log_max_files)
@@ -21650,8 +21924,10 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
         pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
         pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
         pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+        pulumi.set(__self__, "memory_managers", memory_managers)
         pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
         pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+        pulumi.set(__self__, "topology_managers", topology_managers)

     @_builtins.property
     @pulumi.getter(name="allowedUnsafeSysctls")
@@ -21781,6 +22057,14 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
         """
         return pulumi.get(self, "max_parallel_image_pulls")

+    @_builtins.property
+    @pulumi.getter(name="memoryManagers")
+    def memory_managers(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigMemoryManagerResult']:
+        """
+        Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
+        """
+        return pulumi.get(self, "memory_managers")
+
     @_builtins.property
     @pulumi.getter(name="podPidsLimit")
     def pod_pids_limit(self) -> _builtins.int:
@@ -21797,6 +22081,14 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
         """
         return pulumi.get(self, "single_process_oom_kill")

+    @_builtins.property
+    @pulumi.getter(name="topologyManagers")
+    def topology_managers(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigTopologyManagerResult']:
+        """
+        Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
+        """
+        return pulumi.get(self, "topology_managers")
+

 @pulumi.output_type
 class GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult(dict):
@@ -22017,6 +22309,53 @@ class GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult(dict):
         return pulumi.get(self, "pid_available")


+@pulumi.output_type
+class GetClusterNodeConfigKubeletConfigMemoryManagerResult(dict):
+    def __init__(__self__, *,
+                 policy: _builtins.str):
+        """
+        :param _builtins.str policy: The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        pulumi.set(__self__, "policy", policy)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> _builtins.str:
+        """
+        The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        return pulumi.get(self, "policy")
+
+
+@pulumi.output_type
+class GetClusterNodeConfigKubeletConfigTopologyManagerResult(dict):
+    def __init__(__self__, *,
+                 policy: _builtins.str,
+                 scope: _builtins.str):
+        """
+        :param _builtins.str policy: The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        :param _builtins.str scope: The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        pulumi.set(__self__, "policy", policy)
+        pulumi.set(__self__, "scope", scope)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> _builtins.str:
+        """
+        The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        """
+        return pulumi.get(self, "policy")
+
+    @_builtins.property
+    @pulumi.getter
+    def scope(self) -> _builtins.str:
+        """
+        The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        return pulumi.get(self, "scope")
+
+
 @pulumi.output_type
 class GetClusterNodeConfigLinuxNodeConfigResult(dict):
     def __init__(__self__, *,
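On the data-source side, `GetClusterNodeConfigKubeletConfigResult` now carries `memory_managers` and `topology_managers` lists. A hedged sketch of reading them through `gcp.container.get_cluster` follows; the cluster name and location are placeholders, and the `node_configs` → `kubelet_configs` attribute path is inferred from the generated result-type names and may need adjusting against the final SDK.

```python
import pulumi
import pulumi_gcp as gcp

# Sketch only: cluster name/location are placeholders, and the attribute path
# (node_configs -> kubelet_configs) is inferred from the generated result types.
cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

kubelet_settings = []
for node_config in cluster.node_configs:
    for kubelet in node_config.kubelet_configs:
        kubelet_settings.append({
            "memory_manager_policies": [mm.policy for mm in kubelet.memory_managers],
            "topology_managers": [
                {"policy": tm.policy, "scope": tm.scope} for tm in kubelet.topology_managers
            ],
        })

pulumi.export("kubelet_settings", kubelet_settings)
```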
@@ -24162,8 +24501,10 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
                  image_minimum_gc_age: _builtins.str,
                  insecure_kubelet_readonly_port_enabled: _builtins.str,
                  max_parallel_image_pulls: _builtins.int,
+                 memory_managers: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigMemoryManagerResult'],
                  pod_pids_limit: _builtins.int,
-                 single_process_oom_kill: _builtins.bool):
+                 single_process_oom_kill: _builtins.bool,
+                 topology_managers: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigTopologyManagerResult']):
         """
         :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
         :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -24181,8 +24522,10 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
         :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
         :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
         :param _builtins.int max_parallel_image_pulls: Sets the maximum number of image pulls in parallel.
+        :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs'] memory_managers: Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
         :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
         :param _builtins.bool single_process_oom_kill: Defines whether to enable the single-process OOM killer.
+        :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs'] topology_managers: Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
         """
         pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
         pulumi.set(__self__, "container_log_max_files", container_log_max_files)
@@ -24200,8 +24543,10 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
         pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
         pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
         pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+        pulumi.set(__self__, "memory_managers", memory_managers)
         pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
         pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+        pulumi.set(__self__, "topology_managers", topology_managers)

     @_builtins.property
     @pulumi.getter(name="allowedUnsafeSysctls")
@@ -24331,6 +24676,14 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
         """
         return pulumi.get(self, "max_parallel_image_pulls")

+    @_builtins.property
+    @pulumi.getter(name="memoryManagers")
+    def memory_managers(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigMemoryManagerResult']:
+        """
+        Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
+        """
+        return pulumi.get(self, "memory_managers")
+
     @_builtins.property
     @pulumi.getter(name="podPidsLimit")
     def pod_pids_limit(self) -> _builtins.int:
@@ -24347,6 +24700,14 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
         """
         return pulumi.get(self, "single_process_oom_kill")

+    @_builtins.property
+    @pulumi.getter(name="topologyManagers")
+    def topology_managers(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigTopologyManagerResult']:
+        """
+        Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
+        """
+        return pulumi.get(self, "topology_managers")
+

 @pulumi.output_type
 class GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult(dict):
@@ -24567,6 +24928,53 @@ class GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult(dict):
         return pulumi.get(self, "pid_available")


+@pulumi.output_type
+class GetClusterNodePoolNodeConfigKubeletConfigMemoryManagerResult(dict):
+    def __init__(__self__, *,
+                 policy: _builtins.str):
+        """
+        :param _builtins.str policy: The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        pulumi.set(__self__, "policy", policy)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> _builtins.str:
+        """
+        The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        return pulumi.get(self, "policy")
+
+
+@pulumi.output_type
+class GetClusterNodePoolNodeConfigKubeletConfigTopologyManagerResult(dict):
+    def __init__(__self__, *,
+                 policy: _builtins.str,
+                 scope: _builtins.str):
+        """
+        :param _builtins.str policy: The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        :param _builtins.str scope: The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        pulumi.set(__self__, "policy", policy)
+        pulumi.set(__self__, "scope", scope)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> _builtins.str:
+        """
+        The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        """
+        return pulumi.get(self, "policy")
+
+    @_builtins.property
+    @pulumi.getter
+    def scope(self) -> _builtins.str:
+        """
+        The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        return pulumi.get(self, "scope")
+
+
 @pulumi.output_type
 class GetClusterNodePoolNodeConfigLinuxNodeConfigResult(dict):
     def __init__(__self__, *,
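The same pair of result types is duplicated for the per-node-pool view (`GetClusterNodePoolNodeConfigKubeletConfig*Result`), so the settings can also be read per node pool. A sketch under the same assumptions about attribute paths and policy values as above:

```python
import pulumi
import pulumi_gcp as gcp

cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

# Collect the node pools whose kubelet Topology Manager requests single-NUMA alignment.
# The node_pools -> node_configs -> kubelet_configs path mirrors the result-type names
# above and is an assumption; "single-numa-node" is an illustrative policy value.
numa_aligned_pools = [
    pool.name
    for pool in cluster.node_pools
    for node_config in pool.node_configs
    for kubelet in node_config.kubelet_configs
    if any(tm.policy == "single-numa-node" for tm in kubelet.topology_managers)
]

pulumi.export("numa_aligned_pools", numa_aligned_pools)
```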