pulumi-gcp 9.0.0a1__py3-none-any.whl → 9.0.0a1758219982__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (209)
  1. pulumi_gcp/__init__.py +48 -0
  2. pulumi_gcp/activedirectory/peering.py +16 -0
  3. pulumi_gcp/apigateway/api_config_iam_binding.py +4 -4
  4. pulumi_gcp/apigateway/api_config_iam_member.py +4 -4
  5. pulumi_gcp/apigateway/api_config_iam_policy.py +4 -4
  6. pulumi_gcp/apigateway/api_iam_binding.py +4 -4
  7. pulumi_gcp/apigateway/api_iam_member.py +4 -4
  8. pulumi_gcp/apigateway/api_iam_policy.py +4 -4
  9. pulumi_gcp/apigateway/gateway_iam_binding.py +4 -4
  10. pulumi_gcp/apigateway/gateway_iam_member.py +4 -4
  11. pulumi_gcp/apigateway/gateway_iam_policy.py +4 -4
  12. pulumi_gcp/artifactregistry/__init__.py +1 -0
  13. pulumi_gcp/artifactregistry/get_python_package.py +237 -0
  14. pulumi_gcp/artifactregistry/get_repository.py +12 -1
  15. pulumi_gcp/artifactregistry/repository.py +28 -0
  16. pulumi_gcp/backupdisasterrecovery/__init__.py +1 -0
  17. pulumi_gcp/backupdisasterrecovery/backup_plan.py +4 -4
  18. pulumi_gcp/backupdisasterrecovery/get_backup.py +12 -1
  19. pulumi_gcp/backupdisasterrecovery/get_backup_plan_association.py +4 -0
  20. pulumi_gcp/backupdisasterrecovery/get_data_source_references.py +135 -0
  21. pulumi_gcp/backupdisasterrecovery/get_management_server.py +4 -0
  22. pulumi_gcp/backupdisasterrecovery/outputs.py +103 -0
  23. pulumi_gcp/bigquery/app_profile.py +14 -49
  24. pulumi_gcp/bigquery/dataset_access.py +4 -8
  25. pulumi_gcp/bigquery/table.py +7 -21
  26. pulumi_gcp/bigqueryanalyticshub/data_exchange_subscription.py +20 -0
  27. pulumi_gcp/bigtable/app_profile.py +14 -49
  28. pulumi_gcp/bigtable/table.py +42 -42
  29. pulumi_gcp/certificatemanager/certificate_map_entry.py +7 -7
  30. pulumi_gcp/chronicle/_inputs.py +6 -5
  31. pulumi_gcp/chronicle/outputs.py +4 -3
  32. pulumi_gcp/chronicle/reference_list.py +53 -5
  33. pulumi_gcp/cloudasset/get_resources_search_all.py +8 -0
  34. pulumi_gcp/cloudbuild/_inputs.py +20 -0
  35. pulumi_gcp/cloudbuild/outputs.py +14 -0
  36. pulumi_gcp/cloudbuild/trigger.py +4 -2
  37. pulumi_gcp/cloudbuild/worker_pool.py +28 -21
  38. pulumi_gcp/cloudfunctions/function.py +4 -8
  39. pulumi_gcp/cloudquota/s_quota_adjuster_settings.py +16 -0
  40. pulumi_gcp/cloudrunv2/_inputs.py +57 -0
  41. pulumi_gcp/cloudrunv2/get_service.py +12 -1
  42. pulumi_gcp/cloudrunv2/outputs.py +81 -0
  43. pulumi_gcp/cloudrunv2/service.py +54 -0
  44. pulumi_gcp/cloudrunv2/worker_pool.py +2 -2
  45. pulumi_gcp/composer/environment.py +28 -49
  46. pulumi_gcp/compute/_inputs.py +804 -24
  47. pulumi_gcp/compute/backend_bucket.py +14 -0
  48. pulumi_gcp/compute/backend_bucket_iam_binding.py +236 -0
  49. pulumi_gcp/compute/backend_bucket_iam_member.py +236 -0
  50. pulumi_gcp/compute/backend_bucket_iam_policy.py +236 -0
  51. pulumi_gcp/compute/backend_service_iam_binding.py +464 -0
  52. pulumi_gcp/compute/backend_service_iam_member.py +464 -0
  53. pulumi_gcp/compute/backend_service_iam_policy.py +464 -0
  54. pulumi_gcp/compute/cross_site_network.py +16 -0
  55. pulumi_gcp/compute/future_reservation.py +32 -0
  56. pulumi_gcp/compute/get_region_backend_service.py +12 -1
  57. pulumi_gcp/compute/get_resource_policy.py +2 -20
  58. pulumi_gcp/compute/instance_from_machine_image.py +75 -110
  59. pulumi_gcp/compute/instance_from_template.py +75 -110
  60. pulumi_gcp/compute/instance_group_manager.py +7 -14
  61. pulumi_gcp/compute/instance_template.py +7 -14
  62. pulumi_gcp/compute/interconnect.py +4 -8
  63. pulumi_gcp/compute/machine_image_iam_binding.py +8 -4
  64. pulumi_gcp/compute/machine_image_iam_member.py +8 -4
  65. pulumi_gcp/compute/machine_image_iam_policy.py +8 -4
  66. pulumi_gcp/compute/network_edge_security_service.py +16 -0
  67. pulumi_gcp/compute/network_firewall_policy_packet_mirroring_rule.py +12 -0
  68. pulumi_gcp/compute/outputs.py +671 -16
  69. pulumi_gcp/compute/preview_feature.py +16 -0
  70. pulumi_gcp/compute/public_delegated_prefix.py +42 -0
  71. pulumi_gcp/compute/region_backend_service.py +54 -0
  72. pulumi_gcp/compute/region_backend_service_iam_binding.py +488 -0
  73. pulumi_gcp/compute/region_backend_service_iam_member.py +488 -0
  74. pulumi_gcp/compute/region_backend_service_iam_policy.py +488 -0
  75. pulumi_gcp/compute/region_instance_group_manager.py +7 -14
  76. pulumi_gcp/compute/region_network_endpoint_group.py +1 -3
  77. pulumi_gcp/compute/region_resize_request.py +24 -0
  78. pulumi_gcp/compute/region_url_map.py +75 -0
  79. pulumi_gcp/compute/wire_group.py +16 -0
  80. pulumi_gcp/container/_inputs.py +446 -18
  81. pulumi_gcp/container/aws_cluster.py +28 -42
  82. pulumi_gcp/container/aws_node_pool.py +28 -42
  83. pulumi_gcp/container/azure_cluster.py +35 -70
  84. pulumi_gcp/container/azure_node_pool.py +28 -42
  85. pulumi_gcp/container/cluster.py +16 -7
  86. pulumi_gcp/container/outputs.py +423 -15
  87. pulumi_gcp/dataflow/flex_template_job.py +10 -0
  88. pulumi_gcp/dataform/repository.py +16 -0
  89. pulumi_gcp/dataform/repository_release_config.py +16 -0
  90. pulumi_gcp/dataform/repository_workflow_config.py +16 -0
  91. pulumi_gcp/dataplex/asset.py +28 -21
  92. pulumi_gcp/dataplex/zone.py +28 -21
  93. pulumi_gcp/dataproc/cluster.py +21 -14
  94. pulumi_gcp/dataproc/workflow_template.py +42 -105
  95. pulumi_gcp/discoveryengine/__init__.py +1 -0
  96. pulumi_gcp/discoveryengine/_inputs.py +94 -0
  97. pulumi_gcp/discoveryengine/acl_config.py +393 -0
  98. pulumi_gcp/discoveryengine/outputs.py +92 -0
  99. pulumi_gcp/firebase/android_app.py +16 -0
  100. pulumi_gcp/firebase/apple_app.py +16 -0
  101. pulumi_gcp/firebase/database_instance.py +16 -0
  102. pulumi_gcp/firebase/extensions_instance.py +12 -0
  103. pulumi_gcp/firebase/get_android_app.py +4 -2
  104. pulumi_gcp/firebase/get_apple_app.py +4 -2
  105. pulumi_gcp/firebase/get_apple_app_config.py +16 -2
  106. pulumi_gcp/firebase/get_hosting_channel.py +4 -2
  107. pulumi_gcp/firebase/hosting_channel.py +20 -0
  108. pulumi_gcp/firebase/hosting_custom_domain.py +20 -0
  109. pulumi_gcp/firebase/hosting_release.py +16 -0
  110. pulumi_gcp/firebase/hosting_site.py +16 -0
  111. pulumi_gcp/firebase/hosting_version.py +16 -0
  112. pulumi_gcp/firebase/storage_bucket.py +18 -0
  113. pulumi_gcp/firestore/index.py +118 -3
  114. pulumi_gcp/folder/service_identity.py +26 -0
  115. pulumi_gcp/gkehub/membership_rbac_role_binding.py +16 -0
  116. pulumi_gcp/healthcare/_inputs.py +205 -0
  117. pulumi_gcp/healthcare/fhir_store.py +128 -0
  118. pulumi_gcp/healthcare/outputs.py +163 -0
  119. pulumi_gcp/iam/workload_identity_pool_iam_binding.py +464 -0
  120. pulumi_gcp/iam/workload_identity_pool_iam_member.py +464 -0
  121. pulumi_gcp/iam/workload_identity_pool_iam_policy.py +464 -0
  122. pulumi_gcp/iam/workload_identity_pool_managed_identity.py +18 -0
  123. pulumi_gcp/iam/workload_identity_pool_namespace.py +20 -0
  124. pulumi_gcp/iap/__init__.py +4 -0
  125. pulumi_gcp/iap/_inputs.py +130 -0
  126. pulumi_gcp/iap/get_web_forwarding_rule_service_iam_policy.py +159 -0
  127. pulumi_gcp/iap/outputs.py +76 -0
  128. pulumi_gcp/iap/web_forwarding_rule_service_iam_binding.py +1002 -0
  129. pulumi_gcp/iap/web_forwarding_rule_service_iam_member.py +1002 -0
  130. pulumi_gcp/iap/web_forwarding_rule_service_iam_policy.py +821 -0
  131. pulumi_gcp/kms/get_kms_secret_asymmetric.py +22 -0
  132. pulumi_gcp/logging/billing_account_bucket_config.py +7 -21
  133. pulumi_gcp/logging/folder_bucket_config.py +7 -21
  134. pulumi_gcp/logging/organization_bucket_config.py +7 -21
  135. pulumi_gcp/logging/organization_sink.py +7 -7
  136. pulumi_gcp/managedkafka/connect_cluster.py +4 -0
  137. pulumi_gcp/managedkafka/connector.py +4 -0
  138. pulumi_gcp/memorystore/instance.py +14 -14
  139. pulumi_gcp/monitoring/dashboard.py +7 -14
  140. pulumi_gcp/netapp/storage_pool.py +91 -2
  141. pulumi_gcp/netapp/volume.py +47 -0
  142. pulumi_gcp/networkmanagement/__init__.py +1 -0
  143. pulumi_gcp/networkmanagement/organization_vpc_flow_logs_config.py +1028 -0
  144. pulumi_gcp/networksecurity/authorization_policy.py +12 -0
  145. pulumi_gcp/networkservices/gateway.py +7 -7
  146. pulumi_gcp/networkservices/service_lb_policies.py +12 -0
  147. pulumi_gcp/oracledatabase/autonomous_database.py +2 -2
  148. pulumi_gcp/organizations/project.py +7 -21
  149. pulumi_gcp/projects/service.py +0 -7
  150. pulumi_gcp/pulumi-plugin.json +1 -1
  151. pulumi_gcp/resourcemanager/capability.py +16 -0
  152. pulumi_gcp/runtimeconfig/config.py +16 -0
  153. pulumi_gcp/runtimeconfig/config_iam_binding.py +236 -0
  154. pulumi_gcp/runtimeconfig/config_iam_member.py +236 -0
  155. pulumi_gcp/runtimeconfig/config_iam_policy.py +236 -0
  156. pulumi_gcp/runtimeconfig/variable.py +10 -0
  157. pulumi_gcp/saasruntime/__init__.py +1 -0
  158. pulumi_gcp/saasruntime/_inputs.py +458 -0
  159. pulumi_gcp/saasruntime/outputs.py +392 -0
  160. pulumi_gcp/saasruntime/saa_s.py +4 -0
  161. pulumi_gcp/saasruntime/unit_kind.py +1024 -0
  162. pulumi_gcp/securesourcemanager/instance.py +7 -7
  163. pulumi_gcp/securesourcemanager/repository.py +7 -7
  164. pulumi_gcp/servicedirectory/namespace_iam_binding.py +4 -4
  165. pulumi_gcp/servicedirectory/namespace_iam_member.py +4 -4
  166. pulumi_gcp/servicedirectory/namespace_iam_policy.py +4 -4
  167. pulumi_gcp/servicedirectory/service_iam_binding.py +4 -4
  168. pulumi_gcp/servicedirectory/service_iam_member.py +4 -4
  169. pulumi_gcp/servicedirectory/service_iam_policy.py +4 -4
  170. pulumi_gcp/sql/_inputs.py +135 -1
  171. pulumi_gcp/sql/database_instance.py +54 -14
  172. pulumi_gcp/sql/get_database_instance.py +12 -1
  173. pulumi_gcp/sql/outputs.py +265 -3
  174. pulumi_gcp/storage/_inputs.py +246 -0
  175. pulumi_gcp/storage/bucket.py +7 -14
  176. pulumi_gcp/storage/bucket_object.py +0 -9
  177. pulumi_gcp/storage/outputs.py +191 -1
  178. pulumi_gcp/storage/transfer_job.py +47 -0
  179. pulumi_gcp/tpu/v2_queued_resource.py +16 -0
  180. pulumi_gcp/tpu/v2_vm.py +16 -0
  181. pulumi_gcp/vertex/ai_feature_group_iam_binding.py +236 -0
  182. pulumi_gcp/vertex/ai_feature_group_iam_member.py +236 -0
  183. pulumi_gcp/vertex/ai_feature_group_iam_policy.py +236 -0
  184. pulumi_gcp/vertex/ai_feature_online_store_featureview_iam_binding.py +248 -0
  185. pulumi_gcp/vertex/ai_feature_online_store_featureview_iam_member.py +248 -0
  186. pulumi_gcp/vertex/ai_feature_online_store_featureview_iam_policy.py +248 -0
  187. pulumi_gcp/vertex/ai_feature_online_store_iam_binding.py +236 -0
  188. pulumi_gcp/vertex/ai_feature_online_store_iam_member.py +236 -0
  189. pulumi_gcp/vertex/ai_feature_online_store_iam_policy.py +236 -0
  190. pulumi_gcp/vertex/ai_feature_store_entity_type_iam_binding.py +238 -0
  191. pulumi_gcp/vertex/ai_feature_store_entity_type_iam_member.py +238 -0
  192. pulumi_gcp/vertex/ai_feature_store_entity_type_iam_policy.py +238 -0
  193. pulumi_gcp/vertex/ai_feature_store_iam_binding.py +248 -0
  194. pulumi_gcp/vertex/ai_feature_store_iam_member.py +248 -0
  195. pulumi_gcp/vertex/ai_feature_store_iam_policy.py +248 -0
  196. pulumi_gcp/vertex/ai_metadata_store.py +16 -0
  197. pulumi_gcp/workstations/workstation.py +16 -0
  198. pulumi_gcp/workstations/workstation_cluster.py +16 -0
  199. pulumi_gcp/workstations/workstation_config.py +16 -0
  200. pulumi_gcp/workstations/workstation_config_iam_binding.py +260 -0
  201. pulumi_gcp/workstations/workstation_config_iam_member.py +260 -0
  202. pulumi_gcp/workstations/workstation_config_iam_policy.py +260 -0
  203. pulumi_gcp/workstations/workstation_iam_binding.py +272 -0
  204. pulumi_gcp/workstations/workstation_iam_member.py +272 -0
  205. pulumi_gcp/workstations/workstation_iam_policy.py +272 -0
  206. {pulumi_gcp-9.0.0a1.dist-info → pulumi_gcp-9.0.0a1758219982.dist-info}/METADATA +1 -1
  207. {pulumi_gcp-9.0.0a1.dist-info → pulumi_gcp-9.0.0a1758219982.dist-info}/RECORD +209 -200
  208. {pulumi_gcp-9.0.0a1.dist-info → pulumi_gcp-9.0.0a1758219982.dist-info}/WHEEL +0 -0
  209. {pulumi_gcp-9.0.0a1.dist-info → pulumi_gcp-9.0.0a1758219982.dist-info}/top_level.txt +0 -0
@@ -331,6 +331,10 @@ __all__ = [
  'ClusterNodeConfigKubeletConfigEvictionSoftArgsDict',
  'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs',
  'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict',
+ 'ClusterNodeConfigKubeletConfigMemoryManagerArgs',
+ 'ClusterNodeConfigKubeletConfigMemoryManagerArgsDict',
+ 'ClusterNodeConfigKubeletConfigTopologyManagerArgs',
+ 'ClusterNodeConfigKubeletConfigTopologyManagerArgsDict',
  'ClusterNodeConfigLinuxNodeConfigArgs',
  'ClusterNodeConfigLinuxNodeConfigArgsDict',
  'ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs',
@@ -437,6 +441,10 @@ __all__ = [
  'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgsDict',
  'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs',
  'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict',
+ 'ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs',
+ 'ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgsDict',
+ 'ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs',
+ 'ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgsDict',
  'ClusterNodePoolNodeConfigLinuxNodeConfigArgs',
  'ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict',
  'ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs',
@@ -573,6 +581,10 @@ __all__ = [
  'NodePoolNodeConfigKubeletConfigEvictionSoftArgsDict',
  'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs',
  'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict',
+ 'NodePoolNodeConfigKubeletConfigMemoryManagerArgs',
+ 'NodePoolNodeConfigKubeletConfigMemoryManagerArgsDict',
+ 'NodePoolNodeConfigKubeletConfigTopologyManagerArgs',
+ 'NodePoolNodeConfigKubeletConfigTopologyManagerArgsDict',
  'NodePoolNodeConfigLinuxNodeConfigArgs',
  'NodePoolNodeConfigLinuxNodeConfigArgsDict',
  'NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs',
@@ -7148,7 +7160,7 @@ if not MYPY:
  """
  desired_tier: NotRequired[pulumi.Input[_builtins.str]]
  """
- Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`.
+ (DEPRECATED) Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`. Deprecated as GKE Enterprise features are now available without an Enterprise tier. See https://cloud.google.com/blog/products/containers-kubernetes/gke-gets-new-pricing-and-capabilities-on-10th-birthday for the announcement of this change.
  """
  elif False:
  ClusterEnterpriseConfigArgsDict: TypeAlias = Mapping[str, Any]
@@ -7160,15 +7172,22 @@ class ClusterEnterpriseConfigArgs:
  desired_tier: Optional[pulumi.Input[_builtins.str]] = None):
  """
  :param pulumi.Input[_builtins.str] cluster_tier: The effective tier of the cluster.
- :param pulumi.Input[_builtins.str] desired_tier: Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`.
+ :param pulumi.Input[_builtins.str] desired_tier: (DEPRECATED) Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`. Deprecated as GKE Enterprise features are now available without an Enterprise tier. See https://cloud.google.com/blog/products/containers-kubernetes/gke-gets-new-pricing-and-capabilities-on-10th-birthday for the announcement of this change.
  """
+ if cluster_tier is not None:
+ warnings.warn("""GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""", DeprecationWarning)
+ pulumi.log.warn("""cluster_tier is deprecated: GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""")
  if cluster_tier is not None:
  pulumi.set(__self__, "cluster_tier", cluster_tier)
+ if desired_tier is not None:
+ warnings.warn("""GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""", DeprecationWarning)
+ pulumi.log.warn("""desired_tier is deprecated: GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""")
  if desired_tier is not None:
  pulumi.set(__self__, "desired_tier", desired_tier)

  @_builtins.property
  @pulumi.getter(name="clusterTier")
+ @_utilities.deprecated("""GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""")
  def cluster_tier(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  The effective tier of the cluster.
@@ -7181,9 +7200,10 @@ class ClusterEnterpriseConfigArgs:

  @_builtins.property
  @pulumi.getter(name="desiredTier")
+ @_utilities.deprecated("""GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""")
  def desired_tier(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
- Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`.
+ (DEPRECATED) Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`. Deprecated as GKE Enterprise features are now available without an Enterprise tier. See https://cloud.google.com/blog/products/containers-kubernetes/gke-gets-new-pricing-and-capabilities-on-10th-birthday for the announcement of this change.
  """
  return pulumi.get(self, "desired_tier")

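Programs that still set `cluster_tier` or `desired_tier` keep working after this change, but now emit a `DeprecationWarning` and a `pulumi.log` warning when the args are constructed. A minimal migration sketch (the resource name and location below are illustrative, not taken from this diff):

    import pulumi_gcp as gcp

    # Before: pinning the tier explicitly. With this release, constructing the
    # args with either deprecated field set warns at runtime.
    legacy = gcp.container.ClusterEnterpriseConfigArgs(desired_tier="ENTERPRISE")

    # After: omit enterprise_config entirely, since GKE Enterprise features no
    # longer require an Enterprise tier.
    cluster = gcp.container.Cluster(
        "example-cluster",       # illustrative name
        location="us-central1",  # illustrative location
        initial_node_count=1)
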
@@ -8851,7 +8871,7 @@ if not MYPY:
  disk_type: NotRequired[pulumi.Input[_builtins.str]]
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  """
  effective_taints: NotRequired[pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigEffectiveTaintArgsDict']]]]
  """
@@ -9130,7 +9150,7 @@ class ClusterNodeConfigArgs:
  in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
  Prefer configuring `boot_disk`.
  :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
  :param pulumi.Input[_builtins.bool] enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
  :param pulumi.Input['ClusterNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -9402,7 +9422,7 @@ class ClusterNodeConfigArgs:
  def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  """
  return pulumi.get(self, "disk_type")

@@ -10013,7 +10033,7 @@ if not MYPY:
  disk_type: NotRequired[pulumi.Input[_builtins.str]]
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
  """
  provisioned_iops: NotRequired[pulumi.Input[_builtins.int]]
  """
@@ -10040,7 +10060,7 @@ class ClusterNodeConfigBootDiskArgs:
  size_gb: Optional[pulumi.Input[_builtins.int]] = None):
  """
  :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
  :param pulumi.Input[_builtins.int] provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
  :param pulumi.Input[_builtins.int] provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
  :param pulumi.Input[_builtins.int] size_gb: Size of the disk attached to each node, specified
@@ -10060,7 +10080,7 @@ class ClusterNodeConfigBootDiskArgs:
  def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
  """
  return pulumi.get(self, "disk_type")

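Because `disk_type` is being migrated from `node_config.disk_type` to `boot_disk.disk_type`, new configurations should prefer the `boot_disk` block, which also carries the hyperdisk-only tuning knobs. A minimal sketch, assuming a machine type on which `hyperdisk-balanced` is supported; the machine type, IOPS, and throughput figures are illustrative, see the performance-limits page linked above for valid ranges:

    import pulumi_gcp as gcp

    node_config = gcp.container.ClusterNodeConfigArgs(
        machine_type="n4-standard-4",  # assumed hyperdisk-capable machine type
        boot_disk=gcp.container.ClusterNodeConfigBootDiskArgs(
            disk_type="hyperdisk-balanced",
            size_gb=100,
            provisioned_iops=3600,       # only valid when disk_type is 'hyperdisk-balanced'
            provisioned_throughput=290,  # only valid when disk_type is 'hyperdisk-balanced'
        ))
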
@@ -10898,6 +10918,12 @@ if not MYPY:
  """
  Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
  """
+ memory_manager: NotRequired[pulumi.Input['ClusterNodeConfigKubeletConfigMemoryManagerArgsDict']]
+ """
+ Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+ The memory manager optimizes memory and hugepages allocation for pods, especially
+ those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
+ """
  pod_pids_limit: NotRequired[pulumi.Input[_builtins.int]]
  """
  Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
@@ -10906,6 +10932,10 @@ if not MYPY:
  """
  Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
  """
+ topology_manager: NotRequired[pulumi.Input['ClusterNodeConfigKubeletConfigTopologyManagerArgsDict']]
+ """
+ These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
+ """
  elif False:
  ClusterNodeConfigKubeletConfigArgsDict: TypeAlias = Mapping[str, Any]

@@ -10928,8 +10958,10 @@ class ClusterNodeConfigKubeletConfigArgs:
  image_minimum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
  insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[_builtins.str]] = None,
  max_parallel_image_pulls: Optional[pulumi.Input[_builtins.int]] = None,
+ memory_manager: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigMemoryManagerArgs']] = None,
  pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None,
- single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None):
+ single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None,
+ topology_manager: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigTopologyManagerArgs']] = None):
  """
  :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
  :param pulumi.Input[_builtins.int] container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -10959,8 +10991,12 @@ class ClusterNodeConfigKubeletConfigArgs:
  :param pulumi.Input[_builtins.str] image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
  :param pulumi.Input[_builtins.str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
  :param pulumi.Input[_builtins.int] max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
+ :param pulumi.Input['ClusterNodeConfigKubeletConfigMemoryManagerArgs'] memory_manager: Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+ The memory manager optimizes memory and hugepages allocation for pods, especially
+ those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
  :param pulumi.Input[_builtins.int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
  :param pulumi.Input[_builtins.bool] single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
+ :param pulumi.Input['ClusterNodeConfigKubeletConfigTopologyManagerArgs'] topology_manager: These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
  """
  if allowed_unsafe_sysctls is not None:
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -10994,10 +11030,14 @@ class ClusterNodeConfigKubeletConfigArgs:
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
  if max_parallel_image_pulls is not None:
  pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+ if memory_manager is not None:
+ pulumi.set(__self__, "memory_manager", memory_manager)
  if pod_pids_limit is not None:
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
  if single_process_oom_kill is not None:
  pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+ if topology_manager is not None:
+ pulumi.set(__self__, "topology_manager", topology_manager)

  @_builtins.property
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -11203,6 +11243,20 @@ class ClusterNodeConfigKubeletConfigArgs:
  def max_parallel_image_pulls(self, value: Optional[pulumi.Input[_builtins.int]]):
  pulumi.set(self, "max_parallel_image_pulls", value)

+ @_builtins.property
+ @pulumi.getter(name="memoryManager")
+ def memory_manager(self) -> Optional[pulumi.Input['ClusterNodeConfigKubeletConfigMemoryManagerArgs']]:
+ """
+ Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+ The memory manager optimizes memory and hugepages allocation for pods, especially
+ those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
+ """
+ return pulumi.get(self, "memory_manager")
+
+ @memory_manager.setter
+ def memory_manager(self, value: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigMemoryManagerArgs']]):
+ pulumi.set(self, "memory_manager", value)
+
  @_builtins.property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -11227,6 +11281,18 @@ class ClusterNodeConfigKubeletConfigArgs:
  def single_process_oom_kill(self, value: Optional[pulumi.Input[_builtins.bool]]):
  pulumi.set(self, "single_process_oom_kill", value)

+ @_builtins.property
+ @pulumi.getter(name="topologyManager")
+ def topology_manager(self) -> Optional[pulumi.Input['ClusterNodeConfigKubeletConfigTopologyManagerArgs']]:
+ """
+ These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
+ """
+ return pulumi.get(self, "topology_manager")
+
+ @topology_manager.setter
+ def topology_manager(self, value: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigTopologyManagerArgs']]):
+ pulumi.set(self, "topology_manager", value)
+

  if not MYPY:
  class ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict(TypedDict):
@@ -11624,6 +11690,102 @@ class ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs:
  pulumi.set(self, "pid_available", value)


+ if not MYPY:
+ class ClusterNodeConfigKubeletConfigMemoryManagerArgsDict(TypedDict):
+ policy: NotRequired[pulumi.Input[_builtins.str]]
+ """
+ The [Memory
+ Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+ policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+ """
+ elif False:
+ ClusterNodeConfigKubeletConfigMemoryManagerArgsDict: TypeAlias = Mapping[str, Any]
+
+ @pulumi.input_type
+ class ClusterNodeConfigKubeletConfigMemoryManagerArgs:
+ def __init__(__self__, *,
+ policy: Optional[pulumi.Input[_builtins.str]] = None):
+ """
+ :param pulumi.Input[_builtins.str] policy: The [Memory
+ Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+ policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+ """
+ if policy is not None:
+ pulumi.set(__self__, "policy", policy)
+
+ @_builtins.property
+ @pulumi.getter
+ def policy(self) -> Optional[pulumi.Input[_builtins.str]]:
+ """
+ The [Memory
+ Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+ policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+ """
+ return pulumi.get(self, "policy")
+
+ @policy.setter
+ def policy(self, value: Optional[pulumi.Input[_builtins.str]]):
+ pulumi.set(self, "policy", value)
+
+
+ if not MYPY:
+ class ClusterNodeConfigKubeletConfigTopologyManagerArgsDict(TypedDict):
+ policy: NotRequired[pulumi.Input[_builtins.str]]
+ """
+ The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+ """
+ scope: NotRequired[pulumi.Input[_builtins.str]]
+ """
+ The Topology Manager scope, defining the granularity at which
+ policy decisions are applied. Valid values are "container" (resources are aligned
+ per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+ """
+ elif False:
+ ClusterNodeConfigKubeletConfigTopologyManagerArgsDict: TypeAlias = Mapping[str, Any]
+
+ @pulumi.input_type
+ class ClusterNodeConfigKubeletConfigTopologyManagerArgs:
+ def __init__(__self__, *,
+ policy: Optional[pulumi.Input[_builtins.str]] = None,
+ scope: Optional[pulumi.Input[_builtins.str]] = None):
+ """
+ :param pulumi.Input[_builtins.str] policy: The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+ :param pulumi.Input[_builtins.str] scope: The Topology Manager scope, defining the granularity at which
+ policy decisions are applied. Valid values are "container" (resources are aligned
+ per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+ """
+ if policy is not None:
+ pulumi.set(__self__, "policy", policy)
+ if scope is not None:
+ pulumi.set(__self__, "scope", scope)
+
+ @_builtins.property
+ @pulumi.getter
+ def policy(self) -> Optional[pulumi.Input[_builtins.str]]:
+ """
+ The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+ """
+ return pulumi.get(self, "policy")
+
+ @policy.setter
+ def policy(self, value: Optional[pulumi.Input[_builtins.str]]):
+ pulumi.set(self, "policy", value)
+
+ @_builtins.property
+ @pulumi.getter
+ def scope(self) -> Optional[pulumi.Input[_builtins.str]]:
+ """
+ The Topology Manager scope, defining the granularity at which
+ policy decisions are applied. Valid values are "container" (resources are aligned
+ per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+ """
+ return pulumi.get(self, "scope")
+
+ @scope.setter
+ def scope(self, value: Optional[pulumi.Input[_builtins.str]]):
+ pulumi.set(self, "scope", value)
+
+
  if not MYPY:
  class ClusterNodeConfigLinuxNodeConfigArgsDict(TypedDict):
  cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
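Together, the two new input types above let cluster-level node configuration opt into NUMA-aware placement. A minimal sketch using the plain-dict form these `ArgsDict` TypedDicts enable; the resource name and machine type are illustrative assumptions:

    import pulumi_gcp as gcp

    cluster = gcp.container.Cluster(
        "numa-aware-cluster",
        location="us-central1",
        initial_node_count=1,
        node_config={
            "machine_type": "c3-standard-22",  # illustrative multi-NUMA machine type
            "kubelet_config": {
                # Memory Manager policy: "None" (default) or "Static".
                "memory_manager": {"policy": "Static"},
                # Topology Manager policy: none, best-effort, restricted, or
                # single-numa-node; scope: "container" (default) or "pod".
                "topology_manager": {"policy": "single-numa-node", "scope": "pod"},
            },
        })
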
@@ -13868,7 +14030,7 @@ if not MYPY:
  disk_type: NotRequired[pulumi.Input[_builtins.str]]
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  """
  effective_taints: NotRequired[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigEffectiveTaintArgsDict']]]]
  """
@@ -14147,7 +14309,7 @@ class ClusterNodePoolNodeConfigArgs:
  in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
  Prefer configuring `boot_disk`.
  :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
  :param pulumi.Input[_builtins.bool] enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
  :param pulumi.Input['ClusterNodePoolNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -14419,7 +14581,7 @@ class ClusterNodePoolNodeConfigArgs:
  def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  """
  return pulumi.get(self, "disk_type")

@@ -15030,7 +15192,7 @@ if not MYPY:
  disk_type: NotRequired[pulumi.Input[_builtins.str]]
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
  """
  provisioned_iops: NotRequired[pulumi.Input[_builtins.int]]
  """
@@ -15057,7 +15219,7 @@ class ClusterNodePoolNodeConfigBootDiskArgs:
  size_gb: Optional[pulumi.Input[_builtins.int]] = None):
  """
  :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
  :param pulumi.Input[_builtins.int] provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
  :param pulumi.Input[_builtins.int] provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
  :param pulumi.Input[_builtins.int] size_gb: Size of the disk attached to each node, specified
@@ -15077,7 +15239,7 @@ class ClusterNodePoolNodeConfigBootDiskArgs:
  def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
  """
  return pulumi.get(self, "disk_type")

@@ -15915,6 +16077,12 @@ if not MYPY:
  """
  Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
  """
+ memory_manager: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgsDict']]
+ """
+ Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+ The memory manager optimizes memory and hugepages allocation for pods, especially
+ those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
+ """
  pod_pids_limit: NotRequired[pulumi.Input[_builtins.int]]
  """
  Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
@@ -15923,6 +16091,10 @@ if not MYPY:
  """
  Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
  """
+ topology_manager: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgsDict']]
+ """
+ These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
+ """
  elif False:
  ClusterNodePoolNodeConfigKubeletConfigArgsDict: TypeAlias = Mapping[str, Any]

@@ -15945,8 +16117,10 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
  image_minimum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
  insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[_builtins.str]] = None,
  max_parallel_image_pulls: Optional[pulumi.Input[_builtins.int]] = None,
+ memory_manager: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs']] = None,
  pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None,
- single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None):
+ single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None,
+ topology_manager: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs']] = None):
  """
  :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
  :param pulumi.Input[_builtins.int] container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -15976,8 +16150,12 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
  :param pulumi.Input[_builtins.str] image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
  :param pulumi.Input[_builtins.str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
  :param pulumi.Input[_builtins.int] max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
+ :param pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs'] memory_manager: Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+ The memory manager optimizes memory and hugepages allocation for pods, especially
+ those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
  :param pulumi.Input[_builtins.int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
  :param pulumi.Input[_builtins.bool] single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
+ :param pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs'] topology_manager: These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
  """
  if allowed_unsafe_sysctls is not None:
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -16011,10 +16189,14 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
  if max_parallel_image_pulls is not None:
  pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+ if memory_manager is not None:
+ pulumi.set(__self__, "memory_manager", memory_manager)
  if pod_pids_limit is not None:
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
  if single_process_oom_kill is not None:
  pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+ if topology_manager is not None:
+ pulumi.set(__self__, "topology_manager", topology_manager)

  @_builtins.property
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -16220,6 +16402,20 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
  def max_parallel_image_pulls(self, value: Optional[pulumi.Input[_builtins.int]]):
  pulumi.set(self, "max_parallel_image_pulls", value)

+ @_builtins.property
+ @pulumi.getter(name="memoryManager")
+ def memory_manager(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs']]:
+ """
+ Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+ The memory manager optimizes memory and hugepages allocation for pods, especially
+ those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
+ """
+ return pulumi.get(self, "memory_manager")
+
+ @memory_manager.setter
+ def memory_manager(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs']]):
+ pulumi.set(self, "memory_manager", value)
+
  @_builtins.property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -16244,6 +16440,18 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
  def single_process_oom_kill(self, value: Optional[pulumi.Input[_builtins.bool]]):
  pulumi.set(self, "single_process_oom_kill", value)

+ @_builtins.property
+ @pulumi.getter(name="topologyManager")
+ def topology_manager(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs']]:
+ """
+ These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
+ """
+ return pulumi.get(self, "topology_manager")
+
+ @topology_manager.setter
+ def topology_manager(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs']]):
+ pulumi.set(self, "topology_manager", value)
+

  if not MYPY:
  class ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict(TypedDict):
@@ -16641,6 +16849,102 @@ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs:
  pulumi.set(self, "pid_available", value)


+ if not MYPY:
+ class ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgsDict(TypedDict):
+ policy: NotRequired[pulumi.Input[_builtins.str]]
+ """
+ The [Memory
+ Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+ policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+ """
+ elif False:
+ ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgsDict: TypeAlias = Mapping[str, Any]
+
+ @pulumi.input_type
+ class ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs:
+ def __init__(__self__, *,
+ policy: Optional[pulumi.Input[_builtins.str]] = None):
+ """
+ :param pulumi.Input[_builtins.str] policy: The [Memory
+ Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+ policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+ """
+ if policy is not None:
+ pulumi.set(__self__, "policy", policy)
+
+ @_builtins.property
+ @pulumi.getter
+ def policy(self) -> Optional[pulumi.Input[_builtins.str]]:
+ """
+ The [Memory
+ Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+ policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+ """
+ return pulumi.get(self, "policy")
+
+ @policy.setter
+ def policy(self, value: Optional[pulumi.Input[_builtins.str]]):
+ pulumi.set(self, "policy", value)
+
+
+ if not MYPY:
+ class ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgsDict(TypedDict):
+ policy: NotRequired[pulumi.Input[_builtins.str]]
+ """
+ The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+ """
+ scope: NotRequired[pulumi.Input[_builtins.str]]
+ """
+ The Topology Manager scope, defining the granularity at which
+ policy decisions are applied. Valid values are "container" (resources are aligned
+ per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+ """
+ elif False:
+ ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgsDict: TypeAlias = Mapping[str, Any]
+
+ @pulumi.input_type
+ class ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs:
+ def __init__(__self__, *,
+ policy: Optional[pulumi.Input[_builtins.str]] = None,
+ scope: Optional[pulumi.Input[_builtins.str]] = None):
+ """
+ :param pulumi.Input[_builtins.str] policy: The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+ :param pulumi.Input[_builtins.str] scope: The Topology Manager scope, defining the granularity at which
+ policy decisions are applied. Valid values are "container" (resources are aligned
+ per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+ """
+ if policy is not None:
+ pulumi.set(__self__, "policy", policy)
+ if scope is not None:
+ pulumi.set(__self__, "scope", scope)
+
+ @_builtins.property
+ @pulumi.getter
+ def policy(self) -> Optional[pulumi.Input[_builtins.str]]:
+ """
+ The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+ """
+ return pulumi.get(self, "policy")
+
+ @policy.setter
+ def policy(self, value: Optional[pulumi.Input[_builtins.str]]):
+ pulumi.set(self, "policy", value)
+
+ @_builtins.property
+ @pulumi.getter
+ def scope(self) -> Optional[pulumi.Input[_builtins.str]]:
+ """
+ The Topology Manager scope, defining the granularity at which
+ policy decisions are applied. Valid values are "container" (resources are aligned
+ per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+ """
+ return pulumi.get(self, "scope")
16942
+
16943
+ @scope.setter
16944
+ def scope(self, value: Optional[pulumi.Input[_builtins.str]]):
16945
+ pulumi.set(self, "scope", value)
16946
+
16947
+
16644
16948
  if not MYPY:
16645
16949
  class ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict(TypedDict):
16646
16950
  cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
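
Taken together, the two classes above give ClusterNodePoolNodeConfigKubeletConfigArgs its new memory_manager and topology_manager inputs. A minimal sketch of how they compose (assuming the input types are re-exported from pulumi_gcp.container, as this SDK conventionally does; the policy and scope values come from the docstrings above, everything else is illustrative):

    import pulumi_gcp as gcp

    # Kubelet config for a cluster node pool: pin memory with the Static
    # Memory Manager policy and align CPU/memory/devices to one NUMA node.
    kubelet = gcp.container.ClusterNodePoolNodeConfigKubeletConfigArgs(
        memory_manager=gcp.container.ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs(
            policy="Static",  # "None" (default) or "Static"
        ),
        topology_manager=gcp.container.ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs(
            policy="single-numa-node",  # none | best-effort | restricted | single-numa-node
            scope="pod",                # "container" (default) or "pod"
        ),
    )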
@@ -21410,6 +21714,10 @@ if not MYPY:
         """
         Set the maximum number of image pulls in parallel.
         """
+        memory_manager: NotRequired[pulumi.Input['NodePoolNodeConfigKubeletConfigMemoryManagerArgsDict']]
+        """
+        Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
+        """
         pod_pids_limit: NotRequired[pulumi.Input[_builtins.int]]
         """
         Controls the maximum number of processes allowed to run in a pod.
@@ -21418,6 +21726,10 @@ if not MYPY:
         """
         Defines whether to enable single process OOM killer.
         """
+        topology_manager: NotRequired[pulumi.Input['NodePoolNodeConfigKubeletConfigTopologyManagerArgsDict']]
+        """
+        Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
+        """
 elif False:
     NodePoolNodeConfigKubeletConfigArgsDict: TypeAlias = Mapping[str, Any]
 
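Because the new fields also gain ArgsDict TypedDict counterparts, the same configuration can be supplied as plain dictionaries, which the Python SDK accepts wherever an input type is expected. A short sketch under that assumption (keys follow the TypedDicts in this diff; values are illustrative):

    # Dict form of the new kubelet config fields; at type-checking time
    # this matches NodePoolNodeConfigKubeletConfigArgsDict.
    kubelet_config = {
        "memory_manager": {"policy": "Static"},
        "topology_manager": {"policy": "restricted", "scope": "container"},
    }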
@@ -21440,8 +21752,10 @@ class NodePoolNodeConfigKubeletConfigArgs:
                 image_minimum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
                 insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[_builtins.str]] = None,
                 max_parallel_image_pulls: Optional[pulumi.Input[_builtins.int]] = None,
+                 memory_manager: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigMemoryManagerArgs']] = None,
                 pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None,
-                 single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None):
+                 single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None,
+                 topology_manager: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigTopologyManagerArgs']] = None):
         """
         :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
         :param pulumi.Input[_builtins.int] container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -21459,8 +21773,10 @@ class NodePoolNodeConfigKubeletConfigArgs:
         :param pulumi.Input[_builtins.str] image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
         :param pulumi.Input[_builtins.str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
         :param pulumi.Input[_builtins.int] max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
+        :param pulumi.Input['NodePoolNodeConfigKubeletConfigMemoryManagerArgs'] memory_manager: Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
         :param pulumi.Input[_builtins.int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
         :param pulumi.Input[_builtins.bool] single_process_oom_kill: Defines whether to enable single process OOM killer.
+        :param pulumi.Input['NodePoolNodeConfigKubeletConfigTopologyManagerArgs'] topology_manager: Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
         """
         if allowed_unsafe_sysctls is not None:
             pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -21494,10 +21810,14 @@ class NodePoolNodeConfigKubeletConfigArgs:
             pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
         if max_parallel_image_pulls is not None:
             pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+        if memory_manager is not None:
+            pulumi.set(__self__, "memory_manager", memory_manager)
         if pod_pids_limit is not None:
             pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
         if single_process_oom_kill is not None:
             pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+        if topology_manager is not None:
+            pulumi.set(__self__, "topology_manager", topology_manager)
 
     @_builtins.property
     @pulumi.getter(name="allowedUnsafeSysctls")
@@ -21691,6 +22011,18 @@ class NodePoolNodeConfigKubeletConfigArgs:
     def max_parallel_image_pulls(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "max_parallel_image_pulls", value)
 
+    @_builtins.property
+    @pulumi.getter(name="memoryManager")
+    def memory_manager(self) -> Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigMemoryManagerArgs']]:
+        """
+        Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
+        """
+        return pulumi.get(self, "memory_manager")
+
+    @memory_manager.setter
+    def memory_manager(self, value: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigMemoryManagerArgs']]):
+        pulumi.set(self, "memory_manager", value)
+
     @_builtins.property
     @pulumi.getter(name="podPidsLimit")
     def pod_pids_limit(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -21715,6 +22047,18 @@ class NodePoolNodeConfigKubeletConfigArgs:
     def single_process_oom_kill(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "single_process_oom_kill", value)
 
+    @_builtins.property
+    @pulumi.getter(name="topologyManager")
+    def topology_manager(self) -> Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigTopologyManagerArgs']]:
+        """
+        Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
+        """
+        return pulumi.get(self, "topology_manager")
+
+    @topology_manager.setter
+    def topology_manager(self, value: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigTopologyManagerArgs']]):
+        pulumi.set(self, "topology_manager", value)
+
 
 if not MYPY:
     class NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict(TypedDict):
@@ -22112,6 +22456,90 @@ class NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs:
         pulumi.set(self, "pid_available", value)
 
 
+if not MYPY:
+    class NodePoolNodeConfigKubeletConfigMemoryManagerArgsDict(TypedDict):
+        policy: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+elif False:
+    NodePoolNodeConfigKubeletConfigMemoryManagerArgsDict: TypeAlias = Mapping[str, Any]
+
+@pulumi.input_type
+class NodePoolNodeConfigKubeletConfigMemoryManagerArgs:
+    def __init__(__self__, *,
+                 policy: Optional[pulumi.Input[_builtins.str]] = None):
+        """
+        :param pulumi.Input[_builtins.str] policy: The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        if policy is not None:
+            pulumi.set(__self__, "policy", policy)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        return pulumi.get(self, "policy")
+
+    @policy.setter
+    def policy(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "policy", value)
+
+
+if not MYPY:
+    class NodePoolNodeConfigKubeletConfigTopologyManagerArgsDict(TypedDict):
+        policy: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        """
+        scope: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+elif False:
+    NodePoolNodeConfigKubeletConfigTopologyManagerArgsDict: TypeAlias = Mapping[str, Any]
+
+@pulumi.input_type
+class NodePoolNodeConfigKubeletConfigTopologyManagerArgs:
+    def __init__(__self__, *,
+                 policy: Optional[pulumi.Input[_builtins.str]] = None,
+                 scope: Optional[pulumi.Input[_builtins.str]] = None):
+        """
+        :param pulumi.Input[_builtins.str] policy: The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        :param pulumi.Input[_builtins.str] scope: The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        if policy is not None:
+            pulumi.set(__self__, "policy", policy)
+        if scope is not None:
+            pulumi.set(__self__, "scope", scope)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        """
+        return pulumi.get(self, "policy")
+
+    @policy.setter
+    def policy(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "policy", value)
+
+    @_builtins.property
+    @pulumi.getter
+    def scope(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        return pulumi.get(self, "scope")
+
+    @scope.setter
+    def scope(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "scope", value)
+
+
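With the NodePool-side nested types in place as well, a standalone node pool can opt in end to end. A minimal sketch (the resource name, cluster, location, and machine type are hypothetical; pairing Topology Manager alignment with the pre-existing static CPU manager policy follows the usual Kubernetes guidance, not anything mandated by this diff):

    import pulumi_gcp as gcp

    numa_pool = gcp.container.NodePool(
        "numa-pool",                   # hypothetical resource name
        cluster="example-cluster",     # hypothetical existing GKE cluster
        location="us-central1",
        node_count=1,
        node_config={
            "machine_type": "n2-standard-8",
            "kubelet_config": {
                "cpu_manager_policy": "static",
                "memory_manager": {"policy": "Static"},
                "topology_manager": {"policy": "single-numa-node", "scope": "pod"},
            },
        },
    )
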
 if not MYPY:
     class NodePoolNodeConfigLinuxNodeConfigArgsDict(TypedDict):
         cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]