pulumi-gcp 8.42.0a1758178363__py3-none-any.whl → 9.0.0__py3-none-any.whl
This diff shows the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in that registry.
- pulumi_gcp/__init__.py +83 -48
- pulumi_gcp/activedirectory/peering.py +16 -0
- pulumi_gcp/alloydb/backup.py +8 -4
- pulumi_gcp/alloydb/cluster.py +56 -10
- pulumi_gcp/alloydb/get_cluster.py +12 -1
- pulumi_gcp/alloydb/instance.py +10 -4
- pulumi_gcp/alloydb/user.py +8 -4
- pulumi_gcp/apigateway/api_config_iam_binding.py +4 -4
- pulumi_gcp/apigateway/api_config_iam_member.py +4 -4
- pulumi_gcp/apigateway/api_config_iam_policy.py +4 -4
- pulumi_gcp/apigateway/api_iam_binding.py +4 -4
- pulumi_gcp/apigateway/api_iam_member.py +4 -4
- pulumi_gcp/apigateway/api_iam_policy.py +4 -4
- pulumi_gcp/apigateway/gateway_iam_binding.py +4 -4
- pulumi_gcp/apigateway/gateway_iam_member.py +4 -4
- pulumi_gcp/apigateway/gateway_iam_policy.py +4 -4
- pulumi_gcp/apigee/_inputs.py +151 -108
- pulumi_gcp/apigee/keystores_aliases_key_cert_file.py +52 -40
- pulumi_gcp/apigee/outputs.py +92 -88
- pulumi_gcp/artifactregistry/__init__.py +2 -0
- pulumi_gcp/artifactregistry/_inputs.py +0 -12
- pulumi_gcp/artifactregistry/get_npm_package.py +251 -0
- pulumi_gcp/artifactregistry/get_python_package.py +237 -0
- pulumi_gcp/artifactregistry/get_repository.py +12 -1
- pulumi_gcp/artifactregistry/outputs.py +8 -16
- pulumi_gcp/artifactregistry/repository.py +28 -0
- pulumi_gcp/backupdisasterrecovery/__init__.py +1 -0
- pulumi_gcp/backupdisasterrecovery/backup_plan.py +4 -4
- pulumi_gcp/backupdisasterrecovery/get_backup.py +12 -1
- pulumi_gcp/backupdisasterrecovery/get_backup_plan_association.py +4 -0
- pulumi_gcp/backupdisasterrecovery/get_data_source_references.py +135 -0
- pulumi_gcp/backupdisasterrecovery/get_management_server.py +4 -0
- pulumi_gcp/backupdisasterrecovery/outputs.py +103 -0
- pulumi_gcp/beyondcorp/__init__.py +0 -5
- pulumi_gcp/beyondcorp/_inputs.py +0 -312
- pulumi_gcp/beyondcorp/outputs.py +0 -200
- pulumi_gcp/bigquery/_inputs.py +114 -4
- pulumi_gcp/bigquery/app_profile.py +3 -5
- pulumi_gcp/bigquery/outputs.py +75 -5
- pulumi_gcp/bigqueryanalyticshub/data_exchange_subscription.py +20 -0
- pulumi_gcp/bigtable/app_profile.py +1 -1
- pulumi_gcp/bigtable/get_table_iam_policy.py +16 -18
- pulumi_gcp/bigtable/table_iam_binding.py +43 -43
- pulumi_gcp/bigtable/table_iam_member.py +43 -43
- pulumi_gcp/bigtable/table_iam_policy.py +43 -43
- pulumi_gcp/billing/_inputs.py +0 -6
- pulumi_gcp/billing/outputs.py +0 -4
- pulumi_gcp/certificatemanager/__init__.py +1 -0
- pulumi_gcp/certificatemanager/certificate_map_entry.py +7 -7
- pulumi_gcp/certificatemanager/get_dns_authorization.py +229 -0
- pulumi_gcp/certificatemanager/outputs.py +41 -0
- pulumi_gcp/chronicle/_inputs.py +6 -5
- pulumi_gcp/chronicle/outputs.py +4 -3
- pulumi_gcp/chronicle/reference_list.py +53 -5
- pulumi_gcp/cloudasset/get_resources_search_all.py +8 -0
- pulumi_gcp/cloudbuild/_inputs.py +309 -45
- pulumi_gcp/cloudbuild/bitbucket_server_config.py +2 -6
- pulumi_gcp/cloudbuild/get_trigger.py +12 -1
- pulumi_gcp/cloudbuild/outputs.py +380 -30
- pulumi_gcp/cloudbuild/trigger.py +160 -2
- pulumi_gcp/cloudbuild/worker_pool.py +2 -6
- pulumi_gcp/cloudfunctionsv2/_inputs.py +23 -21
- pulumi_gcp/cloudfunctionsv2/outputs.py +17 -16
- pulumi_gcp/cloudquota/s_quota_adjuster_settings.py +16 -0
- pulumi_gcp/cloudrunv2/_inputs.py +80 -26
- pulumi_gcp/cloudrunv2/get_service.py +12 -1
- pulumi_gcp/cloudrunv2/outputs.py +109 -30
- pulumi_gcp/cloudrunv2/service.py +72 -12
- pulumi_gcp/cloudrunv2/worker_pool.py +2 -2
- pulumi_gcp/cloudtasks/queue.py +98 -2
- pulumi_gcp/colab/_inputs.py +1 -114
- pulumi_gcp/colab/outputs.py +1 -106
- pulumi_gcp/compute/_inputs.py +884 -24
- pulumi_gcp/compute/backend_bucket.py +68 -0
- pulumi_gcp/compute/backend_bucket_iam_binding.py +236 -0
- pulumi_gcp/compute/backend_bucket_iam_member.py +236 -0
- pulumi_gcp/compute/backend_bucket_iam_policy.py +236 -0
- pulumi_gcp/compute/backend_service.py +54 -0
- pulumi_gcp/compute/backend_service_iam_binding.py +464 -0
- pulumi_gcp/compute/backend_service_iam_member.py +464 -0
- pulumi_gcp/compute/backend_service_iam_policy.py +464 -0
- pulumi_gcp/compute/cross_site_network.py +16 -0
- pulumi_gcp/compute/future_reservation.py +32 -0
- pulumi_gcp/compute/get_backend_bucket.py +12 -1
- pulumi_gcp/compute/get_backend_service.py +12 -1
- pulumi_gcp/compute/get_region_backend_service.py +12 -1
- pulumi_gcp/compute/get_resource_policy.py +2 -20
- pulumi_gcp/compute/machine_image_iam_binding.py +8 -4
- pulumi_gcp/compute/machine_image_iam_member.py +8 -4
- pulumi_gcp/compute/machine_image_iam_policy.py +8 -4
- pulumi_gcp/compute/network_edge_security_service.py +16 -0
- pulumi_gcp/compute/network_firewall_policy_packet_mirroring_rule.py +12 -0
- pulumi_gcp/compute/network_peering_routes_config.py +0 -100
- pulumi_gcp/compute/organization_security_policy.py +83 -51
- pulumi_gcp/compute/outputs.py +799 -16
- pulumi_gcp/compute/packet_mirroring.py +6 -0
- pulumi_gcp/compute/preview_feature.py +16 -0
- pulumi_gcp/compute/public_delegated_prefix.py +42 -0
- pulumi_gcp/compute/region_backend_service.py +54 -0
- pulumi_gcp/compute/region_backend_service_iam_binding.py +488 -0
- pulumi_gcp/compute/region_backend_service_iam_member.py +488 -0
- pulumi_gcp/compute/region_backend_service_iam_policy.py +488 -0
- pulumi_gcp/compute/region_network_endpoint_group.py +1 -3
- pulumi_gcp/compute/region_resize_request.py +24 -0
- pulumi_gcp/compute/region_url_map.py +75 -0
- pulumi_gcp/compute/subnetwork.py +0 -98
- pulumi_gcp/compute/wire_group.py +16 -0
- pulumi_gcp/config/__init__.pyi +2 -2
- pulumi_gcp/config/vars.py +4 -4
- pulumi_gcp/container/_inputs.py +466 -18
- pulumi_gcp/container/cluster.py +16 -7
- pulumi_gcp/container/outputs.py +448 -15
- pulumi_gcp/databasemigrationservice/connection_profile.py +4 -2
- pulumi_gcp/databasemigrationservice/migration_job.py +4 -2
- pulumi_gcp/dataflow/flex_template_job.py +10 -0
- pulumi_gcp/dataform/repository.py +16 -0
- pulumi_gcp/dataform/repository_release_config.py +16 -0
- pulumi_gcp/dataform/repository_workflow_config.py +16 -0
- pulumi_gcp/diagflow/_inputs.py +152 -0
- pulumi_gcp/diagflow/cx_agent.py +425 -0
- pulumi_gcp/diagflow/outputs.py +143 -0
- pulumi_gcp/discoveryengine/__init__.py +1 -0
- pulumi_gcp/discoveryengine/_inputs.py +94 -0
- pulumi_gcp/discoveryengine/acl_config.py +393 -0
- pulumi_gcp/discoveryengine/outputs.py +92 -0
- pulumi_gcp/firebase/android_app.py +16 -0
- pulumi_gcp/firebase/app_check_play_integrity_config.py +4 -8
- pulumi_gcp/firebase/app_check_recaptcha_enterprise_config.py +2 -4
- pulumi_gcp/firebase/app_check_service_config.py +6 -12
- pulumi_gcp/firebase/app_hosting_backend.py +4 -8
- pulumi_gcp/firebase/app_hosting_build.py +4 -8
- pulumi_gcp/firebase/app_hosting_traffic.py +6 -12
- pulumi_gcp/firebase/apple_app.py +16 -0
- pulumi_gcp/firebase/data_connect_service.py +4 -8
- pulumi_gcp/firebase/database_instance.py +20 -8
- pulumi_gcp/firebase/extensions_instance.py +12 -0
- pulumi_gcp/firebase/get_android_app.py +4 -2
- pulumi_gcp/firebase/get_apple_app.py +4 -2
- pulumi_gcp/firebase/get_apple_app_config.py +16 -2
- pulumi_gcp/firebase/get_hosting_channel.py +4 -2
- pulumi_gcp/firebase/hosting_channel.py +20 -0
- pulumi_gcp/firebase/hosting_custom_domain.py +20 -0
- pulumi_gcp/firebase/hosting_release.py +16 -0
- pulumi_gcp/firebase/hosting_site.py +16 -0
- pulumi_gcp/firebase/hosting_version.py +16 -0
- pulumi_gcp/firebase/storage_bucket.py +18 -0
- pulumi_gcp/firestore/index.py +118 -3
- pulumi_gcp/folder/service_identity.py +26 -0
- pulumi_gcp/gkehub/_inputs.py +0 -60
- pulumi_gcp/gkehub/get_membership.py +1 -12
- pulumi_gcp/gkehub/membership.py +0 -70
- pulumi_gcp/gkehub/membership_rbac_role_binding.py +16 -0
- pulumi_gcp/gkehub/outputs.py +0 -36
- pulumi_gcp/healthcare/_inputs.py +205 -0
- pulumi_gcp/healthcare/fhir_store.py +128 -0
- pulumi_gcp/healthcare/outputs.py +163 -0
- pulumi_gcp/iam/workload_identity_pool_iam_binding.py +464 -0
- pulumi_gcp/iam/workload_identity_pool_iam_member.py +464 -0
- pulumi_gcp/iam/workload_identity_pool_iam_policy.py +464 -0
- pulumi_gcp/iam/workload_identity_pool_managed_identity.py +18 -0
- pulumi_gcp/iam/workload_identity_pool_namespace.py +20 -0
- pulumi_gcp/iap/__init__.py +8 -0
- pulumi_gcp/iap/_inputs.py +260 -0
- pulumi_gcp/iap/get_web_forwarding_rule_service_iam_policy.py +159 -0
- pulumi_gcp/iap/get_web_region_forwarding_rule_service_iam_policy.py +182 -0
- pulumi_gcp/iap/outputs.py +152 -0
- pulumi_gcp/{beyondcorp/application_iam_binding.py → iap/web_forwarding_rule_service_iam_binding.py} +233 -315
- pulumi_gcp/{beyondcorp/application_iam_member.py → iap/web_forwarding_rule_service_iam_member.py} +233 -315
- pulumi_gcp/{beyondcorp/application_iam_policy.py → iap/web_forwarding_rule_service_iam_policy.py} +212 -294
- pulumi_gcp/iap/web_region_forwarding_rule_service_iam_binding.py +1091 -0
- pulumi_gcp/iap/web_region_forwarding_rule_service_iam_member.py +1091 -0
- pulumi_gcp/iap/web_region_forwarding_rule_service_iam_policy.py +910 -0
- pulumi_gcp/kms/autokey_config.py +0 -2
- pulumi_gcp/kms/get_kms_secret_asymmetric.py +22 -0
- pulumi_gcp/kms/key_handle.py +0 -2
- pulumi_gcp/logging/organization_sink.py +7 -7
- pulumi_gcp/managedkafka/connect_cluster.py +4 -0
- pulumi_gcp/managedkafka/connector.py +4 -0
- pulumi_gcp/memorystore/get_instance.py +1 -12
- pulumi_gcp/memorystore/instance.py +14 -84
- pulumi_gcp/netapp/storage_pool.py +91 -2
- pulumi_gcp/netapp/volume.py +47 -0
- pulumi_gcp/networkmanagement/__init__.py +1 -0
- pulumi_gcp/networkmanagement/organization_vpc_flow_logs_config.py +1028 -0
- pulumi_gcp/networksecurity/authorization_policy.py +12 -0
- pulumi_gcp/networkservices/gateway.py +7 -7
- pulumi_gcp/networkservices/lb_traffic_extension.py +24 -23
- pulumi_gcp/networkservices/service_lb_policies.py +12 -0
- pulumi_gcp/notebooks/__init__.py +0 -1
- pulumi_gcp/oracledatabase/autonomous_database.py +2 -2
- pulumi_gcp/oracledatabase/cloud_vm_cluster.py +3 -3
- pulumi_gcp/projects/service.py +2 -11
- pulumi_gcp/provider.py +20 -20
- pulumi_gcp/pulumi-plugin.json +1 -1
- pulumi_gcp/redis/cluster.py +0 -70
- pulumi_gcp/redis/get_cluster.py +1 -12
- pulumi_gcp/resourcemanager/capability.py +16 -0
- pulumi_gcp/runtimeconfig/config.py +16 -0
- pulumi_gcp/runtimeconfig/config_iam_binding.py +236 -0
- pulumi_gcp/runtimeconfig/config_iam_member.py +236 -0
- pulumi_gcp/runtimeconfig/config_iam_policy.py +236 -0
- pulumi_gcp/runtimeconfig/variable.py +10 -0
- pulumi_gcp/saasruntime/__init__.py +12 -0
- pulumi_gcp/saasruntime/_inputs.py +513 -0
- pulumi_gcp/saasruntime/outputs.py +431 -0
- pulumi_gcp/saasruntime/saa_s.py +819 -0
- pulumi_gcp/saasruntime/unit_kind.py +1024 -0
- pulumi_gcp/securesourcemanager/_inputs.py +20 -19
- pulumi_gcp/securesourcemanager/instance.py +7 -7
- pulumi_gcp/securesourcemanager/outputs.py +15 -14
- pulumi_gcp/securesourcemanager/repository.py +7 -7
- pulumi_gcp/serviceaccount/get_account_key.py +1 -21
- pulumi_gcp/servicedirectory/namespace_iam_binding.py +4 -4
- pulumi_gcp/servicedirectory/namespace_iam_member.py +4 -4
- pulumi_gcp/servicedirectory/namespace_iam_policy.py +4 -4
- pulumi_gcp/servicedirectory/service_iam_binding.py +4 -4
- pulumi_gcp/servicedirectory/service_iam_member.py +4 -4
- pulumi_gcp/servicedirectory/service_iam_policy.py +4 -4
- pulumi_gcp/sql/_inputs.py +135 -1
- pulumi_gcp/sql/database_instance.py +94 -0
- pulumi_gcp/sql/get_database_instance.py +23 -1
- pulumi_gcp/sql/outputs.py +276 -3
- pulumi_gcp/sql/user.py +61 -0
- pulumi_gcp/storage/_inputs.py +253 -7
- pulumi_gcp/storage/bucket_object.py +0 -9
- pulumi_gcp/storage/outputs.py +198 -8
- pulumi_gcp/storage/transfer_job.py +47 -0
- pulumi_gcp/tpu/__init__.py +0 -2
- pulumi_gcp/tpu/_inputs.py +0 -93
- pulumi_gcp/tpu/outputs.py +0 -72
- pulumi_gcp/tpu/v2_queued_resource.py +16 -0
- pulumi_gcp/tpu/v2_vm.py +16 -0
- pulumi_gcp/vertex/_inputs.py +8 -9
- pulumi_gcp/vertex/ai_endpoint.py +2 -4
- pulumi_gcp/vertex/ai_feature_group_iam_binding.py +236 -0
- pulumi_gcp/vertex/ai_feature_group_iam_member.py +236 -0
- pulumi_gcp/vertex/ai_feature_group_iam_policy.py +236 -0
- pulumi_gcp/vertex/ai_feature_online_store_featureview.py +0 -2
- pulumi_gcp/vertex/ai_feature_online_store_featureview_iam_binding.py +248 -0
- pulumi_gcp/vertex/ai_feature_online_store_featureview_iam_member.py +248 -0
- pulumi_gcp/vertex/ai_feature_online_store_featureview_iam_policy.py +248 -0
- pulumi_gcp/vertex/ai_feature_online_store_iam_binding.py +236 -0
- pulumi_gcp/vertex/ai_feature_online_store_iam_member.py +236 -0
- pulumi_gcp/vertex/ai_feature_online_store_iam_policy.py +236 -0
- pulumi_gcp/vertex/ai_feature_store_entity_type_iam_binding.py +238 -0
- pulumi_gcp/vertex/ai_feature_store_entity_type_iam_member.py +238 -0
- pulumi_gcp/vertex/ai_feature_store_entity_type_iam_policy.py +238 -0
- pulumi_gcp/vertex/ai_feature_store_iam_binding.py +248 -0
- pulumi_gcp/vertex/ai_feature_store_iam_member.py +248 -0
- pulumi_gcp/vertex/ai_feature_store_iam_policy.py +248 -0
- pulumi_gcp/vertex/ai_index.py +24 -23
- pulumi_gcp/vertex/ai_metadata_store.py +16 -0
- pulumi_gcp/vertex/outputs.py +7 -8
- pulumi_gcp/workstations/workstation.py +16 -0
- pulumi_gcp/workstations/workstation_cluster.py +16 -0
- pulumi_gcp/workstations/workstation_config.py +16 -0
- pulumi_gcp/workstations/workstation_config_iam_binding.py +260 -0
- pulumi_gcp/workstations/workstation_config_iam_member.py +260 -0
- pulumi_gcp/workstations/workstation_config_iam_policy.py +260 -0
- pulumi_gcp/workstations/workstation_iam_binding.py +272 -0
- pulumi_gcp/workstations/workstation_iam_member.py +272 -0
- pulumi_gcp/workstations/workstation_iam_policy.py +272 -0
- {pulumi_gcp-8.42.0a1758178363.dist-info → pulumi_gcp-9.0.0.dist-info}/METADATA +1 -1
- {pulumi_gcp-8.42.0a1758178363.dist-info → pulumi_gcp-9.0.0.dist-info}/RECORD +266 -255
- pulumi_gcp/beyondcorp/application.py +0 -746
- pulumi_gcp/beyondcorp/get_application_iam_policy.py +0 -182
- pulumi_gcp/notebooks/location.py +0 -285
- pulumi_gcp/tpu/get_tensorflow_versions.py +0 -184
- pulumi_gcp/tpu/node.py +0 -1062
- {pulumi_gcp-8.42.0a1758178363.dist-info → pulumi_gcp-9.0.0.dist-info}/WHEEL +0 -0
- {pulumi_gcp-8.42.0a1758178363.dist-info → pulumi_gcp-9.0.0.dist-info}/top_level.txt +0 -0
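As a new major version, this release deletes whole modules (for example `pulumi_gcp/tpu/node.py`, `pulumi_gcp/notebooks/location.py`, and `pulumi_gcp/beyondcorp/application.py`) and relocates the BeyondCorp application IAM resources under `pulumi_gcp.iap`, per the `{beyondcorp/... → iap/...}` entries above. A hedged sketch of the import-level impact follows; the class names are inferred from the module file names and are not confirmed by this diff:

```python
import pulumi_gcp as gcp

# Inferred from the deleted modules listed above: these attributes exist in
# 8.x but are expected to be gone after upgrading to 9.0.0.
#   gcp.tpu.Node, gcp.notebooks.Location, gcp.beyondcorp.Application,
#   gcp.beyondcorp.ApplicationIamBinding / ApplicationIamMember / ApplicationIamPolicy
#
# Inferred from the renamed files: the BeyondCorp application IAM resources
# reappear as IAP forwarding-rule service IAM resources, e.g.
#   gcp.beyondcorp.ApplicationIamMember -> gcp.iap.WebForwardingRuleServiceIamMember

# Code that has to run against both majors can probe for the relocated class:
HAS_IAP_FORWARDING_RULE_IAM = hasattr(gcp.iap, "WebForwardingRuleServiceIamMember")
print("IAP forwarding-rule IAM resources available:", HAS_IAP_FORWARDING_RULE_IAM)
```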
pulumi_gcp/container/outputs.py
CHANGED
@@ -174,6 +174,8 @@ __all__ = [
|
|
174
174
|
'ClusterNodeConfigKubeletConfigEvictionMinimumReclaim',
|
175
175
|
'ClusterNodeConfigKubeletConfigEvictionSoft',
|
176
176
|
'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod',
|
177
|
+
'ClusterNodeConfigKubeletConfigMemoryManager',
|
178
|
+
'ClusterNodeConfigKubeletConfigTopologyManager',
|
177
179
|
'ClusterNodeConfigLinuxNodeConfig',
|
178
180
|
'ClusterNodeConfigLinuxNodeConfigHugepagesConfig',
|
179
181
|
'ClusterNodeConfigLocalNvmeSsdBlockConfig',
|
@@ -227,6 +229,8 @@ __all__ = [
|
|
227
229
|
'ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim',
|
228
230
|
'ClusterNodePoolNodeConfigKubeletConfigEvictionSoft',
|
229
231
|
'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod',
|
232
|
+
'ClusterNodePoolNodeConfigKubeletConfigMemoryManager',
|
233
|
+
'ClusterNodePoolNodeConfigKubeletConfigTopologyManager',
|
230
234
|
'ClusterNodePoolNodeConfigLinuxNodeConfig',
|
231
235
|
'ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig',
|
232
236
|
'ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfig',
|
@@ -295,6 +299,8 @@ __all__ = [
|
|
295
299
|
'NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim',
|
296
300
|
'NodePoolNodeConfigKubeletConfigEvictionSoft',
|
297
301
|
'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod',
|
302
|
+
'NodePoolNodeConfigKubeletConfigMemoryManager',
|
303
|
+
'NodePoolNodeConfigKubeletConfigTopologyManager',
|
298
304
|
'NodePoolNodeConfigLinuxNodeConfig',
|
299
305
|
'NodePoolNodeConfigLinuxNodeConfigHugepagesConfig',
|
300
306
|
'NodePoolNodeConfigLocalNvmeSsdBlockConfig',
|
@@ -401,6 +407,8 @@ __all__ = [
|
|
401
407
|
'GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult',
|
402
408
|
'GetClusterNodeConfigKubeletConfigEvictionSoftResult',
|
403
409
|
'GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult',
|
410
|
+
'GetClusterNodeConfigKubeletConfigMemoryManagerResult',
|
411
|
+
'GetClusterNodeConfigKubeletConfigTopologyManagerResult',
|
404
412
|
'GetClusterNodeConfigLinuxNodeConfigResult',
|
405
413
|
'GetClusterNodeConfigLinuxNodeConfigHugepagesConfigResult',
|
406
414
|
'GetClusterNodeConfigLocalNvmeSsdBlockConfigResult',
|
@@ -454,6 +462,8 @@ __all__ = [
|
|
454
462
|
'GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult',
|
455
463
|
'GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult',
|
456
464
|
'GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult',
|
465
|
+
'GetClusterNodePoolNodeConfigKubeletConfigMemoryManagerResult',
|
466
|
+
'GetClusterNodePoolNodeConfigKubeletConfigTopologyManagerResult',
|
457
467
|
'GetClusterNodePoolNodeConfigLinuxNodeConfigResult',
|
458
468
|
'GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigResult',
|
459
469
|
'GetClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigResult',
|
@@ -4772,6 +4782,8 @@ class ClusterClusterAutoscaling(dict):
|
|
4772
4782
|
suggest = "auto_provisioning_locations"
|
4773
4783
|
elif key == "autoscalingProfile":
|
4774
4784
|
suggest = "autoscaling_profile"
|
4785
|
+
elif key == "defaultComputeClassEnabled":
|
4786
|
+
suggest = "default_compute_class_enabled"
|
4775
4787
|
elif key == "resourceLimits":
|
4776
4788
|
suggest = "resource_limits"
|
4777
4789
|
|
@@ -4790,6 +4802,7 @@ class ClusterClusterAutoscaling(dict):
|
|
4790
4802
|
auto_provisioning_defaults: Optional['outputs.ClusterClusterAutoscalingAutoProvisioningDefaults'] = None,
|
4791
4803
|
auto_provisioning_locations: Optional[Sequence[_builtins.str]] = None,
|
4792
4804
|
autoscaling_profile: Optional[_builtins.str] = None,
|
4805
|
+
default_compute_class_enabled: Optional[_builtins.bool] = None,
|
4793
4806
|
enabled: Optional[_builtins.bool] = None,
|
4794
4807
|
resource_limits: Optional[Sequence['outputs.ClusterClusterAutoscalingResourceLimit']] = None):
|
4795
4808
|
"""
|
@@ -4803,6 +4816,7 @@ class ClusterClusterAutoscaling(dict):
|
|
4803
4816
|
options for the [Autoscaling profile](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#autoscaling_profiles)
|
4804
4817
|
feature, which lets you choose whether the cluster autoscaler should optimize for resource utilization or resource availability
|
4805
4818
|
when deciding to remove nodes from a cluster. Can be `BALANCED` or `OPTIMIZE_UTILIZATION`. Defaults to `BALANCED`.
|
4819
|
+
:param _builtins.bool default_compute_class_enabled: Specifies whether default compute class behaviour is enabled. If enabled, cluster autoscaler will use Compute Class with name default for all the workloads, if not overriden.
|
4806
4820
|
:param _builtins.bool enabled: Whether node auto-provisioning is enabled. Must be supplied for GKE Standard clusters, `true` is implied
|
4807
4821
|
for autopilot clusters. Resource limits for `cpu` and `memory` must be defined to enable node auto-provisioning for GKE Standard.
|
4808
4822
|
:param Sequence['ClusterClusterAutoscalingResourceLimitArgs'] resource_limits: Global constraints for machine resources in the
|
@@ -4816,6 +4830,8 @@ class ClusterClusterAutoscaling(dict):
|
|
4816
4830
|
pulumi.set(__self__, "auto_provisioning_locations", auto_provisioning_locations)
|
4817
4831
|
if autoscaling_profile is not None:
|
4818
4832
|
pulumi.set(__self__, "autoscaling_profile", autoscaling_profile)
|
4833
|
+
if default_compute_class_enabled is not None:
|
4834
|
+
pulumi.set(__self__, "default_compute_class_enabled", default_compute_class_enabled)
|
4819
4835
|
if enabled is not None:
|
4820
4836
|
pulumi.set(__self__, "enabled", enabled)
|
4821
4837
|
if resource_limits is not None:
|
@@ -4852,6 +4868,14 @@ class ClusterClusterAutoscaling(dict):
|
|
4852
4868
|
"""
|
4853
4869
|
return pulumi.get(self, "autoscaling_profile")
|
4854
4870
|
|
4871
|
+
@_builtins.property
|
4872
|
+
@pulumi.getter(name="defaultComputeClassEnabled")
|
4873
|
+
def default_compute_class_enabled(self) -> Optional[_builtins.bool]:
|
4874
|
+
"""
|
4875
|
+
Specifies whether default compute class behaviour is enabled. If enabled, cluster autoscaler will use Compute Class with name default for all the workloads, if not overriden.
|
4876
|
+
"""
|
4877
|
+
return pulumi.get(self, "default_compute_class_enabled")
|
4878
|
+
|
4855
4879
|
@_builtins.property
|
4856
4880
|
@pulumi.getter
|
4857
4881
|
def enabled(self) -> Optional[_builtins.bool]:
|
@@ -5885,7 +5909,7 @@ class ClusterEnterpriseConfig(dict):
|
|
5885
5909
|
desired_tier: Optional[_builtins.str] = None):
|
5886
5910
|
"""
|
5887
5911
|
:param _builtins.str cluster_tier: The effective tier of the cluster.
|
5888
|
-
:param _builtins.str desired_tier: Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`.
|
5912
|
+
:param _builtins.str desired_tier: (DEPRECATED) Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`. Deprecated as GKE Enterprise features are now available without an Enterprise tier. See https://cloud.google.com/blog/products/containers-kubernetes/gke-gets-new-pricing-and-capabilities-on-10th-birthday for the announcement of this change.
|
5889
5913
|
"""
|
5890
5914
|
if cluster_tier is not None:
|
5891
5915
|
pulumi.set(__self__, "cluster_tier", cluster_tier)
|
@@ -5894,6 +5918,7 @@ class ClusterEnterpriseConfig(dict):
|
|
5894
5918
|
|
5895
5919
|
@_builtins.property
|
5896
5920
|
@pulumi.getter(name="clusterTier")
|
5921
|
+
@_utilities.deprecated("""GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""")
|
5897
5922
|
def cluster_tier(self) -> Optional[_builtins.str]:
|
5898
5923
|
"""
|
5899
5924
|
The effective tier of the cluster.
|
@@ -5902,9 +5927,10 @@ class ClusterEnterpriseConfig(dict):
|
|
5902
5927
|
|
5903
5928
|
@_builtins.property
|
5904
5929
|
@pulumi.getter(name="desiredTier")
|
5930
|
+
@_utilities.deprecated("""GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""")
|
5905
5931
|
def desired_tier(self) -> Optional[_builtins.str]:
|
5906
5932
|
"""
|
5907
|
-
Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`.
|
5933
|
+
(DEPRECATED) Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`. Deprecated as GKE Enterprise features are now available without an Enterprise tier. See https://cloud.google.com/blog/products/containers-kubernetes/gke-gets-new-pricing-and-capabilities-on-10th-birthday for the announcement of this change.
|
5908
5934
|
"""
|
5909
5935
|
return pulumi.get(self, "desired_tier")
|
5910
5936
|
|
@@ -7414,7 +7440,7 @@ class ClusterNodeConfig(dict):
|
|
7414
7440
|
in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
|
7415
7441
|
Prefer configuring `boot_disk`.
|
7416
7442
|
:param _builtins.str disk_type: Type of the disk attached to each node
|
7417
|
-
(e.g. 'pd-standard', 'pd-balanced'
|
7443
|
+
(e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
|
7418
7444
|
:param Sequence['ClusterNodeConfigEffectiveTaintArgs'] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
|
7419
7445
|
:param _builtins.bool enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
|
7420
7446
|
:param 'ClusterNodeConfigEphemeralStorageConfigArgs' ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
|
@@ -7662,7 +7688,7 @@ class ClusterNodeConfig(dict):
|
|
7662
7688
|
def disk_type(self) -> Optional[_builtins.str]:
|
7663
7689
|
"""
|
7664
7690
|
Type of the disk attached to each node
|
7665
|
-
(e.g. 'pd-standard', 'pd-balanced'
|
7691
|
+
(e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
|
7666
7692
|
"""
|
7667
7693
|
return pulumi.get(self, "disk_type")
|
7668
7694
|
|
@@ -8132,7 +8158,7 @@ class ClusterNodeConfigBootDisk(dict):
|
|
8132
8158
|
size_gb: Optional[_builtins.int] = None):
|
8133
8159
|
"""
|
8134
8160
|
:param _builtins.str disk_type: Type of the disk attached to each node
|
8135
|
-
(e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced').
|
8161
|
+
(e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
|
8136
8162
|
:param _builtins.int provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
|
8137
8163
|
:param _builtins.int provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
|
8138
8164
|
:param _builtins.int size_gb: Size of the disk attached to each node, specified
|
@@ -8152,7 +8178,7 @@ class ClusterNodeConfigBootDisk(dict):
|
|
8152
8178
|
def disk_type(self) -> Optional[_builtins.str]:
|
8153
8179
|
"""
|
8154
8180
|
Type of the disk attached to each node
|
8155
|
-
(e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced').
|
8181
|
+
(e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
|
8156
8182
|
"""
|
8157
8183
|
return pulumi.get(self, "disk_type")
|
8158
8184
|
|
@@ -8835,10 +8861,14 @@ class ClusterNodeConfigKubeletConfig(dict):
|
|
8835
8861
|
suggest = "insecure_kubelet_readonly_port_enabled"
|
8836
8862
|
elif key == "maxParallelImagePulls":
|
8837
8863
|
suggest = "max_parallel_image_pulls"
|
8864
|
+
elif key == "memoryManager":
|
8865
|
+
suggest = "memory_manager"
|
8838
8866
|
elif key == "podPidsLimit":
|
8839
8867
|
suggest = "pod_pids_limit"
|
8840
8868
|
elif key == "singleProcessOomKill":
|
8841
8869
|
suggest = "single_process_oom_kill"
|
8870
|
+
elif key == "topologyManager":
|
8871
|
+
suggest = "topology_manager"
|
8842
8872
|
|
8843
8873
|
if suggest:
|
8844
8874
|
pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfig. Access the value via the '{suggest}' property getter instead.")
|
@@ -8868,8 +8898,10 @@ class ClusterNodeConfigKubeletConfig(dict):
|
|
8868
8898
|
image_minimum_gc_age: Optional[_builtins.str] = None,
|
8869
8899
|
insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
|
8870
8900
|
max_parallel_image_pulls: Optional[_builtins.int] = None,
|
8901
|
+
memory_manager: Optional['outputs.ClusterNodeConfigKubeletConfigMemoryManager'] = None,
|
8871
8902
|
pod_pids_limit: Optional[_builtins.int] = None,
|
8872
|
-
single_process_oom_kill: Optional[_builtins.bool] = None
|
8903
|
+
single_process_oom_kill: Optional[_builtins.bool] = None,
|
8904
|
+
topology_manager: Optional['outputs.ClusterNodeConfigKubeletConfigTopologyManager'] = None):
|
8873
8905
|
"""
|
8874
8906
|
:param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
|
8875
8907
|
:param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
|
@@ -8899,8 +8931,12 @@ class ClusterNodeConfigKubeletConfig(dict):
|
|
8899
8931
|
:param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
|
8900
8932
|
:param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
|
8901
8933
|
:param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
|
8934
|
+
:param 'ClusterNodeConfigKubeletConfigMemoryManagerArgs' memory_manager: Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
|
8935
|
+
The memory manager optimizes memory and hugepages allocation for pods, especially
|
8936
|
+
those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
|
8902
8937
|
:param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
|
8903
8938
|
:param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
|
8939
|
+
:param 'ClusterNodeConfigKubeletConfigTopologyManagerArgs' topology_manager: These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
|
8904
8940
|
"""
|
8905
8941
|
if allowed_unsafe_sysctls is not None:
|
8906
8942
|
pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
|
@@ -8934,10 +8970,14 @@ class ClusterNodeConfigKubeletConfig(dict):
|
|
8934
8970
|
pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
|
8935
8971
|
if max_parallel_image_pulls is not None:
|
8936
8972
|
pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
|
8973
|
+
if memory_manager is not None:
|
8974
|
+
pulumi.set(__self__, "memory_manager", memory_manager)
|
8937
8975
|
if pod_pids_limit is not None:
|
8938
8976
|
pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
|
8939
8977
|
if single_process_oom_kill is not None:
|
8940
8978
|
pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
|
8979
|
+
if topology_manager is not None:
|
8980
|
+
pulumi.set(__self__, "topology_manager", topology_manager)
|
8941
8981
|
|
8942
8982
|
@_builtins.property
|
8943
8983
|
@pulumi.getter(name="allowedUnsafeSysctls")
|
@@ -9079,6 +9119,16 @@ class ClusterNodeConfigKubeletConfig(dict):
|
|
9079
9119
|
"""
|
9080
9120
|
return pulumi.get(self, "max_parallel_image_pulls")
|
9081
9121
|
|
9122
|
+
@_builtins.property
|
9123
|
+
@pulumi.getter(name="memoryManager")
|
9124
|
+
def memory_manager(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigMemoryManager']:
|
9125
|
+
"""
|
9126
|
+
Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
|
9127
|
+
The memory manager optimizes memory and hugepages allocation for pods, especially
|
9128
|
+
those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
|
9129
|
+
"""
|
9130
|
+
return pulumi.get(self, "memory_manager")
|
9131
|
+
|
9082
9132
|
@_builtins.property
|
9083
9133
|
@pulumi.getter(name="podPidsLimit")
|
9084
9134
|
def pod_pids_limit(self) -> Optional[_builtins.int]:
|
@@ -9095,6 +9145,14 @@ class ClusterNodeConfigKubeletConfig(dict):
|
|
9095
9145
|
"""
|
9096
9146
|
return pulumi.get(self, "single_process_oom_kill")
|
9097
9147
|
|
9148
|
+
@_builtins.property
|
9149
|
+
@pulumi.getter(name="topologyManager")
|
9150
|
+
def topology_manager(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigTopologyManager']:
|
9151
|
+
"""
|
9152
|
+
These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
|
9153
|
+
"""
|
9154
|
+
return pulumi.get(self, "topology_manager")
|
9155
|
+
|
9098
9156
|
|
9099
9157
|
@pulumi.output_type
|
9100
9158
|
class ClusterNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
|
@@ -9414,6 +9472,64 @@ class ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
|
|
9414
9472
|
return pulumi.get(self, "pid_available")
|
9415
9473
|
|
9416
9474
|
|
9475
|
+
@pulumi.output_type
|
9476
|
+
class ClusterNodeConfigKubeletConfigMemoryManager(dict):
|
9477
|
+
def __init__(__self__, *,
|
9478
|
+
policy: Optional[_builtins.str] = None):
|
9479
|
+
"""
|
9480
|
+
:param _builtins.str policy: The [Memory
|
9481
|
+
Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
|
9482
|
+
policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
|
9483
|
+
"""
|
9484
|
+
if policy is not None:
|
9485
|
+
pulumi.set(__self__, "policy", policy)
|
9486
|
+
|
9487
|
+
@_builtins.property
|
9488
|
+
@pulumi.getter
|
9489
|
+
def policy(self) -> Optional[_builtins.str]:
|
9490
|
+
"""
|
9491
|
+
The [Memory
|
9492
|
+
Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
|
9493
|
+
policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
|
9494
|
+
"""
|
9495
|
+
return pulumi.get(self, "policy")
|
9496
|
+
|
9497
|
+
|
9498
|
+
@pulumi.output_type
|
9499
|
+
class ClusterNodeConfigKubeletConfigTopologyManager(dict):
|
9500
|
+
def __init__(__self__, *,
|
9501
|
+
policy: Optional[_builtins.str] = None,
|
9502
|
+
scope: Optional[_builtins.str] = None):
|
9503
|
+
"""
|
9504
|
+
:param _builtins.str policy: The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
|
9505
|
+
:param _builtins.str scope: The Topology Manager scope, defining the granularity at which
|
9506
|
+
policy decisions are applied. Valid values are "container" (resources are aligned
|
9507
|
+
per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
|
9508
|
+
"""
|
9509
|
+
if policy is not None:
|
9510
|
+
pulumi.set(__self__, "policy", policy)
|
9511
|
+
if scope is not None:
|
9512
|
+
pulumi.set(__self__, "scope", scope)
|
9513
|
+
|
9514
|
+
@_builtins.property
|
9515
|
+
@pulumi.getter
|
9516
|
+
def policy(self) -> Optional[_builtins.str]:
|
9517
|
+
"""
|
9518
|
+
The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
|
9519
|
+
"""
|
9520
|
+
return pulumi.get(self, "policy")
|
9521
|
+
|
9522
|
+
@_builtins.property
|
9523
|
+
@pulumi.getter
|
9524
|
+
def scope(self) -> Optional[_builtins.str]:
|
9525
|
+
"""
|
9526
|
+
The Topology Manager scope, defining the granularity at which
|
9527
|
+
policy decisions are applied. Valid values are "container" (resources are aligned
|
9528
|
+
per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
|
9529
|
+
"""
|
9530
|
+
return pulumi.get(self, "scope")
|
9531
|
+
|
9532
|
+
|
9417
9533
|
@pulumi.output_type
|
9418
9534
|
class ClusterNodeConfigLinuxNodeConfig(dict):
|
9419
9535
|
@staticmethod
|
@@ -11356,7 +11472,7 @@ class ClusterNodePoolNodeConfig(dict):
|
|
11356
11472
|
in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
|
11357
11473
|
Prefer configuring `boot_disk`.
|
11358
11474
|
:param _builtins.str disk_type: Type of the disk attached to each node
|
11359
|
-
(e.g. 'pd-standard', 'pd-balanced'
|
11475
|
+
(e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
|
11360
11476
|
:param Sequence['ClusterNodePoolNodeConfigEffectiveTaintArgs'] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
|
11361
11477
|
:param _builtins.bool enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
|
11362
11478
|
:param 'ClusterNodePoolNodeConfigEphemeralStorageConfigArgs' ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
|
@@ -11604,7 +11720,7 @@ class ClusterNodePoolNodeConfig(dict):
|
|
11604
11720
|
def disk_type(self) -> Optional[_builtins.str]:
|
11605
11721
|
"""
|
11606
11722
|
Type of the disk attached to each node
|
11607
|
-
(e.g. 'pd-standard', 'pd-balanced'
|
11723
|
+
(e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
|
11608
11724
|
"""
|
11609
11725
|
return pulumi.get(self, "disk_type")
|
11610
11726
|
|
@@ -12074,7 +12190,7 @@ class ClusterNodePoolNodeConfigBootDisk(dict):
|
|
12074
12190
|
size_gb: Optional[_builtins.int] = None):
|
12075
12191
|
"""
|
12076
12192
|
:param _builtins.str disk_type: Type of the disk attached to each node
|
12077
|
-
(e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced').
|
12193
|
+
(e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
|
12078
12194
|
:param _builtins.int provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
|
12079
12195
|
:param _builtins.int provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
|
12080
12196
|
:param _builtins.int size_gb: Size of the disk attached to each node, specified
|
@@ -12094,7 +12210,7 @@ class ClusterNodePoolNodeConfigBootDisk(dict):
|
|
12094
12210
|
def disk_type(self) -> Optional[_builtins.str]:
|
12095
12211
|
"""
|
12096
12212
|
Type of the disk attached to each node
|
12097
|
-
(e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced').
|
12213
|
+
(e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
|
12098
12214
|
"""
|
12099
12215
|
return pulumi.get(self, "disk_type")
|
12100
12216
|
|
@@ -12777,10 +12893,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
|
|
12777
12893
|
suggest = "insecure_kubelet_readonly_port_enabled"
|
12778
12894
|
elif key == "maxParallelImagePulls":
|
12779
12895
|
suggest = "max_parallel_image_pulls"
|
12896
|
+
elif key == "memoryManager":
|
12897
|
+
suggest = "memory_manager"
|
12780
12898
|
elif key == "podPidsLimit":
|
12781
12899
|
suggest = "pod_pids_limit"
|
12782
12900
|
elif key == "singleProcessOomKill":
|
12783
12901
|
suggest = "single_process_oom_kill"
|
12902
|
+
elif key == "topologyManager":
|
12903
|
+
suggest = "topology_manager"
|
12784
12904
|
|
12785
12905
|
if suggest:
|
12786
12906
|
pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfig. Access the value via the '{suggest}' property getter instead.")
|
@@ -12810,8 +12930,10 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
|
|
12810
12930
|
image_minimum_gc_age: Optional[_builtins.str] = None,
|
12811
12931
|
insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
|
12812
12932
|
max_parallel_image_pulls: Optional[_builtins.int] = None,
|
12933
|
+
memory_manager: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigMemoryManager'] = None,
|
12813
12934
|
pod_pids_limit: Optional[_builtins.int] = None,
|
12814
|
-
single_process_oom_kill: Optional[_builtins.bool] = None
|
12935
|
+
single_process_oom_kill: Optional[_builtins.bool] = None,
|
12936
|
+
topology_manager: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigTopologyManager'] = None):
|
12815
12937
|
"""
|
12816
12938
|
:param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
|
12817
12939
|
:param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
|
@@ -12841,8 +12963,12 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
|
|
12841
12963
|
:param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
|
12842
12964
|
:param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
|
12843
12965
|
:param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
|
12966
|
+
:param 'ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs' memory_manager: Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
|
12967
|
+
The memory manager optimizes memory and hugepages allocation for pods, especially
|
12968
|
+
those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
|
12844
12969
|
:param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
|
12845
12970
|
:param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
|
12971
|
+
:param 'ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs' topology_manager: These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
|
12846
12972
|
"""
|
12847
12973
|
if allowed_unsafe_sysctls is not None:
|
12848
12974
|
pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
|
@@ -12876,10 +13002,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
|
|
12876
13002
|
pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
|
12877
13003
|
if max_parallel_image_pulls is not None:
|
12878
13004
|
pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
|
13005
|
+
if memory_manager is not None:
|
13006
|
+
pulumi.set(__self__, "memory_manager", memory_manager)
|
12879
13007
|
if pod_pids_limit is not None:
|
12880
13008
|
pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
|
12881
13009
|
if single_process_oom_kill is not None:
|
12882
13010
|
pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
|
13011
|
+
if topology_manager is not None:
|
13012
|
+
pulumi.set(__self__, "topology_manager", topology_manager)
|
12883
13013
|
|
12884
13014
|
@_builtins.property
|
12885
13015
|
@pulumi.getter(name="allowedUnsafeSysctls")
|
@@ -13021,6 +13151,16 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
|
|
13021
13151
|
"""
|
13022
13152
|
return pulumi.get(self, "max_parallel_image_pulls")
|
13023
13153
|
|
13154
|
+
@_builtins.property
|
13155
|
+
@pulumi.getter(name="memoryManager")
|
13156
|
+
def memory_manager(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigMemoryManager']:
|
13157
|
+
"""
|
13158
|
+
Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
|
13159
|
+
The memory manager optimizes memory and hugepages allocation for pods, especially
|
13160
|
+
those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
|
13161
|
+
"""
|
13162
|
+
return pulumi.get(self, "memory_manager")
|
13163
|
+
|
13024
13164
|
@_builtins.property
|
13025
13165
|
@pulumi.getter(name="podPidsLimit")
|
13026
13166
|
def pod_pids_limit(self) -> Optional[_builtins.int]:
|
@@ -13037,6 +13177,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
|
|
13037
13177
|
"""
|
13038
13178
|
return pulumi.get(self, "single_process_oom_kill")
|
13039
13179
|
|
13180
|
+
@_builtins.property
|
13181
|
+
@pulumi.getter(name="topologyManager")
|
13182
|
+
def topology_manager(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigTopologyManager']:
|
13183
|
+
"""
|
13184
|
+
These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
|
13185
|
+
"""
|
13186
|
+
return pulumi.get(self, "topology_manager")
|
13187
|
+
|
13040
13188
|
|
13041
13189
|
@pulumi.output_type
|
13042
13190
|
class ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
|
@@ -13356,6 +13504,64 @@ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
|
|
13356
13504
|
return pulumi.get(self, "pid_available")
|
13357
13505
|
|
13358
13506
|
|
13507
|
+
@pulumi.output_type
|
13508
|
+
class ClusterNodePoolNodeConfigKubeletConfigMemoryManager(dict):
|
13509
|
+
def __init__(__self__, *,
|
13510
|
+
policy: Optional[_builtins.str] = None):
|
13511
|
+
"""
|
13512
|
+
:param _builtins.str policy: The [Memory
|
13513
|
+
Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
|
13514
|
+
policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
|
13515
|
+
"""
|
13516
|
+
if policy is not None:
|
13517
|
+
pulumi.set(__self__, "policy", policy)
|
13518
|
+
|
13519
|
+
@_builtins.property
|
13520
|
+
@pulumi.getter
|
13521
|
+
def policy(self) -> Optional[_builtins.str]:
|
13522
|
+
"""
|
13523
|
+
The [Memory
|
13524
|
+
Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
|
13525
|
+
policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
|
13526
|
+
"""
|
13527
|
+
return pulumi.get(self, "policy")
|
13528
|
+
|
13529
|
+
|
13530
|
+
@pulumi.output_type
|
13531
|
+
class ClusterNodePoolNodeConfigKubeletConfigTopologyManager(dict):
|
13532
|
+
def __init__(__self__, *,
|
13533
|
+
policy: Optional[_builtins.str] = None,
|
13534
|
+
scope: Optional[_builtins.str] = None):
|
13535
|
+
"""
|
13536
|
+
:param _builtins.str policy: The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
|
13537
|
+
:param _builtins.str scope: The Topology Manager scope, defining the granularity at which
|
13538
|
+
policy decisions are applied. Valid values are "container" (resources are aligned
|
13539
|
+
per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
|
13540
|
+
"""
|
13541
|
+
if policy is not None:
|
13542
|
+
pulumi.set(__self__, "policy", policy)
|
13543
|
+
if scope is not None:
|
13544
|
+
pulumi.set(__self__, "scope", scope)
|
13545
|
+
|
13546
|
+
@_builtins.property
|
13547
|
+
@pulumi.getter
|
13548
|
+
def policy(self) -> Optional[_builtins.str]:
|
13549
|
+
"""
|
13550
|
+
The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
|
13551
|
+
"""
|
13552
|
+
return pulumi.get(self, "policy")
|
13553
|
+
|
13554
|
+
@_builtins.property
|
13555
|
+
@pulumi.getter
|
13556
|
+
def scope(self) -> Optional[_builtins.str]:
|
13557
|
+
"""
|
13558
|
+
The Topology Manager scope, defining the granularity at which
|
13559
|
+
policy decisions are applied. Valid values are "container" (resources are aligned
|
13560
|
+
per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
|
13561
|
+
"""
|
13562
|
+
return pulumi.get(self, "scope")
|
13563
|
+
|
13564
|
+
|
13359
13565
|
@pulumi.output_type
|
13360
13566
|
class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
|
13361
13567
|
@staticmethod
|
@@ -17161,10 +17367,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
|
|
17161
17367
|
suggest = "insecure_kubelet_readonly_port_enabled"
|
17162
17368
|
elif key == "maxParallelImagePulls":
|
17163
17369
|
suggest = "max_parallel_image_pulls"
|
17370
|
+
elif key == "memoryManager":
|
17371
|
+
suggest = "memory_manager"
|
17164
17372
|
elif key == "podPidsLimit":
|
17165
17373
|
suggest = "pod_pids_limit"
|
17166
17374
|
elif key == "singleProcessOomKill":
|
17167
17375
|
suggest = "single_process_oom_kill"
|
17376
|
+
elif key == "topologyManager":
|
17377
|
+
suggest = "topology_manager"
|
17168
17378
|
|
17169
17379
|
if suggest:
|
17170
17380
|
pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfig. Access the value via the '{suggest}' property getter instead.")
|
@@ -17194,8 +17404,10 @@ class NodePoolNodeConfigKubeletConfig(dict):
|
|
17194
17404
|
image_minimum_gc_age: Optional[_builtins.str] = None,
|
17195
17405
|
insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
|
17196
17406
|
max_parallel_image_pulls: Optional[_builtins.int] = None,
|
17407
|
+
memory_manager: Optional['outputs.NodePoolNodeConfigKubeletConfigMemoryManager'] = None,
|
17197
17408
|
pod_pids_limit: Optional[_builtins.int] = None,
|
17198
|
-
single_process_oom_kill: Optional[_builtins.bool] = None
|
17409
|
+
single_process_oom_kill: Optional[_builtins.bool] = None,
|
17410
|
+
topology_manager: Optional['outputs.NodePoolNodeConfigKubeletConfigTopologyManager'] = None):
|
17199
17411
|
"""
|
17200
17412
|
:param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
|
17201
17413
|
:param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container.
|
@@ -17213,8 +17425,10 @@ class NodePoolNodeConfigKubeletConfig(dict):
|
|
17213
17425
|
:param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
|
17214
17426
|
:param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
|
17215
17427
|
:param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
|
17428
|
+
:param 'NodePoolNodeConfigKubeletConfigMemoryManagerArgs' memory_manager: Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
|
17216
17429
|
:param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
|
17217
17430
|
:param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer.
|
17431
|
+
:param 'NodePoolNodeConfigKubeletConfigTopologyManagerArgs' topology_manager: Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
|
17218
17432
|
"""
|
17219
17433
|
if allowed_unsafe_sysctls is not None:
|
17220
17434
|
pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
|
@@ -17248,10 +17462,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
             pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
         if max_parallel_image_pulls is not None:
             pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+        if memory_manager is not None:
+            pulumi.set(__self__, "memory_manager", memory_manager)
         if pod_pids_limit is not None:
             pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
         if single_process_oom_kill is not None:
             pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+        if topology_manager is not None:
+            pulumi.set(__self__, "topology_manager", topology_manager)

     @_builtins.property
     @pulumi.getter(name="allowedUnsafeSysctls")
@@ -17381,6 +17599,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
         """
         return pulumi.get(self, "max_parallel_image_pulls")

+    @_builtins.property
+    @pulumi.getter(name="memoryManager")
+    def memory_manager(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigMemoryManager']:
+        """
+        Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
+        """
+        return pulumi.get(self, "memory_manager")
+
     @_builtins.property
     @pulumi.getter(name="podPidsLimit")
     def pod_pids_limit(self) -> Optional[_builtins.int]:
@@ -17397,6 +17623,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
         """
         return pulumi.get(self, "single_process_oom_kill")

+    @_builtins.property
+    @pulumi.getter(name="topologyManager")
+    def topology_manager(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigTopologyManager']:
+        """
+        Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
+        """
+        return pulumi.get(self, "topology_manager")
+

 @pulumi.output_type
 class NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
@@ -17716,6 +17950,56 @@ class NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
         return pulumi.get(self, "pid_available")


+@pulumi.output_type
+class NodePoolNodeConfigKubeletConfigMemoryManager(dict):
+    def __init__(__self__, *,
+                 policy: Optional[_builtins.str] = None):
+        """
+        :param _builtins.str policy: The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        if policy is not None:
+            pulumi.set(__self__, "policy", policy)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> Optional[_builtins.str]:
+        """
+        The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        return pulumi.get(self, "policy")
+
+
+@pulumi.output_type
+class NodePoolNodeConfigKubeletConfigTopologyManager(dict):
+    def __init__(__self__, *,
+                 policy: Optional[_builtins.str] = None,
+                 scope: Optional[_builtins.str] = None):
+        """
+        :param _builtins.str policy: The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        :param _builtins.str scope: The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        if policy is not None:
+            pulumi.set(__self__, "policy", policy)
+        if scope is not None:
+            pulumi.set(__self__, "scope", scope)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> Optional[_builtins.str]:
+        """
+        The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        """
+        return pulumi.get(self, "policy")
+
+    @_builtins.property
+    @pulumi.getter
+    def scope(self) -> Optional[_builtins.str]:
+        """
+        The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        return pulumi.get(self, "scope")
+
+
 @pulumi.output_type
 class NodePoolNodeConfigLinuxNodeConfig(dict):
     @staticmethod
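The two output classes added above back new `memory_manager` and `topology_manager` blocks under a node pool's `kubelet_config`. A minimal sketch of how a Pulumi program might set them; the cluster name, location, machine type, and the policy/scope values are illustrative placeholders rather than values taken from this diff:

import pulumi_gcp as gcp

# Sketch only: "my-cluster", the location, and the machine type are placeholders,
# and the policy/scope strings assume the kubelet's usual Memory/Topology Manager
# settings; consult the provider docs for the values GKE actually accepts.
numa_pool = gcp.container.NodePool("numa-pool",
    cluster="my-cluster",
    location="us-central1",
    node_count=1,
    node_config={
        "machine_type": "c3-standard-22",
        "kubelet_config": {
            # Added in this release: NUMA-aware memory and hugepages allocation.
            "memory_manager": {
                "policy": "Static",
            },
            # Added in this release: align CPU, memory, and device resources per pod.
            "topology_manager": {
                "policy": "best-effort",
                "scope": "pod",
            },
        },
    },
)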
@@ -19095,18 +19379,21 @@ class GetClusterClusterAutoscalingResult(dict):
                  auto_provisioning_defaults: Sequence['outputs.GetClusterClusterAutoscalingAutoProvisioningDefaultResult'],
                  auto_provisioning_locations: Sequence[_builtins.str],
                  autoscaling_profile: _builtins.str,
+                 default_compute_class_enabled: _builtins.bool,
                  enabled: _builtins.bool,
                  resource_limits: Sequence['outputs.GetClusterClusterAutoscalingResourceLimitResult']):
         """
         :param Sequence['GetClusterClusterAutoscalingAutoProvisioningDefaultArgs'] auto_provisioning_defaults: Contains defaults for a node pool created by NAP.
         :param Sequence[_builtins.str] auto_provisioning_locations: The list of Google Compute Engine zones in which the NodePool's nodes can be created by NAP.
         :param _builtins.str autoscaling_profile: Configuration options for the Autoscaling profile feature, which lets you choose whether the cluster autoscaler should optimize for resource utilization or resource availability when deciding to remove nodes from a cluster. Can be BALANCED or OPTIMIZE_UTILIZATION. Defaults to BALANCED.
+        :param _builtins.bool default_compute_class_enabled: Specifies whether default compute class behaviour is enabled. If enabled, cluster autoscaler will use Compute Class with name default for all the workloads, if not overriden.
         :param _builtins.bool enabled: Whether node auto-provisioning is enabled. Resource limits for cpu and memory must be defined to enable node auto-provisioning.
         :param Sequence['GetClusterClusterAutoscalingResourceLimitArgs'] resource_limits: Global constraints for machine resources in the cluster. Configuring the cpu and memory types is required if node auto-provisioning is enabled. These limits will apply to node pool autoscaling in addition to node auto-provisioning.
         """
         pulumi.set(__self__, "auto_provisioning_defaults", auto_provisioning_defaults)
         pulumi.set(__self__, "auto_provisioning_locations", auto_provisioning_locations)
         pulumi.set(__self__, "autoscaling_profile", autoscaling_profile)
+        pulumi.set(__self__, "default_compute_class_enabled", default_compute_class_enabled)
         pulumi.set(__self__, "enabled", enabled)
         pulumi.set(__self__, "resource_limits", resource_limits)

@@ -19134,6 +19421,14 @@ class GetClusterClusterAutoscalingResult(dict):
         """
         return pulumi.get(self, "autoscaling_profile")

+    @_builtins.property
+    @pulumi.getter(name="defaultComputeClassEnabled")
+    def default_compute_class_enabled(self) -> _builtins.bool:
+        """
+        Specifies whether default compute class behaviour is enabled. If enabled, cluster autoscaler will use Compute Class with name default for all the workloads, if not overriden.
+        """
+        return pulumi.get(self, "default_compute_class_enabled")
+
     @_builtins.property
     @pulumi.getter
     def enabled(self) -> _builtins.bool:
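On the read side, the new `default_compute_class_enabled` field shows up on the cluster autoscaling block returned by the `get_cluster` data source. A short sketch, assuming the looked-up cluster exists and that the result exposes the block as the `cluster_autoscalings` list:

import pulumi
import pulumi_gcp as gcp

# Sketch only: cluster name and location are placeholders.
cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

autoscaling = cluster.cluster_autoscalings[0]
pulumi.export("defaultComputeClassEnabled", autoscaling.default_compute_class_enabled)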
@@ -21587,8 +21882,10 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
                  image_minimum_gc_age: _builtins.str,
                  insecure_kubelet_readonly_port_enabled: _builtins.str,
                  max_parallel_image_pulls: _builtins.int,
+                 memory_managers: Sequence['outputs.GetClusterNodeConfigKubeletConfigMemoryManagerResult'],
                  pod_pids_limit: _builtins.int,
-                 single_process_oom_kill: _builtins.bool):
+                 single_process_oom_kill: _builtins.bool,
+                 topology_managers: Sequence['outputs.GetClusterNodeConfigKubeletConfigTopologyManagerResult']):
         """
         :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
         :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -21606,8 +21903,10 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
         :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
         :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
         :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
+        :param Sequence['GetClusterNodeConfigKubeletConfigMemoryManagerArgs'] memory_managers: Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
         :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
         :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer.
+        :param Sequence['GetClusterNodeConfigKubeletConfigTopologyManagerArgs'] topology_managers: Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
         """
         pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
         pulumi.set(__self__, "container_log_max_files", container_log_max_files)
@@ -21625,8 +21924,10 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
         pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
         pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
         pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+        pulumi.set(__self__, "memory_managers", memory_managers)
         pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
         pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+        pulumi.set(__self__, "topology_managers", topology_managers)

     @_builtins.property
     @pulumi.getter(name="allowedUnsafeSysctls")
@@ -21756,6 +22057,14 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
         """
         return pulumi.get(self, "max_parallel_image_pulls")

+    @_builtins.property
+    @pulumi.getter(name="memoryManagers")
+    def memory_managers(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigMemoryManagerResult']:
+        """
+        Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
+        """
+        return pulumi.get(self, "memory_managers")
+
     @_builtins.property
     @pulumi.getter(name="podPidsLimit")
     def pod_pids_limit(self) -> _builtins.int:
@@ -21772,6 +22081,14 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
         """
         return pulumi.get(self, "single_process_oom_kill")

+    @_builtins.property
+    @pulumi.getter(name="topologyManagers")
+    def topology_managers(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigTopologyManagerResult']:
+        """
+        Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
+        """
+        return pulumi.get(self, "topology_managers")
+

 @pulumi.output_type
 class GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult(dict):
@@ -21992,6 +22309,53 @@ class GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult(dict):
         return pulumi.get(self, "pid_available")


+@pulumi.output_type
+class GetClusterNodeConfigKubeletConfigMemoryManagerResult(dict):
+    def __init__(__self__, *,
+                 policy: _builtins.str):
+        """
+        :param _builtins.str policy: The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        pulumi.set(__self__, "policy", policy)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> _builtins.str:
+        """
+        The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        return pulumi.get(self, "policy")
+
+
+@pulumi.output_type
+class GetClusterNodeConfigKubeletConfigTopologyManagerResult(dict):
+    def __init__(__self__, *,
+                 policy: _builtins.str,
+                 scope: _builtins.str):
+        """
+        :param _builtins.str policy: The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        :param _builtins.str scope: The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        pulumi.set(__self__, "policy", policy)
+        pulumi.set(__self__, "scope", scope)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> _builtins.str:
+        """
+        The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        """
+        return pulumi.get(self, "policy")
+
+    @_builtins.property
+    @pulumi.getter
+    def scope(self) -> _builtins.str:
+        """
+        The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        return pulumi.get(self, "scope")
+
+
 @pulumi.output_type
 class GetClusterNodeConfigLinuxNodeConfigResult(dict):
     def __init__(__self__, *,
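The `*Result` classes above expose the same kubelet settings when a cluster is read back. A sketch of walking down to them from `get_cluster`; the name, location, and the `node_configs[0].kubelet_configs[0]` nesting are assumptions about the usual result shape, and both lists may be empty on clusters that never set these fields:

import pulumi
import pulumi_gcp as gcp

# Sketch only: placeholder cluster name and location.
cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

kubelet = cluster.node_configs[0].kubelet_configs[0]
if kubelet.memory_managers:
    pulumi.export("memoryManagerPolicy", kubelet.memory_managers[0].policy)
if kubelet.topology_managers:
    pulumi.export("topologyManagerScope", kubelet.topology_managers[0].scope)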
@@ -24137,8 +24501,10 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
                  image_minimum_gc_age: _builtins.str,
                  insecure_kubelet_readonly_port_enabled: _builtins.str,
                  max_parallel_image_pulls: _builtins.int,
+                 memory_managers: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigMemoryManagerResult'],
                  pod_pids_limit: _builtins.int,
-                 single_process_oom_kill: _builtins.bool):
+                 single_process_oom_kill: _builtins.bool,
+                 topology_managers: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigTopologyManagerResult']):
         """
         :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
         :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -24156,8 +24522,10 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
         :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
         :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
         :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
+        :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs'] memory_managers: Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
         :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
         :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer.
+        :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs'] topology_managers: Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
         """
         pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
         pulumi.set(__self__, "container_log_max_files", container_log_max_files)
@@ -24175,8 +24543,10 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
         pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
         pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
         pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+        pulumi.set(__self__, "memory_managers", memory_managers)
         pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
         pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+        pulumi.set(__self__, "topology_managers", topology_managers)

     @_builtins.property
     @pulumi.getter(name="allowedUnsafeSysctls")
@@ -24306,6 +24676,14 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
         """
         return pulumi.get(self, "max_parallel_image_pulls")

+    @_builtins.property
+    @pulumi.getter(name="memoryManagers")
+    def memory_managers(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigMemoryManagerResult']:
+        """
+        Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
+        """
+        return pulumi.get(self, "memory_managers")
+
     @_builtins.property
     @pulumi.getter(name="podPidsLimit")
     def pod_pids_limit(self) -> _builtins.int:
@@ -24322,6 +24700,14 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
         """
         return pulumi.get(self, "single_process_oom_kill")

+    @_builtins.property
+    @pulumi.getter(name="topologyManagers")
+    def topology_managers(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigTopologyManagerResult']:
+        """
+        Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
+        """
+        return pulumi.get(self, "topology_managers")
+

 @pulumi.output_type
 class GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult(dict):
@@ -24542,6 +24928,53 @@ class GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult(dict):
         return pulumi.get(self, "pid_available")


+@pulumi.output_type
+class GetClusterNodePoolNodeConfigKubeletConfigMemoryManagerResult(dict):
+    def __init__(__self__, *,
+                 policy: _builtins.str):
+        """
+        :param _builtins.str policy: The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        pulumi.set(__self__, "policy", policy)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> _builtins.str:
+        """
+        The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        return pulumi.get(self, "policy")
+
+
+@pulumi.output_type
+class GetClusterNodePoolNodeConfigKubeletConfigTopologyManagerResult(dict):
+    def __init__(__self__, *,
+                 policy: _builtins.str,
+                 scope: _builtins.str):
+        """
+        :param _builtins.str policy: The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        :param _builtins.str scope: The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        pulumi.set(__self__, "policy", policy)
+        pulumi.set(__self__, "scope", scope)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> _builtins.str:
+        """
+        The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        """
+        return pulumi.get(self, "policy")
+
+    @_builtins.property
+    @pulumi.getter
+    def scope(self) -> _builtins.str:
+        """
+        The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        return pulumi.get(self, "scope")
+
+
 @pulumi.output_type
 class GetClusterNodePoolNodeConfigLinuxNodeConfigResult(dict):
     def __init__(__self__, *,