pulumi-gcp 8.42.0a1758178363__py3-none-any.whl → 9.0.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- pulumi_gcp/__init__.py +83 -48
- pulumi_gcp/activedirectory/peering.py +16 -0
- pulumi_gcp/alloydb/backup.py +8 -4
- pulumi_gcp/alloydb/cluster.py +56 -10
- pulumi_gcp/alloydb/get_cluster.py +12 -1
- pulumi_gcp/alloydb/instance.py +10 -4
- pulumi_gcp/alloydb/user.py +8 -4
- pulumi_gcp/apigateway/api_config_iam_binding.py +4 -4
- pulumi_gcp/apigateway/api_config_iam_member.py +4 -4
- pulumi_gcp/apigateway/api_config_iam_policy.py +4 -4
- pulumi_gcp/apigateway/api_iam_binding.py +4 -4
- pulumi_gcp/apigateway/api_iam_member.py +4 -4
- pulumi_gcp/apigateway/api_iam_policy.py +4 -4
- pulumi_gcp/apigateway/gateway_iam_binding.py +4 -4
- pulumi_gcp/apigateway/gateway_iam_member.py +4 -4
- pulumi_gcp/apigateway/gateway_iam_policy.py +4 -4
- pulumi_gcp/apigee/_inputs.py +151 -108
- pulumi_gcp/apigee/keystores_aliases_key_cert_file.py +52 -40
- pulumi_gcp/apigee/outputs.py +92 -88
- pulumi_gcp/artifactregistry/__init__.py +2 -0
- pulumi_gcp/artifactregistry/_inputs.py +0 -12
- pulumi_gcp/artifactregistry/get_npm_package.py +251 -0
- pulumi_gcp/artifactregistry/get_python_package.py +237 -0
- pulumi_gcp/artifactregistry/get_repository.py +12 -1
- pulumi_gcp/artifactregistry/outputs.py +8 -16
- pulumi_gcp/artifactregistry/repository.py +28 -0
- pulumi_gcp/backupdisasterrecovery/__init__.py +1 -0
- pulumi_gcp/backupdisasterrecovery/backup_plan.py +4 -4
- pulumi_gcp/backupdisasterrecovery/get_backup.py +12 -1
- pulumi_gcp/backupdisasterrecovery/get_backup_plan_association.py +4 -0
- pulumi_gcp/backupdisasterrecovery/get_data_source_references.py +135 -0
- pulumi_gcp/backupdisasterrecovery/get_management_server.py +4 -0
- pulumi_gcp/backupdisasterrecovery/outputs.py +103 -0
- pulumi_gcp/beyondcorp/__init__.py +0 -5
- pulumi_gcp/beyondcorp/_inputs.py +0 -312
- pulumi_gcp/beyondcorp/outputs.py +0 -200
- pulumi_gcp/bigquery/_inputs.py +114 -4
- pulumi_gcp/bigquery/app_profile.py +3 -5
- pulumi_gcp/bigquery/outputs.py +75 -5
- pulumi_gcp/bigqueryanalyticshub/data_exchange_subscription.py +20 -0
- pulumi_gcp/bigtable/app_profile.py +1 -1
- pulumi_gcp/bigtable/get_table_iam_policy.py +16 -18
- pulumi_gcp/bigtable/table_iam_binding.py +43 -43
- pulumi_gcp/bigtable/table_iam_member.py +43 -43
- pulumi_gcp/bigtable/table_iam_policy.py +43 -43
- pulumi_gcp/billing/_inputs.py +0 -6
- pulumi_gcp/billing/outputs.py +0 -4
- pulumi_gcp/certificatemanager/__init__.py +1 -0
- pulumi_gcp/certificatemanager/certificate_map_entry.py +7 -7
- pulumi_gcp/certificatemanager/get_dns_authorization.py +229 -0
- pulumi_gcp/certificatemanager/outputs.py +41 -0
- pulumi_gcp/chronicle/_inputs.py +6 -5
- pulumi_gcp/chronicle/outputs.py +4 -3
- pulumi_gcp/chronicle/reference_list.py +53 -5
- pulumi_gcp/cloudasset/get_resources_search_all.py +8 -0
- pulumi_gcp/cloudbuild/_inputs.py +309 -45
- pulumi_gcp/cloudbuild/bitbucket_server_config.py +2 -6
- pulumi_gcp/cloudbuild/get_trigger.py +12 -1
- pulumi_gcp/cloudbuild/outputs.py +380 -30
- pulumi_gcp/cloudbuild/trigger.py +160 -2
- pulumi_gcp/cloudbuild/worker_pool.py +2 -6
- pulumi_gcp/cloudfunctionsv2/_inputs.py +23 -21
- pulumi_gcp/cloudfunctionsv2/outputs.py +17 -16
- pulumi_gcp/cloudquota/s_quota_adjuster_settings.py +16 -0
- pulumi_gcp/cloudrunv2/_inputs.py +80 -26
- pulumi_gcp/cloudrunv2/get_service.py +12 -1
- pulumi_gcp/cloudrunv2/outputs.py +109 -30
- pulumi_gcp/cloudrunv2/service.py +72 -12
- pulumi_gcp/cloudrunv2/worker_pool.py +2 -2
- pulumi_gcp/cloudtasks/queue.py +98 -2
- pulumi_gcp/colab/_inputs.py +1 -114
- pulumi_gcp/colab/outputs.py +1 -106
- pulumi_gcp/compute/_inputs.py +884 -24
- pulumi_gcp/compute/backend_bucket.py +68 -0
- pulumi_gcp/compute/backend_bucket_iam_binding.py +236 -0
- pulumi_gcp/compute/backend_bucket_iam_member.py +236 -0
- pulumi_gcp/compute/backend_bucket_iam_policy.py +236 -0
- pulumi_gcp/compute/backend_service.py +54 -0
- pulumi_gcp/compute/backend_service_iam_binding.py +464 -0
- pulumi_gcp/compute/backend_service_iam_member.py +464 -0
- pulumi_gcp/compute/backend_service_iam_policy.py +464 -0
- pulumi_gcp/compute/cross_site_network.py +16 -0
- pulumi_gcp/compute/future_reservation.py +32 -0
- pulumi_gcp/compute/get_backend_bucket.py +12 -1
- pulumi_gcp/compute/get_backend_service.py +12 -1
- pulumi_gcp/compute/get_region_backend_service.py +12 -1
- pulumi_gcp/compute/get_resource_policy.py +2 -20
- pulumi_gcp/compute/machine_image_iam_binding.py +8 -4
- pulumi_gcp/compute/machine_image_iam_member.py +8 -4
- pulumi_gcp/compute/machine_image_iam_policy.py +8 -4
- pulumi_gcp/compute/network_edge_security_service.py +16 -0
- pulumi_gcp/compute/network_firewall_policy_packet_mirroring_rule.py +12 -0
- pulumi_gcp/compute/network_peering_routes_config.py +0 -100
- pulumi_gcp/compute/organization_security_policy.py +83 -51
- pulumi_gcp/compute/outputs.py +799 -16
- pulumi_gcp/compute/packet_mirroring.py +6 -0
- pulumi_gcp/compute/preview_feature.py +16 -0
- pulumi_gcp/compute/public_delegated_prefix.py +42 -0
- pulumi_gcp/compute/region_backend_service.py +54 -0
- pulumi_gcp/compute/region_backend_service_iam_binding.py +488 -0
- pulumi_gcp/compute/region_backend_service_iam_member.py +488 -0
- pulumi_gcp/compute/region_backend_service_iam_policy.py +488 -0
- pulumi_gcp/compute/region_network_endpoint_group.py +1 -3
- pulumi_gcp/compute/region_resize_request.py +24 -0
- pulumi_gcp/compute/region_url_map.py +75 -0
- pulumi_gcp/compute/subnetwork.py +0 -98
- pulumi_gcp/compute/wire_group.py +16 -0
- pulumi_gcp/config/__init__.pyi +2 -2
- pulumi_gcp/config/vars.py +4 -4
- pulumi_gcp/container/_inputs.py +466 -18
- pulumi_gcp/container/cluster.py +16 -7
- pulumi_gcp/container/outputs.py +448 -15
- pulumi_gcp/databasemigrationservice/connection_profile.py +4 -2
- pulumi_gcp/databasemigrationservice/migration_job.py +4 -2
- pulumi_gcp/dataflow/flex_template_job.py +10 -0
- pulumi_gcp/dataform/repository.py +16 -0
- pulumi_gcp/dataform/repository_release_config.py +16 -0
- pulumi_gcp/dataform/repository_workflow_config.py +16 -0
- pulumi_gcp/diagflow/_inputs.py +152 -0
- pulumi_gcp/diagflow/cx_agent.py +425 -0
- pulumi_gcp/diagflow/outputs.py +143 -0
- pulumi_gcp/discoveryengine/__init__.py +1 -0
- pulumi_gcp/discoveryengine/_inputs.py +94 -0
- pulumi_gcp/discoveryengine/acl_config.py +393 -0
- pulumi_gcp/discoveryengine/outputs.py +92 -0
- pulumi_gcp/firebase/android_app.py +16 -0
- pulumi_gcp/firebase/app_check_play_integrity_config.py +4 -8
- pulumi_gcp/firebase/app_check_recaptcha_enterprise_config.py +2 -4
- pulumi_gcp/firebase/app_check_service_config.py +6 -12
- pulumi_gcp/firebase/app_hosting_backend.py +4 -8
- pulumi_gcp/firebase/app_hosting_build.py +4 -8
- pulumi_gcp/firebase/app_hosting_traffic.py +6 -12
- pulumi_gcp/firebase/apple_app.py +16 -0
- pulumi_gcp/firebase/data_connect_service.py +4 -8
- pulumi_gcp/firebase/database_instance.py +20 -8
- pulumi_gcp/firebase/extensions_instance.py +12 -0
- pulumi_gcp/firebase/get_android_app.py +4 -2
- pulumi_gcp/firebase/get_apple_app.py +4 -2
- pulumi_gcp/firebase/get_apple_app_config.py +16 -2
- pulumi_gcp/firebase/get_hosting_channel.py +4 -2
- pulumi_gcp/firebase/hosting_channel.py +20 -0
- pulumi_gcp/firebase/hosting_custom_domain.py +20 -0
- pulumi_gcp/firebase/hosting_release.py +16 -0
- pulumi_gcp/firebase/hosting_site.py +16 -0
- pulumi_gcp/firebase/hosting_version.py +16 -0
- pulumi_gcp/firebase/storage_bucket.py +18 -0
- pulumi_gcp/firestore/index.py +118 -3
- pulumi_gcp/folder/service_identity.py +26 -0
- pulumi_gcp/gkehub/_inputs.py +0 -60
- pulumi_gcp/gkehub/get_membership.py +1 -12
- pulumi_gcp/gkehub/membership.py +0 -70
- pulumi_gcp/gkehub/membership_rbac_role_binding.py +16 -0
- pulumi_gcp/gkehub/outputs.py +0 -36
- pulumi_gcp/healthcare/_inputs.py +205 -0
- pulumi_gcp/healthcare/fhir_store.py +128 -0
- pulumi_gcp/healthcare/outputs.py +163 -0
- pulumi_gcp/iam/workload_identity_pool_iam_binding.py +464 -0
- pulumi_gcp/iam/workload_identity_pool_iam_member.py +464 -0
- pulumi_gcp/iam/workload_identity_pool_iam_policy.py +464 -0
- pulumi_gcp/iam/workload_identity_pool_managed_identity.py +18 -0
- pulumi_gcp/iam/workload_identity_pool_namespace.py +20 -0
- pulumi_gcp/iap/__init__.py +8 -0
- pulumi_gcp/iap/_inputs.py +260 -0
- pulumi_gcp/iap/get_web_forwarding_rule_service_iam_policy.py +159 -0
- pulumi_gcp/iap/get_web_region_forwarding_rule_service_iam_policy.py +182 -0
- pulumi_gcp/iap/outputs.py +152 -0
- pulumi_gcp/{beyondcorp/application_iam_binding.py → iap/web_forwarding_rule_service_iam_binding.py} +233 -315
- pulumi_gcp/{beyondcorp/application_iam_member.py → iap/web_forwarding_rule_service_iam_member.py} +233 -315
- pulumi_gcp/{beyondcorp/application_iam_policy.py → iap/web_forwarding_rule_service_iam_policy.py} +212 -294
- pulumi_gcp/iap/web_region_forwarding_rule_service_iam_binding.py +1091 -0
- pulumi_gcp/iap/web_region_forwarding_rule_service_iam_member.py +1091 -0
- pulumi_gcp/iap/web_region_forwarding_rule_service_iam_policy.py +910 -0
- pulumi_gcp/kms/autokey_config.py +0 -2
- pulumi_gcp/kms/get_kms_secret_asymmetric.py +22 -0
- pulumi_gcp/kms/key_handle.py +0 -2
- pulumi_gcp/logging/organization_sink.py +7 -7
- pulumi_gcp/managedkafka/connect_cluster.py +4 -0
- pulumi_gcp/managedkafka/connector.py +4 -0
- pulumi_gcp/memorystore/get_instance.py +1 -12
- pulumi_gcp/memorystore/instance.py +14 -84
- pulumi_gcp/netapp/storage_pool.py +91 -2
- pulumi_gcp/netapp/volume.py +47 -0
- pulumi_gcp/networkmanagement/__init__.py +1 -0
- pulumi_gcp/networkmanagement/organization_vpc_flow_logs_config.py +1028 -0
- pulumi_gcp/networksecurity/authorization_policy.py +12 -0
- pulumi_gcp/networkservices/gateway.py +7 -7
- pulumi_gcp/networkservices/lb_traffic_extension.py +24 -23
- pulumi_gcp/networkservices/service_lb_policies.py +12 -0
- pulumi_gcp/notebooks/__init__.py +0 -1
- pulumi_gcp/oracledatabase/autonomous_database.py +2 -2
- pulumi_gcp/oracledatabase/cloud_vm_cluster.py +3 -3
- pulumi_gcp/projects/service.py +2 -11
- pulumi_gcp/provider.py +20 -20
- pulumi_gcp/pulumi-plugin.json +1 -1
- pulumi_gcp/redis/cluster.py +0 -70
- pulumi_gcp/redis/get_cluster.py +1 -12
- pulumi_gcp/resourcemanager/capability.py +16 -0
- pulumi_gcp/runtimeconfig/config.py +16 -0
- pulumi_gcp/runtimeconfig/config_iam_binding.py +236 -0
- pulumi_gcp/runtimeconfig/config_iam_member.py +236 -0
- pulumi_gcp/runtimeconfig/config_iam_policy.py +236 -0
- pulumi_gcp/runtimeconfig/variable.py +10 -0
- pulumi_gcp/saasruntime/__init__.py +12 -0
- pulumi_gcp/saasruntime/_inputs.py +513 -0
- pulumi_gcp/saasruntime/outputs.py +431 -0
- pulumi_gcp/saasruntime/saa_s.py +819 -0
- pulumi_gcp/saasruntime/unit_kind.py +1024 -0
- pulumi_gcp/securesourcemanager/_inputs.py +20 -19
- pulumi_gcp/securesourcemanager/instance.py +7 -7
- pulumi_gcp/securesourcemanager/outputs.py +15 -14
- pulumi_gcp/securesourcemanager/repository.py +7 -7
- pulumi_gcp/serviceaccount/get_account_key.py +1 -21
- pulumi_gcp/servicedirectory/namespace_iam_binding.py +4 -4
- pulumi_gcp/servicedirectory/namespace_iam_member.py +4 -4
- pulumi_gcp/servicedirectory/namespace_iam_policy.py +4 -4
- pulumi_gcp/servicedirectory/service_iam_binding.py +4 -4
- pulumi_gcp/servicedirectory/service_iam_member.py +4 -4
- pulumi_gcp/servicedirectory/service_iam_policy.py +4 -4
- pulumi_gcp/sql/_inputs.py +135 -1
- pulumi_gcp/sql/database_instance.py +94 -0
- pulumi_gcp/sql/get_database_instance.py +23 -1
- pulumi_gcp/sql/outputs.py +276 -3
- pulumi_gcp/sql/user.py +61 -0
- pulumi_gcp/storage/_inputs.py +253 -7
- pulumi_gcp/storage/bucket_object.py +0 -9
- pulumi_gcp/storage/outputs.py +198 -8
- pulumi_gcp/storage/transfer_job.py +47 -0
- pulumi_gcp/tpu/__init__.py +0 -2
- pulumi_gcp/tpu/_inputs.py +0 -93
- pulumi_gcp/tpu/outputs.py +0 -72
- pulumi_gcp/tpu/v2_queued_resource.py +16 -0
- pulumi_gcp/tpu/v2_vm.py +16 -0
- pulumi_gcp/vertex/_inputs.py +8 -9
- pulumi_gcp/vertex/ai_endpoint.py +2 -4
- pulumi_gcp/vertex/ai_feature_group_iam_binding.py +236 -0
- pulumi_gcp/vertex/ai_feature_group_iam_member.py +236 -0
- pulumi_gcp/vertex/ai_feature_group_iam_policy.py +236 -0
- pulumi_gcp/vertex/ai_feature_online_store_featureview.py +0 -2
- pulumi_gcp/vertex/ai_feature_online_store_featureview_iam_binding.py +248 -0
- pulumi_gcp/vertex/ai_feature_online_store_featureview_iam_member.py +248 -0
- pulumi_gcp/vertex/ai_feature_online_store_featureview_iam_policy.py +248 -0
- pulumi_gcp/vertex/ai_feature_online_store_iam_binding.py +236 -0
- pulumi_gcp/vertex/ai_feature_online_store_iam_member.py +236 -0
- pulumi_gcp/vertex/ai_feature_online_store_iam_policy.py +236 -0
- pulumi_gcp/vertex/ai_feature_store_entity_type_iam_binding.py +238 -0
- pulumi_gcp/vertex/ai_feature_store_entity_type_iam_member.py +238 -0
- pulumi_gcp/vertex/ai_feature_store_entity_type_iam_policy.py +238 -0
- pulumi_gcp/vertex/ai_feature_store_iam_binding.py +248 -0
- pulumi_gcp/vertex/ai_feature_store_iam_member.py +248 -0
- pulumi_gcp/vertex/ai_feature_store_iam_policy.py +248 -0
- pulumi_gcp/vertex/ai_index.py +24 -23
- pulumi_gcp/vertex/ai_metadata_store.py +16 -0
- pulumi_gcp/vertex/outputs.py +7 -8
- pulumi_gcp/workstations/workstation.py +16 -0
- pulumi_gcp/workstations/workstation_cluster.py +16 -0
- pulumi_gcp/workstations/workstation_config.py +16 -0
- pulumi_gcp/workstations/workstation_config_iam_binding.py +260 -0
- pulumi_gcp/workstations/workstation_config_iam_member.py +260 -0
- pulumi_gcp/workstations/workstation_config_iam_policy.py +260 -0
- pulumi_gcp/workstations/workstation_iam_binding.py +272 -0
- pulumi_gcp/workstations/workstation_iam_member.py +272 -0
- pulumi_gcp/workstations/workstation_iam_policy.py +272 -0
- {pulumi_gcp-8.42.0a1758178363.dist-info → pulumi_gcp-9.0.0.dist-info}/METADATA +1 -1
- {pulumi_gcp-8.42.0a1758178363.dist-info → pulumi_gcp-9.0.0.dist-info}/RECORD +266 -255
- pulumi_gcp/beyondcorp/application.py +0 -746
- pulumi_gcp/beyondcorp/get_application_iam_policy.py +0 -182
- pulumi_gcp/notebooks/location.py +0 -285
- pulumi_gcp/tpu/get_tensorflow_versions.py +0 -184
- pulumi_gcp/tpu/node.py +0 -1062
- {pulumi_gcp-8.42.0a1758178363.dist-info → pulumi_gcp-9.0.0.dist-info}/WHEEL +0 -0
- {pulumi_gcp-8.42.0a1758178363.dist-info → pulumi_gcp-9.0.0.dist-info}/top_level.txt +0 -0
pulumi_gcp/container/_inputs.py
CHANGED
@@ -331,6 +331,10 @@ __all__ = [
     'ClusterNodeConfigKubeletConfigEvictionSoftArgsDict',
     'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs',
     'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict',
+    'ClusterNodeConfigKubeletConfigMemoryManagerArgs',
+    'ClusterNodeConfigKubeletConfigMemoryManagerArgsDict',
+    'ClusterNodeConfigKubeletConfigTopologyManagerArgs',
+    'ClusterNodeConfigKubeletConfigTopologyManagerArgsDict',
     'ClusterNodeConfigLinuxNodeConfigArgs',
     'ClusterNodeConfigLinuxNodeConfigArgsDict',
     'ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs',
@@ -437,6 +441,10 @@ __all__ = [
     'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgsDict',
     'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs',
     'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict',
+    'ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs',
+    'ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgsDict',
+    'ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs',
+    'ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgsDict',
     'ClusterNodePoolNodeConfigLinuxNodeConfigArgs',
     'ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict',
     'ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs',
@@ -573,6 +581,10 @@ __all__ = [
     'NodePoolNodeConfigKubeletConfigEvictionSoftArgsDict',
     'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs',
     'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict',
+    'NodePoolNodeConfigKubeletConfigMemoryManagerArgs',
+    'NodePoolNodeConfigKubeletConfigMemoryManagerArgsDict',
+    'NodePoolNodeConfigKubeletConfigTopologyManagerArgs',
+    'NodePoolNodeConfigKubeletConfigTopologyManagerArgsDict',
     'NodePoolNodeConfigLinuxNodeConfigArgs',
     'NodePoolNodeConfigLinuxNodeConfigArgsDict',
     'NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs',
@@ -5811,6 +5823,10 @@ if not MYPY:
         feature, which lets you choose whether the cluster autoscaler should optimize for resource utilization or resource availability
         when deciding to remove nodes from a cluster. Can be `BALANCED` or `OPTIMIZE_UTILIZATION`. Defaults to `BALANCED`.
         """
+        default_compute_class_enabled: NotRequired[pulumi.Input[_builtins.bool]]
+        """
+        Specifies whether default compute class behaviour is enabled. If enabled, cluster autoscaler will use Compute Class with name default for all the workloads, if not overriden.
+        """
         enabled: NotRequired[pulumi.Input[_builtins.bool]]
         """
         Whether node auto-provisioning is enabled. Must be supplied for GKE Standard clusters, `true` is implied
@@ -5832,6 +5848,7 @@ class ClusterClusterAutoscalingArgs:
                  auto_provisioning_defaults: Optional[pulumi.Input['ClusterClusterAutoscalingAutoProvisioningDefaultsArgs']] = None,
                  auto_provisioning_locations: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                  autoscaling_profile: Optional[pulumi.Input[_builtins.str]] = None,
+                 default_compute_class_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
                  enabled: Optional[pulumi.Input[_builtins.bool]] = None,
                  resource_limits: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterClusterAutoscalingResourceLimitArgs']]]] = None):
         """
@@ -5845,6 +5862,7 @@ class ClusterClusterAutoscalingArgs:
         options for the [Autoscaling profile](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#autoscaling_profiles)
         feature, which lets you choose whether the cluster autoscaler should optimize for resource utilization or resource availability
         when deciding to remove nodes from a cluster. Can be `BALANCED` or `OPTIMIZE_UTILIZATION`. Defaults to `BALANCED`.
+        :param pulumi.Input[_builtins.bool] default_compute_class_enabled: Specifies whether default compute class behaviour is enabled. If enabled, cluster autoscaler will use Compute Class with name default for all the workloads, if not overriden.
         :param pulumi.Input[_builtins.bool] enabled: Whether node auto-provisioning is enabled. Must be supplied for GKE Standard clusters, `true` is implied
         for autopilot clusters. Resource limits for `cpu` and `memory` must be defined to enable node auto-provisioning for GKE Standard.
         :param pulumi.Input[Sequence[pulumi.Input['ClusterClusterAutoscalingResourceLimitArgs']]] resource_limits: Global constraints for machine resources in the
@@ -5858,6 +5876,8 @@ class ClusterClusterAutoscalingArgs:
             pulumi.set(__self__, "auto_provisioning_locations", auto_provisioning_locations)
         if autoscaling_profile is not None:
             pulumi.set(__self__, "autoscaling_profile", autoscaling_profile)
+        if default_compute_class_enabled is not None:
+            pulumi.set(__self__, "default_compute_class_enabled", default_compute_class_enabled)
         if enabled is not None:
             pulumi.set(__self__, "enabled", enabled)
         if resource_limits is not None:
@@ -5906,6 +5926,18 @@ class ClusterClusterAutoscalingArgs:
     def autoscaling_profile(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "autoscaling_profile", value)
 
+    @_builtins.property
+    @pulumi.getter(name="defaultComputeClassEnabled")
+    def default_compute_class_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
+        """
+        Specifies whether default compute class behaviour is enabled. If enabled, cluster autoscaler will use Compute Class with name default for all the workloads, if not overriden.
+        """
+        return pulumi.get(self, "default_compute_class_enabled")
+
+    @default_compute_class_enabled.setter
+    def default_compute_class_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
+        pulumi.set(self, "default_compute_class_enabled", value)
+
     @_builtins.property
     @pulumi.getter
     def enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
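The new field slots into the existing `cluster_autoscaling` block of `gcp.container.Cluster`. A minimal sketch of opting a GKE Standard cluster into the default compute class follows; the resource name, location, and resource limits are illustrative, not taken from this diff:

import pulumi_gcp as gcp

# Sketch: enable default-compute-class behaviour for node auto-provisioning.
cluster = gcp.container.Cluster(
    "example-cluster",
    location="us-central1",  # illustrative location
    initial_node_count=1,
    cluster_autoscaling={
        "enabled": True,
        # New in 9.0.0: workloads without an explicit compute class are
        # handled via the Compute Class named "default".
        "default_compute_class_enabled": True,
        # cpu and memory limits are required for GKE Standard clusters.
        "resource_limits": [
            {"resource_type": "cpu", "minimum": 1, "maximum": 16},
            {"resource_type": "memory", "minimum": 1, "maximum": 64},
        ],
    },
)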
@@ -7128,7 +7160,7 @@ if not MYPY:
         """
         desired_tier: NotRequired[pulumi.Input[_builtins.str]]
         """
-        Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`.
+        (DEPRECATED) Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`. Deprecated as GKE Enterprise features are now available without an Enterprise tier. See https://cloud.google.com/blog/products/containers-kubernetes/gke-gets-new-pricing-and-capabilities-on-10th-birthday for the announcement of this change.
         """
 elif False:
     ClusterEnterpriseConfigArgsDict: TypeAlias = Mapping[str, Any]
@@ -7140,15 +7172,22 @@ class ClusterEnterpriseConfigArgs:
                  desired_tier: Optional[pulumi.Input[_builtins.str]] = None):
         """
         :param pulumi.Input[_builtins.str] cluster_tier: The effective tier of the cluster.
-        :param pulumi.Input[_builtins.str] desired_tier: Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`.
+        :param pulumi.Input[_builtins.str] desired_tier: (DEPRECATED) Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`. Deprecated as GKE Enterprise features are now available without an Enterprise tier. See https://cloud.google.com/blog/products/containers-kubernetes/gke-gets-new-pricing-and-capabilities-on-10th-birthday for the announcement of this change.
         """
+        if cluster_tier is not None:
+            warnings.warn("""GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""", DeprecationWarning)
+            pulumi.log.warn("""cluster_tier is deprecated: GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""")
         if cluster_tier is not None:
             pulumi.set(__self__, "cluster_tier", cluster_tier)
+        if desired_tier is not None:
+            warnings.warn("""GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""", DeprecationWarning)
+            pulumi.log.warn("""desired_tier is deprecated: GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""")
         if desired_tier is not None:
             pulumi.set(__self__, "desired_tier", desired_tier)
 
     @_builtins.property
     @pulumi.getter(name="clusterTier")
+    @_utilities.deprecated("""GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""")
     def cluster_tier(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The effective tier of the cluster.
@@ -7161,9 +7200,10 @@ class ClusterEnterpriseConfigArgs:
 
     @_builtins.property
     @pulumi.getter(name="desiredTier")
+    @_utilities.deprecated("""GKE Enterprise features are now available without an Enterprise tier. This field is deprecated and will be removed in a future major release""")
     def desired_tier(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
-        Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`.
+        (DEPRECATED) Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`. Deprecated as GKE Enterprise features are now available without an Enterprise tier. See https://cloud.google.com/blog/products/containers-kubernetes/gke-gets-new-pricing-and-capabilities-on-10th-birthday for the announcement of this change.
         """
         return pulumi.get(self, "desired_tier")
 
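With the decorators above, programs that still set `enterprise_config` keep working in 9.0.0 but now see deprecation warnings at runtime. A hedged sketch of what that looks like; the resource name and location are illustrative:

import pulumi_gcp as gcp

# Still valid, but desired_tier now triggers warnings.warn(DeprecationWarning)
# and pulumi.log.warn at preview/up time; removing the block is the fix.
cluster = gcp.container.Cluster(
    "tiered-cluster",
    location="us-central1",
    initial_node_count=1,
    enterprise_config={"desired_tier": "STANDARD"},  # deprecated field
)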
@@ -8831,7 +8871,7 @@ if not MYPY:
         disk_type: NotRequired[pulumi.Input[_builtins.str]]
         """
         Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced'
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
         """
         effective_taints: NotRequired[pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigEffectiveTaintArgsDict']]]]
         """
@@ -9110,7 +9150,7 @@ class ClusterNodeConfigArgs:
         in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
         Prefer configuring `boot_disk`.
         :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced'
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
         :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
         :param pulumi.Input[_builtins.bool] enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
         :param pulumi.Input['ClusterNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -9382,7 +9422,7 @@ class ClusterNodeConfigArgs:
     def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced'
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
         """
         return pulumi.get(self, "disk_type")
 
@@ -9993,7 +10033,7 @@ if not MYPY:
         disk_type: NotRequired[pulumi.Input[_builtins.str]]
         """
         Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced').
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
         """
         provisioned_iops: NotRequired[pulumi.Input[_builtins.int]]
         """
@@ -10020,7 +10060,7 @@ class ClusterNodeConfigBootDiskArgs:
                  size_gb: Optional[pulumi.Input[_builtins.int]] = None):
         """
         :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced').
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
         :param pulumi.Input[_builtins.int] provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
         :param pulumi.Input[_builtins.int] provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
         :param pulumi.Input[_builtins.int] size_gb: Size of the disk attached to each node, specified
@@ -10040,7 +10080,7 @@ class ClusterNodeConfigBootDiskArgs:
     def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced').
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
         """
         return pulumi.get(self, "disk_type")
 
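Given the migration notes above, new configurations should set the disk on `node_config.boot_disk` rather than the legacy `node_config.disk_type`. A minimal sketch; the machine type is illustrative, and the IOPS/throughput figures are assumed Hyperdisk Balanced baselines rather than values prescribed by this diff:

import pulumi_gcp as gcp

# Prefer boot_disk; if node_config.disk_type is also set, the two must match.
cluster = gcp.container.Cluster(
    "hyperdisk-cluster",
    location="us-central1",
    initial_node_count=1,
    node_config={
        "machine_type": "n4-standard-4",  # illustrative machine type
        "boot_disk": {
            "disk_type": "hyperdisk-balanced",
            "size_gb": 100,
            # Only valid for hyperdisk-balanced; see the hyperdisk
            # performance-limits docs for the allowed ranges.
            "provisioned_iops": 3000,
            "provisioned_throughput": 140,
        },
    },
)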
@@ -10878,6 +10918,12 @@ if not MYPY:
         """
         Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
         """
+        memory_manager: NotRequired[pulumi.Input['ClusterNodeConfigKubeletConfigMemoryManagerArgsDict']]
+        """
+        Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+        The memory manager optimizes memory and hugepages allocation for pods, especially
+        those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
+        """
         pod_pids_limit: NotRequired[pulumi.Input[_builtins.int]]
         """
         Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
@@ -10886,6 +10932,10 @@ if not MYPY:
         """
         Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
         """
+        topology_manager: NotRequired[pulumi.Input['ClusterNodeConfigKubeletConfigTopologyManagerArgsDict']]
+        """
+        These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
+        """
 elif False:
     ClusterNodeConfigKubeletConfigArgsDict: TypeAlias = Mapping[str, Any]
 
@@ -10908,8 +10958,10 @@ class ClusterNodeConfigKubeletConfigArgs:
                  image_minimum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
                  insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[_builtins.str]] = None,
                  max_parallel_image_pulls: Optional[pulumi.Input[_builtins.int]] = None,
+                 memory_manager: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigMemoryManagerArgs']] = None,
                  pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None,
-                 single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None):
+                 single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None,
+                 topology_manager: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigTopologyManagerArgs']] = None):
         """
         :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
         :param pulumi.Input[_builtins.int] container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -10939,8 +10991,12 @@ class ClusterNodeConfigKubeletConfigArgs:
         :param pulumi.Input[_builtins.str] image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
         :param pulumi.Input[_builtins.str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
         :param pulumi.Input[_builtins.int] max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
+        :param pulumi.Input['ClusterNodeConfigKubeletConfigMemoryManagerArgs'] memory_manager: Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+               The memory manager optimizes memory and hugepages allocation for pods, especially
+               those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
         :param pulumi.Input[_builtins.int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
         :param pulumi.Input[_builtins.bool] single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
+        :param pulumi.Input['ClusterNodeConfigKubeletConfigTopologyManagerArgs'] topology_manager: These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
         """
         if allowed_unsafe_sysctls is not None:
             pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -10974,10 +11030,14 @@ class ClusterNodeConfigKubeletConfigArgs:
             pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
         if max_parallel_image_pulls is not None:
             pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+        if memory_manager is not None:
+            pulumi.set(__self__, "memory_manager", memory_manager)
         if pod_pids_limit is not None:
             pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
         if single_process_oom_kill is not None:
             pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+        if topology_manager is not None:
+            pulumi.set(__self__, "topology_manager", topology_manager)
 
     @_builtins.property
     @pulumi.getter(name="allowedUnsafeSysctls")
@@ -11183,6 +11243,20 @@ class ClusterNodeConfigKubeletConfigArgs:
     def max_parallel_image_pulls(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "max_parallel_image_pulls", value)
 
+    @_builtins.property
+    @pulumi.getter(name="memoryManager")
+    def memory_manager(self) -> Optional[pulumi.Input['ClusterNodeConfigKubeletConfigMemoryManagerArgs']]:
+        """
+        Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+        The memory manager optimizes memory and hugepages allocation for pods, especially
+        those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
+        """
+        return pulumi.get(self, "memory_manager")
+
+    @memory_manager.setter
+    def memory_manager(self, value: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigMemoryManagerArgs']]):
+        pulumi.set(self, "memory_manager", value)
+
     @_builtins.property
     @pulumi.getter(name="podPidsLimit")
     def pod_pids_limit(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -11207,6 +11281,18 @@ class ClusterNodeConfigKubeletConfigArgs:
     def single_process_oom_kill(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "single_process_oom_kill", value)
 
+    @_builtins.property
+    @pulumi.getter(name="topologyManager")
+    def topology_manager(self) -> Optional[pulumi.Input['ClusterNodeConfigKubeletConfigTopologyManagerArgs']]:
+        """
+        These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
+        """
+        return pulumi.get(self, "topology_manager")
+
+    @topology_manager.setter
+    def topology_manager(self, value: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigTopologyManagerArgs']]):
+        pulumi.set(self, "topology_manager", value)
+
 
 if not MYPY:
     class ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict(TypedDict):
@@ -11604,6 +11690,102 @@ class ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs:
         pulumi.set(self, "pid_available", value)
 
 
+if not MYPY:
+    class ClusterNodeConfigKubeletConfigMemoryManagerArgsDict(TypedDict):
+        policy: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The [Memory
+        Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+        policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+        """
+elif False:
+    ClusterNodeConfigKubeletConfigMemoryManagerArgsDict: TypeAlias = Mapping[str, Any]
+
+@pulumi.input_type
+class ClusterNodeConfigKubeletConfigMemoryManagerArgs:
+    def __init__(__self__, *,
+                 policy: Optional[pulumi.Input[_builtins.str]] = None):
+        """
+        :param pulumi.Input[_builtins.str] policy: The [Memory
+               Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+               policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+        """
+        if policy is not None:
+            pulumi.set(__self__, "policy", policy)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The [Memory
+        Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+        policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+        """
+        return pulumi.get(self, "policy")
+
+    @policy.setter
+    def policy(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "policy", value)
+
+
+if not MYPY:
+    class ClusterNodeConfigKubeletConfigTopologyManagerArgsDict(TypedDict):
+        policy: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+        """
+        scope: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The Topology Manager scope, defining the granularity at which
+        policy decisions are applied. Valid values are "container" (resources are aligned
+        per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+        """
+elif False:
+    ClusterNodeConfigKubeletConfigTopologyManagerArgsDict: TypeAlias = Mapping[str, Any]
+
+@pulumi.input_type
+class ClusterNodeConfigKubeletConfigTopologyManagerArgs:
+    def __init__(__self__, *,
+                 policy: Optional[pulumi.Input[_builtins.str]] = None,
+                 scope: Optional[pulumi.Input[_builtins.str]] = None):
+        """
+        :param pulumi.Input[_builtins.str] policy: The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+        :param pulumi.Input[_builtins.str] scope: The Topology Manager scope, defining the granularity at which
+               policy decisions are applied. Valid values are "container" (resources are aligned
+               per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+        """
+        if policy is not None:
+            pulumi.set(__self__, "policy", policy)
+        if scope is not None:
+            pulumi.set(__self__, "scope", scope)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+        """
+        return pulumi.get(self, "policy")
+
+    @policy.setter
+    def policy(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "policy", value)
+
+    @_builtins.property
+    @pulumi.getter
+    def scope(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The Topology Manager scope, defining the granularity at which
+        policy decisions are applied. Valid values are "container" (resources are aligned
+        per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+        """
+        return pulumi.get(self, "scope")
+
+    @scope.setter
+    def scope(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "scope", value)
+
+
 if not MYPY:
     class ClusterNodeConfigLinuxNodeConfigArgsDict(TypedDict):
         cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
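Taken together with the plumbing above, the two new classes are reachable from `node_config.kubelet_config` on `gcp.container.Cluster`. A minimal sketch using only the policy/scope values named in the docstrings; the cluster name, location, and machine type are illustrative:

import pulumi_gcp as gcp

# Sketch: Static memory manager plus pod-scoped single-NUMA topology alignment.
cluster = gcp.container.Cluster(
    "numa-aware-cluster",
    location="us-central1",
    initial_node_count=1,
    node_config={
        "machine_type": "c3-standard-22",  # illustrative multi-NUMA machine type
        "kubelet_config": {
            # policy: "None" (default) or "Static"
            "memory_manager": {"policy": "Static"},
            # policy: none | best-effort | restricted | single-numa-node
            # scope: "container" (default) or "pod"
            "topology_manager": {"policy": "single-numa-node", "scope": "pod"},
        },
    },
)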
@@ -13848,7 +14030,7 @@ if not MYPY:
         disk_type: NotRequired[pulumi.Input[_builtins.str]]
         """
         Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced'
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
         """
         effective_taints: NotRequired[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigEffectiveTaintArgsDict']]]]
         """
@@ -14127,7 +14309,7 @@ class ClusterNodePoolNodeConfigArgs:
         in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
         Prefer configuring `boot_disk`.
         :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced'
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
         :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
         :param pulumi.Input[_builtins.bool] enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
         :param pulumi.Input['ClusterNodePoolNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -14399,7 +14581,7 @@ class ClusterNodePoolNodeConfigArgs:
     def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced'
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
         """
         return pulumi.get(self, "disk_type")
 
@@ -15010,7 +15192,7 @@ if not MYPY:
         disk_type: NotRequired[pulumi.Input[_builtins.str]]
         """
         Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced').
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
         """
         provisioned_iops: NotRequired[pulumi.Input[_builtins.int]]
         """
@@ -15037,7 +15219,7 @@ class ClusterNodePoolNodeConfigBootDiskArgs:
                  size_gb: Optional[pulumi.Input[_builtins.int]] = None):
         """
         :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced').
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
         :param pulumi.Input[_builtins.int] provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
         :param pulumi.Input[_builtins.int] provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
         :param pulumi.Input[_builtins.int] size_gb: Size of the disk attached to each node, specified
@@ -15057,7 +15239,7 @@ class ClusterNodePoolNodeConfigBootDiskArgs:
     def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced').
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
         """
         return pulumi.get(self, "disk_type")
 
@@ -15895,6 +16077,12 @@ if not MYPY:
         """
         Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
         """
+        memory_manager: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgsDict']]
+        """
+        Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+        The memory manager optimizes memory and hugepages allocation for pods, especially
+        those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
+        """
         pod_pids_limit: NotRequired[pulumi.Input[_builtins.int]]
         """
         Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
@@ -15903,6 +16091,10 @@ if not MYPY:
         """
         Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
         """
+        topology_manager: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgsDict']]
+        """
+        These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
+        """
 elif False:
     ClusterNodePoolNodeConfigKubeletConfigArgsDict: TypeAlias = Mapping[str, Any]
 
@@ -15925,8 +16117,10 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
                  image_minimum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
                  insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[_builtins.str]] = None,
                  max_parallel_image_pulls: Optional[pulumi.Input[_builtins.int]] = None,
+                 memory_manager: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs']] = None,
                  pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None,
-                 single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None):
+                 single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None,
+                 topology_manager: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs']] = None):
         """
         :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
         :param pulumi.Input[_builtins.int] container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -15956,8 +16150,12 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
         :param pulumi.Input[_builtins.str] image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
         :param pulumi.Input[_builtins.str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
         :param pulumi.Input[_builtins.int] max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
+        :param pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs'] memory_manager: Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+               The memory manager optimizes memory and hugepages allocation for pods, especially
+               those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
         :param pulumi.Input[_builtins.int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
         :param pulumi.Input[_builtins.bool] single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
+        :param pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs'] topology_manager: These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
         """
         if allowed_unsafe_sysctls is not None:
             pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -15991,10 +16189,14 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
             pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
         if max_parallel_image_pulls is not None:
             pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+        if memory_manager is not None:
+            pulumi.set(__self__, "memory_manager", memory_manager)
         if pod_pids_limit is not None:
             pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
         if single_process_oom_kill is not None:
             pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+        if topology_manager is not None:
+            pulumi.set(__self__, "topology_manager", topology_manager)
 
     @_builtins.property
     @pulumi.getter(name="allowedUnsafeSysctls")
@@ -16200,6 +16402,20 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
     def max_parallel_image_pulls(self, value: Optional[pulumi.Input[_builtins.int]]):
        pulumi.set(self, "max_parallel_image_pulls", value)
 
+    @_builtins.property
+    @pulumi.getter(name="memoryManager")
+    def memory_manager(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs']]:
+        """
+        Configuration for the [memory manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/) on the node.
+        The memory manager optimizes memory and hugepages allocation for pods, especially
+        those in the Guaranteed QoS class, by influencing NUMA affinity. Structure is documented below.
+        """
+        return pulumi.get(self, "memory_manager")
+
+    @memory_manager.setter
+    def memory_manager(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs']]):
+        pulumi.set(self, "memory_manager", value)
+
     @_builtins.property
     @pulumi.getter(name="podPidsLimit")
     def pod_pids_limit(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -16224,6 +16440,18 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
     def single_process_oom_kill(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "single_process_oom_kill", value)
 
+    @_builtins.property
+    @pulumi.getter(name="topologyManager")
+    def topology_manager(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs']]:
+        """
+        These settings control the kubelet's [Topology Manager policy](https://kubernetes.io/docs/tasks/administer-cluster/topology-manager/#topology-manager-policies), which coordinates the set of components responsible for performance optimizations related to CPU isolation, memory, and device locality. Structure is documented below.
+        """
+        return pulumi.get(self, "topology_manager")
+
+    @topology_manager.setter
+    def topology_manager(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs']]):
+        pulumi.set(self, "topology_manager", value)
+
|
16227
16455
|
|
16228
16456
|
if not MYPY:
|
16229
16457
|
class ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict(TypedDict):
|
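The hunks above add `memory_manager` and `topology_manager` inputs to the kubelet config of node pools defined inline on a cluster. A minimal usage sketch, assuming the `pulumi_gcp.container` nesting implied by the class names in this diff; the cluster name, location, machine type, and policy choices are illustrative only:

```python
import pulumi_gcp as gcp

# Sketch: wire the new kubelet-config fields into an inline node pool.
# "Static" pins Guaranteed-QoS pod memory to NUMA nodes; "single-numa-node"
# with scope "pod" asks the kubelet to co-locate all of a pod's resources.
cluster = gcp.container.Cluster(
    "numa-aware-cluster",
    location="us-central1",
    node_pools=[gcp.container.ClusterNodePoolArgs(
        name="numa-pool",
        node_count=1,
        node_config=gcp.container.ClusterNodePoolNodeConfigArgs(
            machine_type="c3-standard-22",  # placeholder multi-NUMA shape
            kubelet_config=gcp.container.ClusterNodePoolNodeConfigKubeletConfigArgs(
                memory_manager=gcp.container.ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs(
                    policy="Static",
                ),
                topology_manager=gcp.container.ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs(
                    policy="single-numa-node",
                    scope="pod",
                ),
            ),
        ),
    )],
)
```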
@@ -16621,6 +16849,102 @@ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs:
         pulumi.set(self, "pid_available", value)


+if not MYPY:
+    class ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgsDict(TypedDict):
+        policy: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The [Memory
+        Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+        policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+        """
+elif False:
+    ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgsDict: TypeAlias = Mapping[str, Any]
+
+@pulumi.input_type
+class ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgs:
+    def __init__(__self__, *,
+                 policy: Optional[pulumi.Input[_builtins.str]] = None):
+        """
+        :param pulumi.Input[_builtins.str] policy: The [Memory
+               Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+               policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+        """
+        if policy is not None:
+            pulumi.set(__self__, "policy", policy)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The [Memory
+        Manager](https://kubernetes.io/docs/tasks/administer-cluster/memory-manager/)
+        policy can be set to None (default) or Static. This policy dictates how memory alignment is handled on the node. If unset (or set to the empty string `""`), the API will treat the field as if set to "None".
+        """
+        return pulumi.get(self, "policy")
+
+    @policy.setter
+    def policy(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "policy", value)
+
+
+if not MYPY:
+    class ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgsDict(TypedDict):
+        policy: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+        """
+        scope: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The Topology Manager scope, defining the granularity at which
+        policy decisions are applied. Valid values are "container" (resources are aligned
+        per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+        """
+elif False:
+    ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgsDict: TypeAlias = Mapping[str, Any]
+
+@pulumi.input_type
+class ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgs:
+    def __init__(__self__, *,
+                 policy: Optional[pulumi.Input[_builtins.str]] = None,
+                 scope: Optional[pulumi.Input[_builtins.str]] = None):
+        """
+        :param pulumi.Input[_builtins.str] policy: The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+        :param pulumi.Input[_builtins.str] scope: The Topology Manager scope, defining the granularity at which
+               policy decisions are applied. Valid values are "container" (resources are aligned
+               per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+        """
+        if policy is not None:
+            pulumi.set(__self__, "policy", policy)
+        if scope is not None:
+            pulumi.set(__self__, "scope", scope)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The Topology Manager policy controls resource alignment on the node and can be set to one of the following: none (default), best-effort, restricted, or single-numa-node. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
+        """
+        return pulumi.get(self, "policy")
+
+    @policy.setter
+    def policy(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "policy", value)
+
+    @_builtins.property
+    @pulumi.getter
+    def scope(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The Topology Manager scope, defining the granularity at which
+        policy decisions are applied. Valid values are "container" (resources are aligned
+        per container within a pod which is set by default) or "pod" (resources are aligned for the entire pod). If unset (or set to the empty string `""`), the API will treat the field as if set to "container".
+        """
+        return pulumi.get(self, "scope")
+
+    @scope.setter
+    def scope(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "scope", value)
+
+
 if not MYPY:
     class ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict(TypedDict):
         cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
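Since the hunk above also registers `*ArgsDict` TypedDicts, the same configuration should be expressible as plain dicts. A sketch under that assumption; keys are the snake_case argument names, and per the docstrings above, an unset or `""` value falls back to `"None"`, `"none"`, and `"container"` respectively:

```python
# Dict-literal form matching ClusterNodePoolNodeConfigKubeletConfigMemoryManagerArgsDict
# and ClusterNodePoolNodeConfigKubeletConfigTopologyManagerArgsDict; interchangeable
# with the Args classes when passed as kubelet_config on a node pool.
kubelet_config = {
    "memory_manager": {"policy": "Static"},
    "topology_manager": {"policy": "best-effort", "scope": "container"},
}
```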
@@ -21390,6 +21714,10 @@ if not MYPY:
         """
         Set the maximum number of image pulls in parallel.
         """
+        memory_manager: NotRequired[pulumi.Input['NodePoolNodeConfigKubeletConfigMemoryManagerArgsDict']]
+        """
+        Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
+        """
         pod_pids_limit: NotRequired[pulumi.Input[_builtins.int]]
         """
         Controls the maximum number of processes allowed to run in a pod.
@@ -21398,6 +21726,10 @@ if not MYPY:
         """
         Defines whether to enable single process OOM killer.
         """
+        topology_manager: NotRequired[pulumi.Input['NodePoolNodeConfigKubeletConfigTopologyManagerArgsDict']]
+        """
+        Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
+        """
 elif False:
     NodePoolNodeConfigKubeletConfigArgsDict: TypeAlias = Mapping[str, Any]

@@ -21420,8 +21752,10 @@ class NodePoolNodeConfigKubeletConfigArgs:
                  image_minimum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
                  insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[_builtins.str]] = None,
                  max_parallel_image_pulls: Optional[pulumi.Input[_builtins.int]] = None,
+                 memory_manager: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigMemoryManagerArgs']] = None,
                  pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None,
-                 single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None
+                 single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None,
+                 topology_manager: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigTopologyManagerArgs']] = None):
         """
         :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
         :param pulumi.Input[_builtins.int] container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -21439,8 +21773,10 @@ class NodePoolNodeConfigKubeletConfigArgs:
         :param pulumi.Input[_builtins.str] image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
         :param pulumi.Input[_builtins.str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
         :param pulumi.Input[_builtins.int] max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
+        :param pulumi.Input['NodePoolNodeConfigKubeletConfigMemoryManagerArgs'] memory_manager: Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
         :param pulumi.Input[_builtins.int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
         :param pulumi.Input[_builtins.bool] single_process_oom_kill: Defines whether to enable single process OOM killer.
+        :param pulumi.Input['NodePoolNodeConfigKubeletConfigTopologyManagerArgs'] topology_manager: Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
         """
         if allowed_unsafe_sysctls is not None:
             pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -21474,10 +21810,14 @@ class NodePoolNodeConfigKubeletConfigArgs:
             pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
         if max_parallel_image_pulls is not None:
             pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
+        if memory_manager is not None:
+            pulumi.set(__self__, "memory_manager", memory_manager)
         if pod_pids_limit is not None:
             pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
         if single_process_oom_kill is not None:
             pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
+        if topology_manager is not None:
+            pulumi.set(__self__, "topology_manager", topology_manager)

     @_builtins.property
     @pulumi.getter(name="allowedUnsafeSysctls")
@@ -21671,6 +22011,18 @@ class NodePoolNodeConfigKubeletConfigArgs:
     def max_parallel_image_pulls(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "max_parallel_image_pulls", value)

+    @_builtins.property
+    @pulumi.getter(name="memoryManager")
+    def memory_manager(self) -> Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigMemoryManagerArgs']]:
+        """
+        Configuration for the Memory Manager on the node. The memory manager optimizes memory and hugepages allocation for pods, especially those in the Guaranteed QoS class, by influencing NUMA affinity.
+        """
+        return pulumi.get(self, "memory_manager")
+
+    @memory_manager.setter
+    def memory_manager(self, value: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigMemoryManagerArgs']]):
+        pulumi.set(self, "memory_manager", value)
+
     @_builtins.property
     @pulumi.getter(name="podPidsLimit")
     def pod_pids_limit(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -21695,6 +22047,18 @@ class NodePoolNodeConfigKubeletConfigArgs:
     def single_process_oom_kill(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "single_process_oom_kill", value)

+    @_builtins.property
+    @pulumi.getter(name="topologyManager")
+    def topology_manager(self) -> Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigTopologyManagerArgs']]:
+        """
+        Configuration for the Topology Manager on the node. The Topology Manager aligns CPU, memory, and device resources on a node to optimize performance, especially for NUMA-aware workloads, by ensuring resource co-location.
+        """
+        return pulumi.get(self, "topology_manager")
+
+    @topology_manager.setter
+    def topology_manager(self, value: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigTopologyManagerArgs']]):
+        pulumi.set(self, "topology_manager", value)
+

 if not MYPY:
     class NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict(TypedDict):
@@ -22092,6 +22456,90 @@ class NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs:
         pulumi.set(self, "pid_available", value)


+if not MYPY:
+    class NodePoolNodeConfigKubeletConfigMemoryManagerArgsDict(TypedDict):
+        policy: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+elif False:
+    NodePoolNodeConfigKubeletConfigMemoryManagerArgsDict: TypeAlias = Mapping[str, Any]
+
+@pulumi.input_type
+class NodePoolNodeConfigKubeletConfigMemoryManagerArgs:
+    def __init__(__self__, *,
+                 policy: Optional[pulumi.Input[_builtins.str]] = None):
+        """
+        :param pulumi.Input[_builtins.str] policy: The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        if policy is not None:
+            pulumi.set(__self__, "policy", policy)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The Memory Manager policy to use. This policy guides how memory and hugepages are allocated and managed for pods on the node, influencing NUMA affinity.
+        """
+        return pulumi.get(self, "policy")
+
+    @policy.setter
+    def policy(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "policy", value)
+
+
+if not MYPY:
+    class NodePoolNodeConfigKubeletConfigTopologyManagerArgsDict(TypedDict):
+        policy: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        """
+        scope: NotRequired[pulumi.Input[_builtins.str]]
+        """
+        The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+elif False:
+    NodePoolNodeConfigKubeletConfigTopologyManagerArgsDict: TypeAlias = Mapping[str, Any]
+
+@pulumi.input_type
+class NodePoolNodeConfigKubeletConfigTopologyManagerArgs:
+    def __init__(__self__, *,
+                 policy: Optional[pulumi.Input[_builtins.str]] = None,
+                 scope: Optional[pulumi.Input[_builtins.str]] = None):
+        """
+        :param pulumi.Input[_builtins.str] policy: The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        :param pulumi.Input[_builtins.str] scope: The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        if policy is not None:
+            pulumi.set(__self__, "policy", policy)
+        if scope is not None:
+            pulumi.set(__self__, "scope", scope)
+
+    @_builtins.property
+    @pulumi.getter
+    def policy(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The Topology Manager policy to use. This policy dictates how resource alignment is handled on the node.
+        """
+        return pulumi.get(self, "policy")
+
+    @policy.setter
+    def policy(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "policy", value)
+
+    @_builtins.property
+    @pulumi.getter
+    def scope(self) -> Optional[pulumi.Input[_builtins.str]]:
+        """
+        The Topology Manager scope, defining the granularity at which policy decisions are applied. Valid values are "container" (resources are aligned per container within a pod) or "pod" (resources are aligned for the entire pod).
+        """
+        return pulumi.get(self, "scope")
+
+    @scope.setter
+    def scope(self, value: Optional[pulumi.Input[_builtins.str]]):
+        pulumi.set(self, "scope", value)
+
+
 if not MYPY:
     class NodePoolNodeConfigLinuxNodeConfigArgsDict(TypedDict):
         cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
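The standalone `gcp.container.NodePool` resource gains the same pair of fields via the classes above. A sketch assuming an existing cluster; the cluster name, location, machine type, and policy values are placeholders:

```python
import pulumi_gcp as gcp

# Sketch: the same kubelet-config knobs on a standalone node pool.
# "restricted" rejects pods whose resource requests cannot be NUMA-aligned,
# while "Static" enables NUMA-aware memory/hugepages allocation.
pool = gcp.container.NodePool(
    "numa-pool",
    cluster="my-cluster",  # placeholder: an existing GKE cluster
    location="us-central1",
    node_count=1,
    node_config=gcp.container.NodePoolNodeConfigArgs(
        machine_type="c3-standard-22",
        kubelet_config=gcp.container.NodePoolNodeConfigKubeletConfigArgs(
            memory_manager=gcp.container.NodePoolNodeConfigKubeletConfigMemoryManagerArgs(
                policy="Static",
            ),
            topology_manager=gcp.container.NodePoolNodeConfigKubeletConfigTopologyManagerArgs(
                policy="restricted",
                scope="container",
            ),
        ),
    ),
)
```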