pulumi_gcp-7.7.0a1706207981-py3-none-any.whl → pulumi_gcp-7.8.0-py3-none-any.whl
- pulumi_gcp/__init__.py +73 -0
- pulumi_gcp/_utilities.py +6 -2
- pulumi_gcp/accesscontextmanager/access_level.py +2 -18
- pulumi_gcp/accesscontextmanager/access_levels.py +2 -18
- pulumi_gcp/accesscontextmanager/access_policy.py +2 -18
- pulumi_gcp/accesscontextmanager/authorized_orgs_desc.py +2 -18
- pulumi_gcp/accesscontextmanager/egress_policy.py +2 -18
- pulumi_gcp/accesscontextmanager/gcp_user_access_binding.py +2 -18
- pulumi_gcp/accesscontextmanager/ingress_policy.py +2 -18
- pulumi_gcp/accesscontextmanager/service_perimeter.py +2 -18
- pulumi_gcp/accesscontextmanager/service_perimeter_egress_policy.py +2 -18
- pulumi_gcp/accesscontextmanager/service_perimeter_ingress_policy.py +2 -18
- pulumi_gcp/accesscontextmanager/service_perimeter_resource.py +2 -18
- pulumi_gcp/accesscontextmanager/service_perimeters.py +2 -18
- pulumi_gcp/activedirectory/domain.py +2 -18
- pulumi_gcp/activedirectory/domain_trust.py +2 -18
- pulumi_gcp/alloydb/backup.py +2 -18
- pulumi_gcp/alloydb/cluster.py +26 -18
- pulumi_gcp/alloydb/instance.py +3 -19
- pulumi_gcp/alloydb/outputs.py +16 -0
- pulumi_gcp/alloydb/user.py +2 -18
- pulumi_gcp/apigateway/api.py +2 -18
- pulumi_gcp/apigateway/api_config.py +2 -18
- pulumi_gcp/apigateway/gateway.py +23 -53
- pulumi_gcp/apigee/addons_config.py +2 -18
- pulumi_gcp/apigee/endpoint_attachment.py +2 -18
- pulumi_gcp/apigee/env_group.py +2 -18
- pulumi_gcp/apigee/env_group_attachment.py +2 -18
- pulumi_gcp/apigee/env_keystore.py +2 -18
- pulumi_gcp/apigee/env_references.py +2 -18
- pulumi_gcp/apigee/environment.py +2 -18
- pulumi_gcp/apigee/flowhook.py +2 -18
- pulumi_gcp/apigee/instance.py +2 -18
- pulumi_gcp/apigee/instance_attachment.py +2 -18
- pulumi_gcp/apigee/keystores_aliases_key_cert_file.py +2 -18
- pulumi_gcp/apigee/keystores_aliases_pkcs12.py +2 -18
- pulumi_gcp/apigee/keystores_aliases_self_signed_cert.py +2 -18
- pulumi_gcp/apigee/nat_address.py +2 -18
- pulumi_gcp/apigee/organization.py +2 -18
- pulumi_gcp/apigee/sharedflow.py +2 -18
- pulumi_gcp/apigee/sharedflow_deployment.py +2 -18
- pulumi_gcp/apigee/sync_authorization.py +2 -18
- pulumi_gcp/apigee/target_server.py +2 -18
- pulumi_gcp/appengine/application.py +2 -18
- pulumi_gcp/appengine/application_url_dispatch_rules.py +2 -18
- pulumi_gcp/appengine/domain_mapping.py +2 -18
- pulumi_gcp/appengine/engine_split_traffic.py +2 -18
- pulumi_gcp/appengine/firewall_rule.py +2 -18
- pulumi_gcp/appengine/flexible_app_version.py +2 -18
- pulumi_gcp/appengine/service_network_settings.py +2 -18
- pulumi_gcp/appengine/standard_app_version.py +2 -18
- pulumi_gcp/artifactregistry/_inputs.py +60 -0
- pulumi_gcp/artifactregistry/outputs.py +264 -0
- pulumi_gcp/artifactregistry/repository.py +77 -74
- pulumi_gcp/artifactregistry/vpcsc_config.py +2 -18
- pulumi_gcp/assuredworkloads/_inputs.py +34 -0
- pulumi_gcp/assuredworkloads/outputs.py +34 -0
- pulumi_gcp/assuredworkloads/workload.py +2 -18
- pulumi_gcp/backupdisasterrecovery/management_server.py +2 -18
- pulumi_gcp/backupdisasterrecovery/outputs.py +20 -0
- pulumi_gcp/beyondcorp/app_connection.py +2 -18
- pulumi_gcp/beyondcorp/app_connector.py +2 -18
- pulumi_gcp/beyondcorp/app_gateway.py +2 -18
- pulumi_gcp/beyondcorp/outputs.py +54 -0
- pulumi_gcp/biglake/catalog.py +2 -18
- pulumi_gcp/biglake/database.py +2 -18
- pulumi_gcp/biglake/table.py +2 -18
- pulumi_gcp/bigquery/_inputs.py +18 -0
- pulumi_gcp/bigquery/app_profile.py +2 -18
- pulumi_gcp/bigquery/bi_reservation.py +2 -18
- pulumi_gcp/bigquery/capacity_commitment.py +2 -18
- pulumi_gcp/bigquery/connection.py +2 -18
- pulumi_gcp/bigquery/data_transfer_config.py +2 -18
- pulumi_gcp/bigquery/dataset.py +9 -18
- pulumi_gcp/bigquery/dataset_iam_binding.py +4 -12
- pulumi_gcp/bigquery/dataset_iam_member.py +4 -12
- pulumi_gcp/bigquery/dataset_iam_policy.py +4 -12
- pulumi_gcp/bigquery/job.py +2 -18
- pulumi_gcp/bigquery/outputs.py +172 -0
- pulumi_gcp/bigquery/reservation.py +2 -18
- pulumi_gcp/bigquery/reservation_assignment.py +2 -18
- pulumi_gcp/bigquery/routine.py +2 -18
- pulumi_gcp/bigquery/table.py +2 -18
- pulumi_gcp/bigqueryanalyticshub/data_exchange.py +2 -18
- pulumi_gcp/bigqueryanalyticshub/listing.py +2 -18
- pulumi_gcp/bigquerydatapolicy/data_policy.py +2 -18
- pulumi_gcp/bigtable/_inputs.py +4 -0
- pulumi_gcp/bigtable/instance.py +2 -18
- pulumi_gcp/bigtable/instance_iam_binding.py +4 -12
- pulumi_gcp/bigtable/instance_iam_member.py +4 -12
- pulumi_gcp/bigtable/instance_iam_policy.py +4 -12
- pulumi_gcp/bigtable/outputs.py +4 -0
- pulumi_gcp/bigtable/table.py +2 -18
- pulumi_gcp/bigtable/table_iam_binding.py +4 -12
- pulumi_gcp/bigtable/table_iam_member.py +4 -12
- pulumi_gcp/bigtable/table_iam_policy.py +4 -12
- pulumi_gcp/billing/account_iam_binding.py +4 -12
- pulumi_gcp/billing/account_iam_member.py +4 -12
- pulumi_gcp/billing/account_iam_policy.py +4 -12
- pulumi_gcp/billing/budget.py +2 -18
- pulumi_gcp/billing/project_info.py +2 -18
- pulumi_gcp/billing/sub_account.py +2 -18
- pulumi_gcp/binaryauthorization/attestor.py +2 -18
- pulumi_gcp/binaryauthorization/policy.py +2 -18
- pulumi_gcp/blockchainnodeengine/__init__.py +10 -0
- pulumi_gcp/blockchainnodeengine/_inputs.py +388 -0
- pulumi_gcp/blockchainnodeengine/blockchain_nodes.py +791 -0
- pulumi_gcp/blockchainnodeengine/outputs.py +441 -0
- pulumi_gcp/certificateauthority/authority.py +2 -18
- pulumi_gcp/certificateauthority/ca_pool.py +2 -18
- pulumi_gcp/certificateauthority/certificate.py +2 -18
- pulumi_gcp/certificateauthority/certificate_template.py +2 -18
- pulumi_gcp/certificateauthority/outputs.py +378 -0
- pulumi_gcp/certificatemanager/certificate.py +2 -18
- pulumi_gcp/certificatemanager/certificate_issuance_config.py +2 -18
- pulumi_gcp/certificatemanager/certificate_map.py +2 -18
- pulumi_gcp/certificatemanager/certificate_map_entry.py +2 -18
- pulumi_gcp/certificatemanager/dns_authorization.py +2 -18
- pulumi_gcp/certificatemanager/outputs.py +32 -0
- pulumi_gcp/certificatemanager/trust_config.py +2 -18
- pulumi_gcp/cloudasset/folder_feed.py +2 -18
- pulumi_gcp/cloudasset/organization_feed.py +2 -18
- pulumi_gcp/cloudasset/project_feed.py +2 -18
- pulumi_gcp/cloudbuild/bitbucket_server_config.py +2 -18
- pulumi_gcp/cloudbuild/outputs.py +1009 -3
- pulumi_gcp/cloudbuild/trigger.py +2 -18
- pulumi_gcp/cloudbuild/worker_pool.py +2 -18
- pulumi_gcp/cloudbuildv2/_inputs.py +14 -0
- pulumi_gcp/cloudbuildv2/connection.py +2 -18
- pulumi_gcp/cloudbuildv2/outputs.py +14 -0
- pulumi_gcp/cloudbuildv2/repository.py +2 -18
- pulumi_gcp/clouddeploy/_inputs.py +36 -0
- pulumi_gcp/clouddeploy/automation.py +2 -18
- pulumi_gcp/clouddeploy/delivery_pipeline.py +2 -18
- pulumi_gcp/clouddeploy/outputs.py +36 -0
- pulumi_gcp/clouddeploy/target.py +2 -18
- pulumi_gcp/clouddomains/registration.py +2 -18
- pulumi_gcp/cloudfunctions/_inputs.py +4 -0
- pulumi_gcp/cloudfunctions/function.py +2 -18
- pulumi_gcp/cloudfunctions/outputs.py +54 -0
- pulumi_gcp/cloudfunctionsv2/function.py +2 -18
- pulumi_gcp/cloudfunctionsv2/outputs.py +302 -0
- pulumi_gcp/cloudidentity/group.py +2 -18
- pulumi_gcp/cloudidentity/group_membership.py +2 -18
- pulumi_gcp/cloudidentity/outputs.py +54 -0
- pulumi_gcp/cloudids/endpoint.py +2 -18
- pulumi_gcp/cloudrun/_inputs.py +93 -4
- pulumi_gcp/cloudrun/domain_mapping.py +2 -18
- pulumi_gcp/cloudrun/outputs.py +867 -9
- pulumi_gcp/cloudrun/service.py +2 -18
- pulumi_gcp/cloudrunv2/_inputs.py +12 -0
- pulumi_gcp/cloudrunv2/job.py +2 -18
- pulumi_gcp/cloudrunv2/outputs.py +937 -1
- pulumi_gcp/cloudrunv2/service.py +2 -18
- pulumi_gcp/cloudscheduler/job.py +2 -18
- pulumi_gcp/cloudtasks/queue.py +2 -18
- pulumi_gcp/composer/_inputs.py +493 -0
- pulumi_gcp/composer/environment.py +2 -18
- pulumi_gcp/composer/outputs.py +996 -0
- pulumi_gcp/compute/__init__.py +2 -0
- pulumi_gcp/compute/_inputs.py +1010 -46
- pulumi_gcp/compute/address.py +2 -18
- pulumi_gcp/compute/attached_disk.py +2 -18
- pulumi_gcp/compute/autoscaler.py +2 -18
- pulumi_gcp/compute/backend_bucket.py +2 -18
- pulumi_gcp/compute/backend_service.py +30 -18
- pulumi_gcp/compute/disk.py +34 -29
- pulumi_gcp/compute/disk_resource_policy_attachment.py +2 -18
- pulumi_gcp/compute/external_vpn_gateway.py +2 -18
- pulumi_gcp/compute/firewall.py +2 -18
- pulumi_gcp/compute/firewall_policy_association.py +2 -18
- pulumi_gcp/compute/firewall_policy_rule.py +2 -18
- pulumi_gcp/compute/forwarding_rule.py +2 -18
- pulumi_gcp/compute/get_disk.py +11 -1
- pulumi_gcp/compute/get_instance_group_manager.py +11 -1
- pulumi_gcp/compute/get_instance_template.py +3 -0
- pulumi_gcp/compute/get_machine_types.py +143 -0
- pulumi_gcp/compute/global_address.py +9 -25
- pulumi_gcp/compute/global_forwarding_rule.py +2 -18
- pulumi_gcp/compute/global_network_endpoint.py +2 -18
- pulumi_gcp/compute/global_network_endpoint_group.py +2 -18
- pulumi_gcp/compute/ha_vpn_gateway.py +2 -18
- pulumi_gcp/compute/health_check.py +2 -18
- pulumi_gcp/compute/http_health_check.py +2 -18
- pulumi_gcp/compute/https_health_check.py +2 -18
- pulumi_gcp/compute/image.py +2 -18
- pulumi_gcp/compute/instance.py +13 -22
- pulumi_gcp/compute/instance_group.py +2 -18
- pulumi_gcp/compute/instance_group_manager.py +34 -29
- pulumi_gcp/compute/instance_group_named_port.py +2 -18
- pulumi_gcp/compute/instance_settings.py +2 -18
- pulumi_gcp/compute/instance_template.py +9 -25
- pulumi_gcp/compute/interconnect_attachment.py +77 -18
- pulumi_gcp/compute/machine_image.py +2 -18
- pulumi_gcp/compute/managed_ssl_certificate.py +2 -18
- pulumi_gcp/compute/manged_ssl_certificate.py +2 -18
- pulumi_gcp/compute/network.py +2 -18
- pulumi_gcp/compute/network_attachment.py +2 -18
- pulumi_gcp/compute/network_edge_security_service.py +2 -18
- pulumi_gcp/compute/network_endpoint.py +2 -18
- pulumi_gcp/compute/network_endpoint_group.py +2 -18
- pulumi_gcp/compute/network_endpoint_list.py +2 -18
- pulumi_gcp/compute/network_firewall_policy.py +2 -18
- pulumi_gcp/compute/network_firewall_policy_association.py +2 -18
- pulumi_gcp/compute/network_firewall_policy_rule.py +2 -18
- pulumi_gcp/compute/network_peering.py +2 -18
- pulumi_gcp/compute/network_peering_routes_config.py +2 -18
- pulumi_gcp/compute/node_group.py +30 -53
- pulumi_gcp/compute/node_template.py +2 -18
- pulumi_gcp/compute/organization_security_policy.py +2 -18
- pulumi_gcp/compute/organization_security_policy_association.py +2 -18
- pulumi_gcp/compute/organization_security_policy_rule.py +2 -18
- pulumi_gcp/compute/outputs.py +4530 -181
- pulumi_gcp/compute/packet_mirroring.py +2 -18
- pulumi_gcp/compute/per_instance_config.py +2 -18
- pulumi_gcp/compute/project_default_network_tier.py +2 -18
- pulumi_gcp/compute/project_metadata.py +2 -18
- pulumi_gcp/compute/project_metadata_item.py +2 -18
- pulumi_gcp/compute/public_advertised_prefix.py +2 -18
- pulumi_gcp/compute/public_delegated_prefix.py +2 -18
- pulumi_gcp/compute/region_autoscaler.py +2 -18
- pulumi_gcp/compute/region_backend_service.py +46 -32
- pulumi_gcp/compute/region_commitment.py +2 -18
- pulumi_gcp/compute/region_disk.py +6 -29
- pulumi_gcp/compute/region_disk_resource_policy_attachment.py +2 -18
- pulumi_gcp/compute/region_health_check.py +2 -18
- pulumi_gcp/compute/region_instance_group_manager.py +34 -29
- pulumi_gcp/compute/region_instance_template.py +2 -18
- pulumi_gcp/compute/region_network_endpoint.py +556 -0
- pulumi_gcp/compute/region_network_endpoint_group.py +140 -92
- pulumi_gcp/compute/region_network_firewall_policy.py +2 -18
- pulumi_gcp/compute/region_network_firewall_policy_association.py +2 -18
- pulumi_gcp/compute/region_network_firewall_policy_rule.py +2 -18
- pulumi_gcp/compute/region_per_instance_config.py +2 -18
- pulumi_gcp/compute/region_security_policy.py +2 -18
- pulumi_gcp/compute/region_security_policy_rule.py +2 -18
- pulumi_gcp/compute/region_ssl_certificate.py +2 -18
- pulumi_gcp/compute/region_ssl_policy.py +2 -18
- pulumi_gcp/compute/region_target_http_proxy.py +2 -18
- pulumi_gcp/compute/region_target_https_proxy.py +2 -18
- pulumi_gcp/compute/region_target_tcp_proxy.py +2 -18
- pulumi_gcp/compute/region_url_map.py +2 -18
- pulumi_gcp/compute/reservation.py +2 -18
- pulumi_gcp/compute/resource_policy.py +2 -18
- pulumi_gcp/compute/route.py +2 -18
- pulumi_gcp/compute/router.py +2 -18
- pulumi_gcp/compute/router_interface.py +2 -18
- pulumi_gcp/compute/router_nat.py +44 -39
- pulumi_gcp/compute/router_peer.py +2 -18
- pulumi_gcp/compute/security_policy.py +2 -18
- pulumi_gcp/compute/security_scan_config.py +2 -18
- pulumi_gcp/compute/service_attachment.py +2 -18
- pulumi_gcp/compute/shared_vpc_host_project.py +2 -18
- pulumi_gcp/compute/shared_vpc_service_project.py +2 -18
- pulumi_gcp/compute/snapshot.py +2 -18
- pulumi_gcp/compute/ssl_certificate.py +2 -18
- pulumi_gcp/compute/ssl_policy.py +2 -18
- pulumi_gcp/compute/subnetwork.py +79 -102
- pulumi_gcp/compute/target_grpc_proxy.py +2 -18
- pulumi_gcp/compute/target_http_proxy.py +2 -18
- pulumi_gcp/compute/target_https_proxy.py +2 -18
- pulumi_gcp/compute/target_instance.py +2 -18
- pulumi_gcp/compute/target_pool.py +9 -25
- pulumi_gcp/compute/target_ssl_proxy.py +2 -18
- pulumi_gcp/compute/target_tcp_proxy.py +2 -18
- pulumi_gcp/compute/url_map.py +2 -18
- pulumi_gcp/compute/vpn_gateway.py +2 -18
- pulumi_gcp/compute/vpn_tunnel.py +2 -18
- pulumi_gcp/config/__init__.pyi +6 -0
- pulumi_gcp/config/vars.py +12 -0
- pulumi_gcp/container/_inputs.py +568 -30
- pulumi_gcp/container/attached_cluster.py +2 -18
- pulumi_gcp/container/aws_cluster.py +9 -25
- pulumi_gcp/container/aws_node_pool.py +2 -18
- pulumi_gcp/container/azure_client.py +2 -18
- pulumi_gcp/container/azure_cluster.py +9 -25
- pulumi_gcp/container/azure_node_pool.py +2 -18
- pulumi_gcp/container/cluster.py +25 -69
- pulumi_gcp/container/node_pool.py +2 -18
- pulumi_gcp/container/outputs.py +2161 -31
- pulumi_gcp/containeranalysis/note.py +2 -18
- pulumi_gcp/containeranalysis/occurence.py +2 -18
- pulumi_gcp/databasemigrationservice/connection_profile.py +2 -18
- pulumi_gcp/databasemigrationservice/private_connection.py +2 -18
- pulumi_gcp/datacatalog/entry.py +2 -18
- pulumi_gcp/datacatalog/entry_group.py +2 -18
- pulumi_gcp/datacatalog/policy_tag.py +2 -18
- pulumi_gcp/datacatalog/tag.py +2 -18
- pulumi_gcp/datacatalog/tag_template.py +2 -18
- pulumi_gcp/datacatalog/taxonomy.py +2 -18
- pulumi_gcp/dataflow/job.py +2 -18
- pulumi_gcp/dataflow/pipeline.py +2 -18
- pulumi_gcp/dataform/repository.py +2 -18
- pulumi_gcp/dataform/repository_release_config.py +2 -18
- pulumi_gcp/dataform/repository_workflow_config.py +2 -18
- pulumi_gcp/datafusion/instance.py +30 -46
- pulumi_gcp/dataloss/prevention_deidentify_template.py +2 -18
- pulumi_gcp/dataloss/prevention_inspect_template.py +2 -18
- pulumi_gcp/dataloss/prevention_job_trigger.py +2 -18
- pulumi_gcp/dataloss/prevention_stored_info_type.py +2 -18
- pulumi_gcp/dataplex/_inputs.py +66 -0
- pulumi_gcp/dataplex/asset.py +2 -18
- pulumi_gcp/dataplex/datascan.py +2 -18
- pulumi_gcp/dataplex/lake.py +2 -18
- pulumi_gcp/dataplex/outputs.py +66 -0
- pulumi_gcp/dataplex/task.py +2 -18
- pulumi_gcp/dataplex/zone.py +2 -18
- pulumi_gcp/dataproc/_inputs.py +178 -0
- pulumi_gcp/dataproc/autoscaling_policy.py +2 -18
- pulumi_gcp/dataproc/cluster_iam_binding.py +4 -12
- pulumi_gcp/dataproc/cluster_iam_member.py +4 -12
- pulumi_gcp/dataproc/cluster_iam_policy.py +4 -12
- pulumi_gcp/dataproc/job_iam_binding.py +4 -12
- pulumi_gcp/dataproc/job_iam_member.py +4 -12
- pulumi_gcp/dataproc/job_iam_policy.py +4 -12
- pulumi_gcp/dataproc/metastore_federation.py +2 -18
- pulumi_gcp/dataproc/metastore_service.py +2 -18
- pulumi_gcp/dataproc/outputs.py +178 -0
- pulumi_gcp/dataproc/workflow_template.py +9 -25
- pulumi_gcp/datastore/data_store_index.py +2 -18
- pulumi_gcp/datastream/connection_profile.py +2 -18
- pulumi_gcp/datastream/private_connection.py +2 -18
- pulumi_gcp/datastream/stream.py +2 -18
- pulumi_gcp/deploymentmanager/deployment.py +2 -18
- pulumi_gcp/diagflow/agent.py +2 -18
- pulumi_gcp/diagflow/cx_agent.py +2 -18
- pulumi_gcp/diagflow/cx_entity_type.py +2 -18
- pulumi_gcp/diagflow/cx_environment.py +2 -18
- pulumi_gcp/diagflow/cx_flow.py +2 -18
- pulumi_gcp/diagflow/cx_intent.py +2 -18
- pulumi_gcp/diagflow/cx_page.py +2 -18
- pulumi_gcp/diagflow/cx_security_settings.py +2 -18
- pulumi_gcp/diagflow/cx_test_case.py +2 -18
- pulumi_gcp/diagflow/cx_version.py +2 -18
- pulumi_gcp/diagflow/cx_webhook.py +2 -18
- pulumi_gcp/diagflow/entity_type.py +2 -18
- pulumi_gcp/diagflow/fulfillment.py +2 -18
- pulumi_gcp/diagflow/intent.py +2 -18
- pulumi_gcp/discoveryengine/__init__.py +12 -0
- pulumi_gcp/discoveryengine/_inputs.py +237 -0
- pulumi_gcp/discoveryengine/chat_engine.py +822 -0
- pulumi_gcp/discoveryengine/data_store.py +734 -0
- pulumi_gcp/discoveryengine/outputs.py +304 -0
- pulumi_gcp/discoveryengine/search_engine.py +752 -0
- pulumi_gcp/dns/_inputs.py +34 -0
- pulumi_gcp/dns/managed_zone.py +2 -18
- pulumi_gcp/dns/outputs.py +34 -0
- pulumi_gcp/dns/policy.py +2 -18
- pulumi_gcp/dns/record_set.py +2 -18
- pulumi_gcp/dns/response_policy.py +2 -18
- pulumi_gcp/dns/response_policy_rule.py +9 -32
- pulumi_gcp/edgecontainer/cluster.py +2 -18
- pulumi_gcp/edgecontainer/node_pool.py +2 -18
- pulumi_gcp/edgecontainer/vpn_connection.py +2 -18
- pulumi_gcp/edgenetwork/network.py +2 -18
- pulumi_gcp/edgenetwork/subnet.py +2 -18
- pulumi_gcp/essentialcontacts/contact.py +2 -18
- pulumi_gcp/essentialcontacts/document_ai_processor.py +2 -18
- pulumi_gcp/essentialcontacts/document_ai_processor_default_version.py +2 -18
- pulumi_gcp/essentialcontacts/document_ai_warehouse_document_schema.py +2 -18
- pulumi_gcp/eventarc/_inputs.py +2 -2
- pulumi_gcp/eventarc/channel.py +2 -18
- pulumi_gcp/eventarc/google_channel_config.py +2 -18
- pulumi_gcp/eventarc/outputs.py +2 -2
- pulumi_gcp/eventarc/trigger.py +2 -18
- pulumi_gcp/filestore/_inputs.py +1 -3
- pulumi_gcp/filestore/backup.py +2 -18
- pulumi_gcp/filestore/instance.py +2 -18
- pulumi_gcp/filestore/outputs.py +89 -3
- pulumi_gcp/filestore/snapshot.py +2 -18
- pulumi_gcp/firebase/_inputs.py +4 -2
- pulumi_gcp/firebase/android_app.py +2 -18
- pulumi_gcp/firebase/apple_app.py +2 -18
- pulumi_gcp/firebase/database_instance.py +2 -18
- pulumi_gcp/firebase/extensions_instance.py +8 -26
- pulumi_gcp/firebase/hosting_channel.py +2 -18
- pulumi_gcp/firebase/hosting_custom_domain.py +2 -18
- pulumi_gcp/firebase/hosting_release.py +2 -18
- pulumi_gcp/firebase/hosting_site.py +2 -18
- pulumi_gcp/firebase/hosting_version.py +2 -18
- pulumi_gcp/firebase/outputs.py +4 -2
- pulumi_gcp/firebase/project.py +2 -18
- pulumi_gcp/firebase/storage_bucket.py +2 -18
- pulumi_gcp/firebase/web_app.py +2 -18
- pulumi_gcp/firebaserules/_inputs.py +6 -0
- pulumi_gcp/firebaserules/outputs.py +6 -0
- pulumi_gcp/firebaserules/release.py +4 -16
- pulumi_gcp/firebaserules/ruleset.py +2 -18
- pulumi_gcp/firestore/backup_schedule.py +38 -30
- pulumi_gcp/firestore/database.py +2 -26
- pulumi_gcp/firestore/document.py +2 -86
- pulumi_gcp/firestore/field.py +24 -120
- pulumi_gcp/firestore/index.py +6 -60
- pulumi_gcp/folder/_inputs.py +4 -0
- pulumi_gcp/folder/access_approval_settings.py +2 -18
- pulumi_gcp/folder/iam_audit_config.py +6 -14
- pulumi_gcp/folder/iam_member.py +6 -14
- pulumi_gcp/folder/iam_policy.py +6 -14
- pulumi_gcp/folder/organization_policy.py +2 -18
- pulumi_gcp/folder/outputs.py +54 -0
- pulumi_gcp/gkebackup/backup_plan.py +2 -18
- pulumi_gcp/gkebackup/restore_plan.py +2 -18
- pulumi_gcp/gkebackup/restore_plan_iam_binding.py +2 -18
- pulumi_gcp/gkebackup/restore_plan_iam_member.py +2 -18
- pulumi_gcp/gkebackup/restore_plan_iam_policy.py +2 -18
- pulumi_gcp/gkehub/_inputs.py +8 -0
- pulumi_gcp/gkehub/feature.py +4 -20
- pulumi_gcp/gkehub/feature_membership.py +2 -18
- pulumi_gcp/gkehub/fleet.py +2 -18
- pulumi_gcp/gkehub/membership.py +6 -29
- pulumi_gcp/gkehub/membership_binding.py +2 -18
- pulumi_gcp/gkehub/membership_rbac_role_binding.py +2 -18
- pulumi_gcp/gkehub/namespace.py +2 -18
- pulumi_gcp/gkehub/outputs.py +8 -0
- pulumi_gcp/gkehub/scope.py +2 -18
- pulumi_gcp/gkehub/scope_rbac_role_binding.py +2 -18
- pulumi_gcp/gkeonprem/bare_metal_admin_cluster.py +2 -18
- pulumi_gcp/gkeonprem/bare_metal_cluster.py +2 -18
- pulumi_gcp/gkeonprem/bare_metal_node_pool.py +2 -18
- pulumi_gcp/gkeonprem/v_mware_cluster.py +2 -18
- pulumi_gcp/gkeonprem/v_mware_node_pool.py +2 -18
- pulumi_gcp/healthcare/consent_store.py +2 -18
- pulumi_gcp/healthcare/dataset.py +2 -18
- pulumi_gcp/healthcare/dataset_iam_binding.py +4 -12
- pulumi_gcp/healthcare/dataset_iam_member.py +4 -12
- pulumi_gcp/healthcare/dataset_iam_policy.py +4 -12
- pulumi_gcp/healthcare/dicom_store.py +2 -18
- pulumi_gcp/healthcare/dicom_store_iam_binding.py +4 -12
- pulumi_gcp/healthcare/dicom_store_iam_member.py +4 -12
- pulumi_gcp/healthcare/dicom_store_iam_policy.py +4 -12
- pulumi_gcp/healthcare/fhir_store.py +23 -32
- pulumi_gcp/healthcare/fhir_store_iam_binding.py +4 -12
- pulumi_gcp/healthcare/fhir_store_iam_member.py +4 -12
- pulumi_gcp/healthcare/fhir_store_iam_policy.py +4 -12
- pulumi_gcp/healthcare/hl7_store.py +2 -18
- pulumi_gcp/healthcare/hl7_store_iam_binding.py +4 -12
- pulumi_gcp/healthcare/hl7_store_iam_member.py +4 -12
- pulumi_gcp/healthcare/hl7_store_iam_policy.py +4 -12
- pulumi_gcp/iam/access_boundary_policy.py +2 -18
- pulumi_gcp/iam/deny_policy.py +2 -18
- pulumi_gcp/iam/outputs.py +92 -0
- pulumi_gcp/iam/workforce_pool.py +2 -18
- pulumi_gcp/iam/workforce_pool_provider.py +2 -18
- pulumi_gcp/iam/workload_identity_pool.py +2 -18
- pulumi_gcp/iam/workload_identity_pool_provider.py +2 -18
- pulumi_gcp/iap/brand.py +2 -18
- pulumi_gcp/iap/client.py +2 -18
- pulumi_gcp/identityplatform/config.py +2 -18
- pulumi_gcp/identityplatform/default_supported_idp_config.py +2 -18
- pulumi_gcp/identityplatform/inbound_saml_config.py +2 -18
- pulumi_gcp/identityplatform/oauth_idp_config.py +2 -18
- pulumi_gcp/identityplatform/project_default_config.py +2 -18
- pulumi_gcp/identityplatform/tenant.py +2 -18
- pulumi_gcp/identityplatform/tenant_default_supported_idp_config.py +2 -18
- pulumi_gcp/identityplatform/tenant_inbound_saml_config.py +2 -18
- pulumi_gcp/identityplatform/tenant_oauth_idp_config.py +2 -18
- pulumi_gcp/integrationconnectors/connection.py +2 -18
- pulumi_gcp/integrationconnectors/endpoint_attachment.py +2 -18
- pulumi_gcp/kms/crypto_key.py +36 -18
- pulumi_gcp/kms/crypto_key_iam_binding.py +11 -19
- pulumi_gcp/kms/crypto_key_iam_member.py +11 -19
- pulumi_gcp/kms/crypto_key_iam_policy.py +4 -12
- pulumi_gcp/kms/crypto_key_version.py +2 -18
- pulumi_gcp/kms/key_ring.py +2 -18
- pulumi_gcp/kms/key_ring_iam_binding.py +11 -19
- pulumi_gcp/kms/key_ring_iam_member.py +11 -19
- pulumi_gcp/kms/key_ring_iam_policy.py +4 -12
- pulumi_gcp/kms/key_ring_import_job.py +2 -18
- pulumi_gcp/kms/outputs.py +16 -0
- pulumi_gcp/logging/_inputs.py +108 -0
- pulumi_gcp/logging/billing_account_bucket_config.py +2 -18
- pulumi_gcp/logging/billing_account_exclusion.py +2 -18
- pulumi_gcp/logging/billing_account_sink.py +2 -18
- pulumi_gcp/logging/folder_bucket_config.py +2 -18
- pulumi_gcp/logging/folder_exclusion.py +2 -18
- pulumi_gcp/logging/folder_settings.py +2 -18
- pulumi_gcp/logging/folder_sink.py +2 -18
- pulumi_gcp/logging/linked_dataset.py +2 -18
- pulumi_gcp/logging/log_view.py +2 -18
- pulumi_gcp/logging/metric.py +2 -18
- pulumi_gcp/logging/organization_bucket_config.py +2 -18
- pulumi_gcp/logging/organization_exclusion.py +2 -18
- pulumi_gcp/logging/organization_settings.py +2 -18
- pulumi_gcp/logging/organization_sink.py +2 -18
- pulumi_gcp/logging/outputs.py +108 -0
- pulumi_gcp/logging/project_bucket_config.py +2 -18
- pulumi_gcp/logging/project_exclusion.py +2 -18
- pulumi_gcp/logging/project_sink.py +2 -18
- pulumi_gcp/looker/instance.py +2 -18
- pulumi_gcp/memcache/instance.py +63 -18
- pulumi_gcp/migrationcenter/group.py +2 -18
- pulumi_gcp/ml/engine_model.py +2 -18
- pulumi_gcp/monitoring/alert_policy.py +2 -18
- pulumi_gcp/monitoring/custom_service.py +2 -18
- pulumi_gcp/monitoring/dashboard.py +2 -18
- pulumi_gcp/monitoring/generic_service.py +2 -18
- pulumi_gcp/monitoring/group.py +2 -18
- pulumi_gcp/monitoring/metric_descriptor.py +2 -18
- pulumi_gcp/monitoring/monitored_project.py +2 -18
- pulumi_gcp/monitoring/notification_channel.py +2 -18
- pulumi_gcp/monitoring/outputs.py +14 -0
- pulumi_gcp/monitoring/slo.py +2 -18
- pulumi_gcp/monitoring/uptime_check_config.py +2 -18
- pulumi_gcp/netapp/__init__.py +1 -0
- pulumi_gcp/netapp/active_directory.py +2 -18
- pulumi_gcp/netapp/backup_policy.py +2 -18
- pulumi_gcp/netapp/backup_vault.py +2 -18
- pulumi_gcp/netapp/kmsconfig.py +2 -18
- pulumi_gcp/netapp/storage_pool.py +36 -24
- pulumi_gcp/netapp/volume.py +67 -20
- pulumi_gcp/netapp/volume_snapshot.py +625 -0
- pulumi_gcp/networkconnectivity/_inputs.py +6 -0
- pulumi_gcp/networkconnectivity/hub.py +2 -18
- pulumi_gcp/networkconnectivity/outputs.py +6 -0
- pulumi_gcp/networkconnectivity/policy_based_route.py +2 -18
- pulumi_gcp/networkconnectivity/service_connection_policy.py +2 -18
- pulumi_gcp/networkconnectivity/spoke.py +2 -18
- pulumi_gcp/networkmanagement/connectivity_test.py +2 -18
- pulumi_gcp/networksecurity/address_group.py +2 -18
- pulumi_gcp/networksecurity/authorization_policy.py +2 -18
- pulumi_gcp/networksecurity/client_tls_policy.py +2 -18
- pulumi_gcp/networksecurity/gateway_security_policy.py +2 -18
- pulumi_gcp/networksecurity/gateway_security_policy_rule.py +2 -18
- pulumi_gcp/networksecurity/security_profile.py +2 -18
- pulumi_gcp/networksecurity/server_tls_policy.py +2 -18
- pulumi_gcp/networksecurity/tls_inspection_policy.py +2 -18
- pulumi_gcp/networksecurity/url_list.py +2 -18
- pulumi_gcp/networkservices/edge_cache_keyset.py +2 -18
- pulumi_gcp/networkservices/edge_cache_origin.py +2 -18
- pulumi_gcp/networkservices/edge_cache_service.py +2 -18
- pulumi_gcp/networkservices/endpoint_policy.py +2 -18
- pulumi_gcp/networkservices/gateway.py +2 -18
- pulumi_gcp/networkservices/grpc_route.py +2 -18
- pulumi_gcp/networkservices/http_route.py +2 -18
- pulumi_gcp/networkservices/mesh.py +2 -18
- pulumi_gcp/networkservices/service_binding.py +2 -18
- pulumi_gcp/networkservices/tcp_route.py +2 -18
- pulumi_gcp/networkservices/tls_route.py +2 -18
- pulumi_gcp/notebooks/environment.py +2 -18
- pulumi_gcp/notebooks/instance.py +2 -18
- pulumi_gcp/notebooks/location.py +2 -18
- pulumi_gcp/notebooks/runtime.py +2 -18
- pulumi_gcp/organizations/_inputs.py +4 -0
- pulumi_gcp/organizations/access_approval_settings.py +2 -18
- pulumi_gcp/organizations/folder.py +2 -18
- pulumi_gcp/organizations/iam_audit_config.py +2 -2
- pulumi_gcp/organizations/iam_member.py +6 -14
- pulumi_gcp/organizations/iam_policy.py +6 -14
- pulumi_gcp/organizations/outputs.py +4 -0
- pulumi_gcp/organizations/policy.py +2 -18
- pulumi_gcp/organizations/project.py +2 -18
- pulumi_gcp/orgpolicy/custom_constraint.py +2 -18
- pulumi_gcp/orgpolicy/policy.py +4 -16
- pulumi_gcp/osconfig/guest_policies.py +2 -18
- pulumi_gcp/osconfig/os_policy_assignment.py +2 -18
- pulumi_gcp/osconfig/patch_deployment.py +2 -18
- pulumi_gcp/oslogin/ssh_public_key.py +2 -18
- pulumi_gcp/projects/_inputs.py +4 -0
- pulumi_gcp/projects/access_approval_settings.py +2 -18
- pulumi_gcp/projects/api_key.py +2 -18
- pulumi_gcp/projects/iam_audit_config.py +6 -14
- pulumi_gcp/projects/iam_binding.py +6 -14
- pulumi_gcp/projects/iam_custom_role.py +2 -18
- pulumi_gcp/projects/iam_member.py +6 -14
- pulumi_gcp/projects/iam_policy.py +6 -14
- pulumi_gcp/projects/organization_policy.py +2 -18
- pulumi_gcp/projects/outputs.py +54 -0
- pulumi_gcp/projects/service.py +2 -18
- pulumi_gcp/projects/usage_export_bucket.py +2 -18
- pulumi_gcp/provider.py +60 -0
- pulumi_gcp/pubsub/_inputs.py +26 -4
- pulumi_gcp/pubsub/lite_reservation.py +2 -18
- pulumi_gcp/pubsub/lite_subscription.py +2 -18
- pulumi_gcp/pubsub/lite_topic.py +2 -18
- pulumi_gcp/pubsub/outputs.py +323 -4
- pulumi_gcp/pubsub/schema.py +2 -18
- pulumi_gcp/pubsub/subscription.py +82 -16
- pulumi_gcp/pubsub/subscription_iam_binding.py +4 -12
- pulumi_gcp/pubsub/subscription_iam_member.py +4 -12
- pulumi_gcp/pubsub/subscription_iam_policy.py +4 -12
- pulumi_gcp/pubsub/topic.py +2 -18
- pulumi_gcp/recaptcha/enterprise_key.py +2 -18
- pulumi_gcp/redis/cluster.py +2 -18
- pulumi_gcp/redis/instance.py +2 -18
- pulumi_gcp/redis/outputs.py +204 -0
- pulumi_gcp/resourcemanager/lien.py +2 -18
- pulumi_gcp/runtimeconfig/config.py +2 -18
- pulumi_gcp/runtimeconfig/variable.py +2 -18
- pulumi_gcp/secretmanager/outputs.py +88 -0
- pulumi_gcp/secretmanager/secret.py +2 -18
- pulumi_gcp/secretmanager/secret_version.py +2 -18
- pulumi_gcp/securesourcemanager/instance.py +2 -18
- pulumi_gcp/securitycenter/event_threat_detection_custom_module.py +2 -18
- pulumi_gcp/securitycenter/folder_custom_module.py +2 -18
- pulumi_gcp/securitycenter/instance_iam_binding.py +2 -18
- pulumi_gcp/securitycenter/instance_iam_member.py +2 -18
- pulumi_gcp/securitycenter/instance_iam_policy.py +2 -18
- pulumi_gcp/securitycenter/mute_config.py +2 -18
- pulumi_gcp/securitycenter/notification_config.py +2 -18
- pulumi_gcp/securitycenter/organization_custom_module.py +2 -18
- pulumi_gcp/securitycenter/project_custom_module.py +2 -18
- pulumi_gcp/securitycenter/source.py +2 -18
- pulumi_gcp/securitycenter/source_iam_binding.py +2 -18
- pulumi_gcp/securitycenter/source_iam_member.py +2 -18
- pulumi_gcp/securitycenter/source_iam_policy.py +2 -18
- pulumi_gcp/securityposture/__init__.py +11 -0
- pulumi_gcp/securityposture/_inputs.py +1364 -0
- pulumi_gcp/securityposture/outputs.py +1372 -0
- pulumi_gcp/securityposture/posture.py +828 -0
- pulumi_gcp/securityposture/posture_deployment.py +872 -0
- pulumi_gcp/serviceaccount/account.py +2 -18
- pulumi_gcp/servicedirectory/endpoint.py +2 -18
- pulumi_gcp/servicedirectory/namespace.py +2 -18
- pulumi_gcp/servicedirectory/service.py +2 -18
- pulumi_gcp/servicenetworking/connection.py +2 -18
- pulumi_gcp/servicenetworking/peered_dns_domain.py +2 -18
- pulumi_gcp/serviceusage/consumer_quota_override.py +2 -18
- pulumi_gcp/sourcerepo/outputs.py +20 -0
- pulumi_gcp/sourcerepo/repository.py +2 -18
- pulumi_gcp/spanner/database.py +2 -18
- pulumi_gcp/spanner/database_iam_binding.py +4 -12
- pulumi_gcp/spanner/database_iam_member.py +4 -12
- pulumi_gcp/spanner/database_iam_policy.py +4 -12
- pulumi_gcp/spanner/instance.py +2 -18
- pulumi_gcp/spanner/instance_iam_binding.py +4 -12
- pulumi_gcp/spanner/instance_iam_member.py +4 -12
- pulumi_gcp/spanner/instance_iam_policy.py +4 -12
- pulumi_gcp/spanner/outputs.py +68 -0
- pulumi_gcp/sql/_inputs.py +50 -0
- pulumi_gcp/sql/database.py +2 -18
- pulumi_gcp/sql/database_instance.py +2 -18
- pulumi_gcp/sql/outputs.py +904 -0
- pulumi_gcp/sql/source_representation_instance.py +2 -18
- pulumi_gcp/sql/user.py +2 -30
- pulumi_gcp/storage/bucket.py +2 -18
- pulumi_gcp/storage/bucket_access_control.py +2 -18
- pulumi_gcp/storage/default_object_access_control.py +2 -18
- pulumi_gcp/storage/hmac_key.py +2 -18
- pulumi_gcp/storage/insights_report_config.py +2 -18
- pulumi_gcp/storage/notification.py +2 -18
- pulumi_gcp/storage/object_access_control.py +2 -18
- pulumi_gcp/storage/outputs.py +188 -0
- pulumi_gcp/storage/transfer_agent_pool.py +2 -18
- pulumi_gcp/storage/transfer_job.py +2 -18
- pulumi_gcp/tags/location_tag_binding.py +2 -18
- pulumi_gcp/tags/tag_binding.py +2 -18
- pulumi_gcp/tags/tag_key.py +2 -18
- pulumi_gcp/tags/tag_value.py +2 -18
- pulumi_gcp/tpu/node.py +2 -18
- pulumi_gcp/tpu/v2_vm.py +2 -18
- pulumi_gcp/vertex/_inputs.py +166 -0
- pulumi_gcp/vertex/ai_endpoint.py +2 -18
- pulumi_gcp/vertex/ai_feature_group.py +2 -18
- pulumi_gcp/vertex/ai_feature_group_feature.py +2 -18
- pulumi_gcp/vertex/ai_feature_online_store.py +30 -46
- pulumi_gcp/vertex/ai_feature_online_store_featureview.py +259 -19
- pulumi_gcp/vertex/ai_feature_store.py +9 -46
- pulumi_gcp/vertex/ai_feature_store_entity_type.py +9 -39
- pulumi_gcp/vertex/ai_feature_store_entity_type_feature.py +2 -18
- pulumi_gcp/vertex/ai_index.py +2 -18
- pulumi_gcp/vertex/ai_index_endpoint.py +2 -18
- pulumi_gcp/vertex/ai_metadata_store.py +2 -18
- pulumi_gcp/vertex/ai_tensorboard.py +2 -18
- pulumi_gcp/vertex/outputs.py +304 -0
- pulumi_gcp/vmwareengine/cluster.py +2 -18
- pulumi_gcp/vmwareengine/external_access_rule.py +2 -18
- pulumi_gcp/vmwareengine/external_address.py +2 -18
- pulumi_gcp/vmwareengine/network.py +2 -18
- pulumi_gcp/vmwareengine/network_peering.py +2 -18
- pulumi_gcp/vmwareengine/network_policy.py +2 -18
- pulumi_gcp/vmwareengine/outputs.py +208 -0
- pulumi_gcp/vmwareengine/private_cloud.py +2 -25
- pulumi_gcp/vmwareengine/subnet.py +2 -18
- pulumi_gcp/vpcaccess/connector.py +2 -18
- pulumi_gcp/vpcaccess/outputs.py +4 -0
- pulumi_gcp/workbench/instance.py +2 -18
- pulumi_gcp/workflows/workflow.py +75 -7
- pulumi_gcp/workstations/_inputs.py +38 -0
- pulumi_gcp/workstations/outputs.py +30 -0
- pulumi_gcp/workstations/workstation.py +2 -18
- pulumi_gcp/workstations/workstation_cluster.py +2 -18
- pulumi_gcp/workstations/workstation_config.py +56 -18
- {pulumi_gcp-7.7.0a1706207981.dist-info → pulumi_gcp-7.8.0.dist-info}/METADATA +2 -1
- {pulumi_gcp-7.7.0a1706207981.dist-info → pulumi_gcp-7.8.0.dist-info}/RECORD +686 -668
- {pulumi_gcp-7.7.0a1706207981.dist-info → pulumi_gcp-7.8.0.dist-info}/WHEEL +0 -0
- {pulumi_gcp-7.7.0a1706207981.dist-info → pulumi_gcp-7.8.0.dist-info}/top_level.txt +0 -0
pulumi_gcp/container/_inputs.py
CHANGED
@@ -780,7 +780,7 @@ class AwsClusterControlPlaneArgs:
|
|
780
780
|
:param pulumi.Input[str] iam_instance_profile: The name of the AWS IAM instance pofile to assign to each control plane replica.
|
781
781
|
:param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: The list of subnets where control plane replicas will run. A replica will be provisioned on each subnet and up to three values can be provided. Each subnet must be in a different AWS Availability Zone (AZ).
|
782
782
|
:param pulumi.Input[str] version: The Kubernetes version to run on control plane replicas (e.g. `1.19.10-gke.1000`). You can list all supported versions on a given Google Cloud region by calling .
|
783
|
-
:param pulumi.Input['AwsClusterControlPlaneInstancePlacementArgs'] instance_placement:
|
783
|
+
:param pulumi.Input['AwsClusterControlPlaneInstancePlacementArgs'] instance_placement: Details of placement information for an instance.
|
784
784
|
:param pulumi.Input[str] instance_type: Optional. The AWS instance type. When unspecified, it defaults to `m5.large`.
|
785
785
|
:param pulumi.Input['AwsClusterControlPlaneMainVolumeArgs'] main_volume: Optional. Configuration related to the main volume provisioned for each control plane replica. The main volume is in charge of storing all of the cluster's etcd state. Volumes will be provisioned in the availability zone associated with the corresponding subnet. When unspecified, it defaults to 8 GiB with the GP2 volume type.
|
786
786
|
:param pulumi.Input['AwsClusterControlPlaneProxyConfigArgs'] proxy_config: Proxy configuration for outbound HTTP(S) traffic.
|
@@ -888,7 +888,7 @@ class AwsClusterControlPlaneArgs:
|
|
888
888
|
@pulumi.getter(name="instancePlacement")
|
889
889
|
def instance_placement(self) -> Optional[pulumi.Input['AwsClusterControlPlaneInstancePlacementArgs']]:
|
890
890
|
"""
|
891
|
-
|
891
|
+
Details of placement information for an instance.
|
892
892
|
"""
|
893
893
|
return pulumi.get(self, "instance_placement")
|
894
894
|
|
@@ -1482,6 +1482,11 @@ class AwsClusterWorkloadIdentityConfigArgs:
|
|
1482
1482
|
identity_provider: Optional[pulumi.Input[str]] = None,
|
1483
1483
|
issuer_uri: Optional[pulumi.Input[str]] = None,
|
1484
1484
|
workload_pool: Optional[pulumi.Input[str]] = None):
|
1485
|
+
"""
|
1486
|
+
:param pulumi.Input[str] identity_provider: The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.
|
1487
|
+
:param pulumi.Input[str] issuer_uri: The OIDC issuer URL for this cluster.
|
1488
|
+
:param pulumi.Input[str] workload_pool: The Workload Identity Pool associated to the cluster.
|
1489
|
+
"""
|
1485
1490
|
if identity_provider is not None:
|
1486
1491
|
pulumi.set(__self__, "identity_provider", identity_provider)
|
1487
1492
|
if issuer_uri is not None:
|
@@ -1492,6 +1497,9 @@ class AwsClusterWorkloadIdentityConfigArgs:
|
|
1492
1497
|
@property
|
1493
1498
|
@pulumi.getter(name="identityProvider")
|
1494
1499
|
def identity_provider(self) -> Optional[pulumi.Input[str]]:
|
1500
|
+
"""
|
1501
|
+
The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.
|
1502
|
+
"""
|
1495
1503
|
return pulumi.get(self, "identity_provider")
|
1496
1504
|
|
1497
1505
|
@identity_provider.setter
|
@@ -1501,6 +1509,9 @@ class AwsClusterWorkloadIdentityConfigArgs:
|
|
1501
1509
|
@property
|
1502
1510
|
@pulumi.getter(name="issuerUri")
|
1503
1511
|
def issuer_uri(self) -> Optional[pulumi.Input[str]]:
|
1512
|
+
"""
|
1513
|
+
The OIDC issuer URL for this cluster.
|
1514
|
+
"""
|
1504
1515
|
return pulumi.get(self, "issuer_uri")
|
1505
1516
|
|
1506
1517
|
@issuer_uri.setter
|
@@ -1510,6 +1521,9 @@ class AwsClusterWorkloadIdentityConfigArgs:
|
|
1510
1521
|
@property
|
1511
1522
|
@pulumi.getter(name="workloadPool")
|
1512
1523
|
def workload_pool(self) -> Optional[pulumi.Input[str]]:
|
1524
|
+
"""
|
1525
|
+
The Workload Identity Pool associated to the cluster.
|
1526
|
+
"""
|
1513
1527
|
return pulumi.get(self, "workload_pool")
|
1514
1528
|
|
1515
1529
|
@workload_pool.setter
|
@@ -1575,14 +1589,14 @@ class AwsNodePoolConfigArgs:
|
|
1575
1589
|
:param pulumi.Input['AwsNodePoolConfigConfigEncryptionArgs'] config_encryption: The ARN of the AWS KMS key used to encrypt node pool configuration.
|
1576
1590
|
:param pulumi.Input[str] iam_instance_profile: The name of the AWS IAM role assigned to nodes in the pool.
|
1577
1591
|
:param pulumi.Input['AwsNodePoolConfigAutoscalingMetricsCollectionArgs'] autoscaling_metrics_collection: Optional. Configuration related to CloudWatch metrics collection on the Auto Scaling group of the node pool. When unspecified, metrics collection is disabled.
|
1578
|
-
:param pulumi.Input[str] image_type:
|
1579
|
-
:param pulumi.Input['AwsNodePoolConfigInstancePlacementArgs'] instance_placement:
|
1592
|
+
:param pulumi.Input[str] image_type: The OS image type to use on node pool instances.
|
1593
|
+
:param pulumi.Input['AwsNodePoolConfigInstancePlacementArgs'] instance_placement: Details of placement information for an instance.
|
1580
1594
|
:param pulumi.Input[str] instance_type: Optional. The AWS instance type. When unspecified, it defaults to `m5.large`.
|
1581
1595
|
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. The initial labels assigned to nodes of this node pool. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
|
1582
1596
|
:param pulumi.Input['AwsNodePoolConfigProxyConfigArgs'] proxy_config: Proxy configuration for outbound HTTP(S) traffic.
|
1583
1597
|
:param pulumi.Input['AwsNodePoolConfigRootVolumeArgs'] root_volume: Optional. Template for the root volume provisioned for node pool nodes. Volumes will be provisioned in the availability zone assigned to the node pool subnet. When unspecified, it defaults to 32 GiB with the GP2 volume type.
|
1584
1598
|
:param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: Optional. The IDs of additional security groups to add to nodes in this pool. The manager will automatically create security groups with minimum rules needed for a functioning cluster.
|
1585
|
-
:param pulumi.Input['AwsNodePoolConfigSpotConfigArgs'] spot_config:
|
1599
|
+
:param pulumi.Input['AwsNodePoolConfigSpotConfigArgs'] spot_config: Optional. When specified, the node pool will provision Spot instances from the set of spot_config.instance_types. This field is mutually exclusive with `instance_type`
|
1586
1600
|
:param pulumi.Input['AwsNodePoolConfigSshConfigArgs'] ssh_config: Optional. The SSH configuration.
|
1587
1601
|
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Optional. Key/value metadata to assign to each underlying AWS resource. Specify at most 50 pairs containing alphanumerics, spaces, and symbols (.+-=_:@/). Keys can be up to 127 Unicode characters. Values can be up to 255 Unicode characters.
|
1588
1602
|
:param pulumi.Input[Sequence[pulumi.Input['AwsNodePoolConfigTaintArgs']]] taints: Optional. The initial taints assigned to nodes of this node pool.
|
@@ -1654,7 +1668,7 @@ class AwsNodePoolConfigArgs:
|
|
1654
1668
|
@pulumi.getter(name="imageType")
|
1655
1669
|
def image_type(self) -> Optional[pulumi.Input[str]]:
|
1656
1670
|
"""
|
1657
|
-
|
1671
|
+
The OS image type to use on node pool instances.
|
1658
1672
|
"""
|
1659
1673
|
return pulumi.get(self, "image_type")
|
1660
1674
|
|
@@ -1666,7 +1680,7 @@ class AwsNodePoolConfigArgs:
|
|
1666
1680
|
@pulumi.getter(name="instancePlacement")
|
1667
1681
|
def instance_placement(self) -> Optional[pulumi.Input['AwsNodePoolConfigInstancePlacementArgs']]:
|
1668
1682
|
"""
|
1669
|
-
|
1683
|
+
Details of placement information for an instance.
|
1670
1684
|
"""
|
1671
1685
|
return pulumi.get(self, "instance_placement")
|
1672
1686
|
|
@@ -1738,7 +1752,7 @@ class AwsNodePoolConfigArgs:
|
|
1738
1752
|
@pulumi.getter(name="spotConfig")
|
1739
1753
|
def spot_config(self) -> Optional[pulumi.Input['AwsNodePoolConfigSpotConfigArgs']]:
|
1740
1754
|
"""
|
1741
|
-
|
1755
|
+
Optional. When specified, the node pool will provision Spot instances from the set of spot_config.instance_types. This field is mutually exclusive with `instance_type`
|
1742
1756
|
"""
|
1743
1757
|
return pulumi.get(self, "spot_config")
|
1744
1758
|
|
@@ -2791,6 +2805,11 @@ class AzureClusterWorkloadIdentityConfigArgs:
|
|
2791
2805
|
identity_provider: Optional[pulumi.Input[str]] = None,
|
2792
2806
|
issuer_uri: Optional[pulumi.Input[str]] = None,
|
2793
2807
|
workload_pool: Optional[pulumi.Input[str]] = None):
|
2808
|
+
"""
|
2809
|
+
:param pulumi.Input[str] identity_provider: The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.
|
2810
|
+
:param pulumi.Input[str] issuer_uri: The OIDC issuer URL for this cluster.
|
2811
|
+
:param pulumi.Input[str] workload_pool: The Workload Identity Pool associated to the cluster.
|
2812
|
+
"""
|
2794
2813
|
if identity_provider is not None:
|
2795
2814
|
pulumi.set(__self__, "identity_provider", identity_provider)
|
2796
2815
|
if issuer_uri is not None:
|
@@ -2801,6 +2820,9 @@ class AzureClusterWorkloadIdentityConfigArgs:
|
|
2801
2820
|
@property
|
2802
2821
|
@pulumi.getter(name="identityProvider")
|
2803
2822
|
def identity_provider(self) -> Optional[pulumi.Input[str]]:
|
2823
|
+
"""
|
2824
|
+
The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.
|
2825
|
+
"""
|
2804
2826
|
return pulumi.get(self, "identity_provider")
|
2805
2827
|
|
2806
2828
|
@identity_provider.setter
|
@@ -2810,6 +2832,9 @@ class AzureClusterWorkloadIdentityConfigArgs:
|
|
2810
2832
|
@property
|
2811
2833
|
@pulumi.getter(name="issuerUri")
|
2812
2834
|
def issuer_uri(self) -> Optional[pulumi.Input[str]]:
|
2835
|
+
"""
|
2836
|
+
The OIDC issuer URL for this cluster.
|
2837
|
+
"""
|
2813
2838
|
return pulumi.get(self, "issuer_uri")
|
2814
2839
|
|
2815
2840
|
@issuer_uri.setter
|
@@ -2819,6 +2844,9 @@ class AzureClusterWorkloadIdentityConfigArgs:
|
|
2819
2844
|
@property
|
2820
2845
|
@pulumi.getter(name="workloadPool")
|
2821
2846
|
def workload_pool(self) -> Optional[pulumi.Input[str]]:
|
2847
|
+
"""
|
2848
|
+
The Workload Identity Pool associated to the cluster.
|
2849
|
+
"""
|
2822
2850
|
return pulumi.get(self, "workload_pool")
|
2823
2851
|
|
2824
2852
|
@workload_pool.setter
|
@@ -2875,7 +2903,7 @@ class AzureNodePoolConfigArgs:
|
|
2875
2903
|
vm_size: Optional[pulumi.Input[str]] = None):
|
2876
2904
|
"""
|
2877
2905
|
:param pulumi.Input['AzureNodePoolConfigSshConfigArgs'] ssh_config: SSH configuration for how to access the node pool machines.
|
2878
|
-
:param pulumi.Input[str] image_type:
|
2906
|
+
:param pulumi.Input[str] image_type: The OS image type to use on node pool instances.
|
2879
2907
|
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Optional. The initial labels assigned to nodes of this node pool. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
|
2880
2908
|
:param pulumi.Input['AzureNodePoolConfigProxyConfigArgs'] proxy_config: Proxy configuration for outbound HTTP(S) traffic.
|
2881
2909
|
:param pulumi.Input['AzureNodePoolConfigRootVolumeArgs'] root_volume: Optional. Configuration related to the root volume provisioned for each node pool machine. When unspecified, it defaults to a 32-GiB Azure Disk.
|
@@ -2912,7 +2940,7 @@ class AzureNodePoolConfigArgs:
|
|
2912
2940
|
@pulumi.getter(name="imageType")
|
2913
2941
|
def image_type(self) -> Optional[pulumi.Input[str]]:
|
2914
2942
|
"""
|
2915
|
-
|
2943
|
+
The OS image type to use on node pool instances.
|
2916
2944
|
"""
|
2917
2945
|
return pulumi.get(self, "image_type")
|
2918
2946
|
|
@@ -3777,7 +3805,7 @@ class ClusterClusterAutoscalingArgs:
|
|
3777
3805
|
:param pulumi.Input['ClusterClusterAutoscalingAutoProvisioningDefaultsArgs'] auto_provisioning_defaults: Contains defaults for a node pool created by NAP. A subset of fields also apply to
|
3778
3806
|
GKE Autopilot clusters.
|
3779
3807
|
Structure is documented below.
|
3780
|
-
:param pulumi.Input[str] autoscaling_profile:
|
3808
|
+
:param pulumi.Input[str] autoscaling_profile: Configuration
|
3781
3809
|
options for the [Autoscaling profile](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#autoscaling_profiles)
|
3782
3810
|
feature, which lets you choose whether the cluster autoscaler should optimize for resource utilization or resource availability
|
3783
3811
|
when deciding to remove nodes from a cluster. Can be `BALANCED` or `OPTIMIZE_UTILIZATION`. Defaults to `BALANCED`.
|
@@ -3815,7 +3843,7 @@ class ClusterClusterAutoscalingArgs:
|
|
3815
3843
|
@pulumi.getter(name="autoscalingProfile")
|
3816
3844
|
def autoscaling_profile(self) -> Optional[pulumi.Input[str]]:
|
3817
3845
|
"""
|
3818
|
-
|
3846
|
+
Configuration
|
3819
3847
|
options for the [Autoscaling profile](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#autoscaling_profiles)
|
3820
3848
|
feature, which lets you choose whether the cluster autoscaler should optimize for resource utilization or resource availability
|
3821
3849
|
when deciding to remove nodes from a cluster. Can be `BALANCED` or `OPTIMIZE_UTILIZATION`. Defaults to `BALANCED`.
|
@@ -4041,6 +4069,7 @@ class ClusterClusterAutoscalingAutoProvisioningDefaultsManagementArgs:
|
|
4041
4069
|
|
4042
4070
|
This block also contains several computed attributes, documented below.
|
4043
4071
|
:param pulumi.Input[bool] auto_upgrade: Specifies whether node auto-upgrade is enabled for the node pool. If enabled, node auto-upgrade helps keep the nodes in your node pool up to date with the latest release version of Kubernetes.
|
4072
|
+
:param pulumi.Input[Sequence[pulumi.Input['ClusterClusterAutoscalingAutoProvisioningDefaultsManagementUpgradeOptionArgs']]] upgrade_options: Specifies the Auto Upgrade knobs for the node pool.
|
4044
4073
|
"""
|
4045
4074
|
if auto_repair is not None:
|
4046
4075
|
pulumi.set(__self__, "auto_repair", auto_repair)
|
@@ -4078,6 +4107,9 @@ class ClusterClusterAutoscalingAutoProvisioningDefaultsManagementArgs:
|
|
4078
4107
|
@property
|
4079
4108
|
@pulumi.getter(name="upgradeOptions")
|
4080
4109
|
def upgrade_options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterClusterAutoscalingAutoProvisioningDefaultsManagementUpgradeOptionArgs']]]]:
|
4110
|
+
"""
|
4111
|
+
Specifies the Auto Upgrade knobs for the node pool.
|
4112
|
+
"""
|
4081
4113
|
return pulumi.get(self, "upgrade_options")
|
4082
4114
|
|
4083
4115
|
@upgrade_options.setter
|
@@ -4091,6 +4123,7 @@ class ClusterClusterAutoscalingAutoProvisioningDefaultsManagementUpgradeOptionAr
|
|
4091
4123
|
auto_upgrade_start_time: Optional[pulumi.Input[str]] = None,
|
4092
4124
|
description: Optional[pulumi.Input[str]] = None):
|
4093
4125
|
"""
|
4126
|
+
:param pulumi.Input[str] auto_upgrade_start_time: This field is set when upgrades are about to commence with the approximate start time for the upgrades, in RFC3339 text format.
|
4094
4127
|
:param pulumi.Input[str] description: Description of the cluster.
|
4095
4128
|
"""
|
4096
4129
|
if auto_upgrade_start_time is not None:
|
@@ -4101,6 +4134,9 @@ class ClusterClusterAutoscalingAutoProvisioningDefaultsManagementUpgradeOptionAr
|
|
4101
4134
|
@property
|
4102
4135
|
@pulumi.getter(name="autoUpgradeStartTime")
|
4103
4136
|
def auto_upgrade_start_time(self) -> Optional[pulumi.Input[str]]:
|
4137
|
+
"""
|
4138
|
+
This field is set when upgrades are about to commence with the approximate start time for the upgrades, in RFC3339 text format.
|
4139
|
+
"""
|
4104
4140
|
return pulumi.get(self, "auto_upgrade_start_time")
|
4105
4141
|
|
4106
4142
|
@auto_upgrade_start_time.setter
|
@@ -4612,6 +4648,8 @@ class ClusterFleetArgs:
|
|
4612
4648
|
pre_registered: Optional[pulumi.Input[bool]] = None,
|
4613
4649
|
project: Optional[pulumi.Input[str]] = None):
|
4614
4650
|
"""
|
4651
|
+
:param pulumi.Input[str] membership: Full resource name of the registered fleet membership of the cluster.
|
4652
|
+
:param pulumi.Input[bool] pre_registered: Whether the cluster has been registered via the fleet API.
|
4615
4653
|
:param pulumi.Input[str] project: The name of the Fleet host project where this cluster will be registered.
|
4616
4654
|
"""
|
4617
4655
|
if membership is not None:
|
@@ -4624,6 +4662,9 @@ class ClusterFleetArgs:
|
|
4624
4662
|
@property
|
4625
4663
|
@pulumi.getter
|
4626
4664
|
def membership(self) -> Optional[pulumi.Input[str]]:
|
4665
|
+
"""
|
4666
|
+
Full resource name of the registered fleet membership of the cluster.
|
4667
|
+
"""
|
4627
4668
|
return pulumi.get(self, "membership")
|
4628
4669
|
|
4629
4670
|
@membership.setter
|
@@ -4633,6 +4674,9 @@ class ClusterFleetArgs:
|
|
4633
4674
|
@property
|
4634
4675
|
@pulumi.getter(name="preRegistered")
|
4635
4676
|
def pre_registered(self) -> Optional[pulumi.Input[bool]]:
|
4677
|
+
"""
|
4678
|
+
Whether the cluster has been registered via the fleet API.
|
4679
|
+
"""
|
4636
4680
|
return pulumi.get(self, "pre_registered")
|
4637
4681
|
|
4638
4682
|
@pre_registered.setter
|
@@ -4719,6 +4763,7 @@ class ClusterIpAllocationPolicyArgs:
|
|
4719
4763
|
:param pulumi.Input[str] cluster_secondary_range_name: The name of the existing secondary
|
4720
4764
|
range in the cluster's subnetwork to use for pod IP addresses. Alternatively,
|
4721
4765
|
`cluster_ipv4_cidr_block` can be used to automatically create a GKE-managed one.
|
4766
|
+
:param pulumi.Input['ClusterIpAllocationPolicyPodCidrOverprovisionConfigArgs'] pod_cidr_overprovision_config: Configuration for cluster level pod cidr overprovision. Default is disabled=false.
|
4722
4767
|
:param pulumi.Input[str] services_ipv4_cidr_block: The IP address range of the services IPs in this cluster.
|
4723
4768
|
Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14)
|
4724
4769
|
to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14)
|
@@ -4794,6 +4839,9 @@ class ClusterIpAllocationPolicyArgs:
|
|
4794
4839
|
@property
|
4795
4840
|
@pulumi.getter(name="podCidrOverprovisionConfig")
|
4796
4841
|
def pod_cidr_overprovision_config(self) -> Optional[pulumi.Input['ClusterIpAllocationPolicyPodCidrOverprovisionConfigArgs']]:
|
4842
|
+
"""
|
4843
|
+
Configuration for cluster level pod cidr overprovision. Default is disabled=false.
|
4844
|
+
"""
|
4797
4845
|
return pulumi.get(self, "pod_cidr_overprovision_config")
|
4798
4846
|
|
4799
4847
|
@pod_cidr_overprovision_config.setter
|
@@ -5196,6 +5244,9 @@ class ClusterMasterAuthArgs:
|
|
5196
5244
|
```
|
5197
5245
|
|
5198
5246
|
This block also contains several computed attributes, documented below.
|
5247
|
+
:param pulumi.Input[str] client_certificate: Base64 encoded public certificate used by clients to authenticate to the cluster endpoint.
|
5248
|
+
:param pulumi.Input[str] client_key: Base64 encoded private key used by clients to authenticate to the cluster endpoint.
|
5249
|
+
:param pulumi.Input[str] cluster_ca_certificate: Base64 encoded public certificate that is the root of trust for the cluster.
|
5199
5250
|
"""
|
5200
5251
|
pulumi.set(__self__, "client_certificate_config", client_certificate_config)
|
5201
5252
|
if client_certificate is not None:
|
@@ -5226,6 +5277,9 @@ class ClusterMasterAuthArgs:
|
|
5226
5277
|
@property
|
5227
5278
|
@pulumi.getter(name="clientCertificate")
|
5228
5279
|
def client_certificate(self) -> Optional[pulumi.Input[str]]:
|
5280
|
+
"""
|
5281
|
+
Base64 encoded public certificate used by clients to authenticate to the cluster endpoint.
|
5282
|
+
"""
|
5229
5283
|
return pulumi.get(self, "client_certificate")
|
5230
5284
|
|
5231
5285
|
@client_certificate.setter
|
@@ -5235,6 +5289,9 @@ class ClusterMasterAuthArgs:
|
|
5235
5289
|
@property
|
5236
5290
|
@pulumi.getter(name="clientKey")
|
5237
5291
|
def client_key(self) -> Optional[pulumi.Input[str]]:
|
5292
|
+
"""
|
5293
|
+
Base64 encoded private key used by clients to authenticate to the cluster endpoint.
|
5294
|
+
"""
|
5238
5295
|
return pulumi.get(self, "client_key")
|
5239
5296
|
|
5240
5297
|
@client_key.setter
|
@@ -5244,6 +5301,9 @@ class ClusterMasterAuthArgs:
|
|
5244
5301
|
@property
|
5245
5302
|
@pulumi.getter(name="clusterCaCertificate")
|
5246
5303
|
def cluster_ca_certificate(self) -> Optional[pulumi.Input[str]]:
|
5304
|
+
"""
|
5305
|
+
Base64 encoded public certificate that is the root of trust for the cluster.
|
5306
|
+
"""
|
5247
5307
|
return pulumi.get(self, "cluster_ca_certificate")
|
5248
5308
|
|
5249
5309
|
@cluster_ca_certificate.setter
|
@@ -5255,11 +5315,17 @@ class ClusterMasterAuthArgs:
|
|
5255
5315
|
class ClusterMasterAuthClientCertificateConfigArgs:
|
5256
5316
|
def __init__(__self__, *,
|
5257
5317
|
issue_client_certificate: pulumi.Input[bool]):
|
5318
|
+
"""
|
5319
|
+
:param pulumi.Input[bool] issue_client_certificate: Whether client certificate authorization is enabled for this cluster.
|
5320
|
+
"""
|
5258
5321
|
pulumi.set(__self__, "issue_client_certificate", issue_client_certificate)
|
5259
5322
|
|
5260
5323
|
@property
|
5261
5324
|
@pulumi.getter(name="issueClientCertificate")
|
5262
5325
|
def issue_client_certificate(self) -> pulumi.Input[bool]:
|
5326
|
+
"""
|
5327
|
+
Whether client certificate authorization is enabled for this cluster.
|
5328
|
+
"""
|
5263
5329
|
return pulumi.get(self, "issue_client_certificate")
|
5264
5330
|
|
5265
5331
|
@issue_client_certificate.setter
|
@@ -5574,9 +5640,9 @@ class ClusterNodeConfigArgs:
 in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
 :param pulumi.Input[str] disk_type: Type of the disk attached to each node
 (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-standard'
- :param pulumi.Input[
-
- :param pulumi.Input['ClusterNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config:
+ :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node.
+ :param pulumi.Input[bool] enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
+ :param pulumi.Input['ClusterNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.

 ```python
 import pulumi
@@ -5612,6 +5678,7 @@ class ClusterNodeConfigArgs:
 ```python
 import pulumi
 ```
+ :param pulumi.Input['ClusterNodeConfigHostMaintenancePolicyArgs'] host_maintenance_policy: The maintenance policy for the hosts on which the GKE VMs run on.
 :param pulumi.Input[str] image_type: The image type to use for this node. Note that changing the image type
 will delete and recreate all nodes in the node pool.
 :param pulumi.Input['ClusterNodeConfigKubeletConfigArgs'] kubelet_config: Kubelet configuration, currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
@@ -5652,6 +5719,7 @@ class ClusterNodeConfigArgs:
 :param pulumi.Input['ClusterNodeConfigReservationAffinityArgs'] reservation_affinity: The configuration of the desired reservation which instances could take capacity from. Structure is documented below.
 :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_labels: The GCP labels (key/value pairs) to be applied to each node. Refer [here](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-managing-labels)
 for how these labels are applied to clusters, node pools and nodes.
+ :param pulumi.Input['ClusterNodeConfigSandboxConfigArgs'] sandbox_config: Sandbox configuration for this node.
 :param pulumi.Input[str] service_account: The service account to be used by the Node VMs.
 If not specified, the "default" service account is used.
 :param pulumi.Input['ClusterNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options. Structure is documented below.
@@ -5817,6 +5885,9 @@ class ClusterNodeConfigArgs:
 @property
 @pulumi.getter(name="effectiveTaints")
 def effective_taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigEffectiveTaintArgs']]]]:
+ """
+ List of kubernetes taints applied to each node.
+ """
 return pulumi.get(self, "effective_taints")

 @effective_taints.setter
@@ -5827,7 +5898,6 @@ class ClusterNodeConfigArgs:
 @pulumi.getter(name="enableConfidentialStorage")
 def enable_confidential_storage(self) -> Optional[pulumi.Input[bool]]:
 """
- )
 Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
 """
 return pulumi.get(self, "enable_confidential_storage")
@@ -5840,7 +5910,7 @@ class ClusterNodeConfigArgs:
 @pulumi.getter(name="ephemeralStorageConfig")
 def ephemeral_storage_config(self) -> Optional[pulumi.Input['ClusterNodeConfigEphemeralStorageConfigArgs']]:
 """
-
+ Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.

 ```python
 import pulumi
@@ -5941,6 +6011,9 @@ class ClusterNodeConfigArgs:
 @property
 @pulumi.getter(name="hostMaintenancePolicy")
 def host_maintenance_policy(self) -> Optional[pulumi.Input['ClusterNodeConfigHostMaintenancePolicyArgs']]:
+ """
+ The maintenance policy for the hosts on which the GKE VMs run on.
+ """
 return pulumi.get(self, "host_maintenance_policy")

 @host_maintenance_policy.setter
@@ -6155,6 +6228,9 @@ class ClusterNodeConfigArgs:
 @property
 @pulumi.getter(name="sandboxConfig")
 def sandbox_config(self) -> Optional[pulumi.Input['ClusterNodeConfigSandboxConfigArgs']]:
+ """
+ Sandbox configuration for this node.
+ """
 return pulumi.get(self, "sandbox_config")

 @sandbox_config.setter
@@ -6635,11 +6711,17 @@ class ClusterNodeConfigGvnicArgs:
 class ClusterNodeConfigHostMaintenancePolicyArgs:
 def __init__(__self__, *,
 maintenance_interval: pulumi.Input[str]):
+ """
+ :param pulumi.Input[str] maintenance_interval: .
+ """
 pulumi.set(__self__, "maintenance_interval", maintenance_interval)

 @property
 @pulumi.getter(name="maintenanceInterval")
 def maintenance_interval(self) -> pulumi.Input[str]:
+ """
+ .
+ """
 return pulumi.get(self, "maintenance_interval")

 @maintenance_interval.setter
@@ -6963,11 +7045,17 @@ class ClusterNodeConfigShieldedInstanceConfigArgs:
 class ClusterNodeConfigSoleTenantConfigArgs:
 def __init__(__self__, *,
 node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
+ """
+ :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: .
+ """
 pulumi.set(__self__, "node_affinities", node_affinities)

 @property
 @pulumi.getter(name="nodeAffinities")
 def node_affinities(self) -> pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]]:
+ """
+ .
+ """
 return pulumi.get(self, "node_affinities")

 @node_affinities.setter
@@ -7129,17 +7217,22 @@ class ClusterNodePoolArgs:
 upgrade_settings: Optional[pulumi.Input['ClusterNodePoolUpgradeSettingsArgs']] = None,
 version: Optional[pulumi.Input[str]] = None):
 """
+ :param pulumi.Input['ClusterNodePoolAutoscalingArgs'] autoscaling: Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage.
 :param pulumi.Input[int] initial_node_count: The number of nodes to create in this
 cluster's default node pool. In regional or multi-zonal clusters, this is the
 number of nodes per zone. Must be set if `node_pool` is not set. If you're using
 `container.NodePool` objects with no default node pool, you'll need to
 set this to a value of at least `1`, alongside setting
 `remove_default_node_pool` to `true`.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] instance_group_urls: The resource URLs of the managed instance groups associated with this node pool.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] managed_instance_group_urls: List of instance group URLs which have been assigned to this node pool.
 :param pulumi.Input['ClusterNodePoolManagementArgs'] management: NodeManagement configuration for this NodePool. Structure is documented below.
+ :param pulumi.Input[int] max_pods_per_node: The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled.
 :param pulumi.Input[str] name: The name of the cluster, unique within the project and
 location.

 - - -
+ :param pulumi.Input[str] name_prefix: Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
 :param pulumi.Input['ClusterNodePoolNetworkConfigArgs'] network_config: Configuration for
 [Adding Pod IP address ranges](https://cloud.google.com/kubernetes-engine/docs/how-to/multi-pod-cidr)) to the node pool. Structure is documented below
 :param pulumi.Input['ClusterNodePoolNodeConfigArgs'] node_config: Parameters used in creating the default node pool.
@@ -7147,6 +7240,7 @@ class ClusterNodePoolArgs:
 `container.NodePool` or a `node_pool` block; this configuration
 manages the default node pool, which isn't recommended to be used.
 Structure is documented below.
+ :param pulumi.Input[int] node_count: The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
 :param pulumi.Input[Sequence[pulumi.Input[str]]] node_locations: The list of zones in which the cluster's nodes
 are located. Nodes must be in the region of their regional cluster or in the
 same region as their cluster's zone for zonal clusters. If this is specified for
@@ -7158,6 +7252,8 @@ class ClusterNodePoolArgs:
 locations. In contrast, in a regional cluster, cluster master nodes are present
 in multiple zones in the region. For that reason, regional clusters should be
 preferred.
+ :param pulumi.Input['ClusterNodePoolPlacementPolicyArgs'] placement_policy: Specifies the node placement policy
+ :param pulumi.Input['ClusterNodePoolQueuedProvisioningArgs'] queued_provisioning: Specifies the configuration of queued provisioning
 :param pulumi.Input['ClusterNodePoolUpgradeSettingsArgs'] upgrade_settings: Specifies the upgrade settings for NAP created node pools. Structure is documented below.
 """
 if autoscaling is not None:
@@ -7196,6 +7292,9 @@ class ClusterNodePoolArgs:
 @property
 @pulumi.getter
 def autoscaling(self) -> Optional[pulumi.Input['ClusterNodePoolAutoscalingArgs']]:
+ """
+ Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage.
+ """
 return pulumi.get(self, "autoscaling")

 @autoscaling.setter
@@ -7222,6 +7321,9 @@ class ClusterNodePoolArgs:
 @property
 @pulumi.getter(name="instanceGroupUrls")
 def instance_group_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+ """
+ The resource URLs of the managed instance groups associated with this node pool.
+ """
 return pulumi.get(self, "instance_group_urls")

 @instance_group_urls.setter
@@ -7231,6 +7333,9 @@ class ClusterNodePoolArgs:
 @property
 @pulumi.getter(name="managedInstanceGroupUrls")
 def managed_instance_group_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+ """
+ List of instance group URLs which have been assigned to this node pool.
+ """
 return pulumi.get(self, "managed_instance_group_urls")

 @managed_instance_group_urls.setter
@@ -7252,6 +7357,9 @@ class ClusterNodePoolArgs:
 @property
 @pulumi.getter(name="maxPodsPerNode")
 def max_pods_per_node(self) -> Optional[pulumi.Input[int]]:
+ """
+ The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled.
+ """
 return pulumi.get(self, "max_pods_per_node")

 @max_pods_per_node.setter
@@ -7276,6 +7384,9 @@ class ClusterNodePoolArgs:
 @property
 @pulumi.getter(name="namePrefix")
 def name_prefix(self) -> Optional[pulumi.Input[str]]:
+ """
+ Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
+ """
 return pulumi.get(self, "name_prefix")

 @name_prefix.setter
@@ -7314,6 +7425,9 @@ class ClusterNodePoolArgs:
 @property
 @pulumi.getter(name="nodeCount")
 def node_count(self) -> Optional[pulumi.Input[int]]:
+ """
+ The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
+ """
 return pulumi.get(self, "node_count")

 @node_count.setter
@@ -7345,6 +7459,9 @@ class ClusterNodePoolArgs:
 @property
 @pulumi.getter(name="placementPolicy")
 def placement_policy(self) -> Optional[pulumi.Input['ClusterNodePoolPlacementPolicyArgs']]:
+ """
+ Specifies the node placement policy
+ """
 return pulumi.get(self, "placement_policy")

 @placement_policy.setter
@@ -7354,6 +7471,9 @@ class ClusterNodePoolArgs:
 @property
 @pulumi.getter(name="queuedProvisioning")
 def queued_provisioning(self) -> Optional[pulumi.Input['ClusterNodePoolQueuedProvisioningArgs']]:
+ """
+ Specifies the configuration of queued provisioning
+ """
 return pulumi.get(self, "queued_provisioning")

 @queued_provisioning.setter
@@ -7444,6 +7564,13 @@ class ClusterNodePoolAutoscalingArgs:
 min_node_count: Optional[pulumi.Input[int]] = None,
 total_max_node_count: Optional[pulumi.Input[int]] = None,
 total_min_node_count: Optional[pulumi.Input[int]] = None):
+ """
+ :param pulumi.Input[str] location_policy: Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs.
+ :param pulumi.Input[int] max_node_count: Maximum number of nodes per zone in the node pool. Must be >= min_node_count. Cannot be used with total limits.
+ :param pulumi.Input[int] min_node_count: Minimum number of nodes per zone in the node pool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
+ :param pulumi.Input[int] total_max_node_count: Maximum number of all nodes in the node pool. Must be >= total_min_node_count. Cannot be used with per zone limits.
+ :param pulumi.Input[int] total_min_node_count: Minimum number of all nodes in the node pool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits.
+ """
 if location_policy is not None:
 pulumi.set(__self__, "location_policy", location_policy)
 if max_node_count is not None:
@@ -7458,6 +7585,9 @@ class ClusterNodePoolAutoscalingArgs:
 @property
 @pulumi.getter(name="locationPolicy")
 def location_policy(self) -> Optional[pulumi.Input[str]]:
+ """
+ Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs.
+ """
 return pulumi.get(self, "location_policy")

 @location_policy.setter
@@ -7467,6 +7597,9 @@ class ClusterNodePoolAutoscalingArgs:
 @property
 @pulumi.getter(name="maxNodeCount")
 def max_node_count(self) -> Optional[pulumi.Input[int]]:
+ """
+ Maximum number of nodes per zone in the node pool. Must be >= min_node_count. Cannot be used with total limits.
+ """
 return pulumi.get(self, "max_node_count")

 @max_node_count.setter
@@ -7476,6 +7609,9 @@ class ClusterNodePoolAutoscalingArgs:
 @property
 @pulumi.getter(name="minNodeCount")
 def min_node_count(self) -> Optional[pulumi.Input[int]]:
+ """
+ Minimum number of nodes per zone in the node pool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
+ """
 return pulumi.get(self, "min_node_count")

 @min_node_count.setter
@@ -7485,6 +7621,9 @@ class ClusterNodePoolAutoscalingArgs:
 @property
 @pulumi.getter(name="totalMaxNodeCount")
 def total_max_node_count(self) -> Optional[pulumi.Input[int]]:
+ """
+ Maximum number of all nodes in the node pool. Must be >= total_min_node_count. Cannot be used with per zone limits.
+ """
 return pulumi.get(self, "total_max_node_count")

 @total_max_node_count.setter
@@ -7494,6 +7633,9 @@ class ClusterNodePoolAutoscalingArgs:
 @property
 @pulumi.getter(name="totalMinNodeCount")
 def total_min_node_count(self) -> Optional[pulumi.Input[int]]:
+ """
+ Minimum number of all nodes in the node pool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits.
+ """
 return pulumi.get(self, "total_min_node_count")

 @total_min_node_count.setter
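The autoscaling docstrings above describe the per-zone versus total node limits on `ClusterNodePoolAutoscalingArgs`. A short sketch, assuming an inline node pool block on a cluster; the names and counts are illustrative only:

```python
import pulumi_gcp as gcp

# Hypothetical inline node pool using cluster-wide total limits and the ANY
# location policy instead of per-zone min/max counts.
cluster = gcp.container.Cluster(
    "autoscaled-cluster",
    location="us-central1",
    node_pools=[gcp.container.ClusterNodePoolArgs(
        name="autoscaled-pool",
        autoscaling=gcp.container.ClusterNodePoolAutoscalingArgs(
            total_min_node_count=1,
            total_max_node_count=10,
            location_policy="ANY",
        ),
    )],
)
```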
@@ -7530,7 +7672,7 @@ class ClusterNodePoolDefaultsNodeConfigDefaultsArgs:
 gcfs_config: Optional[pulumi.Input['ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigArgs']] = None,
 logging_variant: Optional[pulumi.Input[str]] = None):
 """
- :param pulumi.Input['ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigArgs'] gcfs_config:
+ :param pulumi.Input['ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigArgs'] gcfs_config: The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is documented below.
 :param pulumi.Input[str] logging_variant: The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information.
 """
 if gcfs_config is not None:
@@ -7542,7 +7684,7 @@ class ClusterNodePoolDefaultsNodeConfigDefaultsArgs:
 @pulumi.getter(name="gcfsConfig")
 def gcfs_config(self) -> Optional[pulumi.Input['ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigArgs']]:
 """
-
+ The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is documented below.
 """
 return pulumi.get(self, "gcfs_config")

@@ -7640,12 +7782,15 @@ class ClusterNodePoolNetworkConfigArgs:
 pod_ipv4_cidr_block: Optional[pulumi.Input[str]] = None,
 pod_range: Optional[pulumi.Input[str]] = None):
 """
+ :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNetworkConfigAdditionalNodeNetworkConfigArgs']]] additional_node_network_configs: We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface
+ :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs']]] additional_pod_network_configs: We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node
 :param pulumi.Input[bool] create_pod_range: Whether to create a new range for pod IPs in this node pool. Defaults are provided for `pod_range` and `pod_ipv4_cidr_block` if they are not specified.
 :param pulumi.Input[bool] enable_private_nodes: Enables the private cluster feature,
 creating a private endpoint on the cluster. In a private cluster, nodes only
 have RFC 1918 private addresses and communicate with the master's private
 endpoint via private networking.
 :param pulumi.Input['ClusterNodePoolNetworkConfigNetworkPerformanceConfigArgs'] network_performance_config: Network bandwidth tier configuration.
+ :param pulumi.Input['ClusterNodePoolNetworkConfigPodCidrOverprovisionConfigArgs'] pod_cidr_overprovision_config: Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited
 :param pulumi.Input[str] pod_ipv4_cidr_block: The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
 :param pulumi.Input[str] pod_range: The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID.
 """
@@ -7669,6 +7814,9 @@ class ClusterNodePoolNetworkConfigArgs:
 @property
 @pulumi.getter(name="additionalNodeNetworkConfigs")
 def additional_node_network_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNetworkConfigAdditionalNodeNetworkConfigArgs']]]]:
+ """
+ We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface
+ """
 return pulumi.get(self, "additional_node_network_configs")

 @additional_node_network_configs.setter
@@ -7678,6 +7826,9 @@ class ClusterNodePoolNetworkConfigArgs:
 @property
 @pulumi.getter(name="additionalPodNetworkConfigs")
 def additional_pod_network_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs']]]]:
+ """
+ We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node
+ """
 return pulumi.get(self, "additional_pod_network_configs")

 @additional_pod_network_configs.setter
@@ -7726,6 +7877,9 @@ class ClusterNodePoolNetworkConfigArgs:
 @property
 @pulumi.getter(name="podCidrOverprovisionConfig")
 def pod_cidr_overprovision_config(self) -> Optional[pulumi.Input['ClusterNodePoolNetworkConfigPodCidrOverprovisionConfigArgs']]:
+ """
+ Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited
+ """
 return pulumi.get(self, "pod_cidr_overprovision_config")

 @pod_cidr_overprovision_config.setter
@@ -7809,6 +7963,8 @@ class ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs:
 secondary_pod_range: Optional[pulumi.Input[str]] = None,
 subnetwork: Optional[pulumi.Input[str]] = None):
 """
+ :param pulumi.Input[int] max_pods_per_node: The maximum number of pods per node which use this pod network.
+ :param pulumi.Input[str] secondary_pod_range: The name of the secondary range on the subnet which provides IP address for this pod range.
 :param pulumi.Input[str] subnetwork: The name or self_link of the Google Compute Engine
 subnetwork in which the cluster's instances are launched.
 """
@@ -7822,6 +7978,9 @@ class ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs:
 @property
 @pulumi.getter(name="maxPodsPerNode")
 def max_pods_per_node(self) -> Optional[pulumi.Input[int]]:
+ """
+ The maximum number of pods per node which use this pod network.
+ """
 return pulumi.get(self, "max_pods_per_node")

 @max_pods_per_node.setter
@@ -7831,6 +7990,9 @@ class ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs:
 @property
 @pulumi.getter(name="secondaryPodRange")
 def secondary_pod_range(self) -> Optional[pulumi.Input[str]]:
+ """
+ The name of the secondary range on the subnet which provides IP address for this pod range.
+ """
 return pulumi.get(self, "secondary_pod_range")

 @secondary_pod_range.setter
@@ -7948,9 +8110,9 @@ class ClusterNodePoolNodeConfigArgs:
 in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
 :param pulumi.Input[str] disk_type: Type of the disk attached to each node
 (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-standard'
- :param pulumi.Input[
-
- :param pulumi.Input['ClusterNodePoolNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config:
+ :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node.
+ :param pulumi.Input[bool] enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
+ :param pulumi.Input['ClusterNodePoolNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.

 ```python
 import pulumi
@@ -7986,6 +8148,7 @@ class ClusterNodePoolNodeConfigArgs:
 ```python
 import pulumi
 ```
+ :param pulumi.Input['ClusterNodePoolNodeConfigHostMaintenancePolicyArgs'] host_maintenance_policy: The maintenance policy for the hosts on which the GKE VMs run on.
 :param pulumi.Input[str] image_type: The image type to use for this node. Note that changing the image type
 will delete and recreate all nodes in the node pool.
 :param pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigArgs'] kubelet_config: Kubelet configuration, currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
@@ -8026,6 +8189,7 @@ class ClusterNodePoolNodeConfigArgs:
 :param pulumi.Input['ClusterNodePoolNodeConfigReservationAffinityArgs'] reservation_affinity: The configuration of the desired reservation which instances could take capacity from. Structure is documented below.
 :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_labels: The GCP labels (key/value pairs) to be applied to each node. Refer [here](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-managing-labels)
 for how these labels are applied to clusters, node pools and nodes.
+ :param pulumi.Input['ClusterNodePoolNodeConfigSandboxConfigArgs'] sandbox_config: Sandbox configuration for this node.
 :param pulumi.Input[str] service_account: The service account to be used by the Node VMs.
 If not specified, the "default" service account is used.
 :param pulumi.Input['ClusterNodePoolNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options. Structure is documented below.
@@ -8191,6 +8355,9 @@ class ClusterNodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="effectiveTaints")
 def effective_taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigEffectiveTaintArgs']]]]:
+ """
+ List of kubernetes taints applied to each node.
+ """
 return pulumi.get(self, "effective_taints")

 @effective_taints.setter
@@ -8201,7 +8368,6 @@ class ClusterNodePoolNodeConfigArgs:
 @pulumi.getter(name="enableConfidentialStorage")
 def enable_confidential_storage(self) -> Optional[pulumi.Input[bool]]:
 """
- )
 Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
 """
 return pulumi.get(self, "enable_confidential_storage")
@@ -8214,7 +8380,7 @@ class ClusterNodePoolNodeConfigArgs:
 @pulumi.getter(name="ephemeralStorageConfig")
 def ephemeral_storage_config(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigEphemeralStorageConfigArgs']]:
 """
-
+ Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.

 ```python
 import pulumi
@@ -8315,6 +8481,9 @@ class ClusterNodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="hostMaintenancePolicy")
 def host_maintenance_policy(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigHostMaintenancePolicyArgs']]:
+ """
+ The maintenance policy for the hosts on which the GKE VMs run on.
+ """
 return pulumi.get(self, "host_maintenance_policy")

 @host_maintenance_policy.setter
@@ -8529,6 +8698,9 @@ class ClusterNodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="sandboxConfig")
 def sandbox_config(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigSandboxConfigArgs']]:
+ """
+ Sandbox configuration for this node.
+ """
 return pulumi.get(self, "sandbox_config")

 @sandbox_config.setter
@@ -9009,11 +9181,17 @@ class ClusterNodePoolNodeConfigGvnicArgs:
 class ClusterNodePoolNodeConfigHostMaintenancePolicyArgs:
 def __init__(__self__, *,
 maintenance_interval: pulumi.Input[str]):
+ """
+ :param pulumi.Input[str] maintenance_interval: .
+ """
 pulumi.set(__self__, "maintenance_interval", maintenance_interval)

 @property
 @pulumi.getter(name="maintenanceInterval")
 def maintenance_interval(self) -> pulumi.Input[str]:
+ """
+ .
+ """
 return pulumi.get(self, "maintenance_interval")

 @maintenance_interval.setter
@@ -9337,11 +9515,17 @@ class ClusterNodePoolNodeConfigShieldedInstanceConfigArgs:
 class ClusterNodePoolNodeConfigSoleTenantConfigArgs:
 def __init__(__self__, *,
 node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
+ """
+ :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: .
+ """
 pulumi.set(__self__, "node_affinities", node_affinities)

 @property
 @pulumi.getter(name="nodeAffinities")
 def node_affinities(self) -> pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]:
+ """
+ .
+ """
 return pulumi.get(self, "node_affinities")

 @node_affinities.setter
@@ -9492,6 +9676,8 @@ class ClusterNodePoolPlacementPolicyArgs:
 """
 :param pulumi.Input[str] type: Telemetry integration for the cluster. Supported values (`ENABLED, DISABLED, SYSTEM_ONLY`);
 `SYSTEM_ONLY` (Only system components are monitored and logged) is only available in GKE versions 1.15 and later.
+ :param pulumi.Input[str] policy_name: If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned.
+ :param pulumi.Input[str] tpu_topology: TPU placement topology for pod slice node pool. https://cloud.google.com/tpu/docs/types-topologies#tpu_topologies
 """
 pulumi.set(__self__, "type", type)
 if policy_name is not None:
@@ -9515,6 +9701,9 @@ class ClusterNodePoolPlacementPolicyArgs:
 @property
 @pulumi.getter(name="policyName")
 def policy_name(self) -> Optional[pulumi.Input[str]]:
+ """
+ If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned.
+ """
 return pulumi.get(self, "policy_name")

 @policy_name.setter
@@ -9524,6 +9713,9 @@ class ClusterNodePoolPlacementPolicyArgs:
 @property
 @pulumi.getter(name="tpuTopology")
 def tpu_topology(self) -> Optional[pulumi.Input[str]]:
+ """
+ TPU placement topology for pod slice node pool. https://cloud.google.com/tpu/docs/types-topologies#tpu_topologies
+ """
 return pulumi.get(self, "tpu_topology")

 @tpu_topology.setter
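The placement policy hunks above document `policy_name` and `tpu_topology` on `ClusterNodePoolPlacementPolicyArgs`. A hedged sketch of a compact placement policy on an inline node pool; the pool name and node count are assumptions for illustration, and `tpu_topology` would only be set for TPU pod-slice pools:

```python
import pulumi_gcp as gcp

# Hypothetical inline node pool pinned to a COMPACT placement policy.
pool = gcp.container.ClusterNodePoolArgs(
    name="compact-pool",
    node_count=2,
    placement_policy=gcp.container.ClusterNodePoolPlacementPolicyArgs(
        type="COMPACT",
    ),
)
```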
@@ -10048,8 +10240,8 @@ class ClusterProtectConfigArgs:
 workload_config: Optional[pulumi.Input['ClusterProtectConfigWorkloadConfigArgs']] = None,
 workload_vulnerability_mode: Optional[pulumi.Input[str]] = None):
 """
- :param pulumi.Input['ClusterProtectConfigWorkloadConfigArgs'] workload_config:
- :param pulumi.Input[str] workload_vulnerability_mode:
+ :param pulumi.Input['ClusterProtectConfigWorkloadConfigArgs'] workload_config: WorkloadConfig defines which actions are enabled for a cluster's workload configurations. Structure is documented below
+ :param pulumi.Input[str] workload_vulnerability_mode: Sets which mode to use for Protect workload vulnerability scanning feature. Accepted values are DISABLED, BASIC.
 """
 if workload_config is not None:
 pulumi.set(__self__, "workload_config", workload_config)
@@ -10060,7 +10252,7 @@ class ClusterProtectConfigArgs:
 @pulumi.getter(name="workloadConfig")
 def workload_config(self) -> Optional[pulumi.Input['ClusterProtectConfigWorkloadConfigArgs']]:
 """
-
+ WorkloadConfig defines which actions are enabled for a cluster's workload configurations. Structure is documented below
 """
 return pulumi.get(self, "workload_config")

@@ -10072,7 +10264,7 @@ class ClusterProtectConfigArgs:
 @pulumi.getter(name="workloadVulnerabilityMode")
 def workload_vulnerability_mode(self) -> Optional[pulumi.Input[str]]:
 """
-
+ Sets which mode to use for Protect workload vulnerability scanning feature. Accepted values are DISABLED, BASIC.
 """
 return pulumi.get(self, "workload_vulnerability_mode")

@@ -10086,7 +10278,7 @@ class ClusterProtectConfigWorkloadConfigArgs:
 def __init__(__self__, *,
 audit_mode: pulumi.Input[str]):
 """
- :param pulumi.Input[str] audit_mode:
+ :param pulumi.Input[str] audit_mode: Sets which mode of auditing should be used for the cluster's workloads. Accepted values are DISABLED, BASIC.
 """
 pulumi.set(__self__, "audit_mode", audit_mode)

@@ -10094,7 +10286,7 @@ class ClusterProtectConfigWorkloadConfigArgs:
 @pulumi.getter(name="auditMode")
 def audit_mode(self) -> pulumi.Input[str]:
 """
-
+ Sets which mode of auditing should be used for the cluster's workloads. Accepted values are DISABLED, BASIC.
 """
 return pulumi.get(self, "audit_mode")

@@ -10215,11 +10407,17 @@ class ClusterResourceUsageExportConfigArgs:
 class ClusterResourceUsageExportConfigBigqueryDestinationArgs:
 def __init__(__self__, *,
 dataset_id: pulumi.Input[str]):
+ """
+ :param pulumi.Input[str] dataset_id: The ID of a BigQuery Dataset.
+ """
 pulumi.set(__self__, "dataset_id", dataset_id)

 @property
 @pulumi.getter(name="datasetId")
 def dataset_id(self) -> pulumi.Input[str]:
+ """
+ The ID of a BigQuery Dataset.
+ """
 return pulumi.get(self, "dataset_id")

 @dataset_id.setter
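The hunk above adds the `dataset_id` docstring on `ClusterResourceUsageExportConfigBigqueryDestinationArgs`. A minimal sketch of the export block, assuming an existing BigQuery dataset; the dataset name is a placeholder:

```python
import pulumi_gcp as gcp

# Hypothetical resource usage export pointing a cluster's metering data
# at an existing BigQuery dataset.
usage_export = gcp.container.ClusterResourceUsageExportConfigArgs(
    enable_network_egress_metering=True,
    bigquery_destination=gcp.container.ClusterResourceUsageExportConfigBigqueryDestinationArgs(
        dataset_id="gke_usage_metering",
    ),
)
```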
@@ -10296,6 +10494,8 @@ class ClusterTpuConfigArgs:
 use_service_networking: Optional[pulumi.Input[bool]] = None):
 """
 :param pulumi.Input[bool] enabled: Enable Binary Authorization for this cluster. Deprecated in favor of `evaluation_mode`.
+ :param pulumi.Input[str] ipv4_cidr_block: IPv4 CIDR block reserved for Cloud TPU in the VPC.
+ :param pulumi.Input[bool] use_service_networking: Whether to use service networking for Cloud TPU or not
 """
 pulumi.set(__self__, "enabled", enabled)
 if ipv4_cidr_block is not None:
@@ -10318,6 +10518,9 @@ class ClusterTpuConfigArgs:
 @property
 @pulumi.getter(name="ipv4CidrBlock")
 def ipv4_cidr_block(self) -> Optional[pulumi.Input[str]]:
+ """
+ IPv4 CIDR block reserved for Cloud TPU in the VPC.
+ """
 return pulumi.get(self, "ipv4_cidr_block")

 @ipv4_cidr_block.setter
@@ -10327,6 +10530,9 @@ class ClusterTpuConfigArgs:
 @property
 @pulumi.getter(name="useServiceNetworking")
 def use_service_networking(self) -> Optional[pulumi.Input[bool]]:
+ """
+ Whether to use service networking for Cloud TPU or not
+ """
 return pulumi.get(self, "use_service_networking")

 @use_service_networking.setter
@@ -10573,6 +10779,8 @@ class NodePoolNetworkConfigArgs:
 Structure is documented below
 :param pulumi.Input[bool] create_pod_range: Whether to create a new range for pod IPs in this node pool. Defaults are provided for `pod_range` and `pod_ipv4_cidr_block` if they are not specified.
 :param pulumi.Input[bool] enable_private_nodes: Whether nodes have internal IP addresses only.
+ :param pulumi.Input['NodePoolNetworkConfigNetworkPerformanceConfigArgs'] network_performance_config: Network bandwidth tier configuration.
+ :param pulumi.Input['NodePoolNetworkConfigPodCidrOverprovisionConfigArgs'] pod_cidr_overprovision_config: Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited
 :param pulumi.Input[str] pod_ipv4_cidr_block: The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
 :param pulumi.Input[str] pod_range: The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID.
 """
@@ -10646,6 +10854,9 @@ class NodePoolNetworkConfigArgs:
 @property
 @pulumi.getter(name="networkPerformanceConfig")
 def network_performance_config(self) -> Optional[pulumi.Input['NodePoolNetworkConfigNetworkPerformanceConfigArgs']]:
+ """
+ Network bandwidth tier configuration.
+ """
 return pulumi.get(self, "network_performance_config")

 @network_performance_config.setter
@@ -10655,6 +10866,9 @@ class NodePoolNetworkConfigArgs:
 @property
 @pulumi.getter(name="podCidrOverprovisionConfig")
 def pod_cidr_overprovision_config(self) -> Optional[pulumi.Input['NodePoolNetworkConfigPodCidrOverprovisionConfigArgs']]:
+ """
+ Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited
+ """
 return pulumi.get(self, "pod_cidr_overprovision_config")

 @pod_cidr_overprovision_config.setter
@@ -10784,11 +10998,17 @@ class NodePoolNetworkConfigAdditionalPodNetworkConfigArgs:
 class NodePoolNetworkConfigNetworkPerformanceConfigArgs:
 def __init__(__self__, *,
 total_egress_bandwidth_tier: pulumi.Input[str]):
+ """
+ :param pulumi.Input[str] total_egress_bandwidth_tier: Specifies the total network bandwidth tier for the NodePool.
+ """
 pulumi.set(__self__, "total_egress_bandwidth_tier", total_egress_bandwidth_tier)

 @property
 @pulumi.getter(name="totalEgressBandwidthTier")
 def total_egress_bandwidth_tier(self) -> pulumi.Input[str]:
+ """
+ Specifies the total network bandwidth tier for the NodePool.
+ """
 return pulumi.get(self, "total_egress_bandwidth_tier")

 @total_egress_bandwidth_tier.setter
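The hunk above documents `total_egress_bandwidth_tier` on `NodePoolNetworkConfigNetworkPerformanceConfigArgs`. A hedged sketch of a node pool network config opting into the Tier 1 bandwidth tier; it assumes the node pool's machine family supports that tier:

```python
import pulumi_gcp as gcp

# Hypothetical node pool network config selecting the TIER_1 egress bandwidth tier.
net_config = gcp.container.NodePoolNetworkConfigArgs(
    network_performance_config=gcp.container.NodePoolNetworkConfigNetworkPerformanceConfigArgs(
        total_egress_bandwidth_tier="TIER_1",
    ),
)
```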
@@ -10853,7 +11073,43 @@ class NodePoolNodeConfigArgs:
 taints: Optional[pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigTaintArgs']]]] = None,
 workload_metadata_config: Optional[pulumi.Input['NodePoolNodeConfigWorkloadMetadataConfigArgs']] = None):
 """
+ :param pulumi.Input['NodePoolNodeConfigAdvancedMachineFeaturesArgs'] advanced_machine_features: Specifies options for controlling advanced machine features.
+ :param pulumi.Input[str] boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
 :param pulumi.Input['NodePoolNodeConfigConfidentialNodesArgs'] confidential_nodes: Configuration for Confidential Nodes feature. Structure is documented below.
+ :param pulumi.Input[int] disk_size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
+ :param pulumi.Input[str] disk_type: Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
+ :param pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node.
+ :param pulumi.Input[bool] enable_confidential_storage: If enabled boot disks are configured with confidential mode.
+ :param pulumi.Input['NodePoolNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
+ :param pulumi.Input['NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs'] ephemeral_storage_local_ssd_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
+ :param pulumi.Input['NodePoolNodeConfigFastSocketArgs'] fast_socket: Enable or disable NCCL Fast Socket in the node pool.
+ :param pulumi.Input['NodePoolNodeConfigGcfsConfigArgs'] gcfs_config: GCFS configuration for this node.
+ :param pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigGuestAcceleratorArgs']]] guest_accelerators: List of the type and count of accelerator cards attached to the instance.
+ :param pulumi.Input['NodePoolNodeConfigGvnicArgs'] gvnic: Enable or disable gvnic in the node pool.
+ :param pulumi.Input['NodePoolNodeConfigHostMaintenancePolicyArgs'] host_maintenance_policy: The maintenance policy for the hosts on which the GKE VMs run on.
+ :param pulumi.Input[str] image_type: The image type to use for this node. Note that for a given image type, the latest version of it will be used.
+ :param pulumi.Input['NodePoolNodeConfigKubeletConfigArgs'] kubelet_config: Node kubelet configs.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node.
+ :param pulumi.Input['NodePoolNodeConfigLinuxNodeConfigArgs'] linux_node_config: Parameters that can be configured on Linux nodes.
+ :param pulumi.Input['NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs'] local_nvme_ssd_block_config: Parameters for raw-block local NVMe SSDs.
+ :param pulumi.Input[int] local_ssd_count: The number of local SSD disks to be attached to the node.
+ :param pulumi.Input[str] logging_variant: Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
+ :param pulumi.Input[str] machine_type: The name of a Google Compute Engine machine type.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: The metadata key/value pairs assigned to instances in the cluster.
+ :param pulumi.Input[str] min_cpu_platform: Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
+ :param pulumi.Input[str] node_group: Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] oauth_scopes: The set of Google API scopes to be made available on all of the node VMs.
+ :param pulumi.Input[bool] preemptible: Whether the nodes are created as preemptible VM instances.
+ :param pulumi.Input['NodePoolNodeConfigReservationAffinityArgs'] reservation_affinity: The reservation affinity configuration for the node pool.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_labels: The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
+ :param pulumi.Input['NodePoolNodeConfigSandboxConfigArgs'] sandbox_config: Sandbox configuration for this node.
+ :param pulumi.Input[str] service_account: The Google Cloud Platform Service Account to be used by the node VMs.
+ :param pulumi.Input['NodePoolNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options.
+ :param pulumi.Input['NodePoolNodeConfigSoleTenantConfigArgs'] sole_tenant_config: Node affinity options for sole tenant node pools.
+ :param pulumi.Input[bool] spot: Whether the nodes are created as spot VM instances.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The list of instance tags applied to all nodes.
+ :param pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigTaintArgs']]] taints: List of Kubernetes taints to be applied to each node.
+ :param pulumi.Input['NodePoolNodeConfigWorkloadMetadataConfigArgs'] workload_metadata_config: The workload metadata configuration for this node.
 """
 if advanced_machine_features is not None:
 pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
@@ -10933,6 +11189,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="advancedMachineFeatures")
 def advanced_machine_features(self) -> Optional[pulumi.Input['NodePoolNodeConfigAdvancedMachineFeaturesArgs']]:
+ """
+ Specifies options for controlling advanced machine features.
+ """
 return pulumi.get(self, "advanced_machine_features")

 @advanced_machine_features.setter
@@ -10942,6 +11201,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="bootDiskKmsKey")
 def boot_disk_kms_key(self) -> Optional[pulumi.Input[str]]:
+ """
+ The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
+ """
 return pulumi.get(self, "boot_disk_kms_key")

 @boot_disk_kms_key.setter
@@ -10963,6 +11225,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="diskSizeGb")
 def disk_size_gb(self) -> Optional[pulumi.Input[int]]:
+ """
+ Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
+ """
 return pulumi.get(self, "disk_size_gb")

 @disk_size_gb.setter
@@ -10972,6 +11237,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="diskType")
 def disk_type(self) -> Optional[pulumi.Input[str]]:
+ """
+ Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
+ """
 return pulumi.get(self, "disk_type")

 @disk_type.setter
@@ -10981,6 +11249,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="effectiveTaints")
 def effective_taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigEffectiveTaintArgs']]]]:
+ """
+ List of kubernetes taints applied to each node.
+ """
 return pulumi.get(self, "effective_taints")

 @effective_taints.setter
@@ -10990,6 +11261,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="enableConfidentialStorage")
 def enable_confidential_storage(self) -> Optional[pulumi.Input[bool]]:
+ """
+ If enabled boot disks are configured with confidential mode.
+ """
 return pulumi.get(self, "enable_confidential_storage")

 @enable_confidential_storage.setter
@@ -10999,6 +11273,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="ephemeralStorageConfig")
 def ephemeral_storage_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigEphemeralStorageConfigArgs']]:
+ """
+ Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
+ """
 return pulumi.get(self, "ephemeral_storage_config")

 @ephemeral_storage_config.setter
@@ -11008,6 +11285,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="ephemeralStorageLocalSsdConfig")
 def ephemeral_storage_local_ssd_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs']]:
+ """
+ Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
+ """
 return pulumi.get(self, "ephemeral_storage_local_ssd_config")

 @ephemeral_storage_local_ssd_config.setter
@@ -11017,6 +11297,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="fastSocket")
 def fast_socket(self) -> Optional[pulumi.Input['NodePoolNodeConfigFastSocketArgs']]:
+ """
+ Enable or disable NCCL Fast Socket in the node pool.
+ """
 return pulumi.get(self, "fast_socket")

 @fast_socket.setter
@@ -11026,6 +11309,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="gcfsConfig")
 def gcfs_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigGcfsConfigArgs']]:
+ """
+ GCFS configuration for this node.
+ """
 return pulumi.get(self, "gcfs_config")

 @gcfs_config.setter
@@ -11035,6 +11321,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="guestAccelerators")
 def guest_accelerators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigGuestAcceleratorArgs']]]]:
+ """
+ List of the type and count of accelerator cards attached to the instance.
+ """
 return pulumi.get(self, "guest_accelerators")

 @guest_accelerators.setter
@@ -11044,6 +11333,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter
 def gvnic(self) -> Optional[pulumi.Input['NodePoolNodeConfigGvnicArgs']]:
+ """
+ Enable or disable gvnic in the node pool.
+ """
 return pulumi.get(self, "gvnic")

 @gvnic.setter
@@ -11053,6 +11345,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="hostMaintenancePolicy")
 def host_maintenance_policy(self) -> Optional[pulumi.Input['NodePoolNodeConfigHostMaintenancePolicyArgs']]:
+ """
+ The maintenance policy for the hosts on which the GKE VMs run on.
+ """
 return pulumi.get(self, "host_maintenance_policy")

 @host_maintenance_policy.setter
@@ -11062,6 +11357,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="imageType")
 def image_type(self) -> Optional[pulumi.Input[str]]:
+ """
+ The image type to use for this node. Note that for a given image type, the latest version of it will be used.
+ """
 return pulumi.get(self, "image_type")

 @image_type.setter
@@ -11071,6 +11369,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="kubeletConfig")
 def kubelet_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigArgs']]:
+ """
+ Node kubelet configs.
+ """
 return pulumi.get(self, "kubelet_config")

 @kubelet_config.setter
@@ -11080,6 +11381,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter
 def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
+ """
+ The map of Kubernetes labels (key/value pairs) to be applied to each node. These will added in addition to any default label(s) that Kubernetes may apply to the node.
+ """
 return pulumi.get(self, "labels")

 @labels.setter
@@ -11089,6 +11393,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="linuxNodeConfig")
 def linux_node_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigLinuxNodeConfigArgs']]:
+ """
+ Parameters that can be configured on Linux nodes.
+ """
 return pulumi.get(self, "linux_node_config")

 @linux_node_config.setter
@@ -11098,6 +11405,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="localNvmeSsdBlockConfig")
 def local_nvme_ssd_block_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs']]:
+ """
+ Parameters for raw-block local NVMe SSDs.
+ """
 return pulumi.get(self, "local_nvme_ssd_block_config")

 @local_nvme_ssd_block_config.setter
@@ -11107,6 +11417,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="localSsdCount")
 def local_ssd_count(self) -> Optional[pulumi.Input[int]]:
+ """
+ The number of local SSD disks to be attached to the node.
+ """
 return pulumi.get(self, "local_ssd_count")

 @local_ssd_count.setter
@@ -11116,6 +11429,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="loggingVariant")
 def logging_variant(self) -> Optional[pulumi.Input[str]]:
+ """
+ Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
+ """
 return pulumi.get(self, "logging_variant")

 @logging_variant.setter
@@ -11125,6 +11441,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="machineType")
 def machine_type(self) -> Optional[pulumi.Input[str]]:
+ """
+ The name of a Google Compute Engine machine type.
+ """
 return pulumi.get(self, "machine_type")

 @machine_type.setter
@@ -11134,6 +11453,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter
 def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
+ """
+ The metadata key/value pairs assigned to instances in the cluster.
+ """
 return pulumi.get(self, "metadata")

 @metadata.setter
@@ -11143,6 +11465,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="minCpuPlatform")
 def min_cpu_platform(self) -> Optional[pulumi.Input[str]]:
+ """
+ Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
+ """
 return pulumi.get(self, "min_cpu_platform")

 @min_cpu_platform.setter
@@ -11152,6 +11477,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="nodeGroup")
 def node_group(self) -> Optional[pulumi.Input[str]]:
+ """
+ Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
+ """
 return pulumi.get(self, "node_group")

 @node_group.setter
@@ -11161,6 +11489,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter(name="oauthScopes")
 def oauth_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+ """
+ The set of Google API scopes to be made available on all of the node VMs.
+ """
 return pulumi.get(self, "oauth_scopes")

 @oauth_scopes.setter
@@ -11170,6 +11501,9 @@ class NodePoolNodeConfigArgs:
 @property
 @pulumi.getter
 def preemptible(self) -> Optional[pulumi.Input[bool]]:
+ """
+ Whether the nodes are created as preemptible VM instances.
+ """
 return pulumi.get(self, "preemptible")

 @preemptible.setter
@@ -11179,6 +11513,9 @@ class NodePoolNodeConfigArgs:
 @property
|
@pulumi.getter(name="reservationAffinity")
|
11181
11515
|
def reservation_affinity(self) -> Optional[pulumi.Input['NodePoolNodeConfigReservationAffinityArgs']]:
|
11516
|
+
"""
|
11517
|
+
The reservation affinity configuration for the node pool.
|
11518
|
+
"""
|
11182
11519
|
return pulumi.get(self, "reservation_affinity")
|
11183
11520
|
|
11184
11521
|
@reservation_affinity.setter
|
@@ -11188,6 +11525,9 @@ class NodePoolNodeConfigArgs:
|
|
11188
11525
|
@property
|
11189
11526
|
@pulumi.getter(name="resourceLabels")
|
11190
11527
|
def resource_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
|
11528
|
+
"""
|
11529
|
+
The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
|
11530
|
+
"""
|
11191
11531
|
return pulumi.get(self, "resource_labels")
|
11192
11532
|
|
11193
11533
|
@resource_labels.setter
|
@@ -11197,6 +11537,9 @@ class NodePoolNodeConfigArgs:
|
|
11197
11537
|
@property
|
11198
11538
|
@pulumi.getter(name="sandboxConfig")
|
11199
11539
|
def sandbox_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigSandboxConfigArgs']]:
|
11540
|
+
"""
|
11541
|
+
Sandbox configuration for this node.
|
11542
|
+
"""
|
11200
11543
|
return pulumi.get(self, "sandbox_config")
|
11201
11544
|
|
11202
11545
|
@sandbox_config.setter
|
@@ -11206,6 +11549,9 @@ class NodePoolNodeConfigArgs:
|
|
11206
11549
|
@property
|
11207
11550
|
@pulumi.getter(name="serviceAccount")
|
11208
11551
|
def service_account(self) -> Optional[pulumi.Input[str]]:
|
11552
|
+
"""
|
11553
|
+
The Google Cloud Platform Service Account to be used by the node VMs.
|
11554
|
+
"""
|
11209
11555
|
return pulumi.get(self, "service_account")
|
11210
11556
|
|
11211
11557
|
@service_account.setter
|
@@ -11215,6 +11561,9 @@ class NodePoolNodeConfigArgs:
|
|
11215
11561
|
@property
|
11216
11562
|
@pulumi.getter(name="shieldedInstanceConfig")
|
11217
11563
|
def shielded_instance_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigShieldedInstanceConfigArgs']]:
|
11564
|
+
"""
|
11565
|
+
Shielded Instance options.
|
11566
|
+
"""
|
11218
11567
|
return pulumi.get(self, "shielded_instance_config")
|
11219
11568
|
|
11220
11569
|
@shielded_instance_config.setter
|
@@ -11224,6 +11573,9 @@ class NodePoolNodeConfigArgs:
|
|
11224
11573
|
@property
|
11225
11574
|
@pulumi.getter(name="soleTenantConfig")
|
11226
11575
|
def sole_tenant_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigSoleTenantConfigArgs']]:
|
11576
|
+
"""
|
11577
|
+
Node affinity options for sole tenant node pools.
|
11578
|
+
"""
|
11227
11579
|
return pulumi.get(self, "sole_tenant_config")
|
11228
11580
|
|
11229
11581
|
@sole_tenant_config.setter
|
@@ -11233,6 +11585,9 @@ class NodePoolNodeConfigArgs:
|
|
11233
11585
|
@property
|
11234
11586
|
@pulumi.getter
|
11235
11587
|
def spot(self) -> Optional[pulumi.Input[bool]]:
|
11588
|
+
"""
|
11589
|
+
Whether the nodes are created as spot VM instances.
|
11590
|
+
"""
|
11236
11591
|
return pulumi.get(self, "spot")
|
11237
11592
|
|
11238
11593
|
@spot.setter
|
@@ -11242,6 +11597,9 @@ class NodePoolNodeConfigArgs:
|
|
11242
11597
|
@property
|
11243
11598
|
@pulumi.getter
|
11244
11599
|
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
|
11600
|
+
"""
|
11601
|
+
The list of instance tags applied to all nodes.
|
11602
|
+
"""
|
11245
11603
|
return pulumi.get(self, "tags")
|
11246
11604
|
|
11247
11605
|
@tags.setter
|
@@ -11251,6 +11609,9 @@ class NodePoolNodeConfigArgs:
|
|
11251
11609
|
@property
|
11252
11610
|
@pulumi.getter
|
11253
11611
|
def taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigTaintArgs']]]]:
|
11612
|
+
"""
|
11613
|
+
List of Kubernetes taints to be applied to each node.
|
11614
|
+
"""
|
11254
11615
|
return pulumi.get(self, "taints")
|
11255
11616
|
|
11256
11617
|
@taints.setter
|
@@ -11260,6 +11621,9 @@ class NodePoolNodeConfigArgs:
|
|
11260
11621
|
@property
|
11261
11622
|
@pulumi.getter(name="workloadMetadataConfig")
|
11262
11623
|
def workload_metadata_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigWorkloadMetadataConfigArgs']]:
|
11624
|
+
"""
|
11625
|
+
The workload metadata configuration for this node.
|
11626
|
+
"""
|
11263
11627
|
return pulumi.get(self, "workload_metadata_config")
|
11264
11628
|
|
11265
11629
|
@workload_metadata_config.setter
|
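The hunks above only add docstrings to `NodePoolNodeConfigArgs`; the API surface itself is unchanged. For orientation, a minimal sketch (not taken from this diff, and assuming these classes are exposed under `pulumi_gcp.container` as in earlier releases) of how a few of the newly documented fields are typically set; the cluster name and all values are illustrative placeholders:

```python
import pulumi_gcp as gcp

# Hypothetical node pool; "my-cluster" and the field values are examples only.
primary_nodes = gcp.container.NodePool(
    "primary-nodes",
    cluster="my-cluster",
    node_count=2,
    node_config=gcp.container.NodePoolNodeConfigArgs(
        machine_type="e2-standard-4",    # name of a Compute Engine machine type
        image_type="COS_CONTAINERD",     # latest version of the image type is used
        labels={"team": "data"},         # Kubernetes labels added to each node
        resource_labels={"env": "dev"},  # GCE resource labels on the node pool
        spot=True,                       # create nodes as spot VM instances
        tags=["gke-node"],               # instance tags applied to all nodes
        oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
    ),
)
```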
@@ -11271,11 +11635,17 @@ class NodePoolNodeConfigArgs:
 class NodePoolNodeConfigAdvancedMachineFeaturesArgs:
     def __init__(__self__, *,
                  threads_per_core: pulumi.Input[int]):
+        """
+        :param pulumi.Input[int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
+        """
         pulumi.set(__self__, "threads_per_core", threads_per_core)
 
     @property
     @pulumi.getter(name="threadsPerCore")
     def threads_per_core(self) -> pulumi.Input[int]:
+        """
+        The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
+        """
         return pulumi.get(self, "threads_per_core")
 
     @threads_per_core.setter
@@ -11313,6 +11683,11 @@ class NodePoolNodeConfigEffectiveTaintArgs:
                  effect: Optional[pulumi.Input[str]] = None,
                  key: Optional[pulumi.Input[str]] = None,
                  value: Optional[pulumi.Input[str]] = None):
+        """
+        :param pulumi.Input[str] effect: Effect for taint.
+        :param pulumi.Input[str] key: Key for taint.
+        :param pulumi.Input[str] value: Value for taint.
+        """
         if effect is not None:
             pulumi.set(__self__, "effect", effect)
         if key is not None:
@@ -11323,6 +11698,9 @@ class NodePoolNodeConfigEffectiveTaintArgs:
     @property
     @pulumi.getter
     def effect(self) -> Optional[pulumi.Input[str]]:
+        """
+        Effect for taint.
+        """
         return pulumi.get(self, "effect")
 
     @effect.setter
@@ -11332,6 +11710,9 @@ class NodePoolNodeConfigEffectiveTaintArgs:
     @property
     @pulumi.getter
     def key(self) -> Optional[pulumi.Input[str]]:
+        """
+        Key for taint.
+        """
         return pulumi.get(self, "key")
 
     @key.setter
@@ -11341,6 +11722,9 @@ class NodePoolNodeConfigEffectiveTaintArgs:
     @property
     @pulumi.getter
     def value(self) -> Optional[pulumi.Input[str]]:
+        """
+        Value for taint.
+        """
         return pulumi.get(self, "value")
 
     @value.setter
@@ -11352,11 +11736,17 @@ class NodePoolNodeConfigEffectiveTaintArgs:
 class NodePoolNodeConfigEphemeralStorageConfigArgs:
     def __init__(__self__, *,
                  local_ssd_count: pulumi.Input[int]):
+        """
+        :param pulumi.Input[int] local_ssd_count: Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
+        """
         pulumi.set(__self__, "local_ssd_count", local_ssd_count)
 
     @property
     @pulumi.getter(name="localSsdCount")
     def local_ssd_count(self) -> pulumi.Input[int]:
+        """
+        Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
+        """
         return pulumi.get(self, "local_ssd_count")
 
     @local_ssd_count.setter
@@ -11368,11 +11758,17 @@ class NodePoolNodeConfigEphemeralStorageConfigArgs:
 class NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs:
     def __init__(__self__, *,
                  local_ssd_count: pulumi.Input[int]):
+        """
+        :param pulumi.Input[int] local_ssd_count: Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
+        """
         pulumi.set(__self__, "local_ssd_count", local_ssd_count)
 
     @property
     @pulumi.getter(name="localSsdCount")
     def local_ssd_count(self) -> pulumi.Input[int]:
+        """
+        Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
+        """
         return pulumi.get(self, "local_ssd_count")
 
     @local_ssd_count.setter
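A small sketch (not from the diff) of the ephemeral-storage args documented above. It assumes `NodePoolNodeConfigArgs` exposes an `ephemeral_storage_local_ssd_config` field (that field is outside the hunks shown here), and the SSD count is an example value that must follow the 375/3000 GB sizing rule from the docstring:

```python
import pulumi_gcp as gcp

# Back ephemeral storage with one local NVMe SSD (illustrative count).
ephemeral = gcp.container.NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs(
    local_ssd_count=1,
)
# Assumed field name on the node config; verify against the generated SDK.
node_config = gcp.container.NodePoolNodeConfigArgs(
    ephemeral_storage_local_ssd_config=ephemeral,
)
```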
@@ -11437,9 +11833,13 @@ class NodePoolNodeConfigGuestAcceleratorArgs:
                  gpu_partition_size: Optional[pulumi.Input[str]] = None,
                  gpu_sharing_config: Optional[pulumi.Input['NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs']] = None):
         """
+        :param pulumi.Input[int] count: The number of the accelerator cards exposed to an instance.
         :param pulumi.Input[str] type: The type of the policy. Supports a single value: COMPACT.
               Specifying COMPACT placement policy type places node pool's nodes in a closer
               physical proximity in order to reduce network latency between nodes.
+        :param pulumi.Input['NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs'] gpu_driver_installation_config: Configuration for auto installation of GPU driver.
+        :param pulumi.Input[str] gpu_partition_size: Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
+        :param pulumi.Input['NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs'] gpu_sharing_config: Configuration for GPU sharing.
         """
         pulumi.set(__self__, "count", count)
         pulumi.set(__self__, "type", type)
@@ -11453,6 +11853,9 @@ class NodePoolNodeConfigGuestAcceleratorArgs:
     @property
     @pulumi.getter
     def count(self) -> pulumi.Input[int]:
+        """
+        The number of the accelerator cards exposed to an instance.
+        """
         return pulumi.get(self, "count")
 
     @count.setter
@@ -11476,6 +11879,9 @@ class NodePoolNodeConfigGuestAcceleratorArgs:
     @property
     @pulumi.getter(name="gpuDriverInstallationConfig")
     def gpu_driver_installation_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs']]:
+        """
+        Configuration for auto installation of GPU driver.
+        """
         return pulumi.get(self, "gpu_driver_installation_config")
 
     @gpu_driver_installation_config.setter
@@ -11485,6 +11891,9 @@ class NodePoolNodeConfigGuestAcceleratorArgs:
     @property
     @pulumi.getter(name="gpuPartitionSize")
     def gpu_partition_size(self) -> Optional[pulumi.Input[str]]:
+        """
+        Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
+        """
         return pulumi.get(self, "gpu_partition_size")
 
     @gpu_partition_size.setter
@@ -11494,6 +11903,9 @@ class NodePoolNodeConfigGuestAcceleratorArgs:
     @property
     @pulumi.getter(name="gpuSharingConfig")
     def gpu_sharing_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs']]:
+        """
+        Configuration for GPU sharing.
+        """
         return pulumi.get(self, "gpu_sharing_config")
 
     @gpu_sharing_config.setter
@@ -11505,11 +11917,17 @@ class NodePoolNodeConfigGuestAcceleratorArgs:
 class NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs:
     def __init__(__self__, *,
                  gpu_driver_version: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] gpu_driver_version: Mode for how the GPU driver is installed.
+        """
         pulumi.set(__self__, "gpu_driver_version", gpu_driver_version)
 
     @property
     @pulumi.getter(name="gpuDriverVersion")
     def gpu_driver_version(self) -> pulumi.Input[str]:
+        """
+        Mode for how the GPU driver is installed.
+        """
         return pulumi.get(self, "gpu_driver_version")
 
     @gpu_driver_version.setter
@@ -11522,12 +11940,19 @@ class NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs:
     def __init__(__self__, *,
                  gpu_sharing_strategy: pulumi.Input[str],
                  max_shared_clients_per_gpu: pulumi.Input[int]):
+        """
+        :param pulumi.Input[str] gpu_sharing_strategy: The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
+        :param pulumi.Input[int] max_shared_clients_per_gpu: The maximum number of containers that can share a GPU.
+        """
         pulumi.set(__self__, "gpu_sharing_strategy", gpu_sharing_strategy)
         pulumi.set(__self__, "max_shared_clients_per_gpu", max_shared_clients_per_gpu)
 
     @property
     @pulumi.getter(name="gpuSharingStrategy")
     def gpu_sharing_strategy(self) -> pulumi.Input[str]:
+        """
+        The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
+        """
         return pulumi.get(self, "gpu_sharing_strategy")
 
     @gpu_sharing_strategy.setter
@@ -11537,6 +11962,9 @@ class NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs:
     @property
     @pulumi.getter(name="maxSharedClientsPerGpu")
     def max_shared_clients_per_gpu(self) -> pulumi.Input[int]:
+        """
+        The maximum number of containers that can share a GPU.
+        """
         return pulumi.get(self, "max_shared_clients_per_gpu")
 
     @max_shared_clients_per_gpu.setter
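For context, a hedged sketch (not part of the diff) combining the guest-accelerator args documented above; the accelerator type, driver version, and sharing strategy below are illustrative values, not something this release mandates:

```python
import pulumi_gcp as gcp

# Example GPU node configuration; "nvidia-tesla-t4", "DEFAULT" and
# "TIME_SHARING" are placeholder values.
gpu = gcp.container.NodePoolNodeConfigGuestAcceleratorArgs(
    count=1,                    # accelerator cards exposed to each instance
    type="nvidia-tesla-t4",
    gpu_driver_installation_config=gcp.container.NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs(
        gpu_driver_version="DEFAULT",   # mode for how the GPU driver is installed
    ),
    gpu_sharing_config=gcp.container.NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs(
        gpu_sharing_strategy="TIME_SHARING",
        max_shared_clients_per_gpu=2,   # max containers sharing one GPU
    ),
)
node_config = gcp.container.NodePoolNodeConfigArgs(guest_accelerators=[gpu])
```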
@@ -11572,11 +12000,17 @@ class NodePoolNodeConfigGvnicArgs:
 class NodePoolNodeConfigHostMaintenancePolicyArgs:
     def __init__(__self__, *,
                  maintenance_interval: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] maintenance_interval: .
+        """
         pulumi.set(__self__, "maintenance_interval", maintenance_interval)
 
     @property
     @pulumi.getter(name="maintenanceInterval")
     def maintenance_interval(self) -> pulumi.Input[str]:
+        """
+        .
+        """
         return pulumi.get(self, "maintenance_interval")
 
     @maintenance_interval.setter
@@ -11591,6 +12025,12 @@ class NodePoolNodeConfigKubeletConfigArgs:
                  cpu_cfs_quota: Optional[pulumi.Input[bool]] = None,
                  cpu_cfs_quota_period: Optional[pulumi.Input[str]] = None,
                  pod_pids_limit: Optional[pulumi.Input[int]] = None):
+        """
+        :param pulumi.Input[str] cpu_manager_policy: Control the CPU management policy on the node.
+        :param pulumi.Input[bool] cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
+        :param pulumi.Input[str] cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
+        :param pulumi.Input[int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
+        """
         pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
         if cpu_cfs_quota is not None:
             pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
@@ -11602,6 +12042,9 @@ class NodePoolNodeConfigKubeletConfigArgs:
     @property
     @pulumi.getter(name="cpuManagerPolicy")
     def cpu_manager_policy(self) -> pulumi.Input[str]:
+        """
+        Control the CPU management policy on the node.
+        """
         return pulumi.get(self, "cpu_manager_policy")
 
     @cpu_manager_policy.setter
@@ -11611,6 +12054,9 @@ class NodePoolNodeConfigKubeletConfigArgs:
     @property
     @pulumi.getter(name="cpuCfsQuota")
     def cpu_cfs_quota(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Enable CPU CFS quota enforcement for containers that specify CPU limits.
+        """
         return pulumi.get(self, "cpu_cfs_quota")
 
     @cpu_cfs_quota.setter
@@ -11620,6 +12066,9 @@ class NodePoolNodeConfigKubeletConfigArgs:
     @property
     @pulumi.getter(name="cpuCfsQuotaPeriod")
     def cpu_cfs_quota_period(self) -> Optional[pulumi.Input[str]]:
+        """
+        Set the CPU CFS quota period value 'cpu.cfs_period_us'.
+        """
         return pulumi.get(self, "cpu_cfs_quota_period")
 
     @cpu_cfs_quota_period.setter
@@ -11629,6 +12078,9 @@ class NodePoolNodeConfigKubeletConfigArgs:
     @property
     @pulumi.getter(name="podPidsLimit")
     def pod_pids_limit(self) -> Optional[pulumi.Input[int]]:
+        """
+        Controls the maximum number of processes allowed to run in a pod.
+        """
         return pulumi.get(self, "pod_pids_limit")
 
     @pod_pids_limit.setter
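A minimal sketch (not from the diff) of the kubelet args documented above; "static", "100ms", and the PID limit are illustrative values:

```python
import pulumi_gcp as gcp

# Example kubelet settings for a node pool.
kubelet = gcp.container.NodePoolNodeConfigKubeletConfigArgs(
    cpu_manager_policy="static",   # CPU management policy on the node
    cpu_cfs_quota=True,            # enforce CFS quota for containers with CPU limits
    cpu_cfs_quota_period="100ms",  # cpu.cfs_period_us value
    pod_pids_limit=4096,           # max processes allowed per pod
)
node_config = gcp.container.NodePoolNodeConfigArgs(kubelet_config=kubelet)
```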
@@ -11641,6 +12093,10 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
     def __init__(__self__, *,
                  cgroup_mode: Optional[pulumi.Input[str]] = None,
                  sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
+        """
+        :param pulumi.Input[str] cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
+        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
+        """
         if cgroup_mode is not None:
             pulumi.set(__self__, "cgroup_mode", cgroup_mode)
         if sysctls is not None:
@@ -11649,6 +12105,9 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
     @property
     @pulumi.getter(name="cgroupMode")
     def cgroup_mode(self) -> Optional[pulumi.Input[str]]:
+        """
+        cgroupMode specifies the cgroup mode to be used on the node.
+        """
         return pulumi.get(self, "cgroup_mode")
 
     @cgroup_mode.setter
@@ -11658,6 +12117,9 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
     @property
     @pulumi.getter
     def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
+        """
+        The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
+        """
         return pulumi.get(self, "sysctls")
 
     @sysctls.setter
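A short sketch (not from the diff) of the Linux node config args documented above; the cgroup mode string and the sysctl key/value are illustrative assumptions:

```python
import pulumi_gcp as gcp

# Example Linux node settings; values are placeholders.
linux = gcp.container.NodePoolNodeConfigLinuxNodeConfigArgs(
    cgroup_mode="CGROUP_MODE_V2",              # cgroup mode used on the node
    sysctls={"net.core.somaxconn": "1024"},    # kernel parameters for nodes and pods
)
node_config = gcp.container.NodePoolNodeConfigArgs(linux_node_config=linux)
```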
@@ -11669,11 +12131,17 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
 class NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs:
     def __init__(__self__, *,
                  local_ssd_count: pulumi.Input[int]):
+        """
+        :param pulumi.Input[int] local_ssd_count: Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
+        """
         pulumi.set(__self__, "local_ssd_count", local_ssd_count)
 
     @property
     @pulumi.getter(name="localSsdCount")
     def local_ssd_count(self) -> pulumi.Input[int]:
+        """
+        Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
+        """
         return pulumi.get(self, "local_ssd_count")
 
     @local_ssd_count.setter
@@ -11687,6 +12155,11 @@ class NodePoolNodeConfigReservationAffinityArgs:
                  consume_reservation_type: pulumi.Input[str],
                  key: Optional[pulumi.Input[str]] = None,
                  values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
+        """
+        :param pulumi.Input[str] consume_reservation_type: Corresponds to the type of reservation consumption.
+        :param pulumi.Input[str] key: The label key of a reservation resource.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] values: The label values of the reservation resource.
+        """
         pulumi.set(__self__, "consume_reservation_type", consume_reservation_type)
         if key is not None:
             pulumi.set(__self__, "key", key)
@@ -11696,6 +12169,9 @@ class NodePoolNodeConfigReservationAffinityArgs:
     @property
     @pulumi.getter(name="consumeReservationType")
     def consume_reservation_type(self) -> pulumi.Input[str]:
+        """
+        Corresponds to the type of reservation consumption.
+        """
         return pulumi.get(self, "consume_reservation_type")
 
     @consume_reservation_type.setter
@@ -11705,6 +12181,9 @@ class NodePoolNodeConfigReservationAffinityArgs:
     @property
     @pulumi.getter
     def key(self) -> Optional[pulumi.Input[str]]:
+        """
+        The label key of a reservation resource.
+        """
         return pulumi.get(self, "key")
 
     @key.setter
@@ -11714,6 +12193,9 @@ class NodePoolNodeConfigReservationAffinityArgs:
     @property
     @pulumi.getter
     def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+        """
+        The label values of the reservation resource.
+        """
         return pulumi.get(self, "values")
 
     @values.setter
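A hedged sketch (not from the diff) of the reservation-affinity args documented above; the consumption type, label key, and reservation name are illustrative values:

```python
import pulumi_gcp as gcp

# Example: consume a specific Compute Engine reservation (placeholder values).
affinity = gcp.container.NodePoolNodeConfigReservationAffinityArgs(
    consume_reservation_type="SPECIFIC_RESERVATION",
    key="compute.googleapis.com/reservation-name",  # label key of the reservation
    values=["my-reservation"],                       # label values of the reservation
)
node_config = gcp.container.NodePoolNodeConfigArgs(reservation_affinity=affinity)
```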
@@ -11725,11 +12207,17 @@ class NodePoolNodeConfigReservationAffinityArgs:
 class NodePoolNodeConfigSandboxConfigArgs:
     def __init__(__self__, *,
                  sandbox_type: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] sandbox_type: Type of the sandbox to use for the node (e.g. 'gvisor')
+        """
         pulumi.set(__self__, "sandbox_type", sandbox_type)
 
     @property
     @pulumi.getter(name="sandboxType")
     def sandbox_type(self) -> pulumi.Input[str]:
+        """
+        Type of the sandbox to use for the node (e.g. 'gvisor')
+        """
         return pulumi.get(self, "sandbox_type")
 
     @sandbox_type.setter
@@ -11742,6 +12230,10 @@ class NodePoolNodeConfigShieldedInstanceConfigArgs:
     def __init__(__self__, *,
                  enable_integrity_monitoring: Optional[pulumi.Input[bool]] = None,
                  enable_secure_boot: Optional[pulumi.Input[bool]] = None):
+        """
+        :param pulumi.Input[bool] enable_integrity_monitoring: Defines whether the instance has integrity monitoring enabled.
+        :param pulumi.Input[bool] enable_secure_boot: Defines whether the instance has Secure Boot enabled.
+        """
         if enable_integrity_monitoring is not None:
             pulumi.set(__self__, "enable_integrity_monitoring", enable_integrity_monitoring)
         if enable_secure_boot is not None:
@@ -11750,6 +12242,9 @@ class NodePoolNodeConfigShieldedInstanceConfigArgs:
     @property
     @pulumi.getter(name="enableIntegrityMonitoring")
     def enable_integrity_monitoring(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Defines whether the instance has integrity monitoring enabled.
+        """
         return pulumi.get(self, "enable_integrity_monitoring")
 
     @enable_integrity_monitoring.setter
@@ -11759,6 +12254,9 @@ class NodePoolNodeConfigShieldedInstanceConfigArgs:
     @property
     @pulumi.getter(name="enableSecureBoot")
     def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Defines whether the instance has Secure Boot enabled.
+        """
         return pulumi.get(self, "enable_secure_boot")
 
     @enable_secure_boot.setter
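A minimal sketch (not from the diff) of the Shielded Instance options documented above; enabling both flags is an example choice, not a requirement:

```python
import pulumi_gcp as gcp

# Example Shielded Instance options for the node pool.
shielded = gcp.container.NodePoolNodeConfigShieldedInstanceConfigArgs(
    enable_integrity_monitoring=True,  # integrity monitoring on the instance
    enable_secure_boot=True,           # Secure Boot on the instance
)
node_config = gcp.container.NodePoolNodeConfigArgs(shielded_instance_config=shielded)
```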
@@ -11770,11 +12268,17 @@ class NodePoolNodeConfigShieldedInstanceConfigArgs:
 class NodePoolNodeConfigSoleTenantConfigArgs:
     def __init__(__self__, *,
                  node_affinities: pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
+        """
+        :param pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: .
+        """
         pulumi.set(__self__, "node_affinities", node_affinities)
 
     @property
     @pulumi.getter(name="nodeAffinities")
     def node_affinities(self) -> pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]:
+        """
+        .
+        """
         return pulumi.get(self, "node_affinities")
 
     @node_affinities.setter
@@ -11788,6 +12292,11 @@ class NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs:
                  key: pulumi.Input[str],
                  operator: pulumi.Input[str],
                  values: pulumi.Input[Sequence[pulumi.Input[str]]]):
+        """
+        :param pulumi.Input[str] key: .
+        :param pulumi.Input[str] operator: .
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] values: .
+        """
         pulumi.set(__self__, "key", key)
         pulumi.set(__self__, "operator", operator)
         pulumi.set(__self__, "values", values)
@@ -11795,6 +12304,9 @@ class NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs:
     @property
     @pulumi.getter
     def key(self) -> pulumi.Input[str]:
+        """
+        .
+        """
         return pulumi.get(self, "key")
 
     @key.setter
@@ -11804,6 +12316,9 @@ class NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs:
     @property
     @pulumi.getter
     def operator(self) -> pulumi.Input[str]:
+        """
+        .
+        """
         return pulumi.get(self, "operator")
 
     @operator.setter
@@ -11813,6 +12328,9 @@ class NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs:
     @property
     @pulumi.getter
     def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
+        """
+        .
+        """
         return pulumi.get(self, "values")
 
     @values.setter
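The new docstrings for the sole-tenant affinity fields are empty placeholders ("."), so the sketch below (not from the diff) is purely illustrative; the affinity key, operator, and node-group name are assumptions:

```python
import pulumi_gcp as gcp

# Example sole-tenant placement; key/operator/values are placeholder assumptions.
sole_tenant = gcp.container.NodePoolNodeConfigSoleTenantConfigArgs(
    node_affinities=[
        gcp.container.NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs(
            key="compute.googleapis.com/node-group-name",
            operator="IN",
            values=["my-node-group"],
        ),
    ],
)
node_config = gcp.container.NodePoolNodeConfigArgs(sole_tenant_config=sole_tenant)
```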
@@ -11826,6 +12344,11 @@ class NodePoolNodeConfigTaintArgs:
                  effect: pulumi.Input[str],
                  key: pulumi.Input[str],
                  value: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] effect: Effect for taint.
+        :param pulumi.Input[str] key: Key for taint.
+        :param pulumi.Input[str] value: Value for taint.
+        """
         pulumi.set(__self__, "effect", effect)
         pulumi.set(__self__, "key", key)
         pulumi.set(__self__, "value", value)
@@ -11833,6 +12356,9 @@ class NodePoolNodeConfigTaintArgs:
     @property
     @pulumi.getter
     def effect(self) -> pulumi.Input[str]:
+        """
+        Effect for taint.
+        """
         return pulumi.get(self, "effect")
 
     @effect.setter
@@ -11842,6 +12368,9 @@ class NodePoolNodeConfigTaintArgs:
     @property
     @pulumi.getter
     def key(self) -> pulumi.Input[str]:
+        """
+        Key for taint.
+        """
         return pulumi.get(self, "key")
 
     @key.setter
@@ -11851,6 +12380,9 @@ class NodePoolNodeConfigTaintArgs:
     @property
     @pulumi.getter
     def value(self) -> pulumi.Input[str]:
+        """
+        Value for taint.
+        """
         return pulumi.get(self, "value")
 
     @value.setter
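A small sketch (not from the diff) of the taint args documented above; the key, value, and the "NO_SCHEDULE" effect are illustrative choices:

```python
import pulumi_gcp as gcp

# Example taint to keep general workloads off dedicated nodes (placeholder values).
taint = gcp.container.NodePoolNodeConfigTaintArgs(
    key="dedicated",
    value="gpu",
    effect="NO_SCHEDULE",
)
node_config = gcp.container.NodePoolNodeConfigArgs(taints=[taint])
```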
@@ -11862,11 +12394,17 @@ class NodePoolNodeConfigTaintArgs:
 class NodePoolNodeConfigWorkloadMetadataConfigArgs:
     def __init__(__self__, *,
                  mode: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] mode: Mode is the configuration for how to expose metadata to workloads running on the node.
+        """
         pulumi.set(__self__, "mode", mode)
 
     @property
     @pulumi.getter
     def mode(self) -> pulumi.Input[str]:
+        """
+        Mode is the configuration for how to expose metadata to workloads running on the node.
+        """
         return pulumi.get(self, "mode")
 
     @mode.setter
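A minimal sketch (not from the diff) of the workload metadata config documented above; "GKE_METADATA" is an illustrative mode value:

```python
import pulumi_gcp as gcp

# Example workload metadata mode (placeholder value).
wmc = gcp.container.NodePoolNodeConfigWorkloadMetadataConfigArgs(mode="GKE_METADATA")
node_config = gcp.container.NodePoolNodeConfigArgs(workload_metadata_config=wmc)
```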