pulumiverse-scaleway 1.30.0a1750140900-py3-none-any.whl → 1.31.0-py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- pulumiverse_scaleway/__init__.py +38 -1
- pulumiverse_scaleway/_inputs.py +2331 -2160
- pulumiverse_scaleway/_utilities.py +1 -1
- pulumiverse_scaleway/account/__init__.py +2 -1
- pulumiverse_scaleway/account/get_availability_zones.py +9 -8
- pulumiverse_scaleway/account/get_project.py +21 -20
- pulumiverse_scaleway/account/get_projects.py +28 -21
- pulumiverse_scaleway/account/get_ssh_key.py +24 -23
- pulumiverse_scaleway/account/outputs.py +20 -19
- pulumiverse_scaleway/account/project.py +59 -57
- pulumiverse_scaleway/account/ssh_key.py +87 -85
- pulumiverse_scaleway/account_project.py +59 -57
- pulumiverse_scaleway/account_ssh_key.py +87 -85
- pulumiverse_scaleway/apple_silicon_server.py +251 -155
- pulumiverse_scaleway/applesilicon/__init__.py +2 -1
- pulumiverse_scaleway/applesilicon/_inputs.py +89 -34
- pulumiverse_scaleway/applesilicon/outputs.py +54 -21
- pulumiverse_scaleway/applesilicon/server.py +251 -155
- pulumiverse_scaleway/autoscaling/__init__.py +13 -0
- pulumiverse_scaleway/autoscaling/_inputs.py +559 -0
- pulumiverse_scaleway/autoscaling/instance_group.py +808 -0
- pulumiverse_scaleway/autoscaling/instance_policy.py +635 -0
- pulumiverse_scaleway/autoscaling/instance_template.py +879 -0
- pulumiverse_scaleway/autoscaling/outputs.py +439 -0
- pulumiverse_scaleway/baremetal_server.py +262 -260
- pulumiverse_scaleway/billing/__init__.py +2 -1
- pulumiverse_scaleway/billing/get_consumptions.py +10 -9
- pulumiverse_scaleway/billing/get_invoices.py +19 -18
- pulumiverse_scaleway/billing/outputs.py +73 -72
- pulumiverse_scaleway/block/__init__.py +2 -1
- pulumiverse_scaleway/block/_inputs.py +12 -11
- pulumiverse_scaleway/block/get_snapshot.py +29 -28
- pulumiverse_scaleway/block/get_volume.py +28 -27
- pulumiverse_scaleway/block/outputs.py +14 -13
- pulumiverse_scaleway/block/snapshot.py +73 -71
- pulumiverse_scaleway/block/volume.py +115 -113
- pulumiverse_scaleway/block_snapshot.py +73 -71
- pulumiverse_scaleway/block_volume.py +115 -113
- pulumiverse_scaleway/cockpit.py +38 -36
- pulumiverse_scaleway/cockpit_alert_manager.py +52 -50
- pulumiverse_scaleway/cockpit_grafana_user.py +59 -57
- pulumiverse_scaleway/cockpit_source.py +115 -113
- pulumiverse_scaleway/cockpit_token.py +66 -64
- pulumiverse_scaleway/config/__init__.py +2 -1
- pulumiverse_scaleway/config/__init__.pyi +2 -1
- pulumiverse_scaleway/config/vars.py +2 -1
- pulumiverse_scaleway/container.py +573 -343
- pulumiverse_scaleway/container_cron.py +80 -78
- pulumiverse_scaleway/container_domain.py +52 -50
- pulumiverse_scaleway/container_namespace.py +211 -140
- pulumiverse_scaleway/container_token.py +80 -78
- pulumiverse_scaleway/container_trigger.py +59 -57
- pulumiverse_scaleway/containers/__init__.py +2 -1
- pulumiverse_scaleway/containers/_inputs.py +72 -71
- pulumiverse_scaleway/containers/container.py +573 -343
- pulumiverse_scaleway/containers/cron.py +80 -78
- pulumiverse_scaleway/containers/domain.py +52 -50
- pulumiverse_scaleway/containers/get_container.py +87 -42
- pulumiverse_scaleway/containers/get_namespace.py +43 -31
- pulumiverse_scaleway/containers/namespace.py +211 -140
- pulumiverse_scaleway/containers/outputs.py +62 -61
- pulumiverse_scaleway/containers/token.py +80 -78
- pulumiverse_scaleway/containers/trigger.py +59 -57
- pulumiverse_scaleway/database.py +66 -64
- pulumiverse_scaleway/database_acl.py +31 -29
- pulumiverse_scaleway/database_backup.py +101 -99
- pulumiverse_scaleway/database_instance.py +297 -295
- pulumiverse_scaleway/database_privilege.py +73 -71
- pulumiverse_scaleway/database_read_replica.py +45 -43
- pulumiverse_scaleway/database_user.py +73 -71
- pulumiverse_scaleway/databases/__init__.py +2 -1
- pulumiverse_scaleway/databases/_inputs.py +191 -190
- pulumiverse_scaleway/databases/acl.py +31 -29
- pulumiverse_scaleway/databases/database.py +66 -64
- pulumiverse_scaleway/databases/database_backup.py +101 -99
- pulumiverse_scaleway/databases/get_acl.py +13 -12
- pulumiverse_scaleway/databases/get_database.py +19 -18
- pulumiverse_scaleway/databases/get_database_backup.py +34 -33
- pulumiverse_scaleway/databases/get_instance.py +43 -42
- pulumiverse_scaleway/databases/get_privilege.py +24 -23
- pulumiverse_scaleway/databases/instance.py +297 -295
- pulumiverse_scaleway/databases/outputs.py +188 -187
- pulumiverse_scaleway/databases/privilege.py +73 -71
- pulumiverse_scaleway/databases/read_replica.py +45 -43
- pulumiverse_scaleway/databases/serverless_database.py +80 -78
- pulumiverse_scaleway/databases/snapshot.py +101 -99
- pulumiverse_scaleway/databases/user.py +73 -71
- pulumiverse_scaleway/domain/__init__.py +2 -1
- pulumiverse_scaleway/domain/_inputs.py +531 -530
- pulumiverse_scaleway/domain/get_record.py +38 -37
- pulumiverse_scaleway/domain/get_zone.py +24 -23
- pulumiverse_scaleway/domain/outputs.py +358 -357
- pulumiverse_scaleway/domain/record.py +129 -127
- pulumiverse_scaleway/domain/registration.py +94 -92
- pulumiverse_scaleway/domain/zone.py +87 -85
- pulumiverse_scaleway/domain_record.py +129 -127
- pulumiverse_scaleway/domain_zone.py +87 -85
- pulumiverse_scaleway/edge_services_backend_stage.py +45 -43
- pulumiverse_scaleway/edge_services_cache_stage.py +162 -113
- pulumiverse_scaleway/edge_services_dns_stage.py +108 -106
- pulumiverse_scaleway/edge_services_head_stage.py +31 -29
- pulumiverse_scaleway/edge_services_pipeline.py +66 -64
- pulumiverse_scaleway/edge_services_plan.py +31 -29
- pulumiverse_scaleway/edge_services_route_stage.py +59 -57
- pulumiverse_scaleway/edge_services_tls_stage.py +122 -120
- pulumiverse_scaleway/edge_services_waf_stage.py +87 -85
- pulumiverse_scaleway/elasticmetal/__init__.py +2 -1
- pulumiverse_scaleway/elasticmetal/_inputs.py +123 -122
- pulumiverse_scaleway/elasticmetal/get_ip.py +56 -39
- pulumiverse_scaleway/elasticmetal/get_ips.py +24 -23
- pulumiverse_scaleway/elasticmetal/get_offer.py +29 -28
- pulumiverse_scaleway/elasticmetal/get_option.py +19 -18
- pulumiverse_scaleway/elasticmetal/get_os.py +23 -22
- pulumiverse_scaleway/elasticmetal/get_partition_schema.py +29 -28
- pulumiverse_scaleway/elasticmetal/get_server.py +39 -38
- pulumiverse_scaleway/elasticmetal/ip.py +139 -137
- pulumiverse_scaleway/elasticmetal/ip_mac_address.py +87 -85
- pulumiverse_scaleway/elasticmetal/outputs.py +233 -232
- pulumiverse_scaleway/elasticmetal/server.py +262 -260
- pulumiverse_scaleway/file_filesystem.py +552 -0
- pulumiverse_scaleway/flexible_ip.py +139 -137
- pulumiverse_scaleway/flexible_ip_mac_address.py +87 -85
- pulumiverse_scaleway/function.py +394 -284
- pulumiverse_scaleway/function_cron.py +80 -78
- pulumiverse_scaleway/function_domain.py +52 -50
- pulumiverse_scaleway/function_namespace.py +197 -126
- pulumiverse_scaleway/function_token.py +80 -78
- pulumiverse_scaleway/function_trigger.py +59 -57
- pulumiverse_scaleway/functions/__init__.py +2 -1
- pulumiverse_scaleway/functions/_inputs.py +42 -41
- pulumiverse_scaleway/functions/cron.py +80 -78
- pulumiverse_scaleway/functions/domain.py +52 -50
- pulumiverse_scaleway/functions/function.py +394 -284
- pulumiverse_scaleway/functions/get_function.py +69 -46
- pulumiverse_scaleway/functions/get_namespace.py +42 -30
- pulumiverse_scaleway/functions/namespace.py +197 -126
- pulumiverse_scaleway/functions/outputs.py +26 -25
- pulumiverse_scaleway/functions/token.py +80 -78
- pulumiverse_scaleway/functions/trigger.py +59 -57
- pulumiverse_scaleway/get_account_project.py +21 -20
- pulumiverse_scaleway/get_account_ssh_key.py +24 -23
- pulumiverse_scaleway/get_availability_zones.py +9 -8
- pulumiverse_scaleway/get_baremetal_offer.py +29 -28
- pulumiverse_scaleway/get_baremetal_option.py +19 -18
- pulumiverse_scaleway/get_baremetal_os.py +23 -22
- pulumiverse_scaleway/get_baremetal_server.py +39 -38
- pulumiverse_scaleway/get_billing_consumptions.py +10 -9
- pulumiverse_scaleway/get_billing_invoices.py +19 -18
- pulumiverse_scaleway/get_block_snapshot.py +29 -28
- pulumiverse_scaleway/get_block_volume.py +28 -27
- pulumiverse_scaleway/get_cockpit.py +10 -9
- pulumiverse_scaleway/get_cockpit_source.py +77 -33
- pulumiverse_scaleway/get_config.py +13 -12
- pulumiverse_scaleway/get_container.py +87 -42
- pulumiverse_scaleway/get_container_namespace.py +43 -31
- pulumiverse_scaleway/get_database.py +19 -18
- pulumiverse_scaleway/get_database_acl.py +13 -12
- pulumiverse_scaleway/get_database_backup.py +34 -33
- pulumiverse_scaleway/get_database_instance.py +43 -42
- pulumiverse_scaleway/get_database_privilege.py +24 -23
- pulumiverse_scaleway/get_domain_record.py +38 -37
- pulumiverse_scaleway/get_domain_zone.py +24 -23
- pulumiverse_scaleway/get_flexible_ip.py +56 -39
- pulumiverse_scaleway/get_flexible_ips.py +24 -23
- pulumiverse_scaleway/get_function.py +69 -46
- pulumiverse_scaleway/get_function_namespace.py +42 -30
- pulumiverse_scaleway/get_iam_api_key.py +17 -16
- pulumiverse_scaleway/get_iam_application.py +23 -22
- pulumiverse_scaleway/get_iam_group.py +25 -24
- pulumiverse_scaleway/get_iam_ssh_key.py +24 -23
- pulumiverse_scaleway/get_iam_user.py +23 -22
- pulumiverse_scaleway/get_instance_image.py +42 -41
- pulumiverse_scaleway/get_instance_ip.py +20 -19
- pulumiverse_scaleway/get_instance_placement_group.py +28 -27
- pulumiverse_scaleway/get_instance_private_nic.py +31 -30
- pulumiverse_scaleway/get_instance_security_group.py +31 -30
- pulumiverse_scaleway/get_instance_server.py +59 -47
- pulumiverse_scaleway/get_instance_servers.py +24 -23
- pulumiverse_scaleway/get_instance_snapshot.py +29 -28
- pulumiverse_scaleway/get_instance_volume.py +30 -29
- pulumiverse_scaleway/get_iot_device.py +31 -30
- pulumiverse_scaleway/get_iot_hub.py +39 -38
- pulumiverse_scaleway/get_ipam_ip.py +51 -50
- pulumiverse_scaleway/get_ipam_ips.py +44 -43
- pulumiverse_scaleway/get_k8s_version.py +16 -15
- pulumiverse_scaleway/get_kubernetes_cluster.py +39 -38
- pulumiverse_scaleway/get_kubernetes_node_pool.py +59 -47
- pulumiverse_scaleway/get_lb_acls.py +22 -21
- pulumiverse_scaleway/get_lb_backend.py +43 -42
- pulumiverse_scaleway/get_lb_backends.py +22 -21
- pulumiverse_scaleway/get_lb_frontend.py +26 -25
- pulumiverse_scaleway/get_lb_frontends.py +22 -21
- pulumiverse_scaleway/get_lb_ips.py +29 -28
- pulumiverse_scaleway/get_lb_route.py +16 -15
- pulumiverse_scaleway/get_lb_routes.py +17 -16
- pulumiverse_scaleway/get_lbs.py +24 -23
- pulumiverse_scaleway/get_loadbalancer.py +36 -35
- pulumiverse_scaleway/get_loadbalancer_certificate.py +24 -23
- pulumiverse_scaleway/get_loadbalancer_ip.py +29 -28
- pulumiverse_scaleway/get_marketplace_image.py +23 -22
- pulumiverse_scaleway/get_mnq_sns.py +14 -13
- pulumiverse_scaleway/get_mnq_sqs.py +14 -13
- pulumiverse_scaleway/get_mongo_db_instance.py +35 -34
- pulumiverse_scaleway/get_object_bucket.py +22 -21
- pulumiverse_scaleway/get_object_bucket_policy.py +17 -16
- pulumiverse_scaleway/get_redis_cluster.py +34 -33
- pulumiverse_scaleway/get_registry_image.py +37 -36
- pulumiverse_scaleway/get_registry_image_tag.py +33 -32
- pulumiverse_scaleway/get_registry_namespace.py +27 -26
- pulumiverse_scaleway/get_secret.py +41 -40
- pulumiverse_scaleway/get_secret_version.py +34 -33
- pulumiverse_scaleway/get_tem_domain.py +43 -42
- pulumiverse_scaleway/get_vpc.py +47 -35
- pulumiverse_scaleway/get_vpc_gateway_network.py +28 -27
- pulumiverse_scaleway/get_vpc_private_network.py +44 -32
- pulumiverse_scaleway/get_vpc_public_gateway.py +36 -35
- pulumiverse_scaleway/get_vpc_public_gateway_dhcp.py +24 -23
- pulumiverse_scaleway/get_vpc_public_gateway_dhcp_reservation.py +33 -32
- pulumiverse_scaleway/get_vpc_public_gateway_ip.py +14 -13
- pulumiverse_scaleway/get_vpc_public_pat_rule.py +21 -20
- pulumiverse_scaleway/get_vpc_routes.py +38 -37
- pulumiverse_scaleway/get_vpcs.py +24 -23
- pulumiverse_scaleway/get_web_host_offer.py +25 -24
- pulumiverse_scaleway/get_webhosting.py +36 -35
- pulumiverse_scaleway/hosting/__init__.py +2 -1
- pulumiverse_scaleway/hosting/_inputs.py +61 -60
- pulumiverse_scaleway/hosting/get_hosting.py +36 -35
- pulumiverse_scaleway/hosting/get_offer.py +25 -24
- pulumiverse_scaleway/hosting/hosting.py +164 -162
- pulumiverse_scaleway/hosting/outputs.py +139 -138
- pulumiverse_scaleway/iam/__init__.py +2 -1
- pulumiverse_scaleway/iam/_inputs.py +22 -21
- pulumiverse_scaleway/iam/api_key.py +115 -113
- pulumiverse_scaleway/iam/application.py +80 -78
- pulumiverse_scaleway/iam/get_api_key.py +17 -16
- pulumiverse_scaleway/iam/get_application.py +23 -22
- pulumiverse_scaleway/iam/get_group.py +25 -24
- pulumiverse_scaleway/iam/get_ssh_key.py +24 -23
- pulumiverse_scaleway/iam/get_user.py +23 -22
- pulumiverse_scaleway/iam/group.py +115 -113
- pulumiverse_scaleway/iam/group_membership.py +45 -43
- pulumiverse_scaleway/iam/outputs.py +14 -13
- pulumiverse_scaleway/iam/policy.py +136 -134
- pulumiverse_scaleway/iam/ssh_key.py +87 -85
- pulumiverse_scaleway/iam/user.py +284 -273
- pulumiverse_scaleway/iam_api_key.py +115 -113
- pulumiverse_scaleway/iam_application.py +80 -78
- pulumiverse_scaleway/iam_group.py +115 -113
- pulumiverse_scaleway/iam_group_membership.py +45 -43
- pulumiverse_scaleway/iam_policy.py +136 -134
- pulumiverse_scaleway/iam_ssh_key.py +87 -85
- pulumiverse_scaleway/iam_user.py +284 -273
- pulumiverse_scaleway/inference/__init__.py +2 -1
- pulumiverse_scaleway/inference/_inputs.py +116 -61
- pulumiverse_scaleway/inference/deployment.py +225 -176
- pulumiverse_scaleway/inference/get_model.py +27 -26
- pulumiverse_scaleway/inference/model.py +129 -127
- pulumiverse_scaleway/inference/outputs.py +82 -49
- pulumiverse_scaleway/inference_deployment.py +225 -176
- pulumiverse_scaleway/instance/__init__.py +2 -1
- pulumiverse_scaleway/instance/_inputs.py +296 -295
- pulumiverse_scaleway/instance/get_image.py +42 -41
- pulumiverse_scaleway/instance/get_ip.py +20 -19
- pulumiverse_scaleway/instance/get_placement_group.py +28 -27
- pulumiverse_scaleway/instance/get_private_nic.py +31 -30
- pulumiverse_scaleway/instance/get_security_group.py +31 -30
- pulumiverse_scaleway/instance/get_server.py +59 -47
- pulumiverse_scaleway/instance/get_servers.py +24 -23
- pulumiverse_scaleway/instance/get_snapshot.py +29 -28
- pulumiverse_scaleway/instance/get_volume.py +30 -29
- pulumiverse_scaleway/instance/image.py +150 -148
- pulumiverse_scaleway/instance/ip.py +94 -92
- pulumiverse_scaleway/instance/ip_reverse_dns.py +45 -43
- pulumiverse_scaleway/instance/outputs.py +349 -348
- pulumiverse_scaleway/instance/placement_group.py +101 -99
- pulumiverse_scaleway/instance/private_nic.py +94 -92
- pulumiverse_scaleway/instance/security_group.py +150 -148
- pulumiverse_scaleway/instance/security_group_rules.py +17 -15
- pulumiverse_scaleway/instance/server.py +393 -330
- pulumiverse_scaleway/instance/snapshot.py +108 -106
- pulumiverse_scaleway/instance/user_data.py +59 -57
- pulumiverse_scaleway/instance/volume.py +129 -127
- pulumiverse_scaleway/instance_image.py +150 -148
- pulumiverse_scaleway/instance_ip.py +94 -92
- pulumiverse_scaleway/instance_ip_reverse_dns.py +45 -43
- pulumiverse_scaleway/instance_placement_group.py +101 -99
- pulumiverse_scaleway/instance_private_nic.py +94 -92
- pulumiverse_scaleway/instance_security_group.py +150 -148
- pulumiverse_scaleway/instance_security_group_rules.py +17 -15
- pulumiverse_scaleway/instance_server.py +393 -330
- pulumiverse_scaleway/instance_snapshot.py +108 -106
- pulumiverse_scaleway/instance_user_data.py +59 -57
- pulumiverse_scaleway/instance_volume.py +129 -127
- pulumiverse_scaleway/iot/__init__.py +2 -1
- pulumiverse_scaleway/iot/_inputs.py +98 -97
- pulumiverse_scaleway/iot/device.py +122 -120
- pulumiverse_scaleway/iot/get_device.py +31 -30
- pulumiverse_scaleway/iot/get_hub.py +39 -38
- pulumiverse_scaleway/iot/hub.py +206 -204
- pulumiverse_scaleway/iot/network.py +94 -92
- pulumiverse_scaleway/iot/outputs.py +78 -77
- pulumiverse_scaleway/iot/route.py +66 -64
- pulumiverse_scaleway/iot_device.py +122 -120
- pulumiverse_scaleway/iot_hub.py +206 -204
- pulumiverse_scaleway/iot_network.py +94 -92
- pulumiverse_scaleway/iot_route.py +66 -64
- pulumiverse_scaleway/ipam/__init__.py +2 -1
- pulumiverse_scaleway/ipam/_inputs.py +87 -86
- pulumiverse_scaleway/ipam/get_ip.py +51 -50
- pulumiverse_scaleway/ipam/get_ips.py +44 -43
- pulumiverse_scaleway/ipam/ip.py +94 -92
- pulumiverse_scaleway/ipam/ip_reverse_dns.py +59 -57
- pulumiverse_scaleway/ipam/outputs.py +90 -89
- pulumiverse_scaleway/ipam_ip.py +94 -92
- pulumiverse_scaleway/ipam_ip_reverse_dns.py +59 -57
- pulumiverse_scaleway/job/__init__.py +2 -1
- pulumiverse_scaleway/job/_inputs.py +38 -37
- pulumiverse_scaleway/job/definition.py +143 -141
- pulumiverse_scaleway/job/outputs.py +24 -23
- pulumiverse_scaleway/job_definition.py +143 -141
- pulumiverse_scaleway/kubernetes/__init__.py +2 -1
- pulumiverse_scaleway/kubernetes/_inputs.py +190 -189
- pulumiverse_scaleway/kubernetes/acl.py +45 -43
- pulumiverse_scaleway/kubernetes/cluster.py +234 -232
- pulumiverse_scaleway/kubernetes/get_cluster.py +39 -38
- pulumiverse_scaleway/kubernetes/get_pool.py +59 -47
- pulumiverse_scaleway/kubernetes/get_version.py +16 -15
- pulumiverse_scaleway/kubernetes/outputs.py +217 -216
- pulumiverse_scaleway/kubernetes/pool.py +351 -288
- pulumiverse_scaleway/kubernetes_cluster.py +234 -232
- pulumiverse_scaleway/kubernetes_node_pool.py +351 -288
- pulumiverse_scaleway/loadbalancer.py +199 -197
- pulumiverse_scaleway/loadbalancer_acl.py +80 -78
- pulumiverse_scaleway/loadbalancer_backend.py +381 -379
- pulumiverse_scaleway/loadbalancer_certificate.py +73 -71
- pulumiverse_scaleway/loadbalancer_frontend.py +136 -134
- pulumiverse_scaleway/loadbalancer_ip.py +101 -99
- pulumiverse_scaleway/loadbalancer_route.py +101 -99
- pulumiverse_scaleway/loadbalancers/__init__.py +2 -1
- pulumiverse_scaleway/loadbalancers/_inputs.py +263 -222
- pulumiverse_scaleway/loadbalancers/acl.py +80 -78
- pulumiverse_scaleway/loadbalancers/backend.py +381 -379
- pulumiverse_scaleway/loadbalancers/certificate.py +73 -71
- pulumiverse_scaleway/loadbalancers/frontend.py +136 -134
- pulumiverse_scaleway/loadbalancers/get_acls.py +22 -21
- pulumiverse_scaleway/loadbalancers/get_backend.py +43 -42
- pulumiverse_scaleway/loadbalancers/get_backends.py +22 -21
- pulumiverse_scaleway/loadbalancers/get_certificate.py +24 -23
- pulumiverse_scaleway/loadbalancers/get_frontend.py +26 -25
- pulumiverse_scaleway/loadbalancers/get_frontends.py +22 -21
- pulumiverse_scaleway/loadbalancers/get_ip.py +29 -28
- pulumiverse_scaleway/loadbalancers/get_ips.py +29 -28
- pulumiverse_scaleway/loadbalancers/get_load_balancer.py +36 -35
- pulumiverse_scaleway/loadbalancers/get_load_balancers.py +24 -23
- pulumiverse_scaleway/loadbalancers/get_route.py +16 -15
- pulumiverse_scaleway/loadbalancers/get_routes.py +17 -16
- pulumiverse_scaleway/loadbalancers/ip.py +101 -99
- pulumiverse_scaleway/loadbalancers/load_balancer.py +199 -197
- pulumiverse_scaleway/loadbalancers/outputs.py +581 -530
- pulumiverse_scaleway/loadbalancers/route.py +101 -99
- pulumiverse_scaleway/mnq/__init__.py +2 -1
- pulumiverse_scaleway/mnq/_inputs.py +32 -31
- pulumiverse_scaleway/mnq/get_sns.py +14 -13
- pulumiverse_scaleway/mnq/get_sqs.py +14 -13
- pulumiverse_scaleway/mnq/nats_account.py +52 -50
- pulumiverse_scaleway/mnq/nats_credentials.py +52 -50
- pulumiverse_scaleway/mnq/outputs.py +20 -19
- pulumiverse_scaleway/mnq/sns.py +38 -36
- pulumiverse_scaleway/mnq/sns_credentials.py +59 -57
- pulumiverse_scaleway/mnq/sns_topic.py +143 -141
- pulumiverse_scaleway/mnq/sns_topic_subscription.py +150 -148
- pulumiverse_scaleway/mnq/sqs.py +38 -36
- pulumiverse_scaleway/mnq/sqs_credentials.py +59 -57
- pulumiverse_scaleway/mnq/sqs_queue.py +192 -190
- pulumiverse_scaleway/mnq_nats_account.py +52 -50
- pulumiverse_scaleway/mnq_nats_credentials.py +52 -50
- pulumiverse_scaleway/mnq_sns.py +38 -36
- pulumiverse_scaleway/mnq_sns_credentials.py +59 -57
- pulumiverse_scaleway/mnq_sns_topic.py +143 -141
- pulumiverse_scaleway/mnq_sns_topic_subscription.py +150 -148
- pulumiverse_scaleway/mnq_sqs.py +38 -36
- pulumiverse_scaleway/mnq_sqs_credentials.py +59 -57
- pulumiverse_scaleway/mnq_sqs_queue.py +192 -190
- pulumiverse_scaleway/mongo_db_instance.py +199 -197
- pulumiverse_scaleway/mongo_db_snapshot.py +101 -99
- pulumiverse_scaleway/mongodb/__init__.py +2 -1
- pulumiverse_scaleway/mongodb/_inputs.py +53 -52
- pulumiverse_scaleway/mongodb/get_instance.py +35 -34
- pulumiverse_scaleway/mongodb/instance.py +199 -197
- pulumiverse_scaleway/mongodb/outputs.py +64 -63
- pulumiverse_scaleway/mongodb/snapshot.py +101 -99
- pulumiverse_scaleway/network/__init__.py +2 -1
- pulumiverse_scaleway/network/_inputs.py +140 -139
- pulumiverse_scaleway/network/acl.py +59 -57
- pulumiverse_scaleway/network/gateway_network.py +143 -141
- pulumiverse_scaleway/network/get_gateway_network.py +28 -27
- pulumiverse_scaleway/network/get_private_network.py +44 -32
- pulumiverse_scaleway/network/get_public_gateway.py +36 -35
- pulumiverse_scaleway/network/get_public_gateway_dhcp.py +24 -23
- pulumiverse_scaleway/network/get_public_gateway_dhcp_reservation.py +33 -32
- pulumiverse_scaleway/network/get_public_gateway_ip.py +14 -13
- pulumiverse_scaleway/network/get_public_gateway_pat_rule.py +21 -20
- pulumiverse_scaleway/network/get_routes.py +38 -37
- pulumiverse_scaleway/network/get_vpc.py +47 -35
- pulumiverse_scaleway/network/get_vpcs.py +24 -23
- pulumiverse_scaleway/network/outputs.py +206 -205
- pulumiverse_scaleway/network/private_network.py +170 -121
- pulumiverse_scaleway/network/public_gateway.py +213 -211
- pulumiverse_scaleway/network/public_gateway_dhcp.py +234 -232
- pulumiverse_scaleway/network/public_gateway_dhcp_reservation.py +87 -85
- pulumiverse_scaleway/network/public_gateway_ip.py +87 -85
- pulumiverse_scaleway/network/public_gateway_ip_reverse_dns.py +45 -43
- pulumiverse_scaleway/network/public_gateway_pat_rule.py +108 -106
- pulumiverse_scaleway/network/route.py +115 -113
- pulumiverse_scaleway/network/vpc.py +149 -100
- pulumiverse_scaleway/object/__init__.py +2 -1
- pulumiverse_scaleway/object/_inputs.py +157 -134
- pulumiverse_scaleway/object/bucket.py +115 -113
- pulumiverse_scaleway/object/bucket_acl.py +79 -75
- pulumiverse_scaleway/object/bucket_lock_configuration.py +45 -43
- pulumiverse_scaleway/object/bucket_policy.py +59 -57
- pulumiverse_scaleway/object/bucket_website_configuration.py +85 -65
- pulumiverse_scaleway/object/get_bucket.py +22 -21
- pulumiverse_scaleway/object/get_bucket_policy.py +17 -16
- pulumiverse_scaleway/object/item.py +232 -183
- pulumiverse_scaleway/object/outputs.py +133 -118
- pulumiverse_scaleway/object_bucket.py +115 -113
- pulumiverse_scaleway/object_bucket_acl.py +79 -75
- pulumiverse_scaleway/object_bucket_lock_configuration.py +45 -43
- pulumiverse_scaleway/object_bucket_policy.py +59 -57
- pulumiverse_scaleway/object_bucket_website_configuration.py +85 -65
- pulumiverse_scaleway/object_item.py +232 -183
- pulumiverse_scaleway/observability/__init__.py +2 -1
- pulumiverse_scaleway/observability/_inputs.py +89 -88
- pulumiverse_scaleway/observability/alert_manager.py +52 -50
- pulumiverse_scaleway/observability/cockpit.py +38 -36
- pulumiverse_scaleway/observability/get_instance.py +10 -9
- pulumiverse_scaleway/observability/get_source.py +77 -33
- pulumiverse_scaleway/observability/grafana_user.py +59 -57
- pulumiverse_scaleway/observability/outputs.py +77 -76
- pulumiverse_scaleway/observability/source.py +115 -113
- pulumiverse_scaleway/observability/token.py +66 -64
- pulumiverse_scaleway/outputs.py +2870 -2741
- pulumiverse_scaleway/provider.py +87 -65
- pulumiverse_scaleway/pulumi-plugin.json +1 -1
- pulumiverse_scaleway/rdb_snapshot.py +101 -99
- pulumiverse_scaleway/redis/__init__.py +2 -1
- pulumiverse_scaleway/redis/_inputs.py +63 -62
- pulumiverse_scaleway/redis/cluster.py +178 -176
- pulumiverse_scaleway/redis/get_cluster.py +34 -33
- pulumiverse_scaleway/redis/outputs.py +74 -73
- pulumiverse_scaleway/redis_cluster.py +178 -176
- pulumiverse_scaleway/registry/__init__.py +2 -1
- pulumiverse_scaleway/registry/get_image.py +37 -36
- pulumiverse_scaleway/registry/get_image_tag.py +33 -32
- pulumiverse_scaleway/registry/get_namespace.py +27 -26
- pulumiverse_scaleway/registry/namespace.py +87 -85
- pulumiverse_scaleway/registry_namespace.py +87 -85
- pulumiverse_scaleway/sdb_database.py +80 -78
- pulumiverse_scaleway/secret.py +143 -141
- pulumiverse_scaleway/secret_version.py +87 -85
- pulumiverse_scaleway/secrets/__init__.py +2 -1
- pulumiverse_scaleway/secrets/_inputs.py +53 -52
- pulumiverse_scaleway/secrets/get_secret.py +41 -40
- pulumiverse_scaleway/secrets/get_version.py +34 -33
- pulumiverse_scaleway/secrets/outputs.py +64 -63
- pulumiverse_scaleway/secrets/secret.py +143 -141
- pulumiverse_scaleway/secrets/version.py +87 -85
- pulumiverse_scaleway/tem/__init__.py +2 -1
- pulumiverse_scaleway/tem/_inputs.py +28 -27
- pulumiverse_scaleway/tem/blocked_list.py +87 -85
- pulumiverse_scaleway/tem/domain.py +199 -197
- pulumiverse_scaleway/tem/domain_validation.py +52 -50
- pulumiverse_scaleway/tem/get_domain.py +43 -42
- pulumiverse_scaleway/tem/get_offer_subscription.py +22 -21
- pulumiverse_scaleway/tem/outputs.py +34 -33
- pulumiverse_scaleway/tem/webhook.py +108 -106
- pulumiverse_scaleway/tem_domain.py +199 -197
- pulumiverse_scaleway/tem_domain_validation.py +52 -50
- pulumiverse_scaleway/tem_webhook.py +108 -106
- pulumiverse_scaleway/vpc.py +149 -100
- pulumiverse_scaleway/vpc_gateway_network.py +143 -141
- pulumiverse_scaleway/vpc_private_network.py +170 -121
- pulumiverse_scaleway/vpc_public_gateway.py +213 -211
- pulumiverse_scaleway/vpc_public_gateway_dhcp.py +234 -232
- pulumiverse_scaleway/vpc_public_gateway_dhcp_reservation.py +87 -85
- pulumiverse_scaleway/vpc_public_gateway_ip.py +87 -85
- pulumiverse_scaleway/vpc_public_gateway_ip_reverse_dns.py +45 -43
- pulumiverse_scaleway/vpc_public_gateway_pat_rule.py +108 -106
- pulumiverse_scaleway/vpc_route.py +115 -113
- pulumiverse_scaleway/webhosting.py +164 -162
- {pulumiverse_scaleway-1.30.0a1750140900.dist-info → pulumiverse_scaleway-1.31.0.dist-info}/METADATA +2 -2
- pulumiverse_scaleway-1.31.0.dist-info/RECORD +496 -0
- pulumiverse_scaleway/elasticmetal/get_easy_partitioning.py +0 -177
- pulumiverse_scaleway/get_cockpit_plan.py +0 -94
- pulumiverse_scaleway/observability/get_plan.py +0 -90
- pulumiverse_scaleway-1.30.0a1750140900.dist-info/RECORD +0 -492
- {pulumiverse_scaleway-1.30.0a1750140900.dist-info → pulumiverse_scaleway-1.31.0.dist-info}/WHEEL +0 -0
- {pulumiverse_scaleway-1.30.0a1750140900.dist-info → pulumiverse_scaleway-1.31.0.dist-info}/top_level.txt +0 -0
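Beyond the regenerated per-file churn, the listing shows a brand-new `pulumiverse_scaleway/autoscaling` package (instance_group.py, instance_policy.py, instance_template.py) and a new `file_filesystem.py` resource, while `get_cockpit_plan.py`, `observability/get_plan.py` and `elasticmetal/get_easy_partitioning.py` are dropped. A quick, hedged way to see what the new module actually exports after upgrading — only the import path below is taken from the listing; the printed names depend entirely on the installed SDK:

```python
# Minimal sketch: inspect what the new autoscaling module exposes after
# upgrading to pulumiverse-scaleway 1.31.0. The import path comes from the
# file listing above (pulumiverse_scaleway/autoscaling/__init__.py); the
# names printed (e.g. the resources generated from instance_group.py,
# instance_policy.py and instance_template.py) depend on the installed SDK.
import pulumiverse_scaleway.autoscaling as autoscaling

public_names = sorted(n for n in dir(autoscaling) if not n.startswith("_"))
print(public_names)
```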
@@ -1,7 +1,8 @@
 # coding=utf-8
-# *** WARNING: this file was generated by
+# *** WARNING: this file was generated by pulumi-language-python. ***
 # *** Do not edit by hand unless you're certain you know what you are doing! ***
 
+import builtins
 import copy
 import warnings
 import sys
@@ -53,17 +54,17 @@ class AclAclRule(dict):
         return super().get(key, default)
 
     def __init__(__self__, *,
-                 description: Optional[str] = None,
-                 id: Optional[str] = None,
-                 ip: Optional[str] = None,
-                 scaleway_ranges: Optional[bool] = None):
-        """
-        :param str description: A text describing this rule.
-        :param str id: The ID of the ACL resource. It is the same as the ID of the cluster.
-        :param str ip: The IP range to whitelist in [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)
+                 description: Optional[builtins.str] = None,
+                 id: Optional[builtins.str] = None,
+                 ip: Optional[builtins.str] = None,
+                 scaleway_ranges: Optional[builtins.bool] = None):
+        """
+        :param builtins.str description: A text describing this rule.
+        :param builtins.str id: The ID of the ACL resource. It is the same as the ID of the cluster.
+        :param builtins.str ip: The IP range to whitelist in [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)
 
         > **Important:** If the `ip` field is set, `scaleway_ranges` cannot be set to true in the same rule.
-        :param bool scaleway_ranges: Allow access to cluster from all Scaleway ranges as defined in [Scaleway Network Information - IP ranges used by Scaleway](https://www.scaleway.com/en/docs/console/account/reference-content/scaleway-network-information/#ip-ranges-used-by-scaleway).
+        :param builtins.bool scaleway_ranges: Allow access to cluster from all Scaleway ranges as defined in [Scaleway Network Information - IP ranges used by Scaleway](https://www.scaleway.com/en/docs/console/account/reference-content/scaleway-network-information/#ip-ranges-used-by-scaleway).
         Only one rule with this field set to true can be added.
 
         > **Important:** If the `scaleway_ranges` field is set to true, the `ip` field cannot be set on the same rule.
@@ -79,7 +80,7 @@ class AclAclRule(dict):
 
     @property
     @pulumi.getter
-    def description(self) -> Optional[str]:
+    def description(self) -> Optional[builtins.str]:
         """
         A text describing this rule.
         """
@@ -87,7 +88,7 @@ class AclAclRule(dict):
 
     @property
     @pulumi.getter
-    def id(self) -> Optional[str]:
+    def id(self) -> Optional[builtins.str]:
         """
         The ID of the ACL resource. It is the same as the ID of the cluster.
         """
@@ -95,7 +96,7 @@ class AclAclRule(dict):
 
     @property
     @pulumi.getter
-    def ip(self) -> Optional[str]:
+    def ip(self) -> Optional[builtins.str]:
         """
         The IP range to whitelist in [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)
 
@@ -105,7 +106,7 @@ class AclAclRule(dict):
 
     @property
     @pulumi.getter(name="scalewayRanges")
-    def scaleway_ranges(self) -> Optional[bool]:
+    def scaleway_ranges(self) -> Optional[builtins.bool]:
         """
         Allow access to cluster from all Scaleway ranges as defined in [Scaleway Network Information - IP ranges used by Scaleway](https://www.scaleway.com/en/docs/console/account/reference-content/scaleway-network-information/#ip-ranges-used-by-scaleway).
         Only one rule with this field set to true can be added.
@@ -137,14 +138,14 @@ class ClusterAutoUpgrade(dict):
         return super().get(key, default)
 
     def __init__(__self__, *,
-                 enable: bool,
-                 maintenance_window_day: str,
-                 maintenance_window_start_hour: int):
+                 enable: builtins.bool,
+                 maintenance_window_day: builtins.str,
+                 maintenance_window_start_hour: builtins.int):
         """
-        :param bool enable: Set to `true` to enable Kubernetes patch version auto upgrades.
+        :param builtins.bool enable: Set to `true` to enable Kubernetes patch version auto upgrades.
         > **Important:** When enabling auto upgrades, the `version` field take a minor version like x.y (ie 1.18).
-        :param str maintenance_window_day: The day of the auto upgrade maintenance window (`monday` to `sunday`, or `any`).
-        :param int maintenance_window_start_hour: The start hour (UTC) of the 2-hour auto upgrade maintenance window (0 to 23).
+        :param builtins.str maintenance_window_day: The day of the auto upgrade maintenance window (`monday` to `sunday`, or `any`).
+        :param builtins.int maintenance_window_start_hour: The start hour (UTC) of the 2-hour auto upgrade maintenance window (0 to 23).
         """
         pulumi.set(__self__, "enable", enable)
         pulumi.set(__self__, "maintenance_window_day", maintenance_window_day)
@@ -152,7 +153,7 @@ class ClusterAutoUpgrade(dict):
 
     @property
     @pulumi.getter
-    def enable(self) -> bool:
+    def enable(self) -> builtins.bool:
         """
         Set to `true` to enable Kubernetes patch version auto upgrades.
         > **Important:** When enabling auto upgrades, the `version` field take a minor version like x.y (ie 1.18).
@@ -161,7 +162,7 @@ class ClusterAutoUpgrade(dict):
 
     @property
     @pulumi.getter(name="maintenanceWindowDay")
-    def maintenance_window_day(self) -> str:
+    def maintenance_window_day(self) -> builtins.str:
         """
         The day of the auto upgrade maintenance window (`monday` to `sunday`, or `any`).
         """
@@ -169,7 +170,7 @@ class ClusterAutoUpgrade(dict):
 
     @property
     @pulumi.getter(name="maintenanceWindowStartHour")
-    def maintenance_window_start_hour(self) -> int:
+    def maintenance_window_start_hour(self) -> builtins.int:
         """
         The start hour (UTC) of the 2-hour auto upgrade maintenance window (0 to 23).
         """
@@ -210,27 +211,27 @@ class ClusterAutoscalerConfig(dict):
         return super().get(key, default)
 
     def __init__(__self__, *,
-                 balance_similar_node_groups: Optional[bool] = None,
-                 disable_scale_down: Optional[bool] = None,
-                 estimator: Optional[str] = None,
-                 expander: Optional[str] = None,
-                 expendable_pods_priority_cutoff: Optional[int] = None,
-                 ignore_daemonsets_utilization: Optional[bool] = None,
-                 max_graceful_termination_sec: Optional[int] = None,
-                 scale_down_delay_after_add: Optional[str] = None,
-                 scale_down_unneeded_time: Optional[str] = None,
-                 scale_down_utilization_threshold: Optional[float] = None):
-        """
-        :param bool balance_similar_node_groups: Detect similar node groups and balance the number of nodes between them.
-        :param bool disable_scale_down: Disables the scale down feature of the autoscaler.
-        :param str estimator: Type of resource estimator to be used in scale up.
-        :param str expander: Type of node group expander to be used in scale up.
-        :param int expendable_pods_priority_cutoff: Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.
-        :param bool ignore_daemonsets_utilization: Ignore DaemonSet pods when calculating resource utilization for scaling down.
-        :param int max_graceful_termination_sec: Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node
-        :param str scale_down_delay_after_add: How long after scale up that scale down evaluation resumes.
-        :param str scale_down_unneeded_time: How long a node should be unneeded before it is eligible for scale down.
-        :param float scale_down_utilization_threshold: Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down
+                 balance_similar_node_groups: Optional[builtins.bool] = None,
+                 disable_scale_down: Optional[builtins.bool] = None,
+                 estimator: Optional[builtins.str] = None,
+                 expander: Optional[builtins.str] = None,
+                 expendable_pods_priority_cutoff: Optional[builtins.int] = None,
+                 ignore_daemonsets_utilization: Optional[builtins.bool] = None,
+                 max_graceful_termination_sec: Optional[builtins.int] = None,
+                 scale_down_delay_after_add: Optional[builtins.str] = None,
+                 scale_down_unneeded_time: Optional[builtins.str] = None,
+                 scale_down_utilization_threshold: Optional[builtins.float] = None):
+        """
+        :param builtins.bool balance_similar_node_groups: Detect similar node groups and balance the number of nodes between them.
+        :param builtins.bool disable_scale_down: Disables the scale down feature of the autoscaler.
+        :param builtins.str estimator: Type of resource estimator to be used in scale up.
+        :param builtins.str expander: Type of node group expander to be used in scale up.
+        :param builtins.int expendable_pods_priority_cutoff: Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.
+        :param builtins.bool ignore_daemonsets_utilization: Ignore DaemonSet pods when calculating resource utilization for scaling down.
+        :param builtins.int max_graceful_termination_sec: Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node
+        :param builtins.str scale_down_delay_after_add: How long after scale up that scale down evaluation resumes.
+        :param builtins.str scale_down_unneeded_time: How long a node should be unneeded before it is eligible for scale down.
+        :param builtins.float scale_down_utilization_threshold: Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down
         """
         if balance_similar_node_groups is not None:
             pulumi.set(__self__, "balance_similar_node_groups", balance_similar_node_groups)
@@ -255,7 +256,7 @@ class ClusterAutoscalerConfig(dict):
 
     @property
     @pulumi.getter(name="balanceSimilarNodeGroups")
-    def balance_similar_node_groups(self) -> Optional[bool]:
+    def balance_similar_node_groups(self) -> Optional[builtins.bool]:
         """
         Detect similar node groups and balance the number of nodes between them.
         """
@@ -263,7 +264,7 @@ class ClusterAutoscalerConfig(dict):
 
     @property
     @pulumi.getter(name="disableScaleDown")
-    def disable_scale_down(self) -> Optional[bool]:
+    def disable_scale_down(self) -> Optional[builtins.bool]:
         """
         Disables the scale down feature of the autoscaler.
         """
@@ -271,7 +272,7 @@ class ClusterAutoscalerConfig(dict):
 
     @property
     @pulumi.getter
-    def estimator(self) -> Optional[str]:
+    def estimator(self) -> Optional[builtins.str]:
         """
         Type of resource estimator to be used in scale up.
         """
@@ -279,7 +280,7 @@ class ClusterAutoscalerConfig(dict):
 
     @property
     @pulumi.getter
-    def expander(self) -> Optional[str]:
+    def expander(self) -> Optional[builtins.str]:
         """
         Type of node group expander to be used in scale up.
         """
@@ -287,7 +288,7 @@ class ClusterAutoscalerConfig(dict):
 
     @property
     @pulumi.getter(name="expendablePodsPriorityCutoff")
-    def expendable_pods_priority_cutoff(self) -> Optional[int]:
+    def expendable_pods_priority_cutoff(self) -> Optional[builtins.int]:
         """
         Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.
         """
@@ -295,7 +296,7 @@ class ClusterAutoscalerConfig(dict):
 
     @property
     @pulumi.getter(name="ignoreDaemonsetsUtilization")
-    def ignore_daemonsets_utilization(self) -> Optional[bool]:
+    def ignore_daemonsets_utilization(self) -> Optional[builtins.bool]:
         """
         Ignore DaemonSet pods when calculating resource utilization for scaling down.
         """
@@ -303,7 +304,7 @@ class ClusterAutoscalerConfig(dict):
 
     @property
     @pulumi.getter(name="maxGracefulTerminationSec")
-    def max_graceful_termination_sec(self) -> Optional[int]:
+    def max_graceful_termination_sec(self) -> Optional[builtins.int]:
         """
         Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node
         """
@@ -311,7 +312,7 @@ class ClusterAutoscalerConfig(dict):
 
     @property
     @pulumi.getter(name="scaleDownDelayAfterAdd")
-    def scale_down_delay_after_add(self) -> Optional[str]:
+    def scale_down_delay_after_add(self) -> Optional[builtins.str]:
         """
         How long after scale up that scale down evaluation resumes.
         """
@@ -319,7 +320,7 @@ class ClusterAutoscalerConfig(dict):
 
     @property
     @pulumi.getter(name="scaleDownUnneededTime")
-    def scale_down_unneeded_time(self) -> Optional[str]:
+    def scale_down_unneeded_time(self) -> Optional[builtins.str]:
         """
         How long a node should be unneeded before it is eligible for scale down.
         """
@@ -327,7 +328,7 @@ class ClusterAutoscalerConfig(dict):
 
     @property
     @pulumi.getter(name="scaleDownUtilizationThreshold")
-    def scale_down_utilization_threshold(self) -> Optional[float]:
+    def scale_down_utilization_threshold(self) -> Optional[builtins.float]:
         """
         Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down
         """
@@ -356,15 +357,15 @@ class ClusterKubeconfig(dict):
         return super().get(key, default)
 
     def __init__(__self__, *,
-                 cluster_ca_certificate: Optional[str] = None,
-                 config_file: Optional[str] = None,
-                 host: Optional[str] = None,
-                 token: Optional[str] = None):
+                 cluster_ca_certificate: Optional[builtins.str] = None,
+                 config_file: Optional[builtins.str] = None,
+                 host: Optional[builtins.str] = None,
+                 token: Optional[builtins.str] = None):
         """
-        :param str cluster_ca_certificate: The CA certificate of the Kubernetes API server.
-        :param str config_file: The raw kubeconfig file.
-        :param str host: The URL of the Kubernetes API server.
-        :param str token: The token to connect to the Kubernetes API server.
+        :param builtins.str cluster_ca_certificate: The CA certificate of the Kubernetes API server.
+        :param builtins.str config_file: The raw kubeconfig file.
+        :param builtins.str host: The URL of the Kubernetes API server.
+        :param builtins.str token: The token to connect to the Kubernetes API server.
         """
         if cluster_ca_certificate is not None:
             pulumi.set(__self__, "cluster_ca_certificate", cluster_ca_certificate)
@@ -377,7 +378,7 @@ class ClusterKubeconfig(dict):
 
     @property
     @pulumi.getter(name="clusterCaCertificate")
-    def cluster_ca_certificate(self) -> Optional[str]:
+    def cluster_ca_certificate(self) -> Optional[builtins.str]:
         """
         The CA certificate of the Kubernetes API server.
         """
@@ -385,7 +386,7 @@ class ClusterKubeconfig(dict):
 
     @property
     @pulumi.getter(name="configFile")
-    def config_file(self) -> Optional[str]:
+    def config_file(self) -> Optional[builtins.str]:
         """
         The raw kubeconfig file.
         """
@@ -393,7 +394,7 @@ class ClusterKubeconfig(dict):
 
     @property
     @pulumi.getter
-    def host(self) -> Optional[str]:
+    def host(self) -> Optional[builtins.str]:
         """
         The URL of the Kubernetes API server.
         """
@@ -401,7 +402,7 @@ class ClusterKubeconfig(dict):
 
     @property
     @pulumi.getter
-    def token(self) -> Optional[str]:
+    def token(self) -> Optional[builtins.str]:
         """
         The token to connect to the Kubernetes API server.
         """
@@ -440,21 +441,21 @@ class ClusterOpenIdConnectConfig(dict):
         return super().get(key, default)
 
     def __init__(__self__, *,
-                 client_id: str,
-                 issuer_url: str,
-                 groups_claims: Optional[Sequence[str]] = None,
-                 groups_prefix: Optional[str] = None,
-                 required_claims: Optional[Sequence[str]] = None,
-                 username_claim: Optional[str] = None,
-                 username_prefix: Optional[str] = None):
-        """
-        :param str client_id: A client id that all tokens must be issued for
-        :param str issuer_url: URL of the provider which allows the API server to discover public signing keys
-        :param Sequence[str] groups_claims: JWT claim to use as the user's group
-        :param str groups_prefix: Prefix prepended to group claims
-        :param Sequence[str] required_claims: Multiple key=value pairs that describes a required claim in the ID Token
-        :param str username_claim: JWT claim to use as the user name
-        :param str username_prefix: Prefix prepended to username
+                 client_id: builtins.str,
+                 issuer_url: builtins.str,
+                 groups_claims: Optional[Sequence[builtins.str]] = None,
+                 groups_prefix: Optional[builtins.str] = None,
+                 required_claims: Optional[Sequence[builtins.str]] = None,
+                 username_claim: Optional[builtins.str] = None,
+                 username_prefix: Optional[builtins.str] = None):
+        """
+        :param builtins.str client_id: A client id that all tokens must be issued for
+        :param builtins.str issuer_url: URL of the provider which allows the API server to discover public signing keys
+        :param Sequence[builtins.str] groups_claims: JWT claim to use as the user's group
+        :param builtins.str groups_prefix: Prefix prepended to group claims
+        :param Sequence[builtins.str] required_claims: Multiple key=value pairs that describes a required claim in the ID Token
+        :param builtins.str username_claim: JWT claim to use as the user name
+        :param builtins.str username_prefix: Prefix prepended to username
         """
         pulumi.set(__self__, "client_id", client_id)
         pulumi.set(__self__, "issuer_url", issuer_url)
@@ -471,7 +472,7 @@ class ClusterOpenIdConnectConfig(dict):
 
     @property
     @pulumi.getter(name="clientId")
-    def client_id(self) -> str:
+    def client_id(self) -> builtins.str:
         """
         A client id that all tokens must be issued for
         """
@@ -479,7 +480,7 @@ class ClusterOpenIdConnectConfig(dict):
 
     @property
     @pulumi.getter(name="issuerUrl")
-    def issuer_url(self) -> str:
+    def issuer_url(self) -> builtins.str:
         """
         URL of the provider which allows the API server to discover public signing keys
         """
@@ -487,7 +488,7 @@ class ClusterOpenIdConnectConfig(dict):
 
     @property
     @pulumi.getter(name="groupsClaims")
-    def groups_claims(self) -> Optional[Sequence[str]]:
+    def groups_claims(self) -> Optional[Sequence[builtins.str]]:
         """
         JWT claim to use as the user's group
         """
@@ -495,7 +496,7 @@ class ClusterOpenIdConnectConfig(dict):
 
    @property
     @pulumi.getter(name="groupsPrefix")
-    def groups_prefix(self) -> Optional[str]:
+    def groups_prefix(self) -> Optional[builtins.str]:
         """
         Prefix prepended to group claims
         """
@@ -503,7 +504,7 @@ class ClusterOpenIdConnectConfig(dict):
 
     @property
     @pulumi.getter(name="requiredClaims")
-    def required_claims(self) -> Optional[Sequence[str]]:
+    def required_claims(self) -> Optional[Sequence[builtins.str]]:
         """
         Multiple key=value pairs that describes a required claim in the ID Token
         """
@@ -511,7 +512,7 @@ class ClusterOpenIdConnectConfig(dict):
 
     @property
     @pulumi.getter(name="usernameClaim")
-    def username_claim(self) -> Optional[str]:
+    def username_claim(self) -> Optional[builtins.str]:
         """
         JWT claim to use as the user name
         """
@@ -519,7 +520,7 @@ class ClusterOpenIdConnectConfig(dict):
 
     @property
     @pulumi.getter(name="usernamePrefix")
-    def username_prefix(self) -> Optional[str]:
+    def username_prefix(self) -> Optional[builtins.str]:
         """
         Prefix prepended to username
         """
@@ -550,21 +551,21 @@ class PoolNode(dict):
         return super().get(key, default)
 
    def __init__(__self__, *,
-                 id: Optional[str] = None,
-                 name: Optional[str] = None,
+                 id: Optional[builtins.str] = None,
+                 name: Optional[builtins.str] = None,
                  private_ips: Optional[Sequence['outputs.PoolNodePrivateIp']] = None,
-                 public_ip: Optional[str] = None,
-                 public_ip_v6: Optional[str] = None,
-                 status: Optional[str] = None):
+                 public_ip: Optional[builtins.str] = None,
+                 public_ip_v6: Optional[builtins.str] = None,
+                 status: Optional[builtins.str] = None):
         """
-        :param str id: The ID of the IP address resource.
-        :param str name: The name for the pool.
+        :param builtins.str id: The ID of the IP address resource.
+        :param builtins.str name: The name for the pool.
 
         > **Important:** Updates to this field will recreate a new resource.
         :param Sequence['PoolNodePrivateIpArgs'] private_ips: The list of private IPv4 and IPv6 addresses associated with the node.
-        :param str public_ip: The public IPv4. (Deprecated, Please use the official Kubernetes provider and the kubernetes_nodes data source)
-        :param str public_ip_v6: The public IPv6. (Deprecated, Please use the official Kubernetes provider and the kubernetes_nodes data source)
-        :param str status: The status of the node.
+        :param builtins.str public_ip: The public IPv4. (Deprecated, Please use the official Kubernetes provider and the kubernetes_nodes data source)
+        :param builtins.str public_ip_v6: The public IPv6. (Deprecated, Please use the official Kubernetes provider and the kubernetes_nodes data source)
+        :param builtins.str status: The status of the node.
         """
         if id is not None:
             pulumi.set(__self__, "id", id)
@@ -581,7 +582,7 @@ class PoolNode(dict):
 
     @property
     @pulumi.getter
-    def id(self) -> Optional[str]:
+    def id(self) -> Optional[builtins.str]:
         """
         The ID of the IP address resource.
         """
@@ -589,7 +590,7 @@ class PoolNode(dict):
 
     @property
     @pulumi.getter
-    def name(self) -> Optional[str]:
+    def name(self) -> Optional[builtins.str]:
         """
         The name for the pool.
 
@@ -608,7 +609,7 @@ class PoolNode(dict):
     @property
     @pulumi.getter(name="publicIp")
     @_utilities.deprecated("""Please use the official Kubernetes provider and the kubernetes_nodes data source""")
-    def public_ip(self) -> Optional[str]:
+    def public_ip(self) -> Optional[builtins.str]:
         """
         The public IPv4. (Deprecated, Please use the official Kubernetes provider and the kubernetes_nodes data source)
         """
@@ -617,7 +618,7 @@ class PoolNode(dict):
     @property
     @pulumi.getter(name="publicIpV6")
     @_utilities.deprecated("""Please use the official Kubernetes provider and the kubernetes_nodes data source""")
-    def public_ip_v6(self) -> Optional[str]:
+    def public_ip_v6(self) -> Optional[builtins.str]:
         """
         The public IPv6. (Deprecated, Please use the official Kubernetes provider and the kubernetes_nodes data source)
         """
@@ -625,7 +626,7 @@ class PoolNode(dict):
 
     @property
     @pulumi.getter
-    def status(self) -> Optional[str]:
+    def status(self) -> Optional[builtins.str]:
         """
         The status of the node.
         """
@@ -635,11 +636,11 @@ class PoolNode(dict):
 @pulumi.output_type
 class PoolNodePrivateIp(dict):
     def __init__(__self__, *,
-                 address: Optional[str] = None,
-                 id: Optional[str] = None):
+                 address: Optional[builtins.str] = None,
+                 id: Optional[builtins.str] = None):
         """
-        :param str address: The private IP address.
-        :param str id: The ID of the IP address resource.
+        :param builtins.str address: The private IP address.
+        :param builtins.str id: The ID of the IP address resource.
         """
         if address is not None:
             pulumi.set(__self__, "address", address)
@@ -648,7 +649,7 @@ class PoolNodePrivateIp(dict):
 
     @property
     @pulumi.getter
-    def address(self) -> Optional[str]:
+    def address(self) -> Optional[builtins.str]:
         """
         The private IP address.
         """
@@ -656,7 +657,7 @@ class PoolNodePrivateIp(dict):
 
     @property
     @pulumi.getter
-    def id(self) -> Optional[str]:
+    def id(self) -> Optional[builtins.str]:
         """
         The ID of the IP address resource.
         """
@@ -685,11 +686,11 @@ class PoolUpgradePolicy(dict):
         return super().get(key, default)
 
     def __init__(__self__, *,
-                 max_surge: Optional[int] = None,
-                 max_unavailable: Optional[int] = None):
+                 max_surge: Optional[builtins.int] = None,
+                 max_unavailable: Optional[builtins.int] = None):
         """
-        :param int max_surge: The maximum number of nodes to be created during the upgrade
-        :param int max_unavailable: The maximum number of nodes that can be not ready at the same time
+        :param builtins.int max_surge: The maximum number of nodes to be created during the upgrade
+        :param builtins.int max_unavailable: The maximum number of nodes that can be not ready at the same time
         """
         if max_surge is not None:
             pulumi.set(__self__, "max_surge", max_surge)
@@ -698,7 +699,7 @@ class PoolUpgradePolicy(dict):
 
     @property
     @pulumi.getter(name="maxSurge")
-    def max_surge(self) -> Optional[int]:
+    def max_surge(self) -> Optional[builtins.int]:
         """
         The maximum number of nodes to be created during the upgrade
         """
@@ -706,7 +707,7 @@ class PoolUpgradePolicy(dict):
 
     @property
     @pulumi.getter(name="maxUnavailable")
-    def max_unavailable(self) -> Optional[int]:
+    def max_unavailable(self) -> Optional[builtins.int]:
         """
         The maximum number of nodes that can be not ready at the same time
         """
@@ -716,13 +717,13 @@ class PoolUpgradePolicy(dict):
 @pulumi.output_type
 class GetClusterAutoUpgradeResult(dict):
     def __init__(__self__, *,
-                 enable: bool,
-                 maintenance_window_day: str,
-                 maintenance_window_start_hour: int):
+                 enable: builtins.bool,
+                 maintenance_window_day: builtins.str,
+                 maintenance_window_start_hour: builtins.int):
         """
-        :param bool enable: True if Kubernetes patch version auto upgrades is enabled.
-        :param str maintenance_window_day: The day of the auto upgrade maintenance window (`monday` to `sunday`, or `any`).
-        :param int maintenance_window_start_hour: The start hour (UTC) of the 2-hour auto upgrade maintenance window (0 to 23).
+        :param builtins.bool enable: True if Kubernetes patch version auto upgrades is enabled.
+        :param builtins.str maintenance_window_day: The day of the auto upgrade maintenance window (`monday` to `sunday`, or `any`).
+        :param builtins.int maintenance_window_start_hour: The start hour (UTC) of the 2-hour auto upgrade maintenance window (0 to 23).
         """
         pulumi.set(__self__, "enable", enable)
         pulumi.set(__self__, "maintenance_window_day", maintenance_window_day)
@@ -730,7 +731,7 @@ class GetClusterAutoUpgradeResult(dict):
 
     @property
     @pulumi.getter
-    def enable(self) -> bool:
+    def enable(self) -> builtins.bool:
         """
         True if Kubernetes patch version auto upgrades is enabled.
         """
@@ -738,7 +739,7 @@ class GetClusterAutoUpgradeResult(dict):
 
    @property
     @pulumi.getter(name="maintenanceWindowDay")
-    def maintenance_window_day(self) -> str:
+    def maintenance_window_day(self) -> builtins.str:
         """
         The day of the auto upgrade maintenance window (`monday` to `sunday`, or `any`).
         """
@@ -746,7 +747,7 @@ class GetClusterAutoUpgradeResult(dict):
 
     @property
     @pulumi.getter(name="maintenanceWindowStartHour")
-    def maintenance_window_start_hour(self) -> int:
+    def maintenance_window_start_hour(self) -> builtins.int:
         """
         The start hour (UTC) of the 2-hour auto upgrade maintenance window (0 to 23).
         """
@@ -756,27 +757,27 @@ class GetClusterAutoUpgradeResult(dict):
 @pulumi.output_type
 class GetClusterAutoscalerConfigResult(dict):
     def __init__(__self__, *,
-                 balance_similar_node_groups: bool,
-                 disable_scale_down: bool,
-                 estimator: str,
-                 expander: str,
-                 expendable_pods_priority_cutoff: int,
-                 ignore_daemonsets_utilization: bool,
-                 max_graceful_termination_sec: int,
-                 scale_down_delay_after_add: str,
-                 scale_down_unneeded_time: str,
-                 scale_down_utilization_threshold: float):
-        """
-        :param bool balance_similar_node_groups: True if detecting similar node groups and balance the number of nodes between them is enabled.
-        :param bool disable_scale_down: True if the scale down feature of the autoscaler is disabled.
-        :param str estimator: The type of resource estimator used in scale up.
-        :param str expander: The type of node group expander be used in scale up.
-        :param int expendable_pods_priority_cutoff: Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.
-        :param bool ignore_daemonsets_utilization: True if ignoring DaemonSet pods when calculating resource utilization for scaling down is enabled.
-        :param int max_graceful_termination_sec: Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node
-        :param str scale_down_delay_after_add: The duration after scale up that scale down evaluation resumes.
-        :param str scale_down_unneeded_time: The duration a node should be unneeded before it is eligible for scale down.
-        :param float scale_down_utilization_threshold: Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down
|
760
|
+
balance_similar_node_groups: builtins.bool,
|
761
|
+
disable_scale_down: builtins.bool,
|
762
|
+
estimator: builtins.str,
|
763
|
+
expander: builtins.str,
|
764
|
+
expendable_pods_priority_cutoff: builtins.int,
|
765
|
+
ignore_daemonsets_utilization: builtins.bool,
|
766
|
+
max_graceful_termination_sec: builtins.int,
|
767
|
+
scale_down_delay_after_add: builtins.str,
|
768
|
+
scale_down_unneeded_time: builtins.str,
|
769
|
+
scale_down_utilization_threshold: builtins.float):
|
770
|
+
"""
|
771
|
+
:param builtins.bool balance_similar_node_groups: True if detecting similar node groups and balance the number of nodes between them is enabled.
|
772
|
+
:param builtins.bool disable_scale_down: True if the scale down feature of the autoscaler is disabled.
|
773
|
+
:param builtins.str estimator: The type of resource estimator used in scale up.
|
774
|
+
:param builtins.str expander: The type of node group expander be used in scale up.
|
775
|
+
:param builtins.int expendable_pods_priority_cutoff: Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.
|
776
|
+
:param builtins.bool ignore_daemonsets_utilization: True if ignoring DaemonSet pods when calculating resource utilization for scaling down is enabled.
|
777
|
+
:param builtins.int max_graceful_termination_sec: Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node
|
778
|
+
:param builtins.str scale_down_delay_after_add: The duration after scale up that scale down evaluation resumes.
|
779
|
+
:param builtins.str scale_down_unneeded_time: The duration a node should be unneeded before it is eligible for scale down.
|
780
|
+
:param builtins.float scale_down_utilization_threshold: Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down
|
780
781
|
"""
|
781
782
|
pulumi.set(__self__, "balance_similar_node_groups", balance_similar_node_groups)
|
782
783
|
pulumi.set(__self__, "disable_scale_down", disable_scale_down)
|
@@ -791,7 +792,7 @@ class GetClusterAutoscalerConfigResult(dict):
|
|
791
792
|
|
792
793
|
@property
|
793
794
|
@pulumi.getter(name="balanceSimilarNodeGroups")
|
794
|
-
def balance_similar_node_groups(self) -> bool:
|
795
|
+
def balance_similar_node_groups(self) -> builtins.bool:
|
795
796
|
"""
|
796
797
|
True if detecting similar node groups and balance the number of nodes between them is enabled.
|
797
798
|
"""
|
@@ -799,7 +800,7 @@ class GetClusterAutoscalerConfigResult(dict):
|
|
799
800
|
|
800
801
|
@property
|
801
802
|
@pulumi.getter(name="disableScaleDown")
|
802
|
-
def disable_scale_down(self) -> bool:
|
803
|
+
def disable_scale_down(self) -> builtins.bool:
|
803
804
|
"""
|
804
805
|
True if the scale down feature of the autoscaler is disabled.
|
805
806
|
"""
|
@@ -807,7 +808,7 @@ class GetClusterAutoscalerConfigResult(dict):
|
|
807
808
|
|
808
809
|
@property
|
809
810
|
@pulumi.getter
|
810
|
-
def estimator(self) -> str:
|
811
|
+
def estimator(self) -> builtins.str:
|
811
812
|
"""
|
812
813
|
The type of resource estimator used in scale up.
|
813
814
|
"""
|
@@ -815,7 +816,7 @@ class GetClusterAutoscalerConfigResult(dict):
|
|
815
816
|
|
816
817
|
@property
|
817
818
|
@pulumi.getter
|
818
|
-
def expander(self) -> str:
|
819
|
+
def expander(self) -> builtins.str:
|
819
820
|
"""
|
820
821
|
The type of node group expander be used in scale up.
|
821
822
|
"""
|
@@ -823,7 +824,7 @@ class GetClusterAutoscalerConfigResult(dict):
|
|
823
824
|
|
824
825
|
@property
|
825
826
|
@pulumi.getter(name="expendablePodsPriorityCutoff")
|
826
|
-
def expendable_pods_priority_cutoff(self) -> int:
|
827
|
+
def expendable_pods_priority_cutoff(self) -> builtins.int:
|
827
828
|
"""
|
828
829
|
Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.
|
829
830
|
"""
|
@@ -831,7 +832,7 @@ class GetClusterAutoscalerConfigResult(dict):
|
|
831
832
|
|
832
833
|
@property
|
833
834
|
@pulumi.getter(name="ignoreDaemonsetsUtilization")
|
834
|
-
def ignore_daemonsets_utilization(self) -> bool:
|
835
|
+
def ignore_daemonsets_utilization(self) -> builtins.bool:
|
835
836
|
"""
|
836
837
|
True if ignoring DaemonSet pods when calculating resource utilization for scaling down is enabled.
|
837
838
|
"""
|
@@ -839,7 +840,7 @@ class GetClusterAutoscalerConfigResult(dict):
|
|
839
840
|
|
840
841
|
@property
|
841
842
|
@pulumi.getter(name="maxGracefulTerminationSec")
|
842
|
-
def max_graceful_termination_sec(self) -> int:
|
843
|
+
def max_graceful_termination_sec(self) -> builtins.int:
|
843
844
|
"""
|
844
845
|
Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node
|
845
846
|
"""
|
@@ -847,7 +848,7 @@ class GetClusterAutoscalerConfigResult(dict):
|
|
847
848
|
|
848
849
|
@property
|
849
850
|
@pulumi.getter(name="scaleDownDelayAfterAdd")
|
850
|
-
def scale_down_delay_after_add(self) -> str:
|
851
|
+
def scale_down_delay_after_add(self) -> builtins.str:
|
851
852
|
"""
|
852
853
|
The duration after scale up that scale down evaluation resumes.
|
853
854
|
"""
|
@@ -855,7 +856,7 @@ class GetClusterAutoscalerConfigResult(dict):
|
|
855
856
|
|
856
857
|
@property
|
857
858
|
@pulumi.getter(name="scaleDownUnneededTime")
|
858
|
-
def scale_down_unneeded_time(self) -> str:
|
859
|
+
def scale_down_unneeded_time(self) -> builtins.str:
|
859
860
|
"""
|
860
861
|
The duration a node should be unneeded before it is eligible for scale down.
|
861
862
|
"""
|
@@ -863,7 +864,7 @@ class GetClusterAutoscalerConfigResult(dict):
|
|
863
864
|
|
864
865
|
@property
|
865
866
|
@pulumi.getter(name="scaleDownUtilizationThreshold")
|
866
|
-
def scale_down_utilization_threshold(self) -> float:
|
867
|
+
def scale_down_utilization_threshold(self) -> builtins.float:
|
867
868
|
"""
|
868
869
|
Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down
|
869
870
|
"""
|
@@ -873,15 +874,15 @@ class GetClusterAutoscalerConfigResult(dict):
|
|
873
874
|
@pulumi.output_type
|
874
875
|
class GetClusterKubeconfigResult(dict):
|
875
876
|
def __init__(__self__, *,
|
876
|
-
cluster_ca_certificate: str,
|
877
|
-
config_file: str,
|
878
|
-
host: str,
|
879
|
-
token: str):
|
877
|
+
cluster_ca_certificate: builtins.str,
|
878
|
+
config_file: builtins.str,
|
879
|
+
host: builtins.str,
|
880
|
+
token: builtins.str):
|
880
881
|
"""
|
881
|
-
:param str cluster_ca_certificate: The CA certificate of the Kubernetes API server.
|
882
|
-
:param str config_file: The raw kubeconfig file.
|
883
|
-
:param str host: The URL of the Kubernetes API server.
|
884
|
-
:param str token: The token to connect to the Kubernetes API server.
|
882
|
+
:param builtins.str cluster_ca_certificate: The CA certificate of the Kubernetes API server.
|
883
|
+
:param builtins.str config_file: The raw kubeconfig file.
|
884
|
+
:param builtins.str host: The URL of the Kubernetes API server.
|
885
|
+
:param builtins.str token: The token to connect to the Kubernetes API server.
|
885
886
|
"""
|
886
887
|
pulumi.set(__self__, "cluster_ca_certificate", cluster_ca_certificate)
|
887
888
|
pulumi.set(__self__, "config_file", config_file)
|
@@ -890,7 +891,7 @@ class GetClusterKubeconfigResult(dict):
|
|
890
891
|
|
891
892
|
@property
|
892
893
|
@pulumi.getter(name="clusterCaCertificate")
|
893
|
-
def cluster_ca_certificate(self) -> str:
|
894
|
+
def cluster_ca_certificate(self) -> builtins.str:
|
894
895
|
"""
|
895
896
|
The CA certificate of the Kubernetes API server.
|
896
897
|
"""
|
@@ -898,7 +899,7 @@ class GetClusterKubeconfigResult(dict):
|
|
898
899
|
|
899
900
|
@property
|
900
901
|
@pulumi.getter(name="configFile")
|
901
|
-
def config_file(self) -> str:
|
902
|
+
def config_file(self) -> builtins.str:
|
902
903
|
"""
|
903
904
|
The raw kubeconfig file.
|
904
905
|
"""
|
@@ -906,7 +907,7 @@ class GetClusterKubeconfigResult(dict):
|
|
906
907
|
|
907
908
|
@property
|
908
909
|
@pulumi.getter
|
909
|
-
def host(self) -> str:
|
910
|
+
def host(self) -> builtins.str:
|
910
911
|
"""
|
911
912
|
The URL of the Kubernetes API server.
|
912
913
|
"""
|
@@ -914,7 +915,7 @@ class GetClusterKubeconfigResult(dict):
|
|
914
915
|
|
915
916
|
@property
|
916
917
|
@pulumi.getter
|
917
|
-
def token(self) -> str:
|
918
|
+
def token(self) -> builtins.str:
|
918
919
|
"""
|
919
920
|
The token to connect to the Kubernetes API server.
|
920
921
|
"""
|
@@ -924,21 +925,21 @@ class GetClusterKubeconfigResult(dict):
|
|
924
925
|
@pulumi.output_type
|
925
926
|
class GetClusterOpenIdConnectConfigResult(dict):
|
926
927
|
def __init__(__self__, *,
|
927
|
-
client_id: str,
|
928
|
-
groups_claims: Sequence[str],
|
929
|
-
groups_prefix: str,
|
930
|
-
issuer_url: str,
|
931
|
-
required_claims: Sequence[str],
|
932
|
-
username_claim: str,
|
933
|
-
username_prefix: str):
|
934
|
-
"""
|
935
|
-
:param str client_id: A client id that all tokens must be issued for
|
936
|
-
:param Sequence[str] groups_claims: JWT claim to use as the user's group
|
937
|
-
:param str groups_prefix: Prefix prepended to group claims
|
938
|
-
:param str issuer_url: URL of the provider which allows the API server to discover public signing keys
|
939
|
-
:param Sequence[str] required_claims: Multiple key=value pairs that describes a required claim in the ID Token
|
940
|
-
:param str username_claim: JWT claim to use as the user name
|
941
|
-
:param str username_prefix: Prefix prepended to username
|
928
|
+
client_id: builtins.str,
|
929
|
+
groups_claims: Sequence[builtins.str],
|
930
|
+
groups_prefix: builtins.str,
|
931
|
+
issuer_url: builtins.str,
|
932
|
+
required_claims: Sequence[builtins.str],
|
933
|
+
username_claim: builtins.str,
|
934
|
+
username_prefix: builtins.str):
|
935
|
+
"""
|
936
|
+
:param builtins.str client_id: A client id that all tokens must be issued for
|
937
|
+
:param Sequence[builtins.str] groups_claims: JWT claim to use as the user's group
|
938
|
+
:param builtins.str groups_prefix: Prefix prepended to group claims
|
939
|
+
:param builtins.str issuer_url: URL of the provider which allows the API server to discover public signing keys
|
940
|
+
:param Sequence[builtins.str] required_claims: Multiple key=value pairs that describes a required claim in the ID Token
|
941
|
+
:param builtins.str username_claim: JWT claim to use as the user name
|
942
|
+
:param builtins.str username_prefix: Prefix prepended to username
|
942
943
|
"""
|
943
944
|
pulumi.set(__self__, "client_id", client_id)
|
944
945
|
pulumi.set(__self__, "groups_claims", groups_claims)
|
@@ -950,7 +951,7 @@ class GetClusterOpenIdConnectConfigResult(dict):
|
|
950
951
|
|
951
952
|
@property
|
952
953
|
@pulumi.getter(name="clientId")
|
953
|
-
def client_id(self) -> str:
|
954
|
+
def client_id(self) -> builtins.str:
|
954
955
|
"""
|
955
956
|
A client id that all tokens must be issued for
|
956
957
|
"""
|
@@ -958,7 +959,7 @@ class GetClusterOpenIdConnectConfigResult(dict):
|
|
958
959
|
|
959
960
|
@property
|
960
961
|
@pulumi.getter(name="groupsClaims")
|
961
|
-
def groups_claims(self) -> Sequence[str]:
|
962
|
+
def groups_claims(self) -> Sequence[builtins.str]:
|
962
963
|
"""
|
963
964
|
JWT claim to use as the user's group
|
964
965
|
"""
|
@@ -966,7 +967,7 @@ class GetClusterOpenIdConnectConfigResult(dict):
|
|
966
967
|
|
967
968
|
@property
|
968
969
|
@pulumi.getter(name="groupsPrefix")
|
969
|
-
def groups_prefix(self) -> str:
|
970
|
+
def groups_prefix(self) -> builtins.str:
|
970
971
|
"""
|
971
972
|
Prefix prepended to group claims
|
972
973
|
"""
|
@@ -974,7 +975,7 @@ class GetClusterOpenIdConnectConfigResult(dict):
|
|
974
975
|
|
975
976
|
@property
|
976
977
|
@pulumi.getter(name="issuerUrl")
|
977
|
-
def issuer_url(self) -> str:
|
978
|
+
def issuer_url(self) -> builtins.str:
|
978
979
|
"""
|
979
980
|
URL of the provider which allows the API server to discover public signing keys
|
980
981
|
"""
|
@@ -982,7 +983,7 @@ class GetClusterOpenIdConnectConfigResult(dict):
|
|
982
983
|
|
983
984
|
@property
|
984
985
|
@pulumi.getter(name="requiredClaims")
|
985
|
-
def required_claims(self) -> Sequence[str]:
|
986
|
+
def required_claims(self) -> Sequence[builtins.str]:
|
986
987
|
"""
|
987
988
|
Multiple key=value pairs that describes a required claim in the ID Token
|
988
989
|
"""
|
@@ -990,7 +991,7 @@ class GetClusterOpenIdConnectConfigResult(dict):
|
|
990
991
|
|
991
992
|
@property
|
992
993
|
@pulumi.getter(name="usernameClaim")
|
993
|
-
def username_claim(self) -> str:
|
994
|
+
def username_claim(self) -> builtins.str:
|
994
995
|
"""
|
995
996
|
JWT claim to use as the user name
|
996
997
|
"""
|
@@ -998,7 +999,7 @@ class GetClusterOpenIdConnectConfigResult(dict):
|
|
998
999
|
|
999
1000
|
@property
|
1000
1001
|
@pulumi.getter(name="usernamePrefix")
|
1001
|
-
def username_prefix(self) -> str:
|
1002
|
+
def username_prefix(self) -> builtins.str:
|
1002
1003
|
"""
|
1003
1004
|
Prefix prepended to username
|
1004
1005
|
"""
|
@@ -1008,19 +1009,19 @@ class GetClusterOpenIdConnectConfigResult(dict):
|
|
1008
1009
|
@pulumi.output_type
|
1009
1010
|
class GetPoolNodeResult(dict):
|
1010
1011
|
def __init__(__self__, *,
|
1011
|
-
id: str,
|
1012
|
-
name: str,
|
1012
|
+
id: builtins.str,
|
1013
|
+
name: builtins.str,
|
1013
1014
|
private_ips: Sequence['outputs.GetPoolNodePrivateIpResult'],
|
1014
|
-
public_ip: str,
|
1015
|
-
public_ip_v6: str,
|
1016
|
-
status: str):
|
1015
|
+
public_ip: builtins.str,
|
1016
|
+
public_ip_v6: builtins.str,
|
1017
|
+
status: builtins.str):
|
1017
1018
|
"""
|
1018
|
-
:param str id: The ID of the pool.
|
1019
|
-
:param str name: The pool name. Only one of `name` and `pool_id` should be specified. `cluster_id` should be specified with `name`.
|
1019
|
+
:param builtins.str id: The ID of the pool.
|
1020
|
+
:param builtins.str name: The pool name. Only one of `name` and `pool_id` should be specified. `cluster_id` should be specified with `name`.
|
1020
1021
|
:param Sequence['GetPoolNodePrivateIpArgs'] private_ips: List of private IPv4 and IPv6 addresses associated with the node
|
1021
|
-
:param str public_ip: The public IPv4.
|
1022
|
-
:param str public_ip_v6: The public IPv6.
|
1023
|
-
:param str status: The status of the node.
|
1022
|
+
:param builtins.str public_ip: The public IPv4.
|
1023
|
+
:param builtins.str public_ip_v6: The public IPv6.
|
1024
|
+
:param builtins.str status: The status of the node.
|
1024
1025
|
"""
|
1025
1026
|
pulumi.set(__self__, "id", id)
|
1026
1027
|
pulumi.set(__self__, "name", name)
|
@@ -1031,7 +1032,7 @@ class GetPoolNodeResult(dict):
|
|
1031
1032
|
|
1032
1033
|
@property
|
1033
1034
|
@pulumi.getter
|
1034
|
-
def id(self) -> str:
|
1035
|
+
def id(self) -> builtins.str:
|
1035
1036
|
"""
|
1036
1037
|
The ID of the pool.
|
1037
1038
|
"""
|
@@ -1039,7 +1040,7 @@ class GetPoolNodeResult(dict):
|
|
1039
1040
|
|
1040
1041
|
@property
|
1041
1042
|
@pulumi.getter
|
1042
|
-
def name(self) -> str:
|
1043
|
+
def name(self) -> builtins.str:
|
1043
1044
|
"""
|
1044
1045
|
The pool name. Only one of `name` and `pool_id` should be specified. `cluster_id` should be specified with `name`.
|
1045
1046
|
"""
|
@@ -1055,7 +1056,7 @@ class GetPoolNodeResult(dict):
|
|
1055
1056
|
|
1056
1057
|
@property
|
1057
1058
|
@pulumi.getter(name="publicIp")
|
1058
|
-
def public_ip(self) -> str:
|
1059
|
+
def public_ip(self) -> builtins.str:
|
1059
1060
|
"""
|
1060
1061
|
The public IPv4.
|
1061
1062
|
"""
|
@@ -1063,7 +1064,7 @@ class GetPoolNodeResult(dict):
|
|
1063
1064
|
|
1064
1065
|
@property
|
1065
1066
|
@pulumi.getter(name="publicIpV6")
|
1066
|
-
def public_ip_v6(self) -> str:
|
1067
|
+
def public_ip_v6(self) -> builtins.str:
|
1067
1068
|
"""
|
1068
1069
|
The public IPv6.
|
1069
1070
|
"""
|
@@ -1071,7 +1072,7 @@ class GetPoolNodeResult(dict):
|
|
1071
1072
|
|
1072
1073
|
@property
|
1073
1074
|
@pulumi.getter
|
1074
|
-
def status(self) -> str:
|
1075
|
+
def status(self) -> builtins.str:
|
1075
1076
|
"""
|
1076
1077
|
The status of the node.
|
1077
1078
|
"""
|
@@ -1081,18 +1082,18 @@ class GetPoolNodeResult(dict):
|
|
1081
1082
|
@pulumi.output_type
|
1082
1083
|
class GetPoolNodePrivateIpResult(dict):
|
1083
1084
|
def __init__(__self__, *,
|
1084
|
-
address: str,
|
1085
|
-
id: str):
|
1085
|
+
address: builtins.str,
|
1086
|
+
id: builtins.str):
|
1086
1087
|
"""
|
1087
|
-
:param str address: The private IP address
|
1088
|
-
:param str id: The ID of the pool.
|
1088
|
+
:param builtins.str address: The private IP address
|
1089
|
+
:param builtins.str id: The ID of the pool.
|
1089
1090
|
"""
|
1090
1091
|
pulumi.set(__self__, "address", address)
|
1091
1092
|
pulumi.set(__self__, "id", id)
|
1092
1093
|
|
1093
1094
|
@property
|
1094
1095
|
@pulumi.getter
|
1095
|
-
def address(self) -> str:
|
1096
|
+
def address(self) -> builtins.str:
|
1096
1097
|
"""
|
1097
1098
|
The private IP address
|
1098
1099
|
"""
|
@@ -1100,7 +1101,7 @@ class GetPoolNodePrivateIpResult(dict):
|
|
1100
1101
|
|
1101
1102
|
@property
|
1102
1103
|
@pulumi.getter
|
1103
|
-
def id(self) -> str:
|
1104
|
+
def id(self) -> builtins.str:
|
1104
1105
|
"""
|
1105
1106
|
The ID of the pool.
|
1106
1107
|
"""
|
@@ -1110,18 +1111,18 @@ class GetPoolNodePrivateIpResult(dict):
|
|
1110
1111
|
@pulumi.output_type
|
1111
1112
|
class GetPoolUpgradePolicyResult(dict):
|
1112
1113
|
def __init__(__self__, *,
|
1113
|
-
max_surge: int,
|
1114
|
-
max_unavailable: int):
|
1114
|
+
max_surge: builtins.int,
|
1115
|
+
max_unavailable: builtins.int):
|
1115
1116
|
"""
|
1116
|
-
:param int max_surge: The maximum number of nodes to be created during the upgrade
|
1117
|
-
:param int max_unavailable: The maximum number of nodes that can be not ready at the same time
|
1117
|
+
:param builtins.int max_surge: The maximum number of nodes to be created during the upgrade
|
1118
|
+
:param builtins.int max_unavailable: The maximum number of nodes that can be not ready at the same time
|
1118
1119
|
"""
|
1119
1120
|
pulumi.set(__self__, "max_surge", max_surge)
|
1120
1121
|
pulumi.set(__self__, "max_unavailable", max_unavailable)
|
1121
1122
|
|
1122
1123
|
@property
|
1123
1124
|
@pulumi.getter(name="maxSurge")
|
1124
|
-
def max_surge(self) -> int:
|
1125
|
+
def max_surge(self) -> builtins.int:
|
1125
1126
|
"""
|
1126
1127
|
The maximum number of nodes to be created during the upgrade
|
1127
1128
|
"""
|
@@ -1129,7 +1130,7 @@ class GetPoolUpgradePolicyResult(dict):
|
|
1129
1130
|
|
1130
1131
|
@property
|
1131
1132
|
@pulumi.getter(name="maxUnavailable")
|
1132
|
-
def max_unavailable(self) -> int:
|
1133
|
+
def max_unavailable(self) -> builtins.int:
|
1133
1134
|
"""
|
1134
1135
|
The maximum number of nodes that can be not ready at the same time
|
1135
1136
|
"""
|