pulumiverse-scaleway: pulumiverse_scaleway-1.25.0a1742288097-py3-none-any.whl → pulumiverse_scaleway-1.25.0a1742668904-py3-none-any.whl
This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in the public registry.
- pulumiverse_scaleway/__init__.py +929 -0
- pulumiverse_scaleway/account/__init__.py +12 -0
- pulumiverse_scaleway/account/get_availability_zones.py +139 -0
- pulumiverse_scaleway/account/get_project.py +170 -0
- pulumiverse_scaleway/account/get_ssh_key.py +205 -0
- pulumiverse_scaleway/account/project.py +318 -0
- pulumiverse_scaleway/account/ssh_key.py +456 -0
- pulumiverse_scaleway/account_project.py +6 -0
- pulumiverse_scaleway/account_ssh_key.py +12 -6
- pulumiverse_scaleway/apple_silicon_server.py +8 -2
- pulumiverse_scaleway/applesilicon/__init__.py +10 -0
- pulumiverse_scaleway/applesilicon/_inputs.py +154 -0
- pulumiverse_scaleway/applesilicon/outputs.py +119 -0
- pulumiverse_scaleway/applesilicon/server.py +690 -0
- pulumiverse_scaleway/baremetal_server.py +64 -58
- pulumiverse_scaleway/billing/__init__.py +10 -0
- pulumiverse_scaleway/billing/get_consumptions.py +134 -0
- pulumiverse_scaleway/billing/get_invoices.py +154 -0
- pulumiverse_scaleway/billing/outputs.py +288 -0
- pulumiverse_scaleway/block/__init__.py +11 -0
- pulumiverse_scaleway/block/get_snapshot.py +174 -0
- pulumiverse_scaleway/block/get_volume.py +199 -0
- pulumiverse_scaleway/block/snapshot.py +403 -0
- pulumiverse_scaleway/block/volume.py +576 -0
- pulumiverse_scaleway/block_snapshot.py +12 -6
- pulumiverse_scaleway/block_volume.py +16 -10
- pulumiverse_scaleway/cockpit.py +10 -4
- pulumiverse_scaleway/cockpit_alert_manager.py +12 -6
- pulumiverse_scaleway/cockpit_grafana_user.py +12 -6
- pulumiverse_scaleway/cockpit_source.py +12 -6
- pulumiverse_scaleway/cockpit_token.py +16 -10
- pulumiverse_scaleway/container.py +16 -10
- pulumiverse_scaleway/container_cron.py +14 -8
- pulumiverse_scaleway/container_domain.py +24 -18
- pulumiverse_scaleway/container_namespace.py +10 -4
- pulumiverse_scaleway/container_token.py +32 -26
- pulumiverse_scaleway/container_trigger.py +12 -6
- pulumiverse_scaleway/containers/__init__.py +17 -0
- pulumiverse_scaleway/containers/_inputs.py +389 -0
- pulumiverse_scaleway/containers/container.py +1635 -0
- pulumiverse_scaleway/containers/cron.py +460 -0
- pulumiverse_scaleway/containers/domain.py +408 -0
- pulumiverse_scaleway/containers/get_container.py +562 -0
- pulumiverse_scaleway/containers/get_namespace.py +283 -0
- pulumiverse_scaleway/containers/namespace.py +638 -0
- pulumiverse_scaleway/containers/outputs.py +412 -0
- pulumiverse_scaleway/containers/token.py +454 -0
- pulumiverse_scaleway/containers/trigger.py +482 -0
- pulumiverse_scaleway/database.py +10 -4
- pulumiverse_scaleway/database_acl.py +10 -4
- pulumiverse_scaleway/database_backup.py +14 -8
- pulumiverse_scaleway/database_instance.py +24 -18
- pulumiverse_scaleway/database_privilege.py +16 -10
- pulumiverse_scaleway/database_read_replica.py +22 -16
- pulumiverse_scaleway/database_user.py +10 -4
- pulumiverse_scaleway/databases/__init__.py +23 -0
- pulumiverse_scaleway/databases/_inputs.py +822 -0
- pulumiverse_scaleway/databases/acl.py +334 -0
- pulumiverse_scaleway/databases/database.py +409 -0
- pulumiverse_scaleway/databases/database_backup.py +576 -0
- pulumiverse_scaleway/databases/get_acl.py +143 -0
- pulumiverse_scaleway/databases/get_database.py +187 -0
- pulumiverse_scaleway/databases/get_database_backup.py +253 -0
- pulumiverse_scaleway/databases/get_instance.py +415 -0
- pulumiverse_scaleway/databases/get_privilege.py +181 -0
- pulumiverse_scaleway/databases/instance.py +1696 -0
- pulumiverse_scaleway/databases/outputs.py +866 -0
- pulumiverse_scaleway/databases/privilege.py +424 -0
- pulumiverse_scaleway/databases/read_replica.py +530 -0
- pulumiverse_scaleway/databases/serverless_database.py +434 -0
- pulumiverse_scaleway/databases/snapshot.py +610 -0
- pulumiverse_scaleway/databases/user.py +448 -0
- pulumiverse_scaleway/domain/__init__.py +13 -0
- pulumiverse_scaleway/domain/_inputs.py +341 -0
- pulumiverse_scaleway/domain/get_record.py +340 -0
- pulumiverse_scaleway/domain/get_zone.py +201 -0
- pulumiverse_scaleway/domain/outputs.py +408 -0
- pulumiverse_scaleway/domain/record.py +1118 -0
- pulumiverse_scaleway/domain/zone.py +432 -0
- pulumiverse_scaleway/domain_record.py +36 -30
- pulumiverse_scaleway/domain_zone.py +6 -0
- pulumiverse_scaleway/elasticmetal/__init__.py +18 -0
- pulumiverse_scaleway/elasticmetal/_inputs.py +509 -0
- pulumiverse_scaleway/elasticmetal/get_ip.py +247 -0
- pulumiverse_scaleway/elasticmetal/get_ips.py +240 -0
- pulumiverse_scaleway/elasticmetal/get_offer.py +245 -0
- pulumiverse_scaleway/elasticmetal/get_option.py +168 -0
- pulumiverse_scaleway/elasticmetal/get_os.py +174 -0
- pulumiverse_scaleway/elasticmetal/get_server.py +422 -0
- pulumiverse_scaleway/elasticmetal/ip.py +704 -0
- pulumiverse_scaleway/elasticmetal/ip_mac_address.py +512 -0
- pulumiverse_scaleway/elasticmetal/outputs.py +974 -0
- pulumiverse_scaleway/elasticmetal/server.py +1591 -0
- pulumiverse_scaleway/flexible_ip.py +22 -16
- pulumiverse_scaleway/flexible_ip_mac_address.py +22 -16
- pulumiverse_scaleway/function.py +6 -0
- pulumiverse_scaleway/function_cron.py +16 -10
- pulumiverse_scaleway/function_domain.py +16 -10
- pulumiverse_scaleway/function_namespace.py +10 -4
- pulumiverse_scaleway/function_token.py +32 -26
- pulumiverse_scaleway/function_trigger.py +12 -6
- pulumiverse_scaleway/functions/__init__.py +17 -0
- pulumiverse_scaleway/functions/_inputs.py +211 -0
- pulumiverse_scaleway/functions/cron.py +464 -0
- pulumiverse_scaleway/functions/domain.py +372 -0
- pulumiverse_scaleway/functions/function.py +1105 -0
- pulumiverse_scaleway/functions/get_function.py +365 -0
- pulumiverse_scaleway/functions/get_namespace.py +238 -0
- pulumiverse_scaleway/functions/namespace.py +582 -0
- pulumiverse_scaleway/functions/outputs.py +168 -0
- pulumiverse_scaleway/functions/token.py +462 -0
- pulumiverse_scaleway/functions/trigger.py +482 -0
- pulumiverse_scaleway/get_account_project.py +6 -2
- pulumiverse_scaleway/get_account_ssh_key.py +6 -2
- pulumiverse_scaleway/get_availability_zones.py +8 -4
- pulumiverse_scaleway/get_baremetal_offer.py +4 -0
- pulumiverse_scaleway/get_baremetal_option.py +8 -4
- pulumiverse_scaleway/get_baremetal_os.py +8 -4
- pulumiverse_scaleway/get_baremetal_server.py +8 -4
- pulumiverse_scaleway/get_billing_consumptions.py +4 -0
- pulumiverse_scaleway/get_billing_invoices.py +4 -0
- pulumiverse_scaleway/get_block_snapshot.py +6 -2
- pulumiverse_scaleway/get_block_volume.py +6 -2
- pulumiverse_scaleway/get_cockpit.py +12 -8
- pulumiverse_scaleway/get_cockpit_plan.py +10 -6
- pulumiverse_scaleway/get_cockpit_source.py +8 -4
- pulumiverse_scaleway/get_container.py +16 -12
- pulumiverse_scaleway/get_container_namespace.py +10 -6
- pulumiverse_scaleway/get_database.py +6 -2
- pulumiverse_scaleway/get_database_acl.py +6 -2
- pulumiverse_scaleway/get_database_backup.py +10 -6
- pulumiverse_scaleway/get_database_instance.py +4 -0
- pulumiverse_scaleway/get_database_privilege.py +6 -2
- pulumiverse_scaleway/get_domain_record.py +10 -6
- pulumiverse_scaleway/get_domain_zone.py +4 -0
- pulumiverse_scaleway/get_flexible_ip.py +4 -0
- pulumiverse_scaleway/get_flexible_ips.py +16 -12
- pulumiverse_scaleway/get_function.py +6 -2
- pulumiverse_scaleway/get_function_namespace.py +6 -2
- pulumiverse_scaleway/get_iam_api_key.py +6 -2
- pulumiverse_scaleway/get_iam_application.py +8 -4
- pulumiverse_scaleway/get_iam_group.py +8 -4
- pulumiverse_scaleway/get_iam_ssh_key.py +4 -0
- pulumiverse_scaleway/get_iam_user.py +8 -4
- pulumiverse_scaleway/get_instance_image.py +4 -0
- pulumiverse_scaleway/get_instance_ip.py +4 -0
- pulumiverse_scaleway/get_instance_placement_group.py +4 -0
- pulumiverse_scaleway/get_instance_private_nic.py +10 -6
- pulumiverse_scaleway/get_instance_security_group.py +4 -0
- pulumiverse_scaleway/get_instance_server.py +4 -0
- pulumiverse_scaleway/get_instance_servers.py +4 -0
- pulumiverse_scaleway/get_instance_snapshot.py +8 -4
- pulumiverse_scaleway/get_instance_volume.py +4 -0
- pulumiverse_scaleway/get_iot_device.py +4 -0
- pulumiverse_scaleway/get_iot_hub.py +4 -0
- pulumiverse_scaleway/get_ipam_ip.py +18 -14
- pulumiverse_scaleway/get_ipam_ips.py +14 -10
- pulumiverse_scaleway/get_k8s_version.py +8 -4
- pulumiverse_scaleway/get_kubernetes_cluster.py +4 -0
- pulumiverse_scaleway/get_kubernetes_node_pool.py +4 -0
- pulumiverse_scaleway/get_lb_acls.py +8 -4
- pulumiverse_scaleway/get_lb_backend.py +18 -14
- pulumiverse_scaleway/get_lb_backends.py +8 -4
- pulumiverse_scaleway/get_lb_frontend.py +16 -12
- pulumiverse_scaleway/get_lb_frontends.py +8 -4
- pulumiverse_scaleway/get_lb_ips.py +4 -0
- pulumiverse_scaleway/get_lb_route.py +16 -12
- pulumiverse_scaleway/get_lb_routes.py +8 -4
- pulumiverse_scaleway/get_lbs.py +4 -0
- pulumiverse_scaleway/get_loadbalancer.py +8 -4
- pulumiverse_scaleway/get_loadbalancer_certificate.py +4 -0
- pulumiverse_scaleway/get_loadbalancer_ip.py +4 -0
- pulumiverse_scaleway/get_mnq_sns.py +8 -4
- pulumiverse_scaleway/get_mnq_sqs.py +8 -4
- pulumiverse_scaleway/get_mongo_db_instance.py +4 -0
- pulumiverse_scaleway/get_object_bucket.py +12 -8
- pulumiverse_scaleway/get_object_bucket_policy.py +8 -4
- pulumiverse_scaleway/get_redis_cluster.py +4 -0
- pulumiverse_scaleway/get_registry_image.py +4 -0
- pulumiverse_scaleway/get_registry_image_tag.py +4 -0
- pulumiverse_scaleway/get_registry_namespace.py +4 -0
- pulumiverse_scaleway/get_secret.py +12 -8
- pulumiverse_scaleway/get_secret_version.py +14 -10
- pulumiverse_scaleway/get_tem_domain.py +4 -0
- pulumiverse_scaleway/get_vpc.py +10 -6
- pulumiverse_scaleway/get_vpc_gateway_network.py +10 -6
- pulumiverse_scaleway/get_vpc_private_network.py +10 -6
- pulumiverse_scaleway/get_vpc_public_gateway.py +10 -6
- pulumiverse_scaleway/get_vpc_public_gateway_dhcp.py +8 -4
- pulumiverse_scaleway/get_vpc_public_gateway_dhcp_reservation.py +70 -66
- pulumiverse_scaleway/get_vpc_public_gateway_ip.py +8 -4
- pulumiverse_scaleway/get_vpc_public_pat_rule.py +26 -22
- pulumiverse_scaleway/get_vpc_routes.py +4 -0
- pulumiverse_scaleway/get_vpcs.py +4 -0
- pulumiverse_scaleway/get_web_host_offer.py +8 -4
- pulumiverse_scaleway/get_webhosting.py +8 -4
- pulumiverse_scaleway/hosting/__init__.py +12 -0
- pulumiverse_scaleway/hosting/_inputs.py +295 -0
- pulumiverse_scaleway/hosting/get_hosting.py +354 -0
- pulumiverse_scaleway/hosting/get_offer.py +229 -0
- pulumiverse_scaleway/hosting/hosting.py +870 -0
- pulumiverse_scaleway/hosting/outputs.py +626 -0
- pulumiverse_scaleway/iam/__init__.py +21 -0
- pulumiverse_scaleway/iam/_inputs.py +138 -0
- pulumiverse_scaleway/iam/api_key.py +622 -0
- pulumiverse_scaleway/iam/application.py +419 -0
- pulumiverse_scaleway/iam/get_api_key.py +210 -0
- pulumiverse_scaleway/iam/get_application.py +210 -0
- pulumiverse_scaleway/iam/get_group.py +236 -0
- pulumiverse_scaleway/iam/get_ssh_key.py +212 -0
- pulumiverse_scaleway/iam/get_user.py +177 -0
- pulumiverse_scaleway/iam/group.py +568 -0
- pulumiverse_scaleway/iam/group_membership.py +325 -0
- pulumiverse_scaleway/iam/outputs.py +111 -0
- pulumiverse_scaleway/iam/policy.py +775 -0
- pulumiverse_scaleway/iam/ssh_key.py +457 -0
- pulumiverse_scaleway/iam/user.py +515 -0
- pulumiverse_scaleway/iam_api_key.py +16 -10
- pulumiverse_scaleway/iam_application.py +8 -2
- pulumiverse_scaleway/iam_group.py +12 -6
- pulumiverse_scaleway/iam_group_membership.py +12 -6
- pulumiverse_scaleway/iam_policy.py +18 -12
- pulumiverse_scaleway/iam_ssh_key.py +8 -2
- pulumiverse_scaleway/iam_user.py +8 -2
- pulumiverse_scaleway/inference/__init__.py +10 -0
- pulumiverse_scaleway/inference/_inputs.py +209 -0
- pulumiverse_scaleway/inference/deployment.py +824 -0
- pulumiverse_scaleway/inference/outputs.py +169 -0
- pulumiverse_scaleway/inference_deployment.py +8 -2
- pulumiverse_scaleway/instance/__init__.py +29 -0
- pulumiverse_scaleway/instance/_inputs.py +1237 -0
- pulumiverse_scaleway/instance/get_image.py +305 -0
- pulumiverse_scaleway/instance/get_ip.py +204 -0
- pulumiverse_scaleway/instance/get_placement_group.py +212 -0
- pulumiverse_scaleway/instance/get_private_nic.py +226 -0
- pulumiverse_scaleway/instance/get_security_group.py +268 -0
- pulumiverse_scaleway/instance/get_server.py +488 -0
- pulumiverse_scaleway/instance/get_servers.py +187 -0
- pulumiverse_scaleway/instance/get_snapshot.py +248 -0
- pulumiverse_scaleway/instance/get_volume.py +226 -0
- pulumiverse_scaleway/instance/image.py +752 -0
- pulumiverse_scaleway/instance/ip.py +471 -0
- pulumiverse_scaleway/instance/ip_reverse_dns.py +310 -0
- pulumiverse_scaleway/instance/outputs.py +1533 -0
- pulumiverse_scaleway/instance/placement_group.py +481 -0
- pulumiverse_scaleway/instance/private_nic.py +557 -0
- pulumiverse_scaleway/instance/security_group.py +722 -0
- pulumiverse_scaleway/instance/security_group_rules.py +441 -0
- pulumiverse_scaleway/instance/server.py +1938 -0
- pulumiverse_scaleway/instance/snapshot.py +671 -0
- pulumiverse_scaleway/instance/user_data.py +437 -0
- pulumiverse_scaleway/instance/volume.py +584 -0
- pulumiverse_scaleway/instance_image.py +18 -12
- pulumiverse_scaleway/instance_ip.py +8 -2
- pulumiverse_scaleway/instance_ip_reverse_dns.py +12 -6
- pulumiverse_scaleway/instance_placement_group.py +8 -2
- pulumiverse_scaleway/instance_private_nic.py +24 -18
- pulumiverse_scaleway/instance_security_group.py +6 -0
- pulumiverse_scaleway/instance_security_group_rules.py +22 -16
- pulumiverse_scaleway/instance_server.py +74 -68
- pulumiverse_scaleway/instance_snapshot.py +22 -16
- pulumiverse_scaleway/instance_user_data.py +16 -10
- pulumiverse_scaleway/instance_volume.py +8 -2
- pulumiverse_scaleway/iot/__init__.py +15 -0
- pulumiverse_scaleway/iot/_inputs.py +539 -0
- pulumiverse_scaleway/iot/device.py +752 -0
- pulumiverse_scaleway/iot/get_device.py +257 -0
- pulumiverse_scaleway/iot/get_hub.py +322 -0
- pulumiverse_scaleway/iot/hub.py +898 -0
- pulumiverse_scaleway/iot/network.py +474 -0
- pulumiverse_scaleway/iot/outputs.py +465 -0
- pulumiverse_scaleway/iot/route.py +662 -0
- pulumiverse_scaleway/iot_device.py +14 -8
- pulumiverse_scaleway/iot_hub.py +8 -2
- pulumiverse_scaleway/iot_network.py +12 -6
- pulumiverse_scaleway/iot_route.py +32 -26
- pulumiverse_scaleway/ipam/__init__.py +13 -0
- pulumiverse_scaleway/ipam/_inputs.py +442 -0
- pulumiverse_scaleway/ipam/get_ip.py +419 -0
- pulumiverse_scaleway/ipam/get_ips.py +358 -0
- pulumiverse_scaleway/ipam/ip.py +759 -0
- pulumiverse_scaleway/ipam/ip_reverse_dns.py +320 -0
- pulumiverse_scaleway/ipam/outputs.py +481 -0
- pulumiverse_scaleway/ipam_ip.py +30 -24
- pulumiverse_scaleway/ipam_ip_reverse_dns.py +6 -0
- pulumiverse_scaleway/job/__init__.py +10 -0
- pulumiverse_scaleway/job/_inputs.py +73 -0
- pulumiverse_scaleway/job/definition.py +694 -0
- pulumiverse_scaleway/job/outputs.py +49 -0
- pulumiverse_scaleway/job_definition.py +8 -2
- pulumiverse_scaleway/kubernetes/__init__.py +14 -0
- pulumiverse_scaleway/kubernetes/_inputs.py +717 -0
- pulumiverse_scaleway/kubernetes/cluster.py +1540 -0
- pulumiverse_scaleway/kubernetes/get_cluster.py +417 -0
- pulumiverse_scaleway/kubernetes/get_pool.py +436 -0
- pulumiverse_scaleway/kubernetes/get_version.py +196 -0
- pulumiverse_scaleway/kubernetes/outputs.py +944 -0
- pulumiverse_scaleway/kubernetes/pool.py +1313 -0
- pulumiverse_scaleway/kubernetes_cluster.py +36 -30
- pulumiverse_scaleway/kubernetes_node_pool.py +6 -0
- pulumiverse_scaleway/loadbalancer.py +35 -29
- pulumiverse_scaleway/loadbalancer_acl.py +8 -2
- pulumiverse_scaleway/loadbalancer_backend.py +10 -4
- pulumiverse_scaleway/loadbalancer_certificate.py +6 -0
- pulumiverse_scaleway/loadbalancer_frontend.py +10 -4
- pulumiverse_scaleway/loadbalancer_ip.py +10 -4
- pulumiverse_scaleway/loadbalancer_route.py +26 -20
- pulumiverse_scaleway/loadbalancers/__init__.py +28 -0
- pulumiverse_scaleway/loadbalancers/_inputs.py +1103 -0
- pulumiverse_scaleway/loadbalancers/acl.py +522 -0
- pulumiverse_scaleway/loadbalancers/backend.py +1590 -0
- pulumiverse_scaleway/loadbalancers/certificate.py +462 -0
- pulumiverse_scaleway/loadbalancers/frontend.py +831 -0
- pulumiverse_scaleway/loadbalancers/get_acls.py +198 -0
- pulumiverse_scaleway/loadbalancers/get_backend.py +486 -0
- pulumiverse_scaleway/loadbalancers/get_backends.py +196 -0
- pulumiverse_scaleway/loadbalancers/get_certificate.py +230 -0
- pulumiverse_scaleway/loadbalancers/get_frontend.py +274 -0
- pulumiverse_scaleway/loadbalancers/get_frontends.py +196 -0
- pulumiverse_scaleway/loadbalancers/get_ip.py +228 -0
- pulumiverse_scaleway/loadbalancers/get_ips.py +198 -0
- pulumiverse_scaleway/loadbalancers/get_load_balancer.py +339 -0
- pulumiverse_scaleway/loadbalancers/get_load_balancers.py +187 -0
- pulumiverse_scaleway/loadbalancers/get_route.py +217 -0
- pulumiverse_scaleway/loadbalancers/get_routes.py +179 -0
- pulumiverse_scaleway/loadbalancers/ip.py +516 -0
- pulumiverse_scaleway/loadbalancers/load_balancer.py +1063 -0
- pulumiverse_scaleway/loadbalancers/outputs.py +2491 -0
- pulumiverse_scaleway/loadbalancers/route.py +525 -0
- pulumiverse_scaleway/mnq/__init__.py +20 -0
- pulumiverse_scaleway/mnq/_inputs.py +169 -0
- pulumiverse_scaleway/mnq/get_sns.py +150 -0
- pulumiverse_scaleway/mnq/get_sqs.py +150 -0
- pulumiverse_scaleway/mnq/nats_account.py +336 -0
- pulumiverse_scaleway/mnq/nats_credentials.py +332 -0
- pulumiverse_scaleway/mnq/outputs.py +149 -0
- pulumiverse_scaleway/mnq/sns.py +308 -0
- pulumiverse_scaleway/mnq/sns_credentials.py +415 -0
- pulumiverse_scaleway/mnq/sns_topic.py +661 -0
- pulumiverse_scaleway/mnq/sns_topic_subscription.py +701 -0
- pulumiverse_scaleway/mnq/sqs.py +306 -0
- pulumiverse_scaleway/mnq/sqs_credentials.py +415 -0
- pulumiverse_scaleway/mnq/sqs_queue.py +802 -0
- pulumiverse_scaleway/mnq_nats_account.py +8 -2
- pulumiverse_scaleway/mnq_nats_credentials.py +10 -4
- pulumiverse_scaleway/mnq_sns.py +12 -6
- pulumiverse_scaleway/mnq_sns_credentials.py +10 -4
- pulumiverse_scaleway/mnq_sns_topic.py +16 -10
- pulumiverse_scaleway/mnq_sns_topic_subscription.py +22 -16
- pulumiverse_scaleway/mnq_sqs.py +12 -6
- pulumiverse_scaleway/mnq_sqs_credentials.py +10 -4
- pulumiverse_scaleway/mnq_sqs_queue.py +16 -10
- pulumiverse_scaleway/mongo_db_instance.py +14 -8
- pulumiverse_scaleway/mongo_db_snapshot.py +8 -2
- pulumiverse_scaleway/mongodb/__init__.py +12 -0
- pulumiverse_scaleway/mongodb/_inputs.py +208 -0
- pulumiverse_scaleway/mongodb/get_instance.py +335 -0
- pulumiverse_scaleway/mongodb/instance.py +1000 -0
- pulumiverse_scaleway/mongodb/outputs.py +270 -0
- pulumiverse_scaleway/mongodb/snapshot.py +523 -0
- pulumiverse_scaleway/network/__init__.py +29 -0
- pulumiverse_scaleway/network/_inputs.py +383 -0
- pulumiverse_scaleway/network/gateway_network.py +868 -0
- pulumiverse_scaleway/network/get_gateway_network.py +287 -0
- pulumiverse_scaleway/network/get_private_network.py +282 -0
- pulumiverse_scaleway/network/get_public_gateway.py +304 -0
- pulumiverse_scaleway/network/get_public_gateway_dhcp.py +305 -0
- pulumiverse_scaleway/network/get_public_gateway_dhcp_reservation.py +382 -0
- pulumiverse_scaleway/network/get_public_gateway_ip.py +199 -0
- pulumiverse_scaleway/network/get_public_gateway_pat_rule.py +313 -0
- pulumiverse_scaleway/network/get_routes.py +208 -0
- pulumiverse_scaleway/network/get_vpc.py +246 -0
- pulumiverse_scaleway/network/get_vpcs.py +174 -0
- pulumiverse_scaleway/network/outputs.py +747 -0
- pulumiverse_scaleway/network/private_network.py +736 -0
- pulumiverse_scaleway/network/public_gateway.py +791 -0
- pulumiverse_scaleway/network/public_gateway_dhcp.py +949 -0
- pulumiverse_scaleway/network/public_gateway_dhcp_reservation.py +516 -0
- pulumiverse_scaleway/network/public_gateway_ip.py +459 -0
- pulumiverse_scaleway/network/public_gateway_ip_reverse_dns.py +308 -0
- pulumiverse_scaleway/network/public_gateway_pat_rule.py +593 -0
- pulumiverse_scaleway/network/route.py +579 -0
- pulumiverse_scaleway/network/vpc.py +538 -0
- pulumiverse_scaleway/object/__init__.py +17 -0
- pulumiverse_scaleway/object/_inputs.py +831 -0
- pulumiverse_scaleway/object/bucket.py +876 -0
- pulumiverse_scaleway/object/bucket_acl.py +598 -0
- pulumiverse_scaleway/object/bucket_lock_configuration.py +397 -0
- pulumiverse_scaleway/object/bucket_policy.py +675 -0
- pulumiverse_scaleway/object/bucket_website_configuration.py +536 -0
- pulumiverse_scaleway/object/get_bucket.py +290 -0
- pulumiverse_scaleway/object/get_bucket_policy.py +163 -0
- pulumiverse_scaleway/object/item.py +778 -0
- pulumiverse_scaleway/object/outputs.py +802 -0
- pulumiverse_scaleway/object_bucket.py +28 -22
- pulumiverse_scaleway/object_bucket_acl.py +14 -8
- pulumiverse_scaleway/object_bucket_lock_configuration.py +12 -6
- pulumiverse_scaleway/object_bucket_policy.py +46 -40
- pulumiverse_scaleway/object_bucket_website_configuration.py +18 -12
- pulumiverse_scaleway/object_item.py +8 -2
- pulumiverse_scaleway/observability/__init__.py +17 -0
- pulumiverse_scaleway/observability/_inputs.py +417 -0
- pulumiverse_scaleway/observability/alert_manager.py +403 -0
- pulumiverse_scaleway/observability/cockpit.py +325 -0
- pulumiverse_scaleway/observability/get_instance.py +205 -0
- pulumiverse_scaleway/observability/get_plan.py +125 -0
- pulumiverse_scaleway/observability/get_source.py +262 -0
- pulumiverse_scaleway/observability/grafana_user.py +364 -0
- pulumiverse_scaleway/observability/outputs.py +425 -0
- pulumiverse_scaleway/observability/source.py +569 -0
- pulumiverse_scaleway/observability/token.py +481 -0
- pulumiverse_scaleway/pulumi-plugin.json +1 -1
- pulumiverse_scaleway/rdb_snapshot.py +16 -10
- pulumiverse_scaleway/redis/__init__.py +11 -0
- pulumiverse_scaleway/redis/_inputs.py +330 -0
- pulumiverse_scaleway/redis/cluster.py +1203 -0
- pulumiverse_scaleway/redis/get_cluster.py +347 -0
- pulumiverse_scaleway/redis/outputs.py +356 -0
- pulumiverse_scaleway/redis_cluster.py +14 -8
- pulumiverse_scaleway/registry/__init__.py +11 -0
- pulumiverse_scaleway/registry/get_image.py +239 -0
- pulumiverse_scaleway/registry/get_image_tag.py +229 -0
- pulumiverse_scaleway/registry/get_namespace.py +199 -0
- pulumiverse_scaleway/registry/namespace.py +460 -0
- pulumiverse_scaleway/registry_namespace.py +8 -2
- pulumiverse_scaleway/sdb_database.py +10 -4
- pulumiverse_scaleway/secret.py +6 -0
- pulumiverse_scaleway/secret_version.py +12 -6
- pulumiverse_scaleway/secrets/__init__.py +13 -0
- pulumiverse_scaleway/secrets/_inputs.py +94 -0
- pulumiverse_scaleway/secrets/get_secret.py +338 -0
- pulumiverse_scaleway/secrets/get_version.py +340 -0
- pulumiverse_scaleway/secrets/outputs.py +120 -0
- pulumiverse_scaleway/secrets/secret.py +665 -0
- pulumiverse_scaleway/secrets/version.py +489 -0
- pulumiverse_scaleway/tem/__init__.py +13 -0
- pulumiverse_scaleway/tem/_inputs.py +135 -0
- pulumiverse_scaleway/tem/domain.py +1032 -0
- pulumiverse_scaleway/tem/domain_validation.py +305 -0
- pulumiverse_scaleway/tem/get_domain.py +378 -0
- pulumiverse_scaleway/tem/outputs.py +171 -0
- pulumiverse_scaleway/tem/webhook.py +642 -0
- pulumiverse_scaleway/tem_domain.py +20 -14
- pulumiverse_scaleway/tem_domain_validation.py +10 -4
- pulumiverse_scaleway/tem_webhook.py +28 -22
- pulumiverse_scaleway/vpc.py +10 -4
- pulumiverse_scaleway/vpc_gateway_network.py +40 -34
- pulumiverse_scaleway/vpc_private_network.py +10 -4
- pulumiverse_scaleway/vpc_public_gateway.py +8 -2
- pulumiverse_scaleway/vpc_public_gateway_dhcp.py +15 -9
- pulumiverse_scaleway/vpc_public_gateway_dhcp_reservation.py +32 -26
- pulumiverse_scaleway/vpc_public_gateway_ip.py +10 -4
- pulumiverse_scaleway/vpc_public_gateway_ip_reverse_dns.py +12 -6
- pulumiverse_scaleway/vpc_public_gateway_pat_rule.py +26 -20
- pulumiverse_scaleway/vpc_route.py +16 -10
- pulumiverse_scaleway/webhosting.py +10 -4
- {pulumiverse_scaleway-1.25.0a1742288097.dist-info → pulumiverse_scaleway-1.25.0a1742668904.dist-info}/METADATA +2 -2
- pulumiverse_scaleway-1.25.0a1742668904.dist-info/RECORD +470 -0
- {pulumiverse_scaleway-1.25.0a1742288097.dist-info → pulumiverse_scaleway-1.25.0a1742668904.dist-info}/WHEEL +1 -1
- pulumiverse_scaleway-1.25.0a1742288097.dist-info/RECORD +0 -206
- {pulumiverse_scaleway-1.25.0a1742288097.dist-info → pulumiverse_scaleway-1.25.0a1742668904.dist-info}/top_level.txt +0 -0
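The listing above shows that this release adds namespaced submodules (account, kubernetes, loadbalancers, observability, …) alongside the existing flat, top-level resource modules, which are still shipped. A minimal sketch of what the two import styles would look like; the exact class names re-exported by the new submodules (e.g. account/project.py exposing a Project class) are an assumption based on the file paths, not something shown in this diff:

import pulumi
import pulumiverse_scaleway as scaleway

# Legacy flat layout (still present in this version): one module per resource.
legacy_project = scaleway.AccountProject("legacy-project", name="my-project")

# New namespaced layout added in this release (assumed re-export:
# pulumiverse_scaleway/account/project.py -> scaleway.account.Project).
namespaced_project = scaleway.account.Project("namespaced-project", name="my-project")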
pulumiverse_scaleway/kubernetes/outputs.py (new file)
@@ -0,0 +1,944 @@
|
|
1
|
+
# coding=utf-8
|
2
|
+
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
|
3
|
+
# *** Do not edit by hand unless you're certain you know what you are doing! ***
|
4
|
+
|
5
|
+
import copy
|
6
|
+
import warnings
|
7
|
+
import sys
|
8
|
+
import pulumi
|
9
|
+
import pulumi.runtime
|
10
|
+
from typing import Any, Mapping, Optional, Sequence, Union, overload
|
11
|
+
if sys.version_info >= (3, 11):
|
12
|
+
from typing import NotRequired, TypedDict, TypeAlias
|
13
|
+
else:
|
14
|
+
from typing_extensions import NotRequired, TypedDict, TypeAlias
|
15
|
+
from .. import _utilities
|
16
|
+
|
17
|
+
__all__ = [
|
18
|
+
'ClusterAutoUpgrade',
|
19
|
+
'ClusterAutoscalerConfig',
|
20
|
+
'ClusterKubeconfig',
|
21
|
+
'ClusterOpenIdConnectConfig',
|
22
|
+
'PoolNode',
|
23
|
+
'PoolUpgradePolicy',
|
24
|
+
'GetClusterAutoUpgradeResult',
|
25
|
+
'GetClusterAutoscalerConfigResult',
|
26
|
+
'GetClusterKubeconfigResult',
|
27
|
+
'GetClusterOpenIdConnectConfigResult',
|
28
|
+
'GetPoolNodeResult',
|
29
|
+
'GetPoolUpgradePolicyResult',
|
30
|
+
]
|
31
|
+
|
32
|
+
@pulumi.output_type
|
33
|
+
class ClusterAutoUpgrade(dict):
|
34
|
+
@staticmethod
|
35
|
+
def __key_warning(key: str):
|
36
|
+
suggest = None
|
37
|
+
if key == "maintenanceWindowDay":
|
38
|
+
suggest = "maintenance_window_day"
|
39
|
+
elif key == "maintenanceWindowStartHour":
|
40
|
+
suggest = "maintenance_window_start_hour"
|
41
|
+
|
42
|
+
if suggest:
|
43
|
+
pulumi.log.warn(f"Key '{key}' not found in ClusterAutoUpgrade. Access the value via the '{suggest}' property getter instead.")
|
44
|
+
|
45
|
+
def __getitem__(self, key: str) -> Any:
|
46
|
+
ClusterAutoUpgrade.__key_warning(key)
|
47
|
+
return super().__getitem__(key)
|
48
|
+
|
49
|
+
def get(self, key: str, default = None) -> Any:
|
50
|
+
ClusterAutoUpgrade.__key_warning(key)
|
51
|
+
return super().get(key, default)
|
52
|
+
|
53
|
+
def __init__(__self__, *,
|
54
|
+
enable: bool,
|
55
|
+
maintenance_window_day: str,
|
56
|
+
maintenance_window_start_hour: int):
|
57
|
+
"""
|
58
|
+
:param bool enable: Set to `true` to enable Kubernetes patch version auto upgrades.
|
59
|
+
> **Important:** When enabling auto upgrades, the `version` field take a minor version like x.y (ie 1.18).
|
60
|
+
:param str maintenance_window_day: The day of the auto upgrade maintenance window (`monday` to `sunday`, or `any`).
|
61
|
+
:param int maintenance_window_start_hour: The start hour (UTC) of the 2-hour auto upgrade maintenance window (0 to 23).
|
62
|
+
"""
|
63
|
+
pulumi.set(__self__, "enable", enable)
|
64
|
+
pulumi.set(__self__, "maintenance_window_day", maintenance_window_day)
|
65
|
+
pulumi.set(__self__, "maintenance_window_start_hour", maintenance_window_start_hour)
|
66
|
+
|
67
|
+
@property
|
68
|
+
@pulumi.getter
|
69
|
+
def enable(self) -> bool:
|
70
|
+
"""
|
71
|
+
Set to `true` to enable Kubernetes patch version auto upgrades.
|
72
|
+
> **Important:** When enabling auto upgrades, the `version` field take a minor version like x.y (ie 1.18).
|
73
|
+
"""
|
74
|
+
return pulumi.get(self, "enable")
|
75
|
+
|
76
|
+
@property
|
77
|
+
@pulumi.getter(name="maintenanceWindowDay")
|
78
|
+
def maintenance_window_day(self) -> str:
|
79
|
+
"""
|
80
|
+
The day of the auto upgrade maintenance window (`monday` to `sunday`, or `any`).
|
81
|
+
"""
|
82
|
+
return pulumi.get(self, "maintenance_window_day")
|
83
|
+
|
84
|
+
@property
|
85
|
+
@pulumi.getter(name="maintenanceWindowStartHour")
|
86
|
+
def maintenance_window_start_hour(self) -> int:
|
87
|
+
"""
|
88
|
+
The start hour (UTC) of the 2-hour auto upgrade maintenance window (0 to 23).
|
89
|
+
"""
|
90
|
+
return pulumi.get(self, "maintenance_window_start_hour")
|
91
|
+
|
92
|
+
|
93
|
+
@pulumi.output_type
|
94
|
+
class ClusterAutoscalerConfig(dict):
|
95
|
+
@staticmethod
|
96
|
+
def __key_warning(key: str):
|
97
|
+
suggest = None
|
98
|
+
if key == "balanceSimilarNodeGroups":
|
99
|
+
suggest = "balance_similar_node_groups"
|
100
|
+
elif key == "disableScaleDown":
|
101
|
+
suggest = "disable_scale_down"
|
102
|
+
elif key == "expendablePodsPriorityCutoff":
|
103
|
+
suggest = "expendable_pods_priority_cutoff"
|
104
|
+
elif key == "ignoreDaemonsetsUtilization":
|
105
|
+
suggest = "ignore_daemonsets_utilization"
|
106
|
+
elif key == "maxGracefulTerminationSec":
|
107
|
+
suggest = "max_graceful_termination_sec"
|
108
|
+
elif key == "scaleDownDelayAfterAdd":
|
109
|
+
suggest = "scale_down_delay_after_add"
|
110
|
+
elif key == "scaleDownUnneededTime":
|
111
|
+
suggest = "scale_down_unneeded_time"
|
112
|
+
elif key == "scaleDownUtilizationThreshold":
|
113
|
+
suggest = "scale_down_utilization_threshold"
|
114
|
+
|
115
|
+
if suggest:
|
116
|
+
pulumi.log.warn(f"Key '{key}' not found in ClusterAutoscalerConfig. Access the value via the '{suggest}' property getter instead.")
|
117
|
+
|
118
|
+
def __getitem__(self, key: str) -> Any:
|
119
|
+
ClusterAutoscalerConfig.__key_warning(key)
|
120
|
+
return super().__getitem__(key)
|
121
|
+
|
122
|
+
def get(self, key: str, default = None) -> Any:
|
123
|
+
ClusterAutoscalerConfig.__key_warning(key)
|
124
|
+
return super().get(key, default)
|
125
|
+
|
126
|
+
def __init__(__self__, *,
|
127
|
+
balance_similar_node_groups: Optional[bool] = None,
|
128
|
+
disable_scale_down: Optional[bool] = None,
|
129
|
+
estimator: Optional[str] = None,
|
130
|
+
expander: Optional[str] = None,
|
131
|
+
expendable_pods_priority_cutoff: Optional[int] = None,
|
132
|
+
ignore_daemonsets_utilization: Optional[bool] = None,
|
133
|
+
max_graceful_termination_sec: Optional[int] = None,
|
134
|
+
scale_down_delay_after_add: Optional[str] = None,
|
135
|
+
scale_down_unneeded_time: Optional[str] = None,
|
136
|
+
scale_down_utilization_threshold: Optional[float] = None):
|
137
|
+
"""
|
138
|
+
:param bool balance_similar_node_groups: Detect similar node groups and balance the number of nodes between them.
|
139
|
+
:param bool disable_scale_down: Disables the scale down feature of the autoscaler.
|
140
|
+
:param str estimator: Type of resource estimator to be used in scale up.
|
141
|
+
:param str expander: Type of node group expander to be used in scale up.
|
142
|
+
:param int expendable_pods_priority_cutoff: Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.
|
143
|
+
:param bool ignore_daemonsets_utilization: Ignore DaemonSet pods when calculating resource utilization for scaling down.
|
144
|
+
:param int max_graceful_termination_sec: Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node
|
145
|
+
:param str scale_down_delay_after_add: How long after scale up that scale down evaluation resumes.
|
146
|
+
:param str scale_down_unneeded_time: How long a node should be unneeded before it is eligible for scale down.
|
147
|
+
:param float scale_down_utilization_threshold: Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down
|
148
|
+
"""
|
149
|
+
if balance_similar_node_groups is not None:
|
150
|
+
pulumi.set(__self__, "balance_similar_node_groups", balance_similar_node_groups)
|
151
|
+
if disable_scale_down is not None:
|
152
|
+
pulumi.set(__self__, "disable_scale_down", disable_scale_down)
|
153
|
+
if estimator is not None:
|
154
|
+
pulumi.set(__self__, "estimator", estimator)
|
155
|
+
if expander is not None:
|
156
|
+
pulumi.set(__self__, "expander", expander)
|
157
|
+
if expendable_pods_priority_cutoff is not None:
|
158
|
+
pulumi.set(__self__, "expendable_pods_priority_cutoff", expendable_pods_priority_cutoff)
|
159
|
+
if ignore_daemonsets_utilization is not None:
|
160
|
+
pulumi.set(__self__, "ignore_daemonsets_utilization", ignore_daemonsets_utilization)
|
161
|
+
if max_graceful_termination_sec is not None:
|
162
|
+
pulumi.set(__self__, "max_graceful_termination_sec", max_graceful_termination_sec)
|
163
|
+
if scale_down_delay_after_add is not None:
|
164
|
+
pulumi.set(__self__, "scale_down_delay_after_add", scale_down_delay_after_add)
|
165
|
+
if scale_down_unneeded_time is not None:
|
166
|
+
pulumi.set(__self__, "scale_down_unneeded_time", scale_down_unneeded_time)
|
167
|
+
if scale_down_utilization_threshold is not None:
|
168
|
+
pulumi.set(__self__, "scale_down_utilization_threshold", scale_down_utilization_threshold)
|
169
|
+
|
170
|
+
@property
|
171
|
+
@pulumi.getter(name="balanceSimilarNodeGroups")
|
172
|
+
def balance_similar_node_groups(self) -> Optional[bool]:
|
173
|
+
"""
|
174
|
+
Detect similar node groups and balance the number of nodes between them.
|
175
|
+
"""
|
176
|
+
return pulumi.get(self, "balance_similar_node_groups")
|
177
|
+
|
178
|
+
@property
|
179
|
+
@pulumi.getter(name="disableScaleDown")
|
180
|
+
def disable_scale_down(self) -> Optional[bool]:
|
181
|
+
"""
|
182
|
+
Disables the scale down feature of the autoscaler.
|
183
|
+
"""
|
184
|
+
return pulumi.get(self, "disable_scale_down")
|
185
|
+
|
186
|
+
@property
|
187
|
+
@pulumi.getter
|
188
|
+
def estimator(self) -> Optional[str]:
|
189
|
+
"""
|
190
|
+
Type of resource estimator to be used in scale up.
|
191
|
+
"""
|
192
|
+
return pulumi.get(self, "estimator")
|
193
|
+
|
194
|
+
@property
|
195
|
+
@pulumi.getter
|
196
|
+
def expander(self) -> Optional[str]:
|
197
|
+
"""
|
198
|
+
Type of node group expander to be used in scale up.
|
199
|
+
"""
|
200
|
+
return pulumi.get(self, "expander")
|
201
|
+
|
202
|
+
@property
|
203
|
+
@pulumi.getter(name="expendablePodsPriorityCutoff")
|
204
|
+
def expendable_pods_priority_cutoff(self) -> Optional[int]:
|
205
|
+
"""
|
206
|
+
Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.
|
207
|
+
"""
|
208
|
+
return pulumi.get(self, "expendable_pods_priority_cutoff")
|
209
|
+
|
210
|
+
@property
|
211
|
+
@pulumi.getter(name="ignoreDaemonsetsUtilization")
|
212
|
+
def ignore_daemonsets_utilization(self) -> Optional[bool]:
|
213
|
+
"""
|
214
|
+
Ignore DaemonSet pods when calculating resource utilization for scaling down.
|
215
|
+
"""
|
216
|
+
return pulumi.get(self, "ignore_daemonsets_utilization")
|
217
|
+
|
218
|
+
@property
|
219
|
+
@pulumi.getter(name="maxGracefulTerminationSec")
|
220
|
+
def max_graceful_termination_sec(self) -> Optional[int]:
|
221
|
+
"""
|
222
|
+
Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node
|
223
|
+
"""
|
224
|
+
return pulumi.get(self, "max_graceful_termination_sec")
|
225
|
+
|
226
|
+
@property
|
227
|
+
@pulumi.getter(name="scaleDownDelayAfterAdd")
|
228
|
+
def scale_down_delay_after_add(self) -> Optional[str]:
|
229
|
+
"""
|
230
|
+
How long after scale up that scale down evaluation resumes.
|
231
|
+
"""
|
232
|
+
return pulumi.get(self, "scale_down_delay_after_add")
|
233
|
+
|
234
|
+
@property
|
235
|
+
@pulumi.getter(name="scaleDownUnneededTime")
|
236
|
+
def scale_down_unneeded_time(self) -> Optional[str]:
|
237
|
+
"""
|
238
|
+
How long a node should be unneeded before it is eligible for scale down.
|
239
|
+
"""
|
240
|
+
return pulumi.get(self, "scale_down_unneeded_time")
|
241
|
+
|
242
|
+
@property
|
243
|
+
@pulumi.getter(name="scaleDownUtilizationThreshold")
|
244
|
+
def scale_down_utilization_threshold(self) -> Optional[float]:
|
245
|
+
"""
|
246
|
+
Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down
|
247
|
+
"""
|
248
|
+
return pulumi.get(self, "scale_down_utilization_threshold")
|
249
|
+
|
250
|
+
|
251
|
+
@pulumi.output_type
|
252
|
+
class ClusterKubeconfig(dict):
|
253
|
+
@staticmethod
|
254
|
+
def __key_warning(key: str):
|
255
|
+
suggest = None
|
256
|
+
if key == "clusterCaCertificate":
|
257
|
+
suggest = "cluster_ca_certificate"
|
258
|
+
elif key == "configFile":
|
259
|
+
suggest = "config_file"
|
260
|
+
|
261
|
+
if suggest:
|
262
|
+
pulumi.log.warn(f"Key '{key}' not found in ClusterKubeconfig. Access the value via the '{suggest}' property getter instead.")
|
263
|
+
|
264
|
+
def __getitem__(self, key: str) -> Any:
|
265
|
+
ClusterKubeconfig.__key_warning(key)
|
266
|
+
return super().__getitem__(key)
|
267
|
+
|
268
|
+
def get(self, key: str, default = None) -> Any:
|
269
|
+
ClusterKubeconfig.__key_warning(key)
|
270
|
+
return super().get(key, default)
|
271
|
+
|
272
|
+
def __init__(__self__, *,
|
273
|
+
cluster_ca_certificate: Optional[str] = None,
|
274
|
+
config_file: Optional[str] = None,
|
275
|
+
host: Optional[str] = None,
|
276
|
+
token: Optional[str] = None):
|
277
|
+
"""
|
278
|
+
:param str cluster_ca_certificate: The CA certificate of the Kubernetes API server.
|
279
|
+
:param str config_file: The raw kubeconfig file.
|
280
|
+
:param str host: The URL of the Kubernetes API server.
|
281
|
+
:param str token: The token to connect to the Kubernetes API server.
|
282
|
+
"""
|
283
|
+
if cluster_ca_certificate is not None:
|
284
|
+
pulumi.set(__self__, "cluster_ca_certificate", cluster_ca_certificate)
|
285
|
+
if config_file is not None:
|
286
|
+
pulumi.set(__self__, "config_file", config_file)
|
287
|
+
if host is not None:
|
288
|
+
pulumi.set(__self__, "host", host)
|
289
|
+
if token is not None:
|
290
|
+
pulumi.set(__self__, "token", token)
|
291
|
+
|
292
|
+
@property
|
293
|
+
@pulumi.getter(name="clusterCaCertificate")
|
294
|
+
def cluster_ca_certificate(self) -> Optional[str]:
|
295
|
+
"""
|
296
|
+
The CA certificate of the Kubernetes API server.
|
297
|
+
"""
|
298
|
+
return pulumi.get(self, "cluster_ca_certificate")
|
299
|
+
|
300
|
+
@property
|
301
|
+
@pulumi.getter(name="configFile")
|
302
|
+
def config_file(self) -> Optional[str]:
|
303
|
+
"""
|
304
|
+
The raw kubeconfig file.
|
305
|
+
"""
|
306
|
+
return pulumi.get(self, "config_file")
|
307
|
+
|
308
|
+
@property
|
309
|
+
@pulumi.getter
|
310
|
+
def host(self) -> Optional[str]:
|
311
|
+
"""
|
312
|
+
The URL of the Kubernetes API server.
|
313
|
+
"""
|
314
|
+
return pulumi.get(self, "host")
|
315
|
+
|
316
|
+
@property
|
317
|
+
@pulumi.getter
|
318
|
+
def token(self) -> Optional[str]:
|
319
|
+
"""
|
320
|
+
The token to connect to the Kubernetes API server.
|
321
|
+
"""
|
322
|
+
return pulumi.get(self, "token")
|
323
|
+
|
324
|
+
|
325
|
+
@pulumi.output_type
|
326
|
+
class ClusterOpenIdConnectConfig(dict):
|
327
|
+
@staticmethod
|
328
|
+
def __key_warning(key: str):
|
329
|
+
suggest = None
|
330
|
+
if key == "clientId":
|
331
|
+
suggest = "client_id"
|
332
|
+
elif key == "issuerUrl":
|
333
|
+
suggest = "issuer_url"
|
334
|
+
elif key == "groupsClaims":
|
335
|
+
suggest = "groups_claims"
|
336
|
+
elif key == "groupsPrefix":
|
337
|
+
suggest = "groups_prefix"
|
338
|
+
elif key == "requiredClaims":
|
339
|
+
suggest = "required_claims"
|
340
|
+
elif key == "usernameClaim":
|
341
|
+
suggest = "username_claim"
|
342
|
+
elif key == "usernamePrefix":
|
343
|
+
suggest = "username_prefix"
|
344
|
+
|
345
|
+
if suggest:
|
346
|
+
pulumi.log.warn(f"Key '{key}' not found in ClusterOpenIdConnectConfig. Access the value via the '{suggest}' property getter instead.")
|
347
|
+
|
348
|
+
def __getitem__(self, key: str) -> Any:
|
349
|
+
ClusterOpenIdConnectConfig.__key_warning(key)
|
350
|
+
return super().__getitem__(key)
|
351
|
+
|
352
|
+
def get(self, key: str, default = None) -> Any:
|
353
|
+
ClusterOpenIdConnectConfig.__key_warning(key)
|
354
|
+
return super().get(key, default)
|
355
|
+
|
356
|
+
def __init__(__self__, *,
|
357
|
+
client_id: str,
|
358
|
+
issuer_url: str,
|
359
|
+
groups_claims: Optional[Sequence[str]] = None,
|
360
|
+
groups_prefix: Optional[str] = None,
|
361
|
+
required_claims: Optional[Sequence[str]] = None,
|
362
|
+
username_claim: Optional[str] = None,
|
363
|
+
username_prefix: Optional[str] = None):
|
364
|
+
"""
|
365
|
+
:param str client_id: A client id that all tokens must be issued for
|
366
|
+
:param str issuer_url: URL of the provider which allows the API server to discover public signing keys
|
367
|
+
:param Sequence[str] groups_claims: JWT claim to use as the user's group
|
368
|
+
:param str groups_prefix: Prefix prepended to group claims
|
369
|
+
:param Sequence[str] required_claims: Multiple key=value pairs that describes a required claim in the ID Token
|
370
|
+
:param str username_claim: JWT claim to use as the user name
|
371
|
+
:param str username_prefix: Prefix prepended to username
|
372
|
+
"""
|
373
|
+
pulumi.set(__self__, "client_id", client_id)
|
374
|
+
pulumi.set(__self__, "issuer_url", issuer_url)
|
375
|
+
if groups_claims is not None:
|
376
|
+
pulumi.set(__self__, "groups_claims", groups_claims)
|
377
|
+
if groups_prefix is not None:
|
378
|
+
pulumi.set(__self__, "groups_prefix", groups_prefix)
|
379
|
+
if required_claims is not None:
|
380
|
+
pulumi.set(__self__, "required_claims", required_claims)
|
381
|
+
if username_claim is not None:
|
382
|
+
pulumi.set(__self__, "username_claim", username_claim)
|
383
|
+
if username_prefix is not None:
|
384
|
+
pulumi.set(__self__, "username_prefix", username_prefix)
|
385
|
+
|
386
|
+
@property
|
387
|
+
@pulumi.getter(name="clientId")
|
388
|
+
def client_id(self) -> str:
|
389
|
+
"""
|
390
|
+
A client id that all tokens must be issued for
|
391
|
+
"""
|
392
|
+
return pulumi.get(self, "client_id")
|
393
|
+
|
394
|
+
@property
|
395
|
+
@pulumi.getter(name="issuerUrl")
|
396
|
+
def issuer_url(self) -> str:
|
397
|
+
"""
|
398
|
+
URL of the provider which allows the API server to discover public signing keys
|
399
|
+
"""
|
400
|
+
return pulumi.get(self, "issuer_url")
|
401
|
+
|
402
|
+
@property
|
403
|
+
@pulumi.getter(name="groupsClaims")
|
404
|
+
def groups_claims(self) -> Optional[Sequence[str]]:
|
405
|
+
"""
|
406
|
+
JWT claim to use as the user's group
|
407
|
+
"""
|
408
|
+
return pulumi.get(self, "groups_claims")
|
409
|
+
|
410
|
+
@property
|
411
|
+
@pulumi.getter(name="groupsPrefix")
|
412
|
+
def groups_prefix(self) -> Optional[str]:
|
413
|
+
"""
|
414
|
+
Prefix prepended to group claims
|
415
|
+
"""
|
416
|
+
return pulumi.get(self, "groups_prefix")
|
417
|
+
|
418
|
+
@property
|
419
|
+
@pulumi.getter(name="requiredClaims")
|
420
|
+
def required_claims(self) -> Optional[Sequence[str]]:
|
421
|
+
"""
|
422
|
+
Multiple key=value pairs that describes a required claim in the ID Token
|
423
|
+
"""
|
424
|
+
return pulumi.get(self, "required_claims")
|
425
|
+
|
426
|
+
@property
|
427
|
+
@pulumi.getter(name="usernameClaim")
|
428
|
+
def username_claim(self) -> Optional[str]:
|
429
|
+
"""
|
430
|
+
JWT claim to use as the user name
|
431
|
+
"""
|
432
|
+
return pulumi.get(self, "username_claim")
|
433
|
+
|
434
|
+
@property
|
435
|
+
@pulumi.getter(name="usernamePrefix")
|
436
|
+
def username_prefix(self) -> Optional[str]:
|
437
|
+
"""
|
438
|
+
Prefix prepended to username
|
439
|
+
"""
|
440
|
+
return pulumi.get(self, "username_prefix")
|
441
|
+
|
442
|
+
|
443
|
+
@pulumi.output_type
|
444
|
+
class PoolNode(dict):
|
445
|
+
@staticmethod
|
446
|
+
def __key_warning(key: str):
|
447
|
+
suggest = None
|
448
|
+
if key == "publicIp":
|
449
|
+
suggest = "public_ip"
|
450
|
+
elif key == "publicIpV6":
|
451
|
+
suggest = "public_ip_v6"
|
452
|
+
|
453
|
+
if suggest:
|
454
|
+
pulumi.log.warn(f"Key '{key}' not found in PoolNode. Access the value via the '{suggest}' property getter instead.")
|
455
|
+
|
456
|
+
def __getitem__(self, key: str) -> Any:
|
457
|
+
PoolNode.__key_warning(key)
|
458
|
+
return super().__getitem__(key)
|
459
|
+
|
460
|
+
def get(self, key: str, default = None) -> Any:
|
461
|
+
PoolNode.__key_warning(key)
|
462
|
+
return super().get(key, default)
|
463
|
+
|
464
|
+
def __init__(__self__, *,
|
465
|
+
name: Optional[str] = None,
|
466
|
+
public_ip: Optional[str] = None,
|
467
|
+
public_ip_v6: Optional[str] = None,
|
468
|
+
status: Optional[str] = None):
|
469
|
+
"""
|
470
|
+
:param str name: The name for the pool.
|
471
|
+
|
472
|
+
> **Important:** Updates to this field will recreate a new resource.
|
473
|
+
:param str public_ip: The public IPv4. (Deprecated, Please use the official Kubernetes provider and the kubernetes_nodes data source)
|
474
|
+
:param str public_ip_v6: The public IPv6. (Deprecated, Please use the official Kubernetes provider and the kubernetes_nodes data source)
|
475
|
+
:param str status: The status of the node.
|
476
|
+
"""
|
477
|
+
if name is not None:
|
478
|
+
pulumi.set(__self__, "name", name)
|
479
|
+
if public_ip is not None:
|
480
|
+
pulumi.set(__self__, "public_ip", public_ip)
|
481
|
+
if public_ip_v6 is not None:
|
482
|
+
pulumi.set(__self__, "public_ip_v6", public_ip_v6)
|
483
|
+
if status is not None:
|
484
|
+
pulumi.set(__self__, "status", status)
|
485
|
+
|
486
|
+
@property
|
487
|
+
@pulumi.getter
|
488
|
+
def name(self) -> Optional[str]:
|
489
|
+
"""
|
490
|
+
The name for the pool.
|
491
|
+
|
492
|
+
> **Important:** Updates to this field will recreate a new resource.
|
493
|
+
"""
|
494
|
+
return pulumi.get(self, "name")
|
495
|
+
|
496
|
+
@property
|
497
|
+
@pulumi.getter(name="publicIp")
|
498
|
+
@_utilities.deprecated("""Please use the official Kubernetes provider and the kubernetes_nodes data source""")
|
499
|
+
def public_ip(self) -> Optional[str]:
|
500
|
+
"""
|
501
|
+
The public IPv4. (Deprecated, Please use the official Kubernetes provider and the kubernetes_nodes data source)
|
502
|
+
"""
|
503
|
+
return pulumi.get(self, "public_ip")
|
504
|
+
|
505
|
+
@property
|
506
|
+
@pulumi.getter(name="publicIpV6")
|
507
|
+
@_utilities.deprecated("""Please use the official Kubernetes provider and the kubernetes_nodes data source""")
|
508
|
+
def public_ip_v6(self) -> Optional[str]:
|
509
|
+
"""
|
510
|
+
The public IPv6. (Deprecated, Please use the official Kubernetes provider and the kubernetes_nodes data source)
|
511
|
+
"""
|
512
|
+
return pulumi.get(self, "public_ip_v6")
|
513
|
+
|
514
|
+
@property
|
515
|
+
@pulumi.getter
|
516
|
+
def status(self) -> Optional[str]:
|
517
|
+
"""
|
518
|
+
The status of the node.
|
519
|
+
"""
|
520
|
+
return pulumi.get(self, "status")
|
521
|
+
|
522
|
+
|
523
|
+
@pulumi.output_type
|
524
|
+
class PoolUpgradePolicy(dict):
|
525
|
+
@staticmethod
|
526
|
+
def __key_warning(key: str):
|
527
|
+
suggest = None
|
528
|
+
if key == "maxSurge":
|
529
|
+
suggest = "max_surge"
|
530
|
+
elif key == "maxUnavailable":
|
531
|
+
suggest = "max_unavailable"
|
532
|
+
|
533
|
+
if suggest:
|
534
|
+
pulumi.log.warn(f"Key '{key}' not found in PoolUpgradePolicy. Access the value via the '{suggest}' property getter instead.")
|
535
|
+
|
536
|
+
def __getitem__(self, key: str) -> Any:
|
537
|
+
PoolUpgradePolicy.__key_warning(key)
|
538
|
+
return super().__getitem__(key)
|
539
|
+
|
540
|
+
def get(self, key: str, default = None) -> Any:
|
541
|
+
PoolUpgradePolicy.__key_warning(key)
|
542
|
+
return super().get(key, default)
|
543
|
+
|
544
|
+
def __init__(__self__, *,
|
545
|
+
max_surge: Optional[int] = None,
|
546
|
+
max_unavailable: Optional[int] = None):
|
547
|
+
"""
|
548
|
+
:param int max_surge: The maximum number of nodes to be created during the upgrade
|
549
|
+
:param int max_unavailable: The maximum number of nodes that can be not ready at the same time
|
550
|
+
"""
|
551
|
+
if max_surge is not None:
|
552
|
+
pulumi.set(__self__, "max_surge", max_surge)
|
553
|
+
if max_unavailable is not None:
|
554
|
+
pulumi.set(__self__, "max_unavailable", max_unavailable)
|
555
|
+
|
556
|
+
@property
|
557
|
+
@pulumi.getter(name="maxSurge")
|
558
|
+
def max_surge(self) -> Optional[int]:
|
559
|
+
"""
|
560
|
+
The maximum number of nodes to be created during the upgrade
|
561
|
+
"""
|
562
|
+
return pulumi.get(self, "max_surge")
|
563
|
+
|
564
|
+
@property
|
565
|
+
@pulumi.getter(name="maxUnavailable")
|
566
|
+
def max_unavailable(self) -> Optional[int]:
|
567
|
+
"""
|
568
|
+
The maximum number of nodes that can be not ready at the same time
|
569
|
+
"""
|
570
|
+
return pulumi.get(self, "max_unavailable")
|
571
|
+
|
572
|
+
|
573
|
+
@pulumi.output_type
|
574
|
+
class GetClusterAutoUpgradeResult(dict):
|
575
|
+
def __init__(__self__, *,
|
576
|
+
enable: bool,
|
577
|
+
maintenance_window_day: str,
|
578
|
+
maintenance_window_start_hour: int):
|
579
|
+
"""
|
580
|
+
:param bool enable: True if Kubernetes patch version auto upgrades is enabled.
|
581
|
+
:param str maintenance_window_day: The day of the auto upgrade maintenance window (`monday` to `sunday`, or `any`).
|
582
|
+
:param int maintenance_window_start_hour: The start hour (UTC) of the 2-hour auto upgrade maintenance window (0 to 23).
|
583
|
+
"""
|
584
|
+
pulumi.set(__self__, "enable", enable)
|
585
|
+
pulumi.set(__self__, "maintenance_window_day", maintenance_window_day)
|
586
|
+
pulumi.set(__self__, "maintenance_window_start_hour", maintenance_window_start_hour)
|
587
|
+
|
588
|
+
@property
|
589
|
+
@pulumi.getter
|
590
|
+
def enable(self) -> bool:
|
591
|
+
"""
|
592
|
+
True if Kubernetes patch version auto upgrades is enabled.
|
593
|
+
"""
|
594
|
+
return pulumi.get(self, "enable")
|
595
|
+
|
596
|
+
@property
|
597
|
+
@pulumi.getter(name="maintenanceWindowDay")
|
598
|
+
def maintenance_window_day(self) -> str:
|
599
|
+
"""
|
600
|
+
The day of the auto upgrade maintenance window (`monday` to `sunday`, or `any`).
|
601
|
+
"""
|
602
|
+
return pulumi.get(self, "maintenance_window_day")
|
603
|
+
|
604
|
+
@property
|
605
|
+
@pulumi.getter(name="maintenanceWindowStartHour")
|
606
|
+
def maintenance_window_start_hour(self) -> int:
|
607
|
+
"""
|
608
|
+
The start hour (UTC) of the 2-hour auto upgrade maintenance window (0 to 23).
|
609
|
+
"""
|
610
|
+
return pulumi.get(self, "maintenance_window_start_hour")
|
611
|
+
|
612
|
+
|
613
|
+
@pulumi.output_type
|
614
|
+
class GetClusterAutoscalerConfigResult(dict):
|
615
|
+
def __init__(__self__, *,
|
616
|
+
balance_similar_node_groups: bool,
|
617
|
+
disable_scale_down: bool,
|
618
|
+
estimator: str,
|
619
|
+
expander: str,
|
620
|
+
expendable_pods_priority_cutoff: int,
|
621
|
+
ignore_daemonsets_utilization: bool,
|
622
|
+
max_graceful_termination_sec: int,
|
623
|
+
scale_down_delay_after_add: str,
|
624
|
+
scale_down_unneeded_time: str,
|
625
|
+
scale_down_utilization_threshold: float):
|
626
|
+
"""
|
627
|
+
:param bool balance_similar_node_groups: True if detecting similar node groups and balance the number of nodes between them is enabled.
|
628
|
+
:param bool disable_scale_down: True if the scale down feature of the autoscaler is disabled.
|
629
|
+
:param str estimator: The type of resource estimator used in scale up.
|
630
|
+
:param str expander: The type of node group expander be used in scale up.
|
631
|
+
:param int expendable_pods_priority_cutoff: Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.
|
632
|
+
:param bool ignore_daemonsets_utilization: True if ignoring DaemonSet pods when calculating resource utilization for scaling down is enabled.
|
633
|
+
:param int max_graceful_termination_sec: Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node
|
634
|
+
:param str scale_down_delay_after_add: The duration after scale up that scale down evaluation resumes.
|
635
|
+
:param str scale_down_unneeded_time: The duration a node should be unneeded before it is eligible for scale down.
|
636
|
+
:param float scale_down_utilization_threshold: Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down
|
637
|
+
"""
|
638
|
+
pulumi.set(__self__, "balance_similar_node_groups", balance_similar_node_groups)
|
639
|
+
pulumi.set(__self__, "disable_scale_down", disable_scale_down)
|
640
|
+
pulumi.set(__self__, "estimator", estimator)
|
641
|
+
pulumi.set(__self__, "expander", expander)
|
642
|
+
pulumi.set(__self__, "expendable_pods_priority_cutoff", expendable_pods_priority_cutoff)
|
643
|
+
pulumi.set(__self__, "ignore_daemonsets_utilization", ignore_daemonsets_utilization)
|
644
|
+
pulumi.set(__self__, "max_graceful_termination_sec", max_graceful_termination_sec)
|
645
|
        pulumi.set(__self__, "scale_down_delay_after_add", scale_down_delay_after_add)
        pulumi.set(__self__, "scale_down_unneeded_time", scale_down_unneeded_time)
        pulumi.set(__self__, "scale_down_utilization_threshold", scale_down_utilization_threshold)

    @property
    @pulumi.getter(name="balanceSimilarNodeGroups")
    def balance_similar_node_groups(self) -> bool:
        """
        True if detecting similar node groups and balancing the number of nodes between them is enabled.
        """
        return pulumi.get(self, "balance_similar_node_groups")

    @property
    @pulumi.getter(name="disableScaleDown")
    def disable_scale_down(self) -> bool:
        """
        True if the scale down feature of the autoscaler is disabled.
        """
        return pulumi.get(self, "disable_scale_down")

    @property
    @pulumi.getter
    def estimator(self) -> str:
        """
        The type of resource estimator used in scale up.
        """
        return pulumi.get(self, "estimator")

    @property
    @pulumi.getter
    def expander(self) -> str:
        """
        The type of node group expander to be used in scale up.
        """
        return pulumi.get(self, "expander")

    @property
    @pulumi.getter(name="expendablePodsPriorityCutoff")
    def expendable_pods_priority_cutoff(self) -> int:
        """
        Pods with priority below the cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable.
        """
        return pulumi.get(self, "expendable_pods_priority_cutoff")

    @property
    @pulumi.getter(name="ignoreDaemonsetsUtilization")
    def ignore_daemonsets_utilization(self) -> bool:
        """
        True if ignoring DaemonSet pods when calculating resource utilization for scaling down is enabled.
        """
        return pulumi.get(self, "ignore_daemonsets_utilization")

    @property
    @pulumi.getter(name="maxGracefulTerminationSec")
    def max_graceful_termination_sec(self) -> int:
        """
        Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node.
        """
        return pulumi.get(self, "max_graceful_termination_sec")

    @property
    @pulumi.getter(name="scaleDownDelayAfterAdd")
    def scale_down_delay_after_add(self) -> str:
        """
        The duration after scale up that scale down evaluation resumes.
        """
        return pulumi.get(self, "scale_down_delay_after_add")

    @property
    @pulumi.getter(name="scaleDownUnneededTime")
    def scale_down_unneeded_time(self) -> str:
        """
        The duration a node should be unneeded before it is eligible for scale down.
        """
        return pulumi.get(self, "scale_down_unneeded_time")

    @property
    @pulumi.getter(name="scaleDownUtilizationThreshold")
    def scale_down_utilization_threshold(self) -> float:
        """
        Node utilization level, defined as the sum of requested resources divided by capacity, below which a node can be considered for scale down.
        """
        return pulumi.get(self, "scale_down_utilization_threshold")

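The getters above expose the cluster autoscaler settings when an existing Kubernetes cluster is looked up. A minimal sketch of consuming them follows; the `kubernetes.get_cluster` data source and its `autoscaler_configs` attribute are assumptions inferred from the result type name and the namespaced-module pattern used elsewhere in this release, not confirmed by this diff.

# Illustrative sketch only: reading autoscaler settings from a cluster lookup.
# `kubernetes.get_cluster` and the `autoscaler_configs` attribute are assumptions
# inferred from the GetClusterAutoscalerConfigResult type above.
import pulumi
import pulumiverse_scaleway as scaleway

cluster = scaleway.kubernetes.get_cluster(name="my-cluster")   # assumed data source
for cfg in cluster.autoscaler_configs:                         # assumed attribute name
    pulumi.log.info(
        f"expander={cfg.expander}, "
        f"scale_down_utilization_threshold={cfg.scale_down_utilization_threshold}"
    )
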
@pulumi.output_type
class GetClusterKubeconfigResult(dict):
    def __init__(__self__, *,
                 cluster_ca_certificate: str,
                 config_file: str,
                 host: str,
                 token: str):
        """
        :param str cluster_ca_certificate: The CA certificate of the Kubernetes API server.
        :param str config_file: The raw kubeconfig file.
        :param str host: The URL of the Kubernetes API server.
        :param str token: The token to connect to the Kubernetes API server.
        """
        pulumi.set(__self__, "cluster_ca_certificate", cluster_ca_certificate)
        pulumi.set(__self__, "config_file", config_file)
        pulumi.set(__self__, "host", host)
        pulumi.set(__self__, "token", token)

    @property
    @pulumi.getter(name="clusterCaCertificate")
    def cluster_ca_certificate(self) -> str:
        """
        The CA certificate of the Kubernetes API server.
        """
        return pulumi.get(self, "cluster_ca_certificate")

    @property
    @pulumi.getter(name="configFile")
    def config_file(self) -> str:
        """
        The raw kubeconfig file.
        """
        return pulumi.get(self, "config_file")

    @property
    @pulumi.getter
    def host(self) -> str:
        """
        The URL of the Kubernetes API server.
        """
        return pulumi.get(self, "host")

    @property
    @pulumi.getter
    def token(self) -> str:
        """
        The token to connect to the Kubernetes API server.
        """
        return pulumi.get(self, "token")

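The kubeconfig result is what typically bridges a Scaleway cluster lookup to the Pulumi Kubernetes provider. Below is a minimal sketch under the same assumptions as before (the `kubernetes.get_cluster` data source and its `kubeconfigs` attribute are inferred from this result type, not confirmed here); passing a raw kubeconfig string to `pulumi_kubernetes.Provider` is standard Pulumi Kubernetes usage.

# Illustrative sketch only: wiring the looked-up kubeconfig into the Kubernetes provider.
# `kubernetes.get_cluster` and its `kubeconfigs` attribute are assumptions inferred from
# the GetClusterKubeconfigResult type above.
import pulumi_kubernetes as k8s
import pulumiverse_scaleway as scaleway

cluster = scaleway.kubernetes.get_cluster(name="my-cluster")   # assumed data source
k8s_provider = k8s.Provider(
    "scaleway-k8s",
    kubeconfig=cluster.kubeconfigs[0].config_file,  # raw kubeconfig file from the result above
)
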
@pulumi.output_type
class GetClusterOpenIdConnectConfigResult(dict):
    def __init__(__self__, *,
                 client_id: str,
                 groups_claims: Sequence[str],
                 groups_prefix: str,
                 issuer_url: str,
                 required_claims: Sequence[str],
                 username_claim: str,
                 username_prefix: str):
        """
        :param str client_id: A client ID that all tokens must be issued for.
        :param Sequence[str] groups_claims: JWT claim to use as the user's group.
        :param str groups_prefix: Prefix prepended to group claims.
        :param str issuer_url: URL of the provider which allows the API server to discover public signing keys.
        :param Sequence[str] required_claims: Multiple key=value pairs that describe a required claim in the ID token.
        :param str username_claim: JWT claim to use as the user name.
        :param str username_prefix: Prefix prepended to username.
        """
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "groups_claims", groups_claims)
        pulumi.set(__self__, "groups_prefix", groups_prefix)
        pulumi.set(__self__, "issuer_url", issuer_url)
        pulumi.set(__self__, "required_claims", required_claims)
        pulumi.set(__self__, "username_claim", username_claim)
        pulumi.set(__self__, "username_prefix", username_prefix)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        """
        A client ID that all tokens must be issued for.
        """
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="groupsClaims")
    def groups_claims(self) -> Sequence[str]:
        """
        JWT claim to use as the user's group.
        """
        return pulumi.get(self, "groups_claims")

    @property
    @pulumi.getter(name="groupsPrefix")
    def groups_prefix(self) -> str:
        """
        Prefix prepended to group claims.
        """
        return pulumi.get(self, "groups_prefix")

    @property
    @pulumi.getter(name="issuerUrl")
    def issuer_url(self) -> str:
        """
        URL of the provider which allows the API server to discover public signing keys.
        """
        return pulumi.get(self, "issuer_url")

    @property
    @pulumi.getter(name="requiredClaims")
    def required_claims(self) -> Sequence[str]:
        """
        Multiple key=value pairs that describe a required claim in the ID token.
        """
        return pulumi.get(self, "required_claims")

    @property
    @pulumi.getter(name="usernameClaim")
    def username_claim(self) -> str:
        """
        JWT claim to use as the user name.
        """
        return pulumi.get(self, "username_claim")

    @property
    @pulumi.getter(name="usernamePrefix")
    def username_prefix(self) -> str:
        """
        Prefix prepended to username.
        """
        return pulumi.get(self, "username_prefix")

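For reference, these fields mirror the standard kube-apiserver OIDC flags. The helper below is purely illustrative (it is not part of this package) and only shows that mapping; `oidc` is assumed to be a GetClusterOpenIdConnectConfigResult instance as defined above.

# Illustrative helper only (not part of this package): maps the OIDC result fields
# to the corresponding kube-apiserver flags.
from typing import List

def apiserver_oidc_flags(oidc) -> List[str]:
    flags = [
        f"--oidc-issuer-url={oidc.issuer_url}",
        f"--oidc-client-id={oidc.client_id}",
        f"--oidc-username-claim={oidc.username_claim}",
        f"--oidc-username-prefix={oidc.username_prefix}",
        f"--oidc-groups-prefix={oidc.groups_prefix}",
    ]
    flags += [f"--oidc-groups-claim={claim}" for claim in oidc.groups_claims]
    flags += [f"--oidc-required-claim={pair}" for pair in oidc.required_claims]
    return flags
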
@pulumi.output_type
class GetPoolNodeResult(dict):
    def __init__(__self__, *,
                 name: str,
                 public_ip: str,
                 public_ip_v6: str,
                 status: str):
        """
        :param str name: The pool name. Only one of `name` and `pool_id` should be specified. `cluster_id` should be specified with `name`.
        :param str public_ip: The public IPv4.
        :param str public_ip_v6: The public IPv6.
        :param str status: The status of the node.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "public_ip", public_ip)
        pulumi.set(__self__, "public_ip_v6", public_ip_v6)
        pulumi.set(__self__, "status", status)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The pool name. Only one of `name` and `pool_id` should be specified. `cluster_id` should be specified with `name`.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="publicIp")
    def public_ip(self) -> str:
        """
        The public IPv4.
        """
        return pulumi.get(self, "public_ip")

    @property
    @pulumi.getter(name="publicIpV6")
    def public_ip_v6(self) -> str:
        """
        The public IPv6.
        """
        return pulumi.get(self, "public_ip_v6")

    @property
    @pulumi.getter
    def status(self) -> str:
        """
        The status of the node.
        """
        return pulumi.get(self, "status")

@pulumi.output_type
class GetPoolUpgradePolicyResult(dict):
    def __init__(__self__, *,
                 max_surge: int,
                 max_unavailable: int):
        """
        :param int max_surge: The maximum number of nodes to be created during the upgrade
        :param int max_unavailable: The maximum number of nodes that can be not ready at the same time
        """
        pulumi.set(__self__, "max_surge", max_surge)
        pulumi.set(__self__, "max_unavailable", max_unavailable)

    @property
    @pulumi.getter(name="maxSurge")
    def max_surge(self) -> int:
        """
        The maximum number of nodes to be created during the upgrade
        """
        return pulumi.get(self, "max_surge")

    @property
    @pulumi.getter(name="maxUnavailable")
    def max_unavailable(self) -> int:
        """
        The maximum number of nodes that can be not ready at the same time
        """
        return pulumi.get(self, "max_unavailable")

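Finally, a sketch of inspecting pool nodes and the upgrade policy from a pool lookup. The `kubernetes.get_pool` data source and its `nodes` and `upgrade_policies` attributes are assumptions inferred from the GetPoolNodeResult and GetPoolUpgradePolicyResult types above; the `cluster_id`-with-`name` argument pairing follows the constraint stated in the `name` docstring.

# Illustrative sketch only: inspecting pool nodes and the upgrade policy.
# `kubernetes.get_pool`, `nodes` and `upgrade_policies` are assumptions inferred
# from the result types above.
import pulumi
import pulumiverse_scaleway as scaleway

pool = scaleway.kubernetes.get_pool(
    cluster_id="11111111-1111-1111-1111-111111111111",  # placeholder cluster ID
    name="default",
)
for node in pool.nodes:                                  # assumed attribute name
    pulumi.log.info(f"{node.name}: {node.status} (IPv4 {node.public_ip}, IPv6 {node.public_ip_v6})")
for policy in pool.upgrade_policies:                     # assumed attribute name
    pulumi.log.info(f"max_surge={policy.max_surge}, max_unavailable={policy.max_unavailable}")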