pulumi-alicloud 3.75.0a1741324537__py3-none-any.whl → 3.76.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pulumi-alicloud might be problematic.
- pulumi_alicloud/__init__.py +501 -4
- pulumi_alicloud/_inputs.py +53 -0
- pulumi_alicloud/ackone/__init__.py +1 -0
- pulumi_alicloud/ackone/cluster.py +73 -8
- pulumi_alicloud/ackone/membership_attachment.py +223 -0
- pulumi_alicloud/actiontrail/global_events_storage_region.py +2 -2
- pulumi_alicloud/adb/db_cluster_lake_version.py +458 -49
- pulumi_alicloud/alb/_inputs.py +217 -17
- pulumi_alicloud/alb/load_balancer.py +7 -35
- pulumi_alicloud/alb/load_balancer_access_log_config_attachment.py +2 -2
- pulumi_alicloud/alb/load_balancer_security_group_attachment.py +69 -22
- pulumi_alicloud/alb/outputs.py +150 -12
- pulumi_alicloud/alb/server_group.py +199 -44
- pulumi_alicloud/aligreen/audit_callback.py +2 -2
- pulumi_alicloud/aligreen/biz_type.py +2 -2
- pulumi_alicloud/aligreen/callback.py +2 -2
- pulumi_alicloud/aligreen/image_lib.py +2 -2
- pulumi_alicloud/aligreen/keyword_lib.py +2 -2
- pulumi_alicloud/aligreen/oss_stock_task.py +77 -14
- pulumi_alicloud/alikafka/sasl_acl.py +7 -7
- pulumi_alicloud/apigateway/_inputs.py +134 -0
- pulumi_alicloud/apigateway/group.py +117 -5
- pulumi_alicloud/apigateway/instance.py +199 -11
- pulumi_alicloud/apigateway/outputs.py +107 -0
- pulumi_alicloud/arms/alert_contact.py +2 -2
- pulumi_alicloud/arms/alert_contact_group.py +2 -2
- pulumi_alicloud/arms/get_prometheus.py +2 -6
- pulumi_alicloud/arms/grafana_workspace.py +511 -69
- pulumi_alicloud/arms/outputs.py +18 -18
- pulumi_alicloud/arms/prometheus_alert_rule.py +2 -2
- pulumi_alicloud/arms/prometheus_monitoring.py +2 -2
- pulumi_alicloud/cdn/domain_new.py +2 -2
- pulumi_alicloud/cen/_inputs.py +6 -3
- pulumi_alicloud/cen/get_transit_router_vpn_attachments.py +131 -19
- pulumi_alicloud/cen/instance.py +101 -59
- pulumi_alicloud/cen/inter_region_traffic_qos_policy.py +126 -81
- pulumi_alicloud/cen/inter_region_traffic_qos_queue.py +93 -47
- pulumi_alicloud/cen/outputs.py +69 -26
- pulumi_alicloud/cen/transit_route_table_aggregation.py +171 -55
- pulumi_alicloud/cen/transit_router.py +127 -53
- pulumi_alicloud/cen/transit_router_cidr.py +53 -35
- pulumi_alicloud/cen/transit_router_ecr_attachment.py +2 -2
- pulumi_alicloud/cen/transit_router_peer_attachment.py +360 -143
- pulumi_alicloud/cen/transit_router_route_table_association.py +33 -43
- pulumi_alicloud/cen/transit_router_vpn_attachment.py +438 -109
- pulumi_alicloud/cfg/aggregate_compliance_pack.py +6 -4
- pulumi_alicloud/cfg/aggregate_config_rule.py +6 -4
- pulumi_alicloud/cfg/aggregator.py +6 -4
- pulumi_alicloud/clickhouse/db_cluster.py +179 -6
- pulumi_alicloud/clickhouseenterprisedbcluster/__init__.py +14 -0
- pulumi_alicloud/clickhouseenterprisedbcluster/_inputs.py +167 -0
- pulumi_alicloud/clickhouseenterprisedbcluster/account.py +554 -0
- pulumi_alicloud/clickhouseenterprisedbcluster/backup_policy.py +407 -0
- pulumi_alicloud/clickhouseenterprisedbcluster/click_house_enterprise_db_cluster.py +641 -0
- pulumi_alicloud/clickhouseenterprisedbcluster/outputs.py +147 -0
- pulumi_alicloud/clickhouseenterprisedbcluster/public_endpoint.py +364 -0
- pulumi_alicloud/clickhouseenterprisedbcluster/security_ip.py +357 -0
- pulumi_alicloud/cloudcontrol/get_prices.py +2 -2
- pulumi_alicloud/cloudcontrol/get_products.py +2 -2
- pulumi_alicloud/cloudcontrol/get_resource_types.py +2 -2
- pulumi_alicloud/cloudcontrol/resource.py +2 -2
- pulumi_alicloud/cloudfirewall/__init__.py +2 -0
- pulumi_alicloud/cloudfirewall/get_nat_firewalls.py +315 -0
- pulumi_alicloud/cloudfirewall/get_vpc_cen_tr_firewalls.py +569 -0
- pulumi_alicloud/cloudfirewall/outputs.py +362 -0
- pulumi_alicloud/cloudphone/__init__.py +12 -0
- pulumi_alicloud/cloudphone/_inputs.py +175 -0
- pulumi_alicloud/cloudphone/image.py +323 -0
- pulumi_alicloud/cloudphone/key_pair.py +294 -0
- pulumi_alicloud/cloudphone/outputs.py +138 -0
- pulumi_alicloud/cloudphone/policy.py +522 -0
- pulumi_alicloud/cloudphoneinstance/__init__.py +9 -0
- pulumi_alicloud/cloudphoneinstance/cloud_phone_instance.py +209 -0
- pulumi_alicloud/cloudphoneinstance/group.py +961 -0
- pulumi_alicloud/cloudsso/scim_server_credential.py +178 -11
- pulumi_alicloud/cms/get_site_monitors.py +2 -2
- pulumi_alicloud/config/outputs.py +32 -0
- pulumi_alicloud/cs/__init__.py +1 -0
- pulumi_alicloud/cs/_inputs.py +9 -9
- pulumi_alicloud/cs/get_kubernetes_clusters.py +5 -4
- pulumi_alicloud/cs/get_kubernetes_node_pools.py +147 -0
- pulumi_alicloud/cs/get_managed_kubernetes_clusters.py +5 -4
- pulumi_alicloud/cs/get_serverless_kubernetes_clusters.py +5 -4
- pulumi_alicloud/cs/managed_kubernetes.py +174 -59
- pulumi_alicloud/cs/node_pool.py +91 -63
- pulumi_alicloud/cs/outputs.py +1991 -213
- pulumi_alicloud/databasefilesystem/service_linked_role.py +2 -2
- pulumi_alicloud/dataworks/data_source.py +2 -2
- pulumi_alicloud/dataworks/data_source_shared_rule.py +2 -2
- pulumi_alicloud/dataworks/di_alarm_rule.py +2 -2
- pulumi_alicloud/dataworks/di_job.py +2 -2
- pulumi_alicloud/dataworks/dw_resource_group.py +2 -2
- pulumi_alicloud/dataworks/network.py +2 -2
- pulumi_alicloud/dataworks/project.py +2 -2
- pulumi_alicloud/dataworks/project_member.py +2 -2
- pulumi_alicloud/dbs/backup_plan.py +2 -2
- pulumi_alicloud/ddos/bgp_policy.py +2 -2
- pulumi_alicloud/ddos/ddos_bgp_instance.py +0 -4
- pulumi_alicloud/dfs/vsc_mount_point.py +28 -28
- pulumi_alicloud/dns/ddos_bgp_instance.py +0 -4
- pulumi_alicloud/eais/__init__.py +2 -0
- pulumi_alicloud/eais/_inputs.py +75 -0
- pulumi_alicloud/eais/client_instance_attachment.py +408 -0
- pulumi_alicloud/eais/instance.py +365 -118
- pulumi_alicloud/eais/outputs.py +32 -0
- pulumi_alicloud/ebs/disk_replica_group.py +512 -30
- pulumi_alicloud/ebs/disk_replica_pair.py +443 -113
- pulumi_alicloud/ebs/enterprise_snapshot_policy.py +2 -2
- pulumi_alicloud/ebs/enterprise_snapshot_policy_attachment.py +2 -2
- pulumi_alicloud/ebs/replica_group_drill.py +2 -2
- pulumi_alicloud/ebs/replica_pair_drill.py +2 -2
- pulumi_alicloud/ecp/instance.py +2 -2
- pulumi_alicloud/ecp/key_pair.py +4 -4
- pulumi_alicloud/ecs/_inputs.py +33 -0
- pulumi_alicloud/ecs/disk.py +1 -1
- pulumi_alicloud/ecs/ecs_deployment_set.py +89 -67
- pulumi_alicloud/ecs/ecs_disk.py +1 -1
- pulumi_alicloud/ecs/ecs_launch_template.py +23 -23
- pulumi_alicloud/ecs/eip.py +1 -1
- pulumi_alicloud/ecs/eip_address.py +1 -1
- pulumi_alicloud/ecs/instance.py +44 -31
- pulumi_alicloud/ecs/outputs.py +26 -2
- pulumi_alicloud/eds/command.py +10 -2
- pulumi_alicloud/eflo/__init__.py +5 -0
- pulumi_alicloud/eflo/_inputs.py +1717 -0
- pulumi_alicloud/eflo/cluster.py +935 -0
- pulumi_alicloud/eflo/invocation.py +1114 -0
- pulumi_alicloud/eflo/node.py +1013 -0
- pulumi_alicloud/eflo/node_group.py +1101 -0
- pulumi_alicloud/eflo/outputs.py +1366 -0
- pulumi_alicloud/emrv2/__init__.py +1 -0
- pulumi_alicloud/emrv2/_inputs.py +47 -7
- pulumi_alicloud/emrv2/get_cluster_instances.py +537 -0
- pulumi_alicloud/emrv2/outputs.py +195 -5
- pulumi_alicloud/ens/eip_instance_attachment.py +2 -2
- pulumi_alicloud/ens/image.py +81 -34
- pulumi_alicloud/ens/instance_security_group_attachment.py +8 -4
- pulumi_alicloud/ens/nat_gateway.py +6 -6
- pulumi_alicloud/ens/vswitch.py +36 -36
- pulumi_alicloud/esa/__init__.py +23 -0
- pulumi_alicloud/esa/_inputs.py +1068 -15
- pulumi_alicloud/esa/cache_reserve_instance.py +522 -0
- pulumi_alicloud/esa/cache_rule.py +1419 -0
- pulumi_alicloud/esa/certificate.py +731 -0
- pulumi_alicloud/esa/client_ca_certificate.py +388 -0
- pulumi_alicloud/esa/client_certificate.py +452 -0
- pulumi_alicloud/esa/compression_rule.py +596 -0
- pulumi_alicloud/esa/edge_container_app.py +972 -0
- pulumi_alicloud/esa/edge_container_app_record.py +376 -0
- pulumi_alicloud/esa/get_sites.py +421 -0
- pulumi_alicloud/esa/http_request_header_modification_rule.py +58 -44
- pulumi_alicloud/esa/http_response_header_modification_rule.py +539 -0
- pulumi_alicloud/esa/https_application_configuration.py +939 -0
- pulumi_alicloud/esa/https_basic_configuration.py +1041 -0
- pulumi_alicloud/esa/image_transform.py +498 -0
- pulumi_alicloud/esa/kv_namespace.py +276 -0
- pulumi_alicloud/esa/list.py +30 -30
- pulumi_alicloud/esa/network_optimization.py +680 -0
- pulumi_alicloud/esa/origin_pool.py +485 -0
- pulumi_alicloud/esa/origin_rule.py +796 -0
- pulumi_alicloud/esa/outputs.py +959 -10
- pulumi_alicloud/esa/page.py +30 -51
- pulumi_alicloud/esa/rate_plan_instance.py +88 -161
- pulumi_alicloud/esa/record.py +2 -2
- pulumi_alicloud/esa/redirect_rule.py +748 -0
- pulumi_alicloud/esa/rewrite_url_rule.py +688 -0
- pulumi_alicloud/esa/site.py +276 -27
- pulumi_alicloud/esa/site_delivery_task.py +841 -0
- pulumi_alicloud/esa/waiting_room.py +1207 -0
- pulumi_alicloud/esa/waiting_room_event.py +1378 -0
- pulumi_alicloud/esa/waiting_room_rule.py +497 -0
- pulumi_alicloud/ess/_inputs.py +155 -0
- pulumi_alicloud/ess/outputs.py +128 -0
- pulumi_alicloud/ess/scaling_configuration.py +7 -7
- pulumi_alicloud/ess/scaling_group.py +237 -2
- pulumi_alicloud/ess/scaling_rule.py +143 -2
- pulumi_alicloud/expressconnect/ec_failover_test_job.py +0 -2
- pulumi_alicloud/expressconnect/router_grant_association.py +2 -2
- pulumi_alicloud/expressconnect/vbr_pconn_association.py +2 -2
- pulumi_alicloud/expressconnect/virtual_border_router.py +4 -4
- pulumi_alicloud/ga/get_basic_accelerators.py +31 -9
- pulumi_alicloud/ga/outputs.py +10 -4
- pulumi_alicloud/gpdb/db_resource_group.py +6 -6
- pulumi_alicloud/gpdb/external_data_service.py +6 -6
- pulumi_alicloud/gpdb/hadoop_data_source.py +2 -2
- pulumi_alicloud/gpdb/jdbc_data_source.py +2 -2
- pulumi_alicloud/gpdb/remote_adb_data_source.py +6 -6
- pulumi_alicloud/gpdb/streaming_data_service.py +2 -2
- pulumi_alicloud/gpdb/streaming_data_source.py +2 -2
- pulumi_alicloud/gpdb/streaming_job.py +2 -2
- pulumi_alicloud/hbr/policy.py +67 -6
- pulumi_alicloud/hbr/vault.py +210 -9
- pulumi_alicloud/ims/oidc_provider.py +24 -13
- pulumi_alicloud/kms/get_keys.py +10 -0
- pulumi_alicloud/kvstore/account.py +9 -9
- pulumi_alicloud/kvstore/connection.py +2 -2
- pulumi_alicloud/kvstore/get_instance_classes.py +2 -2
- pulumi_alicloud/kvstore/get_instance_engines.py +2 -2
- pulumi_alicloud/kvstore/instance.py +7 -7
- pulumi_alicloud/kvstore/outputs.py +2 -2
- pulumi_alicloud/live/caster.py +2 -2
- pulumi_alicloud/maxcompute/__init__.py +1 -0
- pulumi_alicloud/maxcompute/_inputs.py +232 -3
- pulumi_alicloud/maxcompute/outputs.py +183 -2
- pulumi_alicloud/maxcompute/project.py +2 -2
- pulumi_alicloud/maxcompute/quota.py +438 -0
- pulumi_alicloud/message/__init__.py +3 -0
- pulumi_alicloud/message/_inputs.py +149 -0
- pulumi_alicloud/message/outputs.py +112 -0
- pulumi_alicloud/message/service_endpoint.py +218 -0
- pulumi_alicloud/message/service_endpoint_acl.py +306 -0
- pulumi_alicloud/message/service_queue.py +49 -0
- pulumi_alicloud/message/service_subscription.py +91 -14
- pulumi_alicloud/mse/nacos_config.py +4 -0
- pulumi_alicloud/nas/mount_target.py +149 -54
- pulumi_alicloud/nlb/__init__.py +1 -0
- pulumi_alicloud/nlb/_inputs.py +157 -30
- pulumi_alicloud/nlb/listener.py +87 -20
- pulumi_alicloud/nlb/load_balancer.py +212 -24
- pulumi_alicloud/nlb/load_balancer_zone_shifted_attachment.py +397 -0
- pulumi_alicloud/nlb/outputs.py +123 -20
- pulumi_alicloud/nlb/server_group.py +55 -20
- pulumi_alicloud/nlb/server_group_server_attachment.py +118 -65
- pulumi_alicloud/oss/__init__.py +1 -0
- pulumi_alicloud/oss/_inputs.py +20 -0
- pulumi_alicloud/oss/access_point.py +2 -2
- pulumi_alicloud/oss/bucket_cname.py +2 -2
- pulumi_alicloud/oss/bucket_cors.py +2 -2
- pulumi_alicloud/oss/bucket_style.py +402 -0
- pulumi_alicloud/oss/bucket_website.py +2 -2
- pulumi_alicloud/oss/outputs.py +14 -0
- pulumi_alicloud/ots/_inputs.py +3 -3
- pulumi_alicloud/ots/outputs.py +2 -2
- pulumi_alicloud/pai/service.py +59 -67
- pulumi_alicloud/pai/workspace_code_source.py +2 -2
- pulumi_alicloud/pai/workspace_dataset.py +2 -2
- pulumi_alicloud/pai/workspace_datasetversion.py +2 -4
- pulumi_alicloud/pai/workspace_experiment.py +2 -2
- pulumi_alicloud/pai/workspace_run.py +2 -2
- pulumi_alicloud/pai/workspace_workspace.py +2 -2
- pulumi_alicloud/polardb/cluster.py +75 -28
- pulumi_alicloud/privatelink/get_vpc_endpoints.py +21 -1
- pulumi_alicloud/privatelink/outputs.py +11 -0
- pulumi_alicloud/privatelink/vpc_endpoint.py +1 -1
- pulumi_alicloud/privatelink/vpc_endpoint_zone.py +1 -1
- pulumi_alicloud/pulumi-plugin.json +1 -1
- pulumi_alicloud/ram/__init__.py +3 -0
- pulumi_alicloud/ram/_inputs.py +9 -9
- pulumi_alicloud/ram/access_key.py +97 -35
- pulumi_alicloud/ram/account_alias.py +18 -45
- pulumi_alicloud/ram/get_system_policys.py +189 -0
- pulumi_alicloud/ram/group.py +156 -35
- pulumi_alicloud/ram/group_policy_attachment.py +51 -29
- pulumi_alicloud/ram/login_profile.py +92 -38
- pulumi_alicloud/ram/outputs.py +91 -6
- pulumi_alicloud/ram/password_policy.py +779 -0
- pulumi_alicloud/ram/policy.py +199 -88
- pulumi_alicloud/ram/role_policy_attachment.py +51 -29
- pulumi_alicloud/ram/saml_provider.py +44 -37
- pulumi_alicloud/ram/user.py +10 -2
- pulumi_alicloud/ram/user_group_attachment.py +273 -0
- pulumi_alicloud/ram/user_policy_attachment.py +49 -27
- pulumi_alicloud/rds/__init__.py +1 -0
- pulumi_alicloud/rds/custom_disk.py +1053 -0
- pulumi_alicloud/rds/instance.py +117 -14
- pulumi_alicloud/redis/tair_instance.py +14 -14
- pulumi_alicloud/resourcemanager/__init__.py +1 -0
- pulumi_alicloud/resourcemanager/_inputs.py +53 -0
- pulumi_alicloud/resourcemanager/auto_grouping_rule.py +796 -0
- pulumi_alicloud/resourcemanager/outputs.py +50 -0
- pulumi_alicloud/resourcemanager/shared_resource.py +7 -0
- pulumi_alicloud/rocketmq/__init__.py +1 -1
- pulumi_alicloud/rocketmq/_inputs.py +146 -4
- pulumi_alicloud/rocketmq/account.py +452 -0
- pulumi_alicloud/rocketmq/acl.py +473 -47
- pulumi_alicloud/rocketmq/client_user.py +8 -2
- pulumi_alicloud/rocketmq/consumer_group.py +121 -24
- pulumi_alicloud/rocketmq/dnat_entry.py +8 -2
- pulumi_alicloud/rocketmq/outputs.py +113 -4
- pulumi_alicloud/rocketmq/qos.py +8 -2
- pulumi_alicloud/rocketmq/qos_car.py +10 -4
- pulumi_alicloud/rocketmq/qos_policy.py +6 -0
- pulumi_alicloud/rocketmq/rocket_mq_instance.py +136 -7
- pulumi_alicloud/rocketmq/rocket_mq_topic.py +121 -24
- pulumi_alicloud/rocketmq/snat_entry.py +8 -2
- pulumi_alicloud/sag/__init__.py +8 -0
- pulumi_alicloud/sag/acl.py +200 -0
- pulumi_alicloud/{rocketmq → sag}/acl_rule.py +7 -7
- pulumi_alicloud/sag/client_user.py +560 -0
- pulumi_alicloud/sag/dnat_entry.py +512 -0
- pulumi_alicloud/sag/get_acls.py +2 -2
- pulumi_alicloud/sag/qos.py +202 -0
- pulumi_alicloud/sag/qos_car.py +654 -0
- pulumi_alicloud/sag/qos_policy.py +659 -0
- pulumi_alicloud/sag/snat_entry.py +313 -0
- pulumi_alicloud/securitycenter/group.py +2 -2
- pulumi_alicloud/selectdb/db_instance.py +60 -11
- pulumi_alicloud/simpleapplicationserver/get_server_plans.py +7 -7
- pulumi_alicloud/simpleapplicationserver/outputs.py +3 -3
- pulumi_alicloud/slb/server_group_server_attachment.py +101 -111
- pulumi_alicloud/sls/oss_export_sink.py +2 -2
- pulumi_alicloud/tag/__init__.py +1 -0
- pulumi_alicloud/tag/associated_rule.py +306 -0
- pulumi_alicloud/tag/policy.py +27 -36
- pulumi_alicloud/threatdetection/__init__.py +3 -0
- pulumi_alicloud/threatdetection/anti_brute_force_rule.py +41 -65
- pulumi_alicloud/threatdetection/asset_bind.py +209 -0
- pulumi_alicloud/threatdetection/asset_selection_config.py +258 -0
- pulumi_alicloud/threatdetection/instance.py +271 -54
- pulumi_alicloud/threatdetection/log_meta.py +351 -0
- pulumi_alicloud/vpc/__init__.py +2 -0
- pulumi_alicloud/vpc/_inputs.py +281 -0
- pulumi_alicloud/vpc/common_bandwith_package.py +107 -44
- pulumi_alicloud/vpc/common_bandwith_package_attachment.py +2 -2
- pulumi_alicloud/vpc/flow_log.py +184 -49
- pulumi_alicloud/vpc/forward_entry.py +8 -4
- pulumi_alicloud/vpc/gateway_endpoint.py +85 -27
- pulumi_alicloud/vpc/get_enhanced_nat_available_zones.py +38 -2
- pulumi_alicloud/vpc/get_forward_entries.py +2 -2
- pulumi_alicloud/vpc/get_ipam_ipam_pool_allocations.py +2 -2
- pulumi_alicloud/vpc/get_ipam_ipam_pool_cidrs.py +2 -2
- pulumi_alicloud/vpc/get_ipam_ipam_pools.py +2 -2
- pulumi_alicloud/vpc/get_ipam_ipam_scopes.py +2 -2
- pulumi_alicloud/vpc/get_ipam_ipams.py +2 -2
- pulumi_alicloud/vpc/get_ipsec_servers.py +14 -2
- pulumi_alicloud/vpc/get_nat_gateways.py +48 -2
- pulumi_alicloud/vpc/get_prefix_lists.py +11 -2
- pulumi_alicloud/vpc/get_snat_entries.py +5 -2
- pulumi_alicloud/vpc/get_traffic_mirror_filters.py +14 -2
- pulumi_alicloud/vpc/ipam_ipam.py +2 -9
- pulumi_alicloud/vpc/ipam_ipam_pool.py +2 -30
- pulumi_alicloud/vpc/ipam_ipam_pool_allocation.py +6 -16
- pulumi_alicloud/vpc/ipam_ipam_pool_cidr.py +2 -2
- pulumi_alicloud/vpc/ipam_ipam_resource_discovery.py +489 -0
- pulumi_alicloud/vpc/ipam_ipam_scope.py +2 -9
- pulumi_alicloud/vpc/ipv6_address.py +85 -5
- pulumi_alicloud/vpc/nat_gateway.py +8 -0
- pulumi_alicloud/vpc/outputs.py +244 -2
- pulumi_alicloud/vpc/peer_connection.py +84 -42
- pulumi_alicloud/vpc/peer_connection_accepter.py +7 -21
- pulumi_alicloud/vpc/route_table.py +100 -45
- pulumi_alicloud/vpc/router_interface_connection.py +4 -4
- pulumi_alicloud/vpc/traffic_mirror_filter.py +2 -2
- pulumi_alicloud/vpc/vpc_route_entry.py +724 -0
- pulumi_alicloud/vpn/_inputs.py +716 -71
- pulumi_alicloud/vpn/gateway_vpn_attachment.py +622 -128
- pulumi_alicloud/vpn/get_gateway_vco_routes.py +11 -2
- pulumi_alicloud/vpn/get_gateway_vpn_attachments.py +26 -60
- pulumi_alicloud/vpn/get_gateways.py +21 -1
- pulumi_alicloud/vpn/ipsec_server.py +2 -2
- pulumi_alicloud/vpn/outputs.py +1982 -1006
- {pulumi_alicloud-3.75.0a1741324537.dist-info → pulumi_alicloud-3.76.0.dist-info}/METADATA +2 -2
- {pulumi_alicloud-3.75.0a1741324537.dist-info → pulumi_alicloud-3.76.0.dist-info}/RECORD +355 -278
- {pulumi_alicloud-3.75.0a1741324537.dist-info → pulumi_alicloud-3.76.0.dist-info}/WHEEL +1 -1
- {pulumi_alicloud-3.75.0a1741324537.dist-info → pulumi_alicloud-3.76.0.dist-info}/top_level.txt +0 -0
pulumi_alicloud/cs/outputs.py
CHANGED
@@ -73,12 +73,28 @@ __all__ = [
     'GetKubernetesClustersClusterLogConfigResult',
     'GetKubernetesClustersClusterMasterNodeResult',
     'GetKubernetesClustersClusterWorkerNodeResult',
+    'GetKubernetesNodePoolsNodepoolResult',
+    'GetKubernetesNodePoolsNodepoolDataDiskResult',
+    'GetKubernetesNodePoolsNodepoolKubeletConfigurationResult',
+    'GetKubernetesNodePoolsNodepoolKubeletConfigurationReservedMemoryResult',
+    'GetKubernetesNodePoolsNodepoolKubeletConfigurationTracingResult',
+    'GetKubernetesNodePoolsNodepoolLabelResult',
+    'GetKubernetesNodePoolsNodepoolManagementResult',
+    'GetKubernetesNodePoolsNodepoolManagementAutoRepairPolicyResult',
+    'GetKubernetesNodePoolsNodepoolManagementAutoUpgradePolicyResult',
+    'GetKubernetesNodePoolsNodepoolManagementAutoVulFixPolicyResult',
+    'GetKubernetesNodePoolsNodepoolPrivatePoolOptionsResult',
+    'GetKubernetesNodePoolsNodepoolScalingConfigResult',
+    'GetKubernetesNodePoolsNodepoolSpotPriceLimitResult',
+    'GetKubernetesNodePoolsNodepoolTaintResult',
+    'GetKubernetesNodePoolsNodepoolTeeConfigResult',
     'GetKubernetesPermissionPermissionResult',
     'GetKubernetesVersionMetadataResult',
     'GetKubernetesVersionMetadataRuntimeResult',
     'GetManagedKubernetesClustersClusterResult',
     'GetManagedKubernetesClustersClusterConnectionsResult',
     'GetManagedKubernetesClustersClusterLogConfigResult',
+    'GetManagedKubernetesClustersClusterRrsaConfigResult',
     'GetManagedKubernetesClustersClusterWorkerNodeResult',
     'GetRegistryEnterpriseInstancesInstanceResult',
     'GetRegistryEnterpriseNamespacesNamespaceResult',
@@ -1906,12 +1922,12 @@ class NodePoolDataDisk(dict):
         :param str auto_format: Whether to automatically mount the data disk. Valid values: true and false.
         :param str auto_snapshot_policy_id: The ID of the automatic snapshot policy that you want to apply to the system disk.
         :param bool bursting_enabled: Whether the data disk is enabled with Burst (performance Burst). This is configured when the disk type is cloud_auto.
-        :param str category: The type of
+        :param str category: The type of data disk. Default value: `cloud_efficiency`. Valid values:
         :param str device: The mount target of data disk N. Valid values of N: 1 to 16. If you do not specify this parameter, the system automatically assigns a mount target when Auto Scaling creates ECS instances. The name of the mount target ranges from /dev/xvdb to /dev/xvdz.
         :param str encrypted: Specifies whether to encrypt data disks. Valid values: true and false. Default to `false`.
-        :param str file_system: The
+        :param str file_system: The type of the mounted file system. Works when auto_format is true. Optional value: `ext4`, `xfs`.
         :param str kms_key_id: The kms key id used to encrypt the data disk. It takes effect when `encrypted` is true.
-        :param str mount_target: The
+        :param str mount_target: The Mount path. Works when auto_format is true.
         :param str name: The length is 2~128 English or Chinese characters. It must start with an uppercase or lowr letter or a Chinese character and cannot start with http:// or https. Can contain numbers, colons (:), underscores (_), or dashes (-). It will be overwritten if auto_format is set.
         :param str performance_level: Worker node data disk performance level, when `category` values `cloud_essd`, the optional values are `PL0`, `PL1`, `PL2` or `PL3`, but the specific performance level is related to the disk capacity. For more information, see [Enhanced SSDs](https://www.alibabacloud.com/help/doc-detail/122389.htm). Default is `PL1`.
         :param int provisioned_iops: The read/write IOPS preconfigured for the data disk, which is configured when the disk type is cloud_auto.
@@ -1975,7 +1991,7 @@ class NodePoolDataDisk(dict):
     @pulumi.getter
     def category(self) -> Optional[str]:
         """
-        The type of
+        The type of data disk. Default value: `cloud_efficiency`. Valid values:
         """
         return pulumi.get(self, "category")

@@ -1999,7 +2015,7 @@ class NodePoolDataDisk(dict):
     @pulumi.getter(name="fileSystem")
     def file_system(self) -> Optional[str]:
         """
-        The
+        The type of the mounted file system. Works when auto_format is true. Optional value: `ext4`, `xfs`.
         """
         return pulumi.get(self, "file_system")

@@ -2015,7 +2031,7 @@ class NodePoolDataDisk(dict):
     @pulumi.getter(name="mountTarget")
     def mount_target(self) -> Optional[str]:
         """
-        The
+        The Mount path. Works when auto_format is true.
         """
         return pulumi.get(self, "mount_target")

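The hunks above fill in the previously truncated docstrings for the NodePoolDataDisk fields category, file_system, and mount_target. As a quick orientation, here is a minimal sketch of how those fields fit together on a cs.NodePool data disk; the cluster ID, vSwitch ID, instance type, and disk size are placeholders, and per the updated docstrings file_system and mount_target only take effect when auto_format is true:

    import pulumi_alicloud as alicloud

    # Illustrative only: wires the clarified NodePoolDataDisk fields together.
    node_pool = alicloud.cs.NodePool(
        "example-np",
        cluster_id="c-xxxxxxxxxxxx",          # placeholder ACK cluster ID
        node_pool_name="example-np",
        vswitch_ids=["vsw-xxxxxxxxxxxx"],     # placeholder vSwitch ID
        instance_types=["ecs.g6.large"],      # placeholder instance type
        data_disks=[alicloud.cs.NodePoolDataDiskArgs(
            category="cloud_efficiency",      # default per the updated docstring
            size=120,
            auto_format="true",               # required for file_system/mount_target to apply
            file_system="ext4",               # ext4 or xfs, per the updated docstring
            mount_target="/var/lib/container",
        )],
    )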
@@ -4591,322 +4607,2033 @@ class GetKubernetesClustersClusterWorkerNodeResult(dict):


 @pulumi.output_type
-class
+class GetKubernetesNodePoolsNodepoolResult(dict):
     def __init__(__self__, *,
-
-
-
-
-
-
+                 auto_renew: bool,
+                 auto_renew_period: int,
+                 cis_enabled: bool,
+                 compensate_with_on_demand: bool,
+                 cpu_policy: str,
+                 data_disks: Sequence['outputs.GetKubernetesNodePoolsNodepoolDataDiskResult'],
+                 deployment_set_id: str,
+                 desired_size: str,
+                 image_id: str,
+                 image_type: str,
+                 install_cloud_monitor: bool,
+                 instance_charge_type: str,
+                 instance_types: Sequence[str],
+                 internet_charge_type: str,
+                 internet_max_bandwidth_out: int,
+                 key_name: str,
+                 kubelet_configuration: 'outputs.GetKubernetesNodePoolsNodepoolKubeletConfigurationResult',
+                 labels: Sequence['outputs.GetKubernetesNodePoolsNodepoolLabelResult'],
+                 login_as_non_root: bool,
+                 management: 'outputs.GetKubernetesNodePoolsNodepoolManagementResult',
+                 multi_az_policy: str,
+                 node_name_mode: str,
+                 node_pool_id: str,
+                 node_pool_name: str,
+                 on_demand_base_capacity: str,
+                 on_demand_percentage_above_base_capacity: str,
+                 password: str,
+                 period: int,
+                 period_unit: str,
+                 platform: str,
+                 pre_user_data: str,
+                 private_pool_options: 'outputs.GetKubernetesNodePoolsNodepoolPrivatePoolOptionsResult',
+                 ram_role_name: str,
+                 rds_instances: Sequence[str],
+                 resource_group_id: str,
+                 runtime_name: str,
+                 runtime_version: str,
+                 scaling_config: 'outputs.GetKubernetesNodePoolsNodepoolScalingConfigResult',
+                 scaling_group_id: str,
+                 scaling_policy: str,
+                 security_group_id: str,
+                 security_group_ids: Sequence[str],
+                 security_hardening_os: bool,
+                 soc_enabled: bool,
+                 spot_instance_pools: int,
+                 spot_instance_remedy: bool,
+                 spot_price_limits: Sequence['outputs.GetKubernetesNodePoolsNodepoolSpotPriceLimitResult'],
+                 spot_strategy: str,
+                 system_disk_bursting_enabled: bool,
+                 system_disk_categories: Sequence[str],
+                 system_disk_category: str,
+                 system_disk_encrypt_algorithm: str,
+                 system_disk_encrypted: bool,
+                 system_disk_kms_key: str,
+                 system_disk_performance_level: str,
+                 system_disk_provisioned_iops: int,
+                 system_disk_size: int,
+                 system_disk_snapshot_policy_id: str,
+                 tags: Mapping[str, str],
+                 taints: Sequence['outputs.GetKubernetesNodePoolsNodepoolTaintResult'],
+                 tee_config: 'outputs.GetKubernetesNodePoolsNodepoolTeeConfigResult',
+                 unschedulable: bool,
+                 user_data: str,
+                 vswitch_ids: Sequence[str]):
+        """
+        :param bool auto_renew: Whether to enable automatic renewal for nodes in the node pool takes effect only when `instance_charge_type` is set to `PrePaid`. Default value: `false`. Valid values:- `true`: Automatic renewal. - `false`: Do not renew automatically.
+        :param int auto_renew_period: The automatic renewal period of nodes in the node pool takes effect only when you select Prepaid and Automatic Renewal, and is a required value. When `PeriodUnit = Month`, the value range is {1, 2, 3, 6, 12}. Default value: 1.
+        :param bool cis_enabled: Whether enable worker node to support cis security reinforcement, its valid value `true` or `false`. Default to `false` and apply to AliyunLinux series. Use `security_hardening_os` instead.
+        :param bool compensate_with_on_demand: Specifies whether to automatically create pay-as-you-go instances to meet the required number of ECS instances if preemptible instances cannot be created due to reasons such as cost or insufficient inventory. This parameter takes effect when you set `multi_az_policy` to `COST_OPTIMIZED`. Valid values: `true`: automatically creates pay-as-you-go instances to meet the required number of ECS instances if preemptible instances cannot be created. `false`: does not create pay-as-you-go instances to meet the required number of ECS instances if preemptible instances cannot be created.
+        :param str cpu_policy: Node CPU management policies. Default value: `none`. When the cluster version is 1.12.6 or later, the following two policies are supported:- `static`: allows pods with certain resource characteristics on the node to enhance its CPU affinity and exclusivity.- `none`: Enables the existing default CPU affinity scheme.
+        :param Sequence['GetKubernetesNodePoolsNodepoolDataDiskArgs'] data_disks: Configure the data disk of the node in the node pool.
+        :param str deployment_set_id: The deployment set of node pool. Specify the deploymentSet to ensure that the nodes in the node pool can be distributed on different physical machines.
+        :param str desired_size: Number of expected nodes in the node pool.
+        :param str image_id: The custom image ID. The system-provided image is used by default.
+        :param str image_type: The operating system image type and the `platform` parameter can be selected from the following values:- `AliyunLinux` : Alinux2 image.- `AliyunLinux3` : Alinux3 image.- `AliyunLinux3Arm64` : Alinux3 mirror ARM version.- `AliyunLinuxUEFI` : Alinux2 Image UEFI version.- `CentOS` : CentOS image.- `Windows` : Windows image.- `WindowsCore` : WindowsCore image.- `ContainerOS` : container-optimized image.- `Ubuntu`: Ubuntu image.
+        :param bool install_cloud_monitor: Whether to install cloud monitoring on the ECS node. After installation, you can view the monitoring information of the created ECS instance in the cloud monitoring console and recommend enable it. Default value: `false`. Valid values:- `true` : install cloud monitoring on the ECS node.- `false` : does not install cloud monitoring on the ECS node.
+        :param str instance_charge_type: Node payment type. Valid values: `PostPaid`, `PrePaid`, default is `PostPaid`. If value is `PrePaid`, the arguments `period`, `period_unit`, `auto_renew` and `auto_renew_period` are required.
+        :param Sequence[str] instance_types: In the node instance specification list, you can select multiple instance specifications as alternatives. When each node is created, it will try to purchase from the first specification until it is created successfully. The final purchased instance specifications may vary with inventory changes.
+        :param str internet_charge_type: The billing method for network usage. Valid values `PayByBandwidth` and `PayByTraffic`. Conflict with `eip_internet_charge_type`, EIP and public network IP can only choose one.
+        :param int internet_max_bandwidth_out: The maximum bandwidth of the public IP address of the node. The unit is Mbps(Mega bit per second). The value range is:\\[1,100\\]
+        :param str key_name: The name of the key pair. When the node pool is a managed node pool, only `key_name` is supported.
+        :param 'GetKubernetesNodePoolsNodepoolKubeletConfigurationArgs' kubelet_configuration: Kubelet configuration parameters for worker nodes. See `kubelet_configuration` below. More information in [Kubelet Configuration](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).
+        :param Sequence['GetKubernetesNodePoolsNodepoolLabelArgs'] labels: A List of Kubernetes labels to assign to the nodes . Only labels that are applied with the ACK API are managed by this argument. Detailed below. More information in [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
+        :param bool login_as_non_root: Whether the ECS instance is logged on as a ecs-user user. Valid value: `true` and `false`.
+        :param 'GetKubernetesNodePoolsNodepoolManagementArgs' management: Managed node pool configuration.
+        :param str multi_az_policy: The scaling policy for ECS instances in a multi-zone scaling group. Valid value: `PRIORITY`, `COST_OPTIMIZED` and `BALANCE`. `PRIORITY`: scales the capacity according to the virtual switches you define (VSwitchIds.N). When an ECS instance cannot be created in the zone where the higher-priority vSwitch is located, the next-priority vSwitch is automatically used to create an ECS instance. `COST_OPTIMIZED`: try to create by vCPU unit price from low to high. When the scaling configuration is configured with multiple instances of preemptible billing, preemptible instances are created first. You can continue to use the `CompensateWithOnDemand` parameter to specify whether to automatically try to create a preemptible instance by paying for it. It takes effect only when the scaling configuration has multi-instance specifications or preemptible instances. `BALANCE`: distributes ECS instances evenly among the multi-zone specified by the scaling group. If the zones become unbalanced due to insufficient inventory, you can use the API RebalanceInstances to balance resources.
+        :param str node_name_mode: Each node name consists of a prefix, its private network IP, and a suffix, separated by commas. The input format is `customized,,ip,`.- The prefix and suffix can be composed of one or more parts separated by '.', each part can use lowercase letters, numbers and '-', and the beginning and end of the node name must be lowercase letters and numbers.- The node IP address is the complete private IP address of the node.- For example, if the string `customized,aliyun,ip,com` is passed in (where 'customized' and 'ip' are fixed strings, 'aliyun' is the prefix, and 'com' is the suffix), the name of the node is `aliyun192.168.xxx.xxxcom`.
+        :param str node_pool_id: The ID of node pool.
+        :param str node_pool_name: The name of node pool.
+        :param str on_demand_base_capacity: The minimum number of pay-as-you-go instances that must be kept in the scaling group. Valid values: 0 to 1000. If the number of pay-as-you-go instances is less than the value of this parameter, Auto Scaling preferably creates pay-as-you-go instances.
+        :param str on_demand_percentage_above_base_capacity: The percentage of pay-as-you-go instances among the extra instances that exceed the number specified by `on_demand_base_capacity`. Valid values: 0 to 100.
+        :param str password: The password of ssh login. You have to specify one of `password` and `key_name` fields. The password rule is 8 to 30 characters and contains at least three items (upper and lower case letters, numbers, and special symbols).
+        :param int period: Node payment period. Its valid value is one of {1, 2, 3, 6, 12}.
+        :param str period_unit: Node payment period unit, valid value: `Month`. Default is `Month`.
+        :param str platform: Operating system release, using `image_type` instead.
+        :param str pre_user_data: Node pre custom data, base64-encoded, the script executed before the node is initialized.
+        :param 'GetKubernetesNodePoolsNodepoolPrivatePoolOptionsArgs' private_pool_options: Private node pool configuration.
+        :param str ram_role_name: The name of the Worker RAM role.* If it is empty, the default Worker RAM role created in the cluster will be used.* If the specified RAM role is not empty, the specified RAM role must be a **Common Service role**, and its **trusted service** configuration must be **cloud server**. For more information, see [Create a common service role](https://help.aliyun.com/document_detail/116800.html). If the specified RAM role is not the default Worker RAM role created in the cluster, the role name cannot start with 'KubernetesMasterRole-'or 'KubernetesWorkerRole.> **NOTE:** This parameter is only supported for ACK-managed clusters of 1.22 or later versions.
+        :param Sequence[str] rds_instances: The list of RDS instances.
+        :param str resource_group_id: The ID of the resource group
+        :param str runtime_name: The runtime name of containers. If not set, the cluster runtime will be used as the node pool runtime. If you select another container runtime, see [Comparison of Docker, containerd, and Sandboxed-Container](https://www.alibabacloud.com/help/doc-detail/160313.htm).
+        :param str runtime_version: The runtime version of containers. If not set, the cluster runtime will be used as the node pool runtime.
+        :param 'GetKubernetesNodePoolsNodepoolScalingConfigArgs' scaling_config: Automatic scaling configuration.
+        :param str scaling_group_id: The ID of the scaling group.
+        :param str scaling_policy: Scaling group mode, default value: `release`. Valid values:- `release`: in the standard mode, scaling is performed by creating and releasing ECS instances based on the usage of the application resource value.- `recycle`: in the speed mode, scaling is performed through creation, shutdown, and startup to increase the speed of scaling again (computing resources are not charged during shutdown, only storage fees are charged, except for local disk models).
+        :param str security_group_id: The security group ID of the node pool. This field has been replaced by `security_group_ids`, please use the `security_group_ids` field instead.
+        :param Sequence[str] security_group_ids: Multiple security groups can be configured for a node pool. If both `security_group_ids` and `security_group_id` are configured, `security_group_ids` takes effect. This field cannot be modified.
+        :param bool security_hardening_os: Alibaba Cloud OS security reinforcement. Default value: `false`. Value:-`true`: enable Alibaba Cloud OS security reinforcement.-`false`: does not enable Alibaba Cloud OS security reinforcement.
+        :param bool soc_enabled: Whether enable worker node to support soc security reinforcement, its valid value `true` or `false`. Default to `false` and apply to AliyunLinux series. See [SOC Reinforcement](https://help.aliyun.com/document_detail/196148.html).> It is forbidden to set both `security_hardening_os` and `soc_enabled` to `true` at the same time.
+        :param int spot_instance_pools: The number of instance types that are available. Auto Scaling creates preemptible instances of multiple instance types that are available at the lowest cost. Valid values: 1 to 10.
+        :param bool spot_instance_remedy: Specifies whether to supplement preemptible instances when the number of preemptible instances drops below the specified minimum number. If you set the value to true, Auto Scaling attempts to create a new preemptible instance when the system notifies that an existing preemptible instance is about to be reclaimed. Valid values: `true`: enables the supplementation of preemptible instances. `false`: disables the supplementation of preemptible instances.
+        :param Sequence['GetKubernetesNodePoolsNodepoolSpotPriceLimitArgs'] spot_price_limits: The current single preemptible instance type market price range configuration.
+        :param str spot_strategy: The preemptible instance type. Value:- `NoSpot` : Non-preemptible instance.- `SpotWithPriceLimit` : Set the upper limit of the preemptible instance price.- `SpotAsPriceGo` : The system automatically bids, following the actual price of the current market.
+        :param bool system_disk_bursting_enabled: Specifies whether to enable the burst feature for system disks. Valid values:`true`: enables the burst feature. `false`: disables the burst feature. This parameter is supported only when `system_disk_category` is set to `cloud_auto`.
+        :param Sequence[str] system_disk_categories: The multi-disk categories of the system disk. When a high-priority disk type cannot be used, Auto Scaling automatically tries to create a system disk with the next priority disk category. Valid values see `system_disk_category`.
+        :param str system_disk_category: The category of the system disk for nodes. Default value: `cloud_efficiency`. Valid values:- `cloud`: basic disk.- `cloud_efficiency`: ultra disk.- `cloud_ssd`: standard SSD.- `cloud_essd`: ESSD.- `cloud_auto`: ESSD AutoPL disk.- `cloud_essd_entry`: ESSD Entry disk.
+        :param str system_disk_encrypt_algorithm: The encryption algorithm used by the system disk. Value range: aes-256.
+        :param bool system_disk_encrypted: Whether to encrypt the system disk. Value range: `true`: encryption. `false`: Do not encrypt.
+        :param str system_disk_kms_key: The ID of the KMS key used by the system disk.
+        :param str system_disk_performance_level: The system disk performance of the node takes effect only for the ESSD disk.- `PL0`: maximum random read/write IOPS 10000 for a single disk.- `PL1`: maximum random read/write IOPS 50000 for a single disk.- `PL2`: highest random read/write IOPS 100000 for a single disk.- `PL3`: maximum random read/write IOPS 1 million for a single disk.
+        :param int system_disk_provisioned_iops: The predefined IOPS of a system disk. Valid values: 0 to min{50,000, 1,000 × Capacity - Baseline IOPS}. Baseline IOPS = min{1,800 + 50 × Capacity, 50,000}. This parameter is supported only when `system_disk_category` is set to `cloud_auto`.
+        :param int system_disk_size: The size of the system disk. Unit: GiB. The value of this parameter must be at least 1 and greater than or equal to the image size. Default value: 40 or the size of the image, whichever is larger.- Basic disk: 20 to 500.- ESSD (cloud_essd): The valid values vary based on the performance level of the ESSD. PL0 ESSD: 1 to 2048. PL1 ESSD: 20 to 2048. PL2 ESSD: 461 to 2048. PL3 ESSD: 1261 to 2048.- ESSD AutoPL disk (cloud_auto): 1 to 2048.- Other disk categories: 20 to 2048.
+        :param str system_disk_snapshot_policy_id: The ID of the automatic snapshot policy used by the system disk.
+        :param Mapping[str, str] tags: Add tags only for ECS instances. The maximum length of the tag key is 128 characters. The tag key and value cannot start with aliyun or acs:, or contain https:// or http://.
+        :param Sequence['GetKubernetesNodePoolsNodepoolTaintArgs'] taints: A List of Kubernetes taints to assign to the nodes. Detailed below. More information in [Taints and Toleration](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+        :param 'GetKubernetesNodePoolsNodepoolTeeConfigArgs' tee_config: The configuration about confidential computing for the cluster.
+        :param bool unschedulable: Whether the node after expansion can be scheduled.
+        :param str user_data: Node custom data, base64-encoded.
+        :param Sequence[str] vswitch_ids: The vswitches used by node pool workers.
+        """
+        pulumi.set(__self__, "auto_renew", auto_renew)
+        pulumi.set(__self__, "auto_renew_period", auto_renew_period)
+        pulumi.set(__self__, "cis_enabled", cis_enabled)
+        pulumi.set(__self__, "compensate_with_on_demand", compensate_with_on_demand)
+        pulumi.set(__self__, "cpu_policy", cpu_policy)
+        pulumi.set(__self__, "data_disks", data_disks)
+        pulumi.set(__self__, "deployment_set_id", deployment_set_id)
+        pulumi.set(__self__, "desired_size", desired_size)
+        pulumi.set(__self__, "image_id", image_id)
+        pulumi.set(__self__, "image_type", image_type)
+        pulumi.set(__self__, "install_cloud_monitor", install_cloud_monitor)
+        pulumi.set(__self__, "instance_charge_type", instance_charge_type)
+        pulumi.set(__self__, "instance_types", instance_types)
+        pulumi.set(__self__, "internet_charge_type", internet_charge_type)
+        pulumi.set(__self__, "internet_max_bandwidth_out", internet_max_bandwidth_out)
+        pulumi.set(__self__, "key_name", key_name)
+        pulumi.set(__self__, "kubelet_configuration", kubelet_configuration)
+        pulumi.set(__self__, "labels", labels)
+        pulumi.set(__self__, "login_as_non_root", login_as_non_root)
+        pulumi.set(__self__, "management", management)
+        pulumi.set(__self__, "multi_az_policy", multi_az_policy)
+        pulumi.set(__self__, "node_name_mode", node_name_mode)
+        pulumi.set(__self__, "node_pool_id", node_pool_id)
+        pulumi.set(__self__, "node_pool_name", node_pool_name)
+        pulumi.set(__self__, "on_demand_base_capacity", on_demand_base_capacity)
+        pulumi.set(__self__, "on_demand_percentage_above_base_capacity", on_demand_percentage_above_base_capacity)
+        pulumi.set(__self__, "password", password)
+        pulumi.set(__self__, "period", period)
+        pulumi.set(__self__, "period_unit", period_unit)
+        pulumi.set(__self__, "platform", platform)
+        pulumi.set(__self__, "pre_user_data", pre_user_data)
+        pulumi.set(__self__, "private_pool_options", private_pool_options)
+        pulumi.set(__self__, "ram_role_name", ram_role_name)
+        pulumi.set(__self__, "rds_instances", rds_instances)
+        pulumi.set(__self__, "resource_group_id", resource_group_id)
+        pulumi.set(__self__, "runtime_name", runtime_name)
+        pulumi.set(__self__, "runtime_version", runtime_version)
+        pulumi.set(__self__, "scaling_config", scaling_config)
+        pulumi.set(__self__, "scaling_group_id", scaling_group_id)
+        pulumi.set(__self__, "scaling_policy", scaling_policy)
+        pulumi.set(__self__, "security_group_id", security_group_id)
+        pulumi.set(__self__, "security_group_ids", security_group_ids)
+        pulumi.set(__self__, "security_hardening_os", security_hardening_os)
+        pulumi.set(__self__, "soc_enabled", soc_enabled)
+        pulumi.set(__self__, "spot_instance_pools", spot_instance_pools)
+        pulumi.set(__self__, "spot_instance_remedy", spot_instance_remedy)
+        pulumi.set(__self__, "spot_price_limits", spot_price_limits)
+        pulumi.set(__self__, "spot_strategy", spot_strategy)
+        pulumi.set(__self__, "system_disk_bursting_enabled", system_disk_bursting_enabled)
+        pulumi.set(__self__, "system_disk_categories", system_disk_categories)
+        pulumi.set(__self__, "system_disk_category", system_disk_category)
+        pulumi.set(__self__, "system_disk_encrypt_algorithm", system_disk_encrypt_algorithm)
+        pulumi.set(__self__, "system_disk_encrypted", system_disk_encrypted)
+        pulumi.set(__self__, "system_disk_kms_key", system_disk_kms_key)
+        pulumi.set(__self__, "system_disk_performance_level", system_disk_performance_level)
+        pulumi.set(__self__, "system_disk_provisioned_iops", system_disk_provisioned_iops)
+        pulumi.set(__self__, "system_disk_size", system_disk_size)
+        pulumi.set(__self__, "system_disk_snapshot_policy_id", system_disk_snapshot_policy_id)
+        pulumi.set(__self__, "tags", tags)
+        pulumi.set(__self__, "taints", taints)
+        pulumi.set(__self__, "tee_config", tee_config)
+        pulumi.set(__self__, "unschedulable", unschedulable)
+        pulumi.set(__self__, "user_data", user_data)
+        pulumi.set(__self__, "vswitch_ids", vswitch_ids)
+
+    @property
+    @pulumi.getter(name="autoRenew")
+    def auto_renew(self) -> bool:
         """
-
-        :param bool is_ram_role: Indicates whether the permissions are granted to the RAM role. Valid values `false`, `true`.
-        :param str resource_id: The permission settings to manage ACK clusters.
-        :param str resource_type: The authorization type. Valid values `cluster`, `namespace` and `console`.
-        :param str role_name: The name of the predefined role. If a custom role is assigned, the value is the name of the assigined custom role.
-        :param str role_type: The predefined role. Valid values `admin`,`ops`,`dev`,`restricted` and `custom`.
+        Whether to enable automatic renewal for nodes in the node pool takes effect only when `instance_charge_type` is set to `PrePaid`. Default value: `false`. Valid values:- `true`: Automatic renewal. - `false`: Do not renew automatically.
         """
-        pulumi.
-        pulumi.set(__self__, "is_ram_role", is_ram_role)
-        pulumi.set(__self__, "resource_id", resource_id)
-        pulumi.set(__self__, "resource_type", resource_type)
-        pulumi.set(__self__, "role_name", role_name)
-        pulumi.set(__self__, "role_type", role_type)
+        return pulumi.get(self, "auto_renew")

     @property
-    @pulumi.getter(name="
-    def
+    @pulumi.getter(name="autoRenewPeriod")
+    def auto_renew_period(self) -> int:
         """
-
+        The automatic renewal period of nodes in the node pool takes effect only when you select Prepaid and Automatic Renewal, and is a required value. When `PeriodUnit = Month`, the value range is {1, 2, 3, 6, 12}. Default value: 1.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "auto_renew_period")

     @property
-    @pulumi.getter(name="
-    def
+    @pulumi.getter(name="cisEnabled")
+    def cis_enabled(self) -> bool:
         """
-
+        Whether enable worker node to support cis security reinforcement, its valid value `true` or `false`. Default to `false` and apply to AliyunLinux series. Use `security_hardening_os` instead.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "cis_enabled")

     @property
-    @pulumi.getter(name="
-    def
+    @pulumi.getter(name="compensateWithOnDemand")
+    def compensate_with_on_demand(self) -> bool:
         """
-
+        Specifies whether to automatically create pay-as-you-go instances to meet the required number of ECS instances if preemptible instances cannot be created due to reasons such as cost or insufficient inventory. This parameter takes effect when you set `multi_az_policy` to `COST_OPTIMIZED`. Valid values: `true`: automatically creates pay-as-you-go instances to meet the required number of ECS instances if preemptible instances cannot be created. `false`: does not create pay-as-you-go instances to meet the required number of ECS instances if preemptible instances cannot be created.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "compensate_with_on_demand")

     @property
-    @pulumi.getter(name="
-    def
+    @pulumi.getter(name="cpuPolicy")
+    def cpu_policy(self) -> str:
         """
-
+        Node CPU management policies. Default value: `none`. When the cluster version is 1.12.6 or later, the following two policies are supported:- `static`: allows pods with certain resource characteristics on the node to enhance its CPU affinity and exclusivity.- `none`: Enables the existing default CPU affinity scheme.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "cpu_policy")

     @property
-    @pulumi.getter(name="
-    def
+    @pulumi.getter(name="dataDisks")
+    def data_disks(self) -> Sequence['outputs.GetKubernetesNodePoolsNodepoolDataDiskResult']:
         """
-
+        Configure the data disk of the node in the node pool.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "data_disks")

     @property
-    @pulumi.getter(name="
-    def
+    @pulumi.getter(name="deploymentSetId")
+    def deployment_set_id(self) -> str:
         """
-        The
+        The deployment set of node pool. Specify the deploymentSet to ensure that the nodes in the node pool can be distributed on different physical machines.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "deployment_set_id")

+    @property
+    @pulumi.getter(name="desiredSize")
+    def desired_size(self) -> str:
+        """
+        Number of expected nodes in the node pool.
+        """
+        return pulumi.get(self, "desired_size")

-    @
-
-    def
-                 runtimes: Sequence['outputs.GetKubernetesVersionMetadataRuntimeResult'],
-                 version: str):
+    @property
+    @pulumi.getter(name="imageId")
+    def image_id(self) -> str:
         """
-
-        :param str version: The runtime version.
+        The custom image ID. The system-provided image is used by default.
         """
-        pulumi.
-        pulumi.set(__self__, "version", version)
+        return pulumi.get(self, "image_id")

     @property
-    @pulumi.getter
-    def
+    @pulumi.getter(name="imageType")
+    def image_type(self) -> str:
         """
-        The
+        The operating system image type and the `platform` parameter can be selected from the following values:- `AliyunLinux` : Alinux2 image.- `AliyunLinux3` : Alinux3 image.- `AliyunLinux3Arm64` : Alinux3 mirror ARM version.- `AliyunLinuxUEFI` : Alinux2 Image UEFI version.- `CentOS` : CentOS image.- `Windows` : Windows image.- `WindowsCore` : WindowsCore image.- `ContainerOS` : container-optimized image.- `Ubuntu`: Ubuntu image.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "image_type")

     @property
-    @pulumi.getter
-    def
+    @pulumi.getter(name="installCloudMonitor")
+    def install_cloud_monitor(self) -> bool:
         """
-
+        Whether to install cloud monitoring on the ECS node. After installation, you can view the monitoring information of the created ECS instance in the cloud monitoring console and recommend enable it. Default value: `false`. Valid values:- `true` : install cloud monitoring on the ECS node.- `false` : does not install cloud monitoring on the ECS node.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "install_cloud_monitor")

+    @property
+    @pulumi.getter(name="instanceChargeType")
+    def instance_charge_type(self) -> str:
+        """
+        Node payment type. Valid values: `PostPaid`, `PrePaid`, default is `PostPaid`. If value is `PrePaid`, the arguments `period`, `period_unit`, `auto_renew` and `auto_renew_period` are required.
+        """
+        return pulumi.get(self, "instance_charge_type")

-    @
-
-    def
-                 name: str,
-                 version: str):
+    @property
+    @pulumi.getter(name="instanceTypes")
+    def instance_types(self) -> Sequence[str]:
         """
-
-        :param str version: The runtime version.
+        In the node instance specification list, you can select multiple instance specifications as alternatives. When each node is created, it will try to purchase from the first specification until it is created successfully. The final purchased instance specifications may vary with inventory changes.
         """
-        pulumi.
-        pulumi.set(__self__, "version", version)
+        return pulumi.get(self, "instance_types")

     @property
-    @pulumi.getter
-    def
+    @pulumi.getter(name="internetChargeType")
+    def internet_charge_type(self) -> str:
         """
-        The
+        The billing method for network usage. Valid values `PayByBandwidth` and `PayByTraffic`. Conflict with `eip_internet_charge_type`, EIP and public network IP can only choose one.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "internet_charge_type")

     @property
-    @pulumi.getter
-    def
+    @pulumi.getter(name="internetMaxBandwidthOut")
+    def internet_max_bandwidth_out(self) -> int:
         """
-        The
+        The maximum bandwidth of the public IP address of the node. The unit is Mbps(Mega bit per second). The value range is:\\[1,100\\]
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "internet_max_bandwidth_out")

+    @property
+    @pulumi.getter(name="keyName")
+    def key_name(self) -> str:
+        """
+        The name of the key pair. When the node pool is a managed node pool, only `key_name` is supported.
+        """
+        return pulumi.get(self, "key_name")

-    @
-
-    def
-                 availability_zone: str,
-                 cluster_network_type: str,
-                 connections: 'outputs.GetManagedKubernetesClustersClusterConnectionsResult',
-                 id: str,
-                 image_id: str,
-                 key_name: str,
-                 log_configs: Sequence['outputs.GetManagedKubernetesClustersClusterLogConfigResult'],
-                 name: str,
-                 nat_gateway_id: str,
-                 pod_cidr: str,
-                 security_group_id: str,
-                 service_cidr: str,
-                 slb_internet_enabled: bool,
-                 vpc_id: str,
-                 vswitch_ids: Sequence[str],
-                 worker_auto_renew: bool,
-                 worker_auto_renew_period: int,
-                 worker_data_disk_category: str,
-                 worker_data_disk_size: int,
-                 worker_disk_category: str,
-                 worker_disk_size: int,
-                 worker_instance_charge_type: str,
-                 worker_instance_types: Sequence[str],
-                 worker_nodes: Sequence['outputs.GetManagedKubernetesClustersClusterWorkerNodeResult'],
-                 worker_numbers: Sequence[int],
-                 worker_period: int,
-                 worker_period_unit: str):
+    @property
+    @pulumi.getter(name="kubeletConfiguration")
+    def kubelet_configuration(self) -> 'outputs.GetKubernetesNodePoolsNodepoolKubeletConfigurationResult':
         """
-
-        :param 'GetManagedKubernetesClustersClusterConnectionsArgs' connections: Map of kubernetes cluster connection information. It contains several attributes to `Block Connections`.
-        :param str id: ID of the node.
-        :param str key_name: The keypair of ssh login cluster node, you have to create it first.
-        :param Sequence['GetManagedKubernetesClustersClusterLogConfigArgs'] log_configs: A list of one element containing information about the associated log store. It contains the following attributes:
-        :param str name: Node name.
-        :param str nat_gateway_id: The ID of nat gateway used to launch kubernetes cluster.
-        :param str security_group_id: The ID of security group where the current cluster worker node is located.
-        :param str vpc_id: The ID of VPC where the current cluster is located.
-        :param Sequence[str] vswitch_ids: The ID of VSwitches where the current cluster is located.
-        :param Sequence['GetManagedKubernetesClustersClusterWorkerNodeArgs'] worker_nodes: List of cluster worker nodes. It contains several attributes to `Block Nodes`.
-        :param Sequence[int] worker_numbers: The ECS instance node number in the current container cluster.
:param Sequence[int] worker_numbers: The ECS instance node number in the current container cluster.
|
|
4939
|
+
Kubelet configuration parameters for worker nodes. See `kubelet_configuration` below. More information in [Kubelet Configuration](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).
|
|
4767
4940
|
"""
|
|
4768
|
-
pulumi.
|
|
4769
|
-
pulumi.set(__self__, "cluster_network_type", cluster_network_type)
|
|
4770
|
-
pulumi.set(__self__, "connections", connections)
|
|
4771
|
-
pulumi.set(__self__, "id", id)
|
|
4772
|
-
pulumi.set(__self__, "image_id", image_id)
|
|
4773
|
-
pulumi.set(__self__, "key_name", key_name)
|
|
4774
|
-
pulumi.set(__self__, "log_configs", log_configs)
|
|
4775
|
-
pulumi.set(__self__, "name", name)
|
|
4776
|
-
pulumi.set(__self__, "nat_gateway_id", nat_gateway_id)
|
|
4777
|
-
pulumi.set(__self__, "pod_cidr", pod_cidr)
|
|
4778
|
-
pulumi.set(__self__, "security_group_id", security_group_id)
|
|
4779
|
-
pulumi.set(__self__, "service_cidr", service_cidr)
|
|
4780
|
-
pulumi.set(__self__, "slb_internet_enabled", slb_internet_enabled)
|
|
4781
|
-
pulumi.set(__self__, "vpc_id", vpc_id)
|
|
4782
|
-
pulumi.set(__self__, "vswitch_ids", vswitch_ids)
|
|
4783
|
-
pulumi.set(__self__, "worker_auto_renew", worker_auto_renew)
|
|
4784
|
-
pulumi.set(__self__, "worker_auto_renew_period", worker_auto_renew_period)
|
|
4785
|
-
pulumi.set(__self__, "worker_data_disk_category", worker_data_disk_category)
|
|
4786
|
-
pulumi.set(__self__, "worker_data_disk_size", worker_data_disk_size)
|
|
4787
|
-
pulumi.set(__self__, "worker_disk_category", worker_disk_category)
|
|
4788
|
-
pulumi.set(__self__, "worker_disk_size", worker_disk_size)
|
|
4789
|
-
pulumi.set(__self__, "worker_instance_charge_type", worker_instance_charge_type)
|
|
4790
|
-
pulumi.set(__self__, "worker_instance_types", worker_instance_types)
|
|
4791
|
-
pulumi.set(__self__, "worker_nodes", worker_nodes)
|
|
4792
|
-
pulumi.set(__self__, "worker_numbers", worker_numbers)
|
|
4793
|
-
pulumi.set(__self__, "worker_period", worker_period)
|
|
4794
|
-
pulumi.set(__self__, "worker_period_unit", worker_period_unit)
|
|
4941
|
+
return pulumi.get(self, "kubelet_configuration")
|
|
4795
4942
|
|
|
4796
4943
|
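The `instance_charge_type` description above ties the subscription fields (`period`, `period_unit`, `auto_renew`, `auto_renew_period`) to `PrePaid` pools. A minimal sketch of how a consumer might branch on that value, assuming `pool` is any object exposing the getters defined in this class (the helper name is illustrative, not part of the SDK):

def describe_node_pool_billing(pool) -> str:
    # PrePaid pools carry subscription settings; PostPaid pools are pay-as-you-go.
    if pool.instance_charge_type == "PrePaid":
        return f"subscription, renewed every {pool.period} {pool.period_unit}"
    return "pay-as-you-go"
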
     @property
-    @pulumi.getter
-    def
+    @pulumi.getter
+    def labels(self) -> Sequence['outputs.GetKubernetesNodePoolsNodepoolLabelResult']:
         """
-
+        A List of Kubernetes labels to assign to the nodes . Only labels that are applied with the ACK API are managed by this argument. Detailed below. More information in [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "labels")

     @property
-    @pulumi.getter(name="
-    def
-
+    @pulumi.getter(name="loginAsNonRoot")
+    def login_as_non_root(self) -> bool:
+        """
+        Whether the ECS instance is logged on as a ecs-user user. Valid value: `true` and `false`.
+        """
+        return pulumi.get(self, "login_as_non_root")

     @property
     @pulumi.getter
-    def
+    def management(self) -> 'outputs.GetKubernetesNodePoolsNodepoolManagementResult':
         """
-
+        Managed node pool configuration.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "management")

     @property
-    @pulumi.getter
-    def
+    @pulumi.getter(name="multiAzPolicy")
+    def multi_az_policy(self) -> str:
         """
-
+        The scaling policy for ECS instances in a multi-zone scaling group. Valid value: `PRIORITY`, `COST_OPTIMIZED` and `BALANCE`. `PRIORITY`: scales the capacity according to the virtual switches you define (VSwitchIds.N). When an ECS instance cannot be created in the zone where the higher-priority vSwitch is located, the next-priority vSwitch is automatically used to create an ECS instance. `COST_OPTIMIZED`: try to create by vCPU unit price from low to high. When the scaling configuration is configured with multiple instances of preemptible billing, preemptible instances are created first. You can continue to use the `CompensateWithOnDemand` parameter to specify whether to automatically try to create a preemptible instance by paying for it. It takes effect only when the scaling configuration has multi-instance specifications or preemptible instances. `BALANCE`: distributes ECS instances evenly among the multi-zone specified by the scaling group. If the zones become unbalanced due to insufficient inventory, you can use the API RebalanceInstances to balance resources.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "multi_az_policy")

     @property
-    @pulumi.getter(name="
-    def
-
+    @pulumi.getter(name="nodeNameMode")
+    def node_name_mode(self) -> str:
+        """
+        Each node name consists of a prefix, its private network IP, and a suffix, separated by commas. The input format is `customized,,ip,`.- The prefix and suffix can be composed of one or more parts separated by '.', each part can use lowercase letters, numbers and '-', and the beginning and end of the node name must be lowercase letters and numbers.- The node IP address is the complete private IP address of the node.- For example, if the string `customized,aliyun,ip,com` is passed in (where 'customized' and 'ip' are fixed strings, 'aliyun' is the prefix, and 'com' is the suffix), the name of the node is `aliyun192.168.xxx.xxxcom`.
+        """
+        return pulumi.get(self, "node_name_mode")

     @property
-    @pulumi.getter(name="
-    def
+    @pulumi.getter(name="nodePoolId")
+    def node_pool_id(self) -> str:
         """
-        The
+        The ID of node pool.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "node_pool_id")

     @property
-    @pulumi.getter(name="
-    def
+    @pulumi.getter(name="nodePoolName")
+    def node_pool_name(self) -> str:
         """
-
+        The name of node pool.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "node_pool_name")

     @property
-    @pulumi.getter
-    def
+    @pulumi.getter(name="onDemandBaseCapacity")
+    def on_demand_base_capacity(self) -> str:
         """
-
+        The minimum number of pay-as-you-go instances that must be kept in the scaling group. Valid values: 0 to 1000. If the number of pay-as-you-go instances is less than the value of this parameter, Auto Scaling preferably creates pay-as-you-go instances.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "on_demand_base_capacity")

     @property
-    @pulumi.getter(name="
-    def
+    @pulumi.getter(name="onDemandPercentageAboveBaseCapacity")
+    def on_demand_percentage_above_base_capacity(self) -> str:
         """
-        The
+        The percentage of pay-as-you-go instances among the extra instances that exceed the number specified by `on_demand_base_capacity`. Valid values: 0 to 100.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "on_demand_percentage_above_base_capacity")

     @property
-    @pulumi.getter
-    def
-
+    @pulumi.getter
+    def password(self) -> str:
+        """
+        The password of ssh login. You have to specify one of `password` and `key_name` fields. The password rule is 8 to 30 characters and contains at least three items (upper and lower case letters, numbers, and special symbols).
+        """
+        return pulumi.get(self, "password")

     @property
-    @pulumi.getter
-    def
+    @pulumi.getter
+    def period(self) -> int:
         """
-
+        Node payment period. Its valid value is one of {1, 2, 3, 6, 12}.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "period")

     @property
-    @pulumi.getter(name="
-    def
-
+    @pulumi.getter(name="periodUnit")
+    def period_unit(self) -> str:
+        """
+        Node payment period unit, valid value: `Month`. Default is `Month`.
+        """
+        return pulumi.get(self, "period_unit")

     @property
-    @pulumi.getter
-    def
-
+    @pulumi.getter
+    def platform(self) -> str:
+        """
+        Operating system release, using `image_type` instead.
+        """
+        return pulumi.get(self, "platform")

     @property
-    @pulumi.getter(name="
-    def
+    @pulumi.getter(name="preUserData")
+    def pre_user_data(self) -> str:
         """
-
+        Node pre custom data, base64-encoded, the script executed before the node is initialized.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "pre_user_data")

     @property
-    @pulumi.getter(name="
-    def
+    @pulumi.getter(name="privatePoolOptions")
+    def private_pool_options(self) -> 'outputs.GetKubernetesNodePoolsNodepoolPrivatePoolOptionsResult':
         """
-
+        Private node pool configuration.
         """
-        return pulumi.get(self, "
+        return pulumi.get(self, "private_pool_options")

     @property
-    @pulumi.getter(name="
-    def
-
+    @pulumi.getter(name="ramRoleName")
+    def ram_role_name(self) -> str:
+        """
+        The name of the Worker RAM role.* If it is empty, the default Worker RAM role created in the cluster will be used.* If the specified RAM role is not empty, the specified RAM role must be a **Common Service role**, and its **trusted service** configuration must be **cloud server**. For more information, see [Create a common service role](https://help.aliyun.com/document_detail/116800.html). If the specified RAM role is not the default Worker RAM role created in the cluster, the role name cannot start with 'KubernetesMasterRole-'or 'KubernetesWorkerRole.> **NOTE:** This parameter is only supported for ACK-managed clusters of 1.22 or later versions.
+        """
+        return pulumi.get(self, "ram_role_name")

     @property
-    @pulumi.getter(name="
-    def
-
+    @pulumi.getter(name="rdsInstances")
+    def rds_instances(self) -> Sequence[str]:
+        """
+        The list of RDS instances.
+        """
+        return pulumi.get(self, "rds_instances")
+
+    @property
+    @pulumi.getter(name="resourceGroupId")
+    def resource_group_id(self) -> str:
+        """
+        The ID of the resource group
+        """
+        return pulumi.get(self, "resource_group_id")
+
+    @property
+    @pulumi.getter(name="runtimeName")
+    def runtime_name(self) -> str:
+        """
+        The runtime name of containers. If not set, the cluster runtime will be used as the node pool runtime. If you select another container runtime, see [Comparison of Docker, containerd, and Sandboxed-Container](https://www.alibabacloud.com/help/doc-detail/160313.htm).
+        """
+        return pulumi.get(self, "runtime_name")
+
+    @property
+    @pulumi.getter(name="runtimeVersion")
+    def runtime_version(self) -> str:
+        """
+        The runtime version of containers. If not set, the cluster runtime will be used as the node pool runtime.
+        """
+        return pulumi.get(self, "runtime_version")
+
+    @property
+    @pulumi.getter(name="scalingConfig")
+    def scaling_config(self) -> 'outputs.GetKubernetesNodePoolsNodepoolScalingConfigResult':
+        """
+        Automatic scaling configuration.
+        """
+        return pulumi.get(self, "scaling_config")
+
+    @property
+    @pulumi.getter(name="scalingGroupId")
+    def scaling_group_id(self) -> str:
+        """
+        The ID of the scaling group.
+        """
+        return pulumi.get(self, "scaling_group_id")
+
+    @property
+    @pulumi.getter(name="scalingPolicy")
+    def scaling_policy(self) -> str:
+        """
+        Scaling group mode, default value: `release`. Valid values:- `release`: in the standard mode, scaling is performed by creating and releasing ECS instances based on the usage of the application resource value.- `recycle`: in the speed mode, scaling is performed through creation, shutdown, and startup to increase the speed of scaling again (computing resources are not charged during shutdown, only storage fees are charged, except for local disk models).
+        """
+        return pulumi.get(self, "scaling_policy")
+
+    @property
+    @pulumi.getter(name="securityGroupId")
+    def security_group_id(self) -> str:
+        """
+        The security group ID of the node pool. This field has been replaced by `security_group_ids`, please use the `security_group_ids` field instead.
+        """
+        return pulumi.get(self, "security_group_id")
+
+    @property
+    @pulumi.getter(name="securityGroupIds")
+    def security_group_ids(self) -> Sequence[str]:
+        """
+        Multiple security groups can be configured for a node pool. If both `security_group_ids` and `security_group_id` are configured, `security_group_ids` takes effect. This field cannot be modified.
+        """
+        return pulumi.get(self, "security_group_ids")
+
+    @property
+    @pulumi.getter(name="securityHardeningOs")
+    def security_hardening_os(self) -> bool:
+        """
+        Alibaba Cloud OS security reinforcement. Default value: `false`. Value:-`true`: enable Alibaba Cloud OS security reinforcement.-`false`: does not enable Alibaba Cloud OS security reinforcement.
+        """
+        return pulumi.get(self, "security_hardening_os")
+
+    @property
+    @pulumi.getter(name="socEnabled")
+    def soc_enabled(self) -> bool:
+        """
+        Whether enable worker node to support soc security reinforcement, its valid value `true` or `false`. Default to `false` and apply to AliyunLinux series. See [SOC Reinforcement](https://help.aliyun.com/document_detail/196148.html).> It is forbidden to set both `security_hardening_os` and `soc_enabled` to `true` at the same time.
+        """
+        return pulumi.get(self, "soc_enabled")
+
+    @property
+    @pulumi.getter(name="spotInstancePools")
+    def spot_instance_pools(self) -> int:
+        """
+        The number of instance types that are available. Auto Scaling creates preemptible instances of multiple instance types that are available at the lowest cost. Valid values: 1 to 10.
+        """
+        return pulumi.get(self, "spot_instance_pools")
+
+    @property
+    @pulumi.getter(name="spotInstanceRemedy")
+    def spot_instance_remedy(self) -> bool:
+        """
+        Specifies whether to supplement preemptible instances when the number of preemptible instances drops below the specified minimum number. If you set the value to true, Auto Scaling attempts to create a new preemptible instance when the system notifies that an existing preemptible instance is about to be reclaimed. Valid values: `true`: enables the supplementation of preemptible instances. `false`: disables the supplementation of preemptible instances.
+        """
+        return pulumi.get(self, "spot_instance_remedy")
+
+    @property
+    @pulumi.getter(name="spotPriceLimits")
+    def spot_price_limits(self) -> Sequence['outputs.GetKubernetesNodePoolsNodepoolSpotPriceLimitResult']:
+        """
+        The current single preemptible instance type market price range configuration.
+        """
+        return pulumi.get(self, "spot_price_limits")
+
+    @property
+    @pulumi.getter(name="spotStrategy")
+    def spot_strategy(self) -> str:
+        """
+        The preemptible instance type. Value:- `NoSpot` : Non-preemptible instance.- `SpotWithPriceLimit` : Set the upper limit of the preemptible instance price.- `SpotAsPriceGo` : The system automatically bids, following the actual price of the current market.
+        """
+        return pulumi.get(self, "spot_strategy")
+
+    @property
+    @pulumi.getter(name="systemDiskBurstingEnabled")
+    def system_disk_bursting_enabled(self) -> bool:
+        """
+        Specifies whether to enable the burst feature for system disks. Valid values:`true`: enables the burst feature. `false`: disables the burst feature. This parameter is supported only when `system_disk_category` is set to `cloud_auto`.
+        """
+        return pulumi.get(self, "system_disk_bursting_enabled")
+
+    @property
+    @pulumi.getter(name="systemDiskCategories")
+    def system_disk_categories(self) -> Sequence[str]:
+        """
+        The multi-disk categories of the system disk. When a high-priority disk type cannot be used, Auto Scaling automatically tries to create a system disk with the next priority disk category. Valid values see `system_disk_category`.
+        """
+        return pulumi.get(self, "system_disk_categories")
+
+    @property
+    @pulumi.getter(name="systemDiskCategory")
+    def system_disk_category(self) -> str:
+        """
+        The category of the system disk for nodes. Default value: `cloud_efficiency`. Valid values:- `cloud`: basic disk.- `cloud_efficiency`: ultra disk.- `cloud_ssd`: standard SSD.- `cloud_essd`: ESSD.- `cloud_auto`: ESSD AutoPL disk.- `cloud_essd_entry`: ESSD Entry disk.
+        """
+        return pulumi.get(self, "system_disk_category")
+
+    @property
+    @pulumi.getter(name="systemDiskEncryptAlgorithm")
+    def system_disk_encrypt_algorithm(self) -> str:
+        """
+        The encryption algorithm used by the system disk. Value range: aes-256.
+        """
+        return pulumi.get(self, "system_disk_encrypt_algorithm")
+
+    @property
+    @pulumi.getter(name="systemDiskEncrypted")
+    def system_disk_encrypted(self) -> bool:
+        """
+        Whether to encrypt the system disk. Value range: `true`: encryption. `false`: Do not encrypt.
+        """
+        return pulumi.get(self, "system_disk_encrypted")
+
+    @property
+    @pulumi.getter(name="systemDiskKmsKey")
+    def system_disk_kms_key(self) -> str:
+        """
+        The ID of the KMS key used by the system disk.
+        """
+        return pulumi.get(self, "system_disk_kms_key")
+
+    @property
+    @pulumi.getter(name="systemDiskPerformanceLevel")
+    def system_disk_performance_level(self) -> str:
+        """
+        The system disk performance of the node takes effect only for the ESSD disk.- `PL0`: maximum random read/write IOPS 10000 for a single disk.- `PL1`: maximum random read/write IOPS 50000 for a single disk.- `PL2`: highest random read/write IOPS 100000 for a single disk.- `PL3`: maximum random read/write IOPS 1 million for a single disk.
+        """
+        return pulumi.get(self, "system_disk_performance_level")
+
+    @property
+    @pulumi.getter(name="systemDiskProvisionedIops")
+    def system_disk_provisioned_iops(self) -> int:
+        """
+        The predefined IOPS of a system disk. Valid values: 0 to min{50,000, 1,000 × Capacity - Baseline IOPS}. Baseline IOPS = min{1,800 + 50 × Capacity, 50,000}. This parameter is supported only when `system_disk_category` is set to `cloud_auto`.
+        """
+        return pulumi.get(self, "system_disk_provisioned_iops")
+
+    @property
+    @pulumi.getter(name="systemDiskSize")
+    def system_disk_size(self) -> int:
+        """
+        The size of the system disk. Unit: GiB. The value of this parameter must be at least 1 and greater than or equal to the image size. Default value: 40 or the size of the image, whichever is larger.- Basic disk: 20 to 500.- ESSD (cloud_essd): The valid values vary based on the performance level of the ESSD. PL0 ESSD: 1 to 2048. PL1 ESSD: 20 to 2048. PL2 ESSD: 461 to 2048. PL3 ESSD: 1261 to 2048.- ESSD AutoPL disk (cloud_auto): 1 to 2048.- Other disk categories: 20 to 2048.
+        """
+        return pulumi.get(self, "system_disk_size")
+
+    @property
+    @pulumi.getter(name="systemDiskSnapshotPolicyId")
+    def system_disk_snapshot_policy_id(self) -> str:
+        """
+        The ID of the automatic snapshot policy used by the system disk.
+        """
+        return pulumi.get(self, "system_disk_snapshot_policy_id")
+
+    @property
+    @pulumi.getter
+    def tags(self) -> Mapping[str, str]:
+        """
+        Add tags only for ECS instances. The maximum length of the tag key is 128 characters. The tag key and value cannot start with aliyun or acs:, or contain https:// or http://.
+        """
+        return pulumi.get(self, "tags")
+
+    @property
+    @pulumi.getter
+    def taints(self) -> Sequence['outputs.GetKubernetesNodePoolsNodepoolTaintResult']:
+        """
+        A List of Kubernetes taints to assign to the nodes. Detailed below. More information in [Taints and Toleration](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+        """
+        return pulumi.get(self, "taints")
+
+    @property
+    @pulumi.getter(name="teeConfig")
+    def tee_config(self) -> 'outputs.GetKubernetesNodePoolsNodepoolTeeConfigResult':
+        """
+        The configuration about confidential computing for the cluster.
+        """
+        return pulumi.get(self, "tee_config")
+
+    @property
+    @pulumi.getter
+    def unschedulable(self) -> bool:
+        """
+        Whether the node after expansion can be scheduled.
+        """
+        return pulumi.get(self, "unschedulable")
+
+    @property
+    @pulumi.getter(name="userData")
+    def user_data(self) -> str:
+        """
+        Node custom data, base64-encoded.
+        """
+        return pulumi.get(self, "user_data")
+
+    @property
+    @pulumi.getter(name="vswitchIds")
+    def vswitch_ids(self) -> Sequence[str]:
+        """
+        The vswitches used by node pool workers.
+        """
+        return pulumi.get(self, "vswitch_ids")
+
+
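The getters above cover every attribute exposed for a single node pool entry. A short usage sketch follows, assuming the data source is surfaced as `alicloud.cs.get_kubernetes_node_pools` with a `cluster_id` argument and a `nodepools` result list (names inferred from the `GetKubernetesNodePoolsNodepool*Result` classes in this file; the cluster ID is a placeholder):

import pulumi
import pulumi_alicloud as alicloud

# Look up the node pools of an existing ACK cluster (assumed invocation).
pools = alicloud.cs.get_kubernetes_node_pools(cluster_id="c-xxxxxxxxxxxx")

for pool in pools.nodepools:
    # Attributes documented by the getters above.
    pulumi.log.info(
        f"{pool.node_pool_name}: {', '.join(pool.instance_types)} "
        f"({pool.instance_charge_type}, system disk {pool.system_disk_category})"
    )
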
|
|
5320
|
+
@pulumi.output_type
|
|
5321
|
+
class GetKubernetesNodePoolsNodepoolDataDiskResult(dict):
|
|
5322
|
+
def __init__(__self__, *,
|
|
5323
|
+
auto_format: str,
|
|
5324
|
+
auto_snapshot_policy_id: str,
|
|
5325
|
+
bursting_enabled: bool,
|
|
5326
|
+
category: str,
|
|
5327
|
+
device: str,
|
|
5328
|
+
encrypted: str,
|
|
5329
|
+
file_system: str,
|
|
5330
|
+
kms_key_id: str,
|
|
5331
|
+
mount_target: str,
|
|
5332
|
+
name: str,
|
|
5333
|
+
performance_level: str,
|
|
5334
|
+
provisioned_iops: int,
|
|
5335
|
+
size: int,
|
|
5336
|
+
snapshot_id: str):
|
|
5337
|
+
"""
|
|
5338
|
+
:param str auto_format: Whether to automatically mount the data disk. Valid values: true and false.
|
|
5339
|
+
:param str auto_snapshot_policy_id: The ID of the automatic snapshot policy that you want to apply to the system disk.
|
|
5340
|
+
:param bool bursting_enabled: Whether the data disk is enabled with Burst (performance Burst). This is configured when the disk type is cloud_auto.
|
|
5341
|
+
:param str category: The type of data disk. Default value: `cloud_efficiency`. Valid values:- `cloud`: basic disk.- `cloud_efficiency`: ultra disk.- `cloud_ssd`: standard SSD.- `cloud_essd`: Enterprise SSD (ESSD).- `cloud_auto`: ESSD AutoPL disk.- `cloud_essd_entry`: ESSD Entry disk.- `elastic_ephemeral_disk_premium`: premium elastic ephemeral disk.- `elastic_ephemeral_disk_standard`: standard elastic ephemeral disk.
|
|
5342
|
+
:param str device: The mount target of data disk N. Valid values of N: 1 to 16. If you do not specify this parameter, the system automatically assigns a mount target when Auto Scaling creates ECS instances. The name of the mount target ranges from /dev/xvdb to /dev/xvdz.
|
|
5343
|
+
:param str encrypted: Specifies whether to encrypt data disks. Valid values: true and false. Default to `false`.
|
|
5344
|
+
:param str file_system: The type of the mounted file system. Works when auto_format is true. Optional value: `ext4`, `xfs`.
|
|
5345
|
+
:param str kms_key_id: The kms key id used to encrypt the data disk. It takes effect when `encrypted` is true.
|
|
5346
|
+
:param str mount_target: The Mount path. Works when auto_format is true.
|
|
5347
|
+
:param str name: The length is 2~128 English or Chinese characters. It must start with an uppercase or lowr letter or a Chinese character and cannot start with http:// or https. Can contain numbers, colons (:), underscores (_), or dashes (-). It will be overwritten if auto_format is set.
|
|
5348
|
+
:param str performance_level: Worker node data disk performance level, when `category` values `cloud_essd`, the optional values are `PL0`, `PL1`, `PL2` or `PL3`, but the specific performance level is related to the disk capacity. For more information, see [Enhanced SSDs](https://www.alibabacloud.com/help/doc-detail/122389.htm). Default is `PL1`.
|
|
5349
|
+
:param int provisioned_iops: The read/write IOPS preconfigured for the data disk, which is configured when the disk type is cloud_auto.
|
|
5350
|
+
:param int size: The size of a data disk, Its valid value range [40~32768] in GB. Default to `40`.
|
|
5351
|
+
:param str snapshot_id: The ID of the snapshot that you want to use to create data disk N. Valid values of N: 1 to 16. If you specify this parameter, DataDisk.N.Size is ignored. The size of the disk is the same as the size of the specified snapshot. If you specify a snapshot that is created on or before July 15, 2013, the operation fails and InvalidSnapshot.TooOld is returned.
|
|
5352
|
+
"""
|
|
5353
|
+
pulumi.set(__self__, "auto_format", auto_format)
|
|
5354
|
+
pulumi.set(__self__, "auto_snapshot_policy_id", auto_snapshot_policy_id)
|
|
5355
|
+
pulumi.set(__self__, "bursting_enabled", bursting_enabled)
|
|
5356
|
+
pulumi.set(__self__, "category", category)
|
|
5357
|
+
pulumi.set(__self__, "device", device)
|
|
5358
|
+
pulumi.set(__self__, "encrypted", encrypted)
|
|
5359
|
+
pulumi.set(__self__, "file_system", file_system)
|
|
5360
|
+
pulumi.set(__self__, "kms_key_id", kms_key_id)
|
|
5361
|
+
pulumi.set(__self__, "mount_target", mount_target)
|
|
5362
|
+
pulumi.set(__self__, "name", name)
|
|
5363
|
+
pulumi.set(__self__, "performance_level", performance_level)
|
|
5364
|
+
pulumi.set(__self__, "provisioned_iops", provisioned_iops)
|
|
5365
|
+
pulumi.set(__self__, "size", size)
|
|
5366
|
+
pulumi.set(__self__, "snapshot_id", snapshot_id)
|
|
5367
|
+
|
|
5368
|
+
@property
|
|
5369
|
+
@pulumi.getter(name="autoFormat")
|
|
5370
|
+
def auto_format(self) -> str:
|
|
5371
|
+
"""
|
|
5372
|
+
Whether to automatically mount the data disk. Valid values: true and false.
|
|
5373
|
+
"""
|
|
5374
|
+
return pulumi.get(self, "auto_format")
|
|
5375
|
+
|
|
5376
|
+
@property
|
|
5377
|
+
@pulumi.getter(name="autoSnapshotPolicyId")
|
|
5378
|
+
def auto_snapshot_policy_id(self) -> str:
|
|
5379
|
+
"""
|
|
5380
|
+
The ID of the automatic snapshot policy that you want to apply to the system disk.
|
|
5381
|
+
"""
|
|
5382
|
+
return pulumi.get(self, "auto_snapshot_policy_id")
|
|
5383
|
+
|
|
5384
|
+
@property
|
|
5385
|
+
@pulumi.getter(name="burstingEnabled")
|
|
5386
|
+
def bursting_enabled(self) -> bool:
|
|
5387
|
+
"""
|
|
5388
|
+
Whether the data disk is enabled with Burst (performance Burst). This is configured when the disk type is cloud_auto.
|
|
5389
|
+
"""
|
|
5390
|
+
return pulumi.get(self, "bursting_enabled")
|
|
5391
|
+
|
|
5392
|
+
@property
|
|
5393
|
+
@pulumi.getter
|
|
5394
|
+
def category(self) -> str:
|
|
5395
|
+
"""
|
|
5396
|
+
The type of data disk. Default value: `cloud_efficiency`. Valid values:- `cloud`: basic disk.- `cloud_efficiency`: ultra disk.- `cloud_ssd`: standard SSD.- `cloud_essd`: Enterprise SSD (ESSD).- `cloud_auto`: ESSD AutoPL disk.- `cloud_essd_entry`: ESSD Entry disk.- `elastic_ephemeral_disk_premium`: premium elastic ephemeral disk.- `elastic_ephemeral_disk_standard`: standard elastic ephemeral disk.
|
|
5397
|
+
"""
|
|
5398
|
+
return pulumi.get(self, "category")
|
|
5399
|
+
|
|
5400
|
+
@property
|
|
5401
|
+
@pulumi.getter
|
|
5402
|
+
def device(self) -> str:
|
|
5403
|
+
"""
|
|
5404
|
+
The mount target of data disk N. Valid values of N: 1 to 16. If you do not specify this parameter, the system automatically assigns a mount target when Auto Scaling creates ECS instances. The name of the mount target ranges from /dev/xvdb to /dev/xvdz.
|
|
5405
|
+
"""
|
|
5406
|
+
return pulumi.get(self, "device")
|
|
5407
|
+
|
|
5408
|
+
@property
|
|
5409
|
+
@pulumi.getter
|
|
5410
|
+
def encrypted(self) -> str:
|
|
5411
|
+
"""
|
|
5412
|
+
Specifies whether to encrypt data disks. Valid values: true and false. Default to `false`.
|
|
5413
|
+
"""
|
|
5414
|
+
return pulumi.get(self, "encrypted")
|
|
5415
|
+
|
|
5416
|
+
@property
|
|
5417
|
+
@pulumi.getter(name="fileSystem")
|
|
5418
|
+
def file_system(self) -> str:
|
|
5419
|
+
"""
|
|
5420
|
+
The type of the mounted file system. Works when auto_format is true. Optional value: `ext4`, `xfs`.
|
|
5421
|
+
"""
|
|
5422
|
+
return pulumi.get(self, "file_system")
|
|
5423
|
+
|
|
5424
|
+
@property
|
|
5425
|
+
@pulumi.getter(name="kmsKeyId")
|
|
5426
|
+
def kms_key_id(self) -> str:
|
|
5427
|
+
"""
|
|
5428
|
+
The kms key id used to encrypt the data disk. It takes effect when `encrypted` is true.
|
|
5429
|
+
"""
|
|
5430
|
+
return pulumi.get(self, "kms_key_id")
|
|
5431
|
+
|
|
5432
|
+
@property
|
|
5433
|
+
@pulumi.getter(name="mountTarget")
|
|
5434
|
+
def mount_target(self) -> str:
|
|
5435
|
+
"""
|
|
5436
|
+
The Mount path. Works when auto_format is true.
|
|
5437
|
+
"""
|
|
5438
|
+
return pulumi.get(self, "mount_target")
|
|
5439
|
+
|
|
5440
|
+
@property
|
|
5441
|
+
@pulumi.getter
|
|
5442
|
+
def name(self) -> str:
|
|
5443
|
+
"""
|
|
5444
|
+
The length is 2~128 English or Chinese characters. It must start with an uppercase or lowr letter or a Chinese character and cannot start with http:// or https. Can contain numbers, colons (:), underscores (_), or dashes (-). It will be overwritten if auto_format is set.
|
|
5445
|
+
"""
|
|
5446
|
+
return pulumi.get(self, "name")
|
|
5447
|
+
|
|
5448
|
+
@property
|
|
5449
|
+
@pulumi.getter(name="performanceLevel")
|
|
5450
|
+
def performance_level(self) -> str:
|
|
5451
|
+
"""
|
|
5452
|
+
Worker node data disk performance level, when `category` values `cloud_essd`, the optional values are `PL0`, `PL1`, `PL2` or `PL3`, but the specific performance level is related to the disk capacity. For more information, see [Enhanced SSDs](https://www.alibabacloud.com/help/doc-detail/122389.htm). Default is `PL1`.
|
|
5453
|
+
"""
|
|
5454
|
+
return pulumi.get(self, "performance_level")
|
|
5455
|
+
|
|
5456
|
+
@property
|
|
5457
|
+
@pulumi.getter(name="provisionedIops")
|
|
5458
|
+
def provisioned_iops(self) -> int:
|
|
5459
|
+
"""
|
|
5460
|
+
The read/write IOPS preconfigured for the data disk, which is configured when the disk type is cloud_auto.
|
|
5461
|
+
"""
|
|
5462
|
+
return pulumi.get(self, "provisioned_iops")
|
|
5463
|
+
|
|
5464
|
+
@property
|
|
5465
|
+
@pulumi.getter
|
|
5466
|
+
def size(self) -> int:
|
|
5467
|
+
"""
|
|
5468
|
+
The size of a data disk, Its valid value range [40~32768] in GB. Default to `40`.
|
|
5469
|
+
"""
|
|
5470
|
+
return pulumi.get(self, "size")
|
|
5471
|
+
|
|
5472
|
+
@property
|
|
5473
|
+
@pulumi.getter(name="snapshotId")
|
|
5474
|
+
def snapshot_id(self) -> str:
|
|
5475
|
+
"""
|
|
5476
|
+
The ID of the snapshot that you want to use to create data disk N. Valid values of N: 1 to 16. If you specify this parameter, DataDisk.N.Size is ignored. The size of the disk is the same as the size of the specified snapshot. If you specify a snapshot that is created on or before July 15, 2013, the operation fails and InvalidSnapshot.TooOld is returned.
|
|
5477
|
+
"""
|
|
5478
|
+
return pulumi.get(self, "snapshot_id")
|
|
5479
|
+
|
|
5480
|
+
|
|
5481
|
+
@pulumi.output_type
|
|
5482
|
+
class GetKubernetesNodePoolsNodepoolKubeletConfigurationResult(dict):
|
|
5483
|
+
def __init__(__self__, *,
|
|
5484
|
+
allowed_unsafe_sysctls: Sequence[str],
|
|
5485
|
+
cluster_dns: Sequence[str],
|
|
5486
|
+
container_log_max_files: str,
|
|
5487
|
+
container_log_max_size: str,
|
|
5488
|
+
container_log_max_workers: str,
|
|
5489
|
+
container_log_monitor_interval: str,
|
|
5490
|
+
cpu_cfs_quota: str,
|
|
5491
|
+
cpu_cfs_quota_period: str,
|
|
5492
|
+
cpu_manager_policy: str,
|
|
5493
|
+
event_burst: str,
|
|
5494
|
+
event_record_qps: str,
|
|
5495
|
+
eviction_hard: Mapping[str, str],
|
|
5496
|
+
eviction_soft: Mapping[str, str],
|
|
5497
|
+
eviction_soft_grace_period: Mapping[str, str],
|
|
5498
|
+
feature_gates: Mapping[str, str],
|
|
5499
|
+
image_gc_high_threshold_percent: str,
|
|
5500
|
+
image_gc_low_threshold_percent: str,
|
|
5501
|
+
kube_api_burst: str,
|
|
5502
|
+
kube_api_qps: str,
|
|
5503
|
+
kube_reserved: Mapping[str, str],
|
|
5504
|
+
max_pods: str,
|
|
5505
|
+
memory_manager_policy: str,
|
|
5506
|
+
pod_pids_limit: str,
|
|
5507
|
+
read_only_port: str,
|
|
5508
|
+
registry_burst: str,
|
|
5509
|
+
registry_pull_qps: str,
|
|
5510
|
+
reserved_memories: Sequence['outputs.GetKubernetesNodePoolsNodepoolKubeletConfigurationReservedMemoryResult'],
|
|
5511
|
+
serialize_image_pulls: str,
|
|
5512
|
+
system_reserved: Mapping[str, str],
|
|
5513
|
+
topology_manager_policy: str,
|
|
5514
|
+
tracing: 'outputs.GetKubernetesNodePoolsNodepoolKubeletConfigurationTracingResult'):
|
|
5515
|
+
"""
|
|
5516
|
+
:param Sequence[str] allowed_unsafe_sysctls: Allowed sysctl mode whitelist.
|
|
5517
|
+
:param Sequence[str] cluster_dns: The list of IP addresses of the cluster DNS servers.
|
|
5518
|
+
:param str container_log_max_files: The maximum number of log files that can exist in each container.
|
|
5519
|
+
:param str container_log_max_size: The maximum size that can be reached before a log file is rotated.
|
|
5520
|
+
:param str container_log_max_workers: Specifies the maximum number of concurrent workers required to perform log rotation operations.
|
|
5521
|
+
:param str container_log_monitor_interval: Specifies the duration for which container logs are monitored for log rotation.
|
|
5522
|
+
:param str cpu_cfs_quota: CPU CFS quota constraint switch.
|
|
5523
|
+
:param str cpu_cfs_quota_period: CPU CFS quota period value.
|
|
5524
|
+
:param str cpu_manager_policy: Same as cpuManagerPolicy. The name of the policy to use. Requires the CPUManager feature gate to be enabled. Valid value is `none` or `static`.
|
|
5525
|
+
:param str event_burst: Same as eventBurst. The maximum size of a burst of event creations, temporarily allows event creations to burst to this number, while still not exceeding `event_record_qps`. It is only used when `event_record_qps` is greater than 0. Valid value is `[0-100]`.
|
|
5526
|
+
:param str event_record_qps: Same as eventRecordQPS. The maximum event creations per second. If 0, there is no limit enforced. Valid value is `[0-50]`.
|
|
5527
|
+
:param Mapping[str, str] eviction_hard: Same as evictionHard. The map of signal names to quantities that defines hard eviction thresholds. For example: `{"memory.available" = "300Mi"}`.
|
|
5528
|
+
:param Mapping[str, str] eviction_soft: Same as evictionSoft. The map of signal names to quantities that defines soft eviction thresholds. For example: `{"memory.available" = "300Mi"}`.
|
|
5529
|
+
:param Mapping[str, str] eviction_soft_grace_period: Same as evictionSoftGracePeriod. The map of signal names to quantities that defines grace periods for each soft eviction signal. For example: `{"memory.available" = "30s"}`.
|
|
5530
|
+
:param Mapping[str, str] feature_gates: Feature switch to enable configuration of experimental features.
|
|
5531
|
+
:param str image_gc_high_threshold_percent: If the image usage exceeds this threshold, image garbage collection will continue.
|
|
5532
|
+
:param str image_gc_low_threshold_percent: Image garbage collection is not performed when the image usage is below this threshold.
|
|
5533
|
+
:param str kube_api_burst: Same as kubeAPIBurst. The burst to allow while talking with kubernetes api-server. Valid value is `[0-100]`.
|
|
5534
|
+
:param str kube_api_qps: Same as kubeAPIQPS. The QPS to use while talking with kubernetes api-server. Valid value is `[0-50]`.
|
|
5535
|
+
:param Mapping[str, str] kube_reserved: Same as kubeReserved. The set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for kubernetes system components. Currently, cpu, memory and local storage for root file system are supported. See [compute resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details.
|
|
5536
|
+
:param str max_pods: The maximum number of running pods.
|
|
5537
|
+
:param str memory_manager_policy: The policy to be used by the memory manager.
|
|
5538
|
+
:param str pod_pids_limit: The maximum number of PIDs that can be used in a Pod.
|
|
5539
|
+
:param str read_only_port: Read-only port number.
|
|
5540
|
+
:param str registry_burst: Same as registryBurst. The maximum size of burst pulls, temporarily allows pulls to burst to this number, while still not exceeding `registry_pull_qps`. Only used if `registry_pull_qps` is greater than 0. Valid value is `[0-100]`.
|
|
5541
|
+
:param str registry_pull_qps: Same as registryPullQPS. The limit of registry pulls per second. Setting it to `0` means no limit. Valid value is `[0-50]`.
|
|
5542
|
+
:param Sequence['GetKubernetesNodePoolsNodepoolKubeletConfigurationReservedMemoryArgs'] reserved_memories: Reserve memory for NUMA nodes.
|
|
5543
|
+
:param str serialize_image_pulls: Same as serializeImagePulls. When enabled, it tells the Kubelet to pull images one at a time. We recommend not changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Valid value is `true` or `false`.
|
|
5544
|
+
:param Mapping[str, str] system_reserved: Same as systemReserved. The set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for non-kubernetes components. Currently, only cpu and memory are supported. See [compute resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details.
|
|
5545
|
+
:param str topology_manager_policy: Name of the Topology Manager policy used.
|
|
5546
|
+
:param 'GetKubernetesNodePoolsNodepoolKubeletConfigurationTracingArgs' tracing: OpenTelemetry tracks the configuration information for client settings versioning.
|
|
5547
|
+
"""
|
|
5548
|
+
pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
|
|
5549
|
+
pulumi.set(__self__, "cluster_dns", cluster_dns)
|
|
5550
|
+
pulumi.set(__self__, "container_log_max_files", container_log_max_files)
|
|
5551
|
+
pulumi.set(__self__, "container_log_max_size", container_log_max_size)
|
|
5552
|
+
pulumi.set(__self__, "container_log_max_workers", container_log_max_workers)
|
|
5553
|
+
pulumi.set(__self__, "container_log_monitor_interval", container_log_monitor_interval)
|
|
5554
|
+
pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
|
|
5555
|
+
pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
|
|
5556
|
+
pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
|
|
5557
|
+
pulumi.set(__self__, "event_burst", event_burst)
|
|
5558
|
+
pulumi.set(__self__, "event_record_qps", event_record_qps)
|
|
5559
|
+
pulumi.set(__self__, "eviction_hard", eviction_hard)
|
|
5560
|
+
pulumi.set(__self__, "eviction_soft", eviction_soft)
|
|
5561
|
+
pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
|
|
5562
|
+
pulumi.set(__self__, "feature_gates", feature_gates)
|
|
5563
|
+
pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
|
|
5564
|
+
pulumi.set(__self__, "image_gc_low_threshold_percent", image_gc_low_threshold_percent)
|
|
5565
|
+
pulumi.set(__self__, "kube_api_burst", kube_api_burst)
|
|
5566
|
+
pulumi.set(__self__, "kube_api_qps", kube_api_qps)
|
|
5567
|
+
pulumi.set(__self__, "kube_reserved", kube_reserved)
|
|
5568
|
+
pulumi.set(__self__, "max_pods", max_pods)
|
|
5569
|
+
pulumi.set(__self__, "memory_manager_policy", memory_manager_policy)
|
|
5570
|
+
pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
|
|
5571
|
+
pulumi.set(__self__, "read_only_port", read_only_port)
|
|
5572
|
+
pulumi.set(__self__, "registry_burst", registry_burst)
|
|
5573
|
+
pulumi.set(__self__, "registry_pull_qps", registry_pull_qps)
|
|
5574
|
+
pulumi.set(__self__, "reserved_memories", reserved_memories)
|
|
5575
|
+
pulumi.set(__self__, "serialize_image_pulls", serialize_image_pulls)
|
|
5576
|
+
pulumi.set(__self__, "system_reserved", system_reserved)
|
|
5577
|
+
pulumi.set(__self__, "topology_manager_policy", topology_manager_policy)
|
|
5578
|
+
pulumi.set(__self__, "tracing", tracing)
|
|
5579
|
+
|
|
5580
|
+
@property
|
|
5581
|
+
@pulumi.getter(name="allowedUnsafeSysctls")
|
|
5582
|
+
def allowed_unsafe_sysctls(self) -> Sequence[str]:
|
|
5583
|
+
"""
|
|
5584
|
+
Allowed sysctl mode whitelist.
|
|
5585
|
+
"""
|
|
5586
|
+
return pulumi.get(self, "allowed_unsafe_sysctls")
|
|
5587
|
+
|
|
5588
|
+
@property
|
|
5589
|
+
@pulumi.getter(name="clusterDns")
|
|
5590
|
+
def cluster_dns(self) -> Sequence[str]:
|
|
5591
|
+
"""
|
|
5592
|
+
The list of IP addresses of the cluster DNS servers.
|
|
5593
|
+
"""
|
|
5594
|
+
return pulumi.get(self, "cluster_dns")
|
|
5595
|
+
|
|
5596
|
+
@property
|
|
5597
|
+
@pulumi.getter(name="containerLogMaxFiles")
|
|
5598
|
+
def container_log_max_files(self) -> str:
|
|
5599
|
+
"""
|
|
5600
|
+
The maximum number of log files that can exist in each container.
|
|
5601
|
+
"""
|
|
5602
|
+
return pulumi.get(self, "container_log_max_files")
|
|
5603
|
+
|
|
5604
|
+
@property
|
|
5605
|
+
@pulumi.getter(name="containerLogMaxSize")
|
|
5606
|
+
def container_log_max_size(self) -> str:
|
|
5607
|
+
"""
|
|
5608
|
+
The maximum size that can be reached before a log file is rotated.
|
|
5609
|
+
"""
|
|
5610
|
+
return pulumi.get(self, "container_log_max_size")
|
|
5611
|
+
|
|
5612
|
+
@property
|
|
5613
|
+
@pulumi.getter(name="containerLogMaxWorkers")
|
|
5614
|
+
def container_log_max_workers(self) -> str:
|
|
5615
|
+
"""
|
|
5616
|
+
Specifies the maximum number of concurrent workers required to perform log rotation operations.
|
|
5617
|
+
"""
|
|
5618
|
+
return pulumi.get(self, "container_log_max_workers")
|
|
5619
|
+
|
|
5620
|
+
@property
|
|
5621
|
+
@pulumi.getter(name="containerLogMonitorInterval")
|
|
5622
|
+
def container_log_monitor_interval(self) -> str:
|
|
5623
|
+
"""
|
|
5624
|
+
Specifies the duration for which container logs are monitored for log rotation.
|
|
5625
|
+
"""
|
|
5626
|
+
return pulumi.get(self, "container_log_monitor_interval")
|
|
5627
|
+
|
|
5628
|
+
@property
|
|
5629
|
+
@pulumi.getter(name="cpuCfsQuota")
|
|
5630
|
+
def cpu_cfs_quota(self) -> str:
|
|
5631
|
+
"""
|
|
5632
|
+
CPU CFS quota constraint switch.
|
|
5633
|
+
"""
|
|
5634
|
+
return pulumi.get(self, "cpu_cfs_quota")
|
|
5635
|
+
|
|
5636
|
+
@property
|
|
5637
|
+
@pulumi.getter(name="cpuCfsQuotaPeriod")
|
|
5638
|
+
def cpu_cfs_quota_period(self) -> str:
|
|
5639
|
+
"""
|
|
5640
|
+
CPU CFS quota period value.
|
|
5641
|
+
"""
|
|
5642
|
+
return pulumi.get(self, "cpu_cfs_quota_period")
|
|
5643
|
+
|
|
5644
|
+
@property
|
|
5645
|
+
@pulumi.getter(name="cpuManagerPolicy")
|
|
5646
|
+
def cpu_manager_policy(self) -> str:
|
|
5647
|
+
"""
|
|
5648
|
+
Same as cpuManagerPolicy. The name of the policy to use. Requires the CPUManager feature gate to be enabled. Valid value is `none` or `static`.
|
|
5649
|
+
"""
|
|
5650
|
+
return pulumi.get(self, "cpu_manager_policy")
|
|
5651
|
+
|
|
5652
|
+
@property
|
|
5653
|
+
@pulumi.getter(name="eventBurst")
|
|
5654
|
+
def event_burst(self) -> str:
|
|
5655
|
+
"""
|
|
5656
|
+
Same as eventBurst. The maximum size of a burst of event creations, temporarily allows event creations to burst to this number, while still not exceeding `event_record_qps`. It is only used when `event_record_qps` is greater than 0. Valid value is `[0-100]`.
|
|
5657
|
+
"""
|
|
5658
|
+
return pulumi.get(self, "event_burst")
|
|
5659
|
+
|
|
5660
|
+
@property
|
|
5661
|
+
@pulumi.getter(name="eventRecordQps")
|
|
5662
|
+
def event_record_qps(self) -> str:
|
|
5663
|
+
"""
|
|
5664
|
+
Same as eventRecordQPS. The maximum event creations per second. If 0, there is no limit enforced. Valid value is `[0-50]`.
|
|
5665
|
+
"""
|
|
5666
|
+
return pulumi.get(self, "event_record_qps")
|
|
5667
|
+
|
|
5668
|
+
@property
|
|
5669
|
+
@pulumi.getter(name="evictionHard")
|
|
5670
|
+
def eviction_hard(self) -> Mapping[str, str]:
|
|
5671
|
+
"""
|
|
5672
|
+
Same as evictionHard. The map of signal names to quantities that defines hard eviction thresholds. For example: `{"memory.available" = "300Mi"}`.
|
|
5673
|
+
"""
|
|
5674
|
+
return pulumi.get(self, "eviction_hard")
|
|
5675
|
+
|
|
5676
|
+
@property
|
|
5677
|
+
@pulumi.getter(name="evictionSoft")
|
|
5678
|
+
def eviction_soft(self) -> Mapping[str, str]:
|
|
5679
|
+
"""
|
|
5680
|
+
Same as evictionSoft. The map of signal names to quantities that defines soft eviction thresholds. For example: `{"memory.available" = "300Mi"}`.
|
|
5681
|
+
"""
|
|
5682
|
+
return pulumi.get(self, "eviction_soft")
|
|
5683
|
+
|
|
5684
|
+
@property
|
|
5685
|
+
@pulumi.getter(name="evictionSoftGracePeriod")
|
|
5686
|
+
def eviction_soft_grace_period(self) -> Mapping[str, str]:
|
|
5687
|
+
"""
|
|
5688
|
+
Same as evictionSoftGracePeriod. The map of signal names to quantities that defines grace periods for each soft eviction signal. For example: `{"memory.available" = "30s"}`.
|
|
5689
|
+
"""
|
|
5690
|
+
return pulumi.get(self, "eviction_soft_grace_period")
|
|
5691
|
+
|
|
5692
|
+
@property
|
|
5693
|
+
@pulumi.getter(name="featureGates")
|
|
5694
|
+
def feature_gates(self) -> Mapping[str, str]:
|
|
5695
|
+
"""
|
|
5696
|
+
Feature switch to enable configuration of experimental features.
|
|
5697
|
+
"""
|
|
5698
|
+
return pulumi.get(self, "feature_gates")
|
|
5699
|
+
|
|
5700
|
+
@property
|
|
5701
|
+
@pulumi.getter(name="imageGcHighThresholdPercent")
|
|
5702
|
+
def image_gc_high_threshold_percent(self) -> str:
|
|
5703
|
+
"""
|
|
5704
|
+
If the image usage exceeds this threshold, image garbage collection will continue.
|
|
5705
|
+
"""
|
|
5706
|
+
return pulumi.get(self, "image_gc_high_threshold_percent")
|
|
5707
|
+
|
|
5708
|
+
@property
|
|
5709
|
+
@pulumi.getter(name="imageGcLowThresholdPercent")
|
|
5710
|
+
def image_gc_low_threshold_percent(self) -> str:
|
|
5711
|
+
"""
|
|
5712
|
+
Image garbage collection is not performed when the image usage is below this threshold.
|
|
5713
|
+
"""
|
|
5714
|
+
return pulumi.get(self, "image_gc_low_threshold_percent")
|
|
5715
|
+
|
|
5716
|
+
@property
|
|
5717
|
+
@pulumi.getter(name="kubeApiBurst")
|
|
5718
|
+
def kube_api_burst(self) -> str:
|
|
5719
|
+
"""
|
|
5720
|
+
Same as kubeAPIBurst. The burst to allow while talking with kubernetes api-server. Valid value is `[0-100]`.
|
|
5721
|
+
"""
|
|
5722
|
+
return pulumi.get(self, "kube_api_burst")
|
|
5723
|
+
|
|
5724
|
+
@property
|
|
5725
|
+
@pulumi.getter(name="kubeApiQps")
|
|
5726
|
+    def kube_api_qps(self) -> str:
+        """
+        Same as kubeAPIQPS. The QPS to use while talking with kubernetes api-server. Valid value is `[0-50]`.
+        """
+        return pulumi.get(self, "kube_api_qps")
+
+    @property
+    @pulumi.getter(name="kubeReserved")
+    def kube_reserved(self) -> Mapping[str, str]:
+        """
+        Same as kubeReserved. The set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for kubernetes system components. Currently, cpu, memory and local storage for root file system are supported. See [compute resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details.
+        """
+        return pulumi.get(self, "kube_reserved")
+
+    @property
+    @pulumi.getter(name="maxPods")
+    def max_pods(self) -> str:
+        """
+        The maximum number of running pods.
+        """
+        return pulumi.get(self, "max_pods")
+
+    @property
+    @pulumi.getter(name="memoryManagerPolicy")
+    def memory_manager_policy(self) -> str:
+        """
+        The policy to be used by the memory manager.
+        """
+        return pulumi.get(self, "memory_manager_policy")
+
+    @property
+    @pulumi.getter(name="podPidsLimit")
+    def pod_pids_limit(self) -> str:
+        """
+        The maximum number of PIDs that can be used in a Pod.
+        """
+        return pulumi.get(self, "pod_pids_limit")
+
+    @property
+    @pulumi.getter(name="readOnlyPort")
+    def read_only_port(self) -> str:
+        """
+        Read-only port number.
+        """
+        return pulumi.get(self, "read_only_port")
+
+    @property
+    @pulumi.getter(name="registryBurst")
+    def registry_burst(self) -> str:
+        """
+        Same as registryBurst. The maximum size of burst pulls, temporarily allows pulls to burst to this number, while still not exceeding `registry_pull_qps`. Only used if `registry_pull_qps` is greater than 0. Valid value is `[0-100]`.
+        """
+        return pulumi.get(self, "registry_burst")
+
+    @property
+    @pulumi.getter(name="registryPullQps")
+    def registry_pull_qps(self) -> str:
+        """
+        Same as registryPullQPS. The limit of registry pulls per second. Setting it to `0` means no limit. Valid value is `[0-50]`.
+        """
+        return pulumi.get(self, "registry_pull_qps")
+
+    @property
+    @pulumi.getter(name="reservedMemories")
+    def reserved_memories(self) -> Sequence['outputs.GetKubernetesNodePoolsNodepoolKubeletConfigurationReservedMemoryResult']:
+        """
+        Reserve memory for NUMA nodes.
+        """
+        return pulumi.get(self, "reserved_memories")
+
+    @property
+    @pulumi.getter(name="serializeImagePulls")
+    def serialize_image_pulls(self) -> str:
+        """
+        Same as serializeImagePulls. When enabled, it tells the Kubelet to pull images one at a time. We recommend not changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Valid value is `true` or `false`.
+        """
+        return pulumi.get(self, "serialize_image_pulls")
+
+    @property
+    @pulumi.getter(name="systemReserved")
+    def system_reserved(self) -> Mapping[str, str]:
+        """
+        Same as systemReserved. The set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for non-kubernetes components. Currently, only cpu and memory are supported. See [compute resources](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more details.
+        """
+        return pulumi.get(self, "system_reserved")
+
+    @property
+    @pulumi.getter(name="topologyManagerPolicy")
+    def topology_manager_policy(self) -> str:
+        """
+        Name of the Topology Manager policy used.
+        """
+        return pulumi.get(self, "topology_manager_policy")
+
+    @property
+    @pulumi.getter
+    def tracing(self) -> 'outputs.GetKubernetesNodePoolsNodepoolKubeletConfigurationTracingResult':
+        """
+        OpenTelemetry tracks the configuration information for client settings versioning.
+        """
+        return pulumi.get(self, "tracing")
+
+
+@pulumi.output_type
+class GetKubernetesNodePoolsNodepoolKubeletConfigurationReservedMemoryResult(dict):
+    def __init__(__self__, *,
+                 limits: Mapping[str, str],
+                 numa_node: int):
+        """
+        :param Mapping[str, str] limits: Memory resource limit.
+        :param int numa_node: The NUMA node.
+        """
+        pulumi.set(__self__, "limits", limits)
+        pulumi.set(__self__, "numa_node", numa_node)
+
+    @property
+    @pulumi.getter
+    def limits(self) -> Mapping[str, str]:
+        """
+        Memory resource limit.
+        """
+        return pulumi.get(self, "limits")
+
+    @property
+    @pulumi.getter(name="numaNode")
+    def numa_node(self) -> int:
+        """
+        The NUMA node.
+        """
+        return pulumi.get(self, "numa_node")
+
+
+@pulumi.output_type
+class GetKubernetesNodePoolsNodepoolKubeletConfigurationTracingResult(dict):
+    def __init__(__self__, *,
+                 endpoint: str,
+                 sampling_rate_per_million: str):
+        """
+        :param str endpoint: The endpoint of the collector.
+        :param str sampling_rate_per_million: Number of samples to be collected per million span.
+        """
+        pulumi.set(__self__, "endpoint", endpoint)
+        pulumi.set(__self__, "sampling_rate_per_million", sampling_rate_per_million)
+
+    @property
+    @pulumi.getter
+    def endpoint(self) -> str:
+        """
+        The endpoint of the collector.
+        """
+        return pulumi.get(self, "endpoint")
+
+    @property
+    @pulumi.getter(name="samplingRatePerMillion")
+    def sampling_rate_per_million(self) -> str:
+        """
+        Number of samples to be collected per million span.
+        """
+        return pulumi.get(self, "sampling_rate_per_million")
+
+
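For orientation, a minimal sketch of how the kubelet-related output types above are typically consumed. It assumes the `cs.get_kubernetes_node_pools` data source (whose output types these are) returns the node pools under a `nodepools` list whose entries expose a `kubelet_configuration` attribute; both names and the cluster ID are illustrative, not taken from this diff:

    import pulumi
    import pulumi_alicloud as alicloud

    # Hypothetical cluster ID; substitute a real ACK cluster ID.
    pools = alicloud.cs.get_kubernetes_node_pools(cluster_id="c-0123456789abcdef")

    for i, pool in enumerate(pools.nodepools):
        # `kubelet_configuration` is an assumed attribute backed by the
        # GetKubernetesNodePoolsNodepoolKubeletConfigurationResult type above.
        cfg = pool.kubelet_configuration
        pulumi.export(f"nodepool-{i}-max-pods", cfg.max_pods)
        pulumi.export(f"nodepool-{i}-registry-pull-qps", cfg.registry_pull_qps)
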
+@pulumi.output_type
+class GetKubernetesNodePoolsNodepoolLabelResult(dict):
+    def __init__(__self__, *,
+                 key: str,
+                 value: str):
+        """
+        :param str key: The key of a taint.
+        :param str value: The value of a taint.
+        """
+        pulumi.set(__self__, "key", key)
+        pulumi.set(__self__, "value", value)
+
+    @property
+    @pulumi.getter
+    def key(self) -> str:
+        """
+        The key of a taint.
+        """
+        return pulumi.get(self, "key")
+
+    @property
+    @pulumi.getter
+    def value(self) -> str:
+        """
+        The value of a taint.
+        """
+        return pulumi.get(self, "value")
+
+
+@pulumi.output_type
+class GetKubernetesNodePoolsNodepoolManagementResult(dict):
+    def __init__(__self__, *,
+                 auto_repair: bool,
+                 auto_repair_policy: 'outputs.GetKubernetesNodePoolsNodepoolManagementAutoRepairPolicyResult',
+                 auto_upgrade: bool,
+                 auto_upgrade_policy: 'outputs.GetKubernetesNodePoolsNodepoolManagementAutoUpgradePolicyResult',
+                 auto_vul_fix: bool,
+                 auto_vul_fix_policy: 'outputs.GetKubernetesNodePoolsNodepoolManagementAutoVulFixPolicyResult',
+                 enable: bool,
+                 max_unavailable: int,
+                 surge: int,
+                 surge_percentage: int):
+        """
+        :param bool auto_repair: Whether to enable automatic repair. Valid values: `true`: Automatic repair. `false`: not automatically repaired.
+        :param 'GetKubernetesNodePoolsNodepoolManagementAutoRepairPolicyArgs' auto_repair_policy: Automatic repair node policy.
+        :param bool auto_upgrade: Specifies whether to enable auto update. Valid values: `true`: enables auto update. `false`: disables auto update.
+        :param 'GetKubernetesNodePoolsNodepoolManagementAutoUpgradePolicyArgs' auto_upgrade_policy: The auto update policy.
+        :param bool auto_vul_fix: Specifies whether to automatically patch CVE vulnerabilities. Valid values: `true`, `false`.
+        :param 'GetKubernetesNodePoolsNodepoolManagementAutoVulFixPolicyArgs' auto_vul_fix_policy: The auto CVE patching policy.
+        :param bool enable: Whether to enable automatic scaling. Value:- `true`: enables the node pool auto-scaling function.- `false`: Auto scaling is not enabled. When the value is false, other `auto_scaling` configuration parameters do not take effect.
+        :param int max_unavailable: Maximum number of unavailable nodes. Default value: 1. Value range:\\[1,1000\\].
+        :param int surge: Number of additional nodes. You have to specify one of surge, surge_percentage.
+        :param int surge_percentage: Proportion of additional nodes. You have to specify one of surge, surge_percentage.
+        """
+        pulumi.set(__self__, "auto_repair", auto_repair)
+        pulumi.set(__self__, "auto_repair_policy", auto_repair_policy)
+        pulumi.set(__self__, "auto_upgrade", auto_upgrade)
+        pulumi.set(__self__, "auto_upgrade_policy", auto_upgrade_policy)
+        pulumi.set(__self__, "auto_vul_fix", auto_vul_fix)
+        pulumi.set(__self__, "auto_vul_fix_policy", auto_vul_fix_policy)
+        pulumi.set(__self__, "enable", enable)
+        pulumi.set(__self__, "max_unavailable", max_unavailable)
+        pulumi.set(__self__, "surge", surge)
+        pulumi.set(__self__, "surge_percentage", surge_percentage)
+
+    @property
+    @pulumi.getter(name="autoRepair")
+    def auto_repair(self) -> bool:
+        """
+        Whether to enable automatic repair. Valid values: `true`: Automatic repair. `false`: not automatically repaired.
+        """
+        return pulumi.get(self, "auto_repair")
+
+    @property
+    @pulumi.getter(name="autoRepairPolicy")
+    def auto_repair_policy(self) -> 'outputs.GetKubernetesNodePoolsNodepoolManagementAutoRepairPolicyResult':
+        """
+        Automatic repair node policy.
+        """
+        return pulumi.get(self, "auto_repair_policy")
+
+    @property
+    @pulumi.getter(name="autoUpgrade")
+    def auto_upgrade(self) -> bool:
+        """
+        Specifies whether to enable auto update. Valid values: `true`: enables auto update. `false`: disables auto update.
+        """
+        return pulumi.get(self, "auto_upgrade")
+
+    @property
+    @pulumi.getter(name="autoUpgradePolicy")
+    def auto_upgrade_policy(self) -> 'outputs.GetKubernetesNodePoolsNodepoolManagementAutoUpgradePolicyResult':
+        """
+        The auto update policy.
+        """
+        return pulumi.get(self, "auto_upgrade_policy")
+
+    @property
+    @pulumi.getter(name="autoVulFix")
+    def auto_vul_fix(self) -> bool:
+        """
+        Specifies whether to automatically patch CVE vulnerabilities. Valid values: `true`, `false`.
+        """
+        return pulumi.get(self, "auto_vul_fix")
+
+    @property
+    @pulumi.getter(name="autoVulFixPolicy")
+    def auto_vul_fix_policy(self) -> 'outputs.GetKubernetesNodePoolsNodepoolManagementAutoVulFixPolicyResult':
+        """
+        The auto CVE patching policy.
+        """
+        return pulumi.get(self, "auto_vul_fix_policy")
+
+    @property
+    @pulumi.getter
+    def enable(self) -> bool:
+        """
+        Whether to enable automatic scaling. Value:- `true`: enables the node pool auto-scaling function.- `false`: Auto scaling is not enabled. When the value is false, other `auto_scaling` configuration parameters do not take effect.
+        """
+        return pulumi.get(self, "enable")
+
+    @property
+    @pulumi.getter(name="maxUnavailable")
+    def max_unavailable(self) -> int:
+        """
+        Maximum number of unavailable nodes. Default value: 1. Value range:\\[1,1000\\].
+        """
+        return pulumi.get(self, "max_unavailable")
+
+    @property
+    @pulumi.getter
+    def surge(self) -> int:
+        """
+        Number of additional nodes. You have to specify one of surge, surge_percentage.
+        """
+        return pulumi.get(self, "surge")
+
+    @property
+    @pulumi.getter(name="surgePercentage")
+    def surge_percentage(self) -> int:
+        """
+        Proportion of additional nodes. You have to specify one of surge, surge_percentage.
+        """
+        return pulumi.get(self, "surge_percentage")
+
+
+@pulumi.output_type
+class GetKubernetesNodePoolsNodepoolManagementAutoRepairPolicyResult(dict):
+    def __init__(__self__, *,
+                 restart_node: bool):
+        """
+        :param bool restart_node: Specifies whether to automatically restart nodes after patching CVE vulnerabilities. Valid values: `true`, `false`.
+        """
+        pulumi.set(__self__, "restart_node", restart_node)
+
+    @property
+    @pulumi.getter(name="restartNode")
+    def restart_node(self) -> bool:
+        """
+        Specifies whether to automatically restart nodes after patching CVE vulnerabilities. Valid values: `true`, `false`.
+        """
+        return pulumi.get(self, "restart_node")
+
+
+@pulumi.output_type
+class GetKubernetesNodePoolsNodepoolManagementAutoUpgradePolicyResult(dict):
+    def __init__(__self__, *,
+                 auto_upgrade_kubelet: bool):
+        """
+        :param bool auto_upgrade_kubelet: Specifies whether to automatically update the kubelet. Valid values: `true`: yes; `false`: no.
+        """
+        pulumi.set(__self__, "auto_upgrade_kubelet", auto_upgrade_kubelet)
+
+    @property
+    @pulumi.getter(name="autoUpgradeKubelet")
+    def auto_upgrade_kubelet(self) -> bool:
+        """
+        Specifies whether to automatically update the kubelet. Valid values: `true`: yes; `false`: no.
+        """
+        return pulumi.get(self, "auto_upgrade_kubelet")
+
+
+@pulumi.output_type
+class GetKubernetesNodePoolsNodepoolManagementAutoVulFixPolicyResult(dict):
+    def __init__(__self__, *,
+                 restart_node: bool,
+                 vul_level: str):
+        """
+        :param bool restart_node: Specifies whether to automatically restart nodes after patching CVE vulnerabilities. Valid values: `true`, `false`.
+        :param str vul_level: The severity levels of vulnerabilities that is allowed to automatically patch. Multiple severity levels are separated by commas (,).
+        """
+        pulumi.set(__self__, "restart_node", restart_node)
+        pulumi.set(__self__, "vul_level", vul_level)
+
+    @property
+    @pulumi.getter(name="restartNode")
+    def restart_node(self) -> bool:
+        """
+        Specifies whether to automatically restart nodes after patching CVE vulnerabilities. Valid values: `true`, `false`.
+        """
+        return pulumi.get(self, "restart_node")
+
+    @property
+    @pulumi.getter(name="vulLevel")
+    def vul_level(self) -> str:
+        """
+        The severity levels of vulnerabilities that is allowed to automatically patch. Multiple severity levels are separated by commas (,).
+        """
+        return pulumi.get(self, "vul_level")
+
+
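The management classes above describe node pool self-healing, upgrade, and CVE-patching behaviour. A hedged sketch of reading them, again assuming a `nodepools` list whose entries carry a `management` attribute (illustrative names, hypothetical cluster ID):

    import pulumi
    import pulumi_alicloud as alicloud

    pools = alicloud.cs.get_kubernetes_node_pools(cluster_id="c-0123456789abcdef")  # hypothetical ID

    for i, pool in enumerate(pools.nodepools):
        # `management` is an assumed attribute backed by the Management result above.
        mgmt = pool.management
        if mgmt.auto_vul_fix:
            # vul_level lists the CVE severities that are patched automatically.
            pulumi.export(f"nodepool-{i}-vul-level", mgmt.auto_vul_fix_policy.vul_level)
        pulumi.export(f"nodepool-{i}-max-unavailable", mgmt.max_unavailable)
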
+@pulumi.output_type
+class GetKubernetesNodePoolsNodepoolPrivatePoolOptionsResult(dict):
+    def __init__(__self__, *,
+                 private_pool_options_id: str,
+                 private_pool_options_match_criteria: str):
+        """
+        :param str private_pool_options_id: The ID of the private node pool.
+        :param str private_pool_options_match_criteria: The type of private node pool. This parameter specifies the type of the private pool that you want to use to create instances. A private node pool is generated when an elasticity assurance or a capacity reservation service takes effect. The system selects a private node pool to launch instances. Valid values: `Open`: specifies an open private node pool. The system selects an open private node pool to launch instances. If no matching open private node pool is available, the resources in the public node pool are used. `Target`: specifies a private node pool. The system uses the resources of the specified private node pool to launch instances. If the specified private node pool is unavailable, instances cannot be started. `None`: no private node pool is used. The resources of private node pools are not used to launch the instances.
+        """
+        pulumi.set(__self__, "private_pool_options_id", private_pool_options_id)
+        pulumi.set(__self__, "private_pool_options_match_criteria", private_pool_options_match_criteria)
+
+    @property
+    @pulumi.getter(name="privatePoolOptionsId")
+    def private_pool_options_id(self) -> str:
+        """
+        The ID of the private node pool.
+        """
+        return pulumi.get(self, "private_pool_options_id")
+
+    @property
+    @pulumi.getter(name="privatePoolOptionsMatchCriteria")
+    def private_pool_options_match_criteria(self) -> str:
+        """
+        The type of private node pool. This parameter specifies the type of the private pool that you want to use to create instances. A private node pool is generated when an elasticity assurance or a capacity reservation service takes effect. The system selects a private node pool to launch instances. Valid values: `Open`: specifies an open private node pool. The system selects an open private node pool to launch instances. If no matching open private node pool is available, the resources in the public node pool are used. `Target`: specifies a private node pool. The system uses the resources of the specified private node pool to launch instances. If the specified private node pool is unavailable, instances cannot be started. `None`: no private node pool is used. The resources of private node pools are not used to launch the instances.
+        """
+        return pulumi.get(self, "private_pool_options_match_criteria")
+
+
+@pulumi.output_type
+class GetKubernetesNodePoolsNodepoolScalingConfigResult(dict):
+    def __init__(__self__, *,
+                 eip_bandwidth: int,
+                 eip_internet_charge_type: str,
+                 enable: bool,
+                 is_bond_eip: bool,
+                 max_size: int,
+                 min_size: int,
+                 type: str):
+        """
+        :param int eip_bandwidth: Peak EIP bandwidth. Its valid value range [1~500] in Mbps. It works if `is_bond_eip=true`. Default to `5`.
+        :param str eip_internet_charge_type: EIP billing type. `PayByBandwidth`: Charged at fixed bandwidth. `PayByTraffic`: Billed as used traffic. Default: `PayByBandwidth`. It works if `is_bond_eip=true`, conflict with `internet_charge_type`. EIP and public network IP can only choose one.
+        :param bool enable: Whether to enable automatic scaling. Value:- `true`: enables the node pool auto-scaling function.- `false`: Auto scaling is not enabled. When the value is false, other `auto_scaling` configuration parameters do not take effect.
+        :param bool is_bond_eip: Whether to bind EIP for an instance. Default: `false`.
+        :param int max_size: Max number of instances in a auto scaling group, its valid value range [0~1000]. `max_size` has to be greater than `min_size`.
+        :param int min_size: Min number of instances in a auto scaling group, its valid value range [0~1000].
+        :param str type: Instance classification, not required. Vaild value: `cpu`, `gpu`, `gpushare` and `spot`. Default: `cpu`. The actual instance type is determined by `instance_types`.
+        """
+        pulumi.set(__self__, "eip_bandwidth", eip_bandwidth)
+        pulumi.set(__self__, "eip_internet_charge_type", eip_internet_charge_type)
+        pulumi.set(__self__, "enable", enable)
+        pulumi.set(__self__, "is_bond_eip", is_bond_eip)
+        pulumi.set(__self__, "max_size", max_size)
+        pulumi.set(__self__, "min_size", min_size)
+        pulumi.set(__self__, "type", type)
+
+    @property
+    @pulumi.getter(name="eipBandwidth")
+    def eip_bandwidth(self) -> int:
+        """
+        Peak EIP bandwidth. Its valid value range [1~500] in Mbps. It works if `is_bond_eip=true`. Default to `5`.
+        """
+        return pulumi.get(self, "eip_bandwidth")
+
+    @property
+    @pulumi.getter(name="eipInternetChargeType")
+    def eip_internet_charge_type(self) -> str:
+        """
+        EIP billing type. `PayByBandwidth`: Charged at fixed bandwidth. `PayByTraffic`: Billed as used traffic. Default: `PayByBandwidth`. It works if `is_bond_eip=true`, conflict with `internet_charge_type`. EIP and public network IP can only choose one.
+        """
+        return pulumi.get(self, "eip_internet_charge_type")
+
+    @property
+    @pulumi.getter
+    def enable(self) -> bool:
+        """
+        Whether to enable automatic scaling. Value:- `true`: enables the node pool auto-scaling function.- `false`: Auto scaling is not enabled. When the value is false, other `auto_scaling` configuration parameters do not take effect.
+        """
+        return pulumi.get(self, "enable")
+
+    @property
+    @pulumi.getter(name="isBondEip")
+    def is_bond_eip(self) -> bool:
+        """
+        Whether to bind EIP for an instance. Default: `false`.
+        """
+        return pulumi.get(self, "is_bond_eip")
+
+    @property
+    @pulumi.getter(name="maxSize")
+    def max_size(self) -> int:
+        """
+        Max number of instances in a auto scaling group, its valid value range [0~1000]. `max_size` has to be greater than `min_size`.
+        """
+        return pulumi.get(self, "max_size")
+
+    @property
+    @pulumi.getter(name="minSize")
+    def min_size(self) -> int:
+        """
+        Min number of instances in a auto scaling group, its valid value range [0~1000].
+        """
+        return pulumi.get(self, "min_size")
+
+    @property
+    @pulumi.getter
+    def type(self) -> str:
+        """
+        Instance classification, not required. Vaild value: `cpu`, `gpu`, `gpushare` and `spot`. Default: `cpu`. The actual instance type is determined by `instance_types`.
+        """
+        return pulumi.get(self, "type")
+
+
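The scaling configuration result mirrors the node pool auto-scaling block. A small sketch, with the `scaling_config` attribute name and cluster ID assumed for illustration:

    import pulumi
    import pulumi_alicloud as alicloud

    pools = alicloud.cs.get_kubernetes_node_pools(cluster_id="c-0123456789abcdef")  # hypothetical ID

    for i, pool in enumerate(pools.nodepools):
        # `scaling_config` is an assumed attribute backed by the ScalingConfig result above.
        scaling = pool.scaling_config
        if scaling.enable:
            pulumi.export(f"nodepool-{i}-size-range", f"{scaling.min_size}..{scaling.max_size}")
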
+@pulumi.output_type
+class GetKubernetesNodePoolsNodepoolSpotPriceLimitResult(dict):
+    def __init__(__self__, *,
+                 instance_type: str,
+                 price_limit: str):
+        """
+        :param str instance_type: The type of the preemptible instance.
+        :param str price_limit: The maximum price of a single instance.
+        """
+        pulumi.set(__self__, "instance_type", instance_type)
+        pulumi.set(__self__, "price_limit", price_limit)
+
+    @property
+    @pulumi.getter(name="instanceType")
+    def instance_type(self) -> str:
+        """
+        The type of the preemptible instance.
+        """
+        return pulumi.get(self, "instance_type")
+
+    @property
+    @pulumi.getter(name="priceLimit")
+    def price_limit(self) -> str:
+        """
+        The maximum price of a single instance.
+        """
+        return pulumi.get(self, "price_limit")
+
+
+@pulumi.output_type
+class GetKubernetesNodePoolsNodepoolTaintResult(dict):
+    def __init__(__self__, *,
+                 effect: str,
+                 key: str,
+                 value: str):
+        """
+        :param str effect: The scheduling policy.
+        :param str key: The key of a taint.
+        :param str value: The value of a taint.
+        """
+        pulumi.set(__self__, "effect", effect)
+        pulumi.set(__self__, "key", key)
+        pulumi.set(__self__, "value", value)
+
+    @property
+    @pulumi.getter
+    def effect(self) -> str:
+        """
+        The scheduling policy.
+        """
+        return pulumi.get(self, "effect")
+
+    @property
+    @pulumi.getter
+    def key(self) -> str:
+        """
+        The key of a taint.
+        """
+        return pulumi.get(self, "key")
+
+    @property
+    @pulumi.getter
+    def value(self) -> str:
+        """
+        The value of a taint.
+        """
+        return pulumi.get(self, "value")
+
+
+@pulumi.output_type
+class GetKubernetesNodePoolsNodepoolTeeConfigResult(dict):
+    def __init__(__self__, *,
+                 tee_enable: bool):
+        """
+        :param bool tee_enable: Specifies whether to enable confidential computing for the cluster.
+        """
+        pulumi.set(__self__, "tee_enable", tee_enable)
+
+    @property
+    @pulumi.getter(name="teeEnable")
+    def tee_enable(self) -> bool:
+        """
+        Specifies whether to enable confidential computing for the cluster.
+        """
+        return pulumi.get(self, "tee_enable")
+
+
+@pulumi.output_type
+class GetKubernetesPermissionPermissionResult(dict):
+    def __init__(__self__, *,
+                 is_owner: bool,
+                 is_ram_role: bool,
+                 resource_id: str,
+                 resource_type: str,
+                 role_name: str,
+                 role_type: str):
+        """
+        :param bool is_owner: Indicates whether the permissions are granted to the cluster owner. Valid values `false`, `true`.
+        :param bool is_ram_role: Indicates whether the permissions are granted to the RAM role. Valid values `false`, `true`.
+        :param str resource_id: The permission settings to manage ACK clusters.
+        :param str resource_type: The authorization type. Valid values `cluster`, `namespace` and `console`.
+        :param str role_name: The name of the predefined role. If a custom role is assigned, the value is the name of the assigined custom role.
+        :param str role_type: The predefined role. Valid values `admin`,`ops`,`dev`,`restricted` and `custom`.
+        """
+        pulumi.set(__self__, "is_owner", is_owner)
+        pulumi.set(__self__, "is_ram_role", is_ram_role)
+        pulumi.set(__self__, "resource_id", resource_id)
+        pulumi.set(__self__, "resource_type", resource_type)
+        pulumi.set(__self__, "role_name", role_name)
+        pulumi.set(__self__, "role_type", role_type)
+
+    @property
+    @pulumi.getter(name="isOwner")
+    def is_owner(self) -> bool:
+        """
+        Indicates whether the permissions are granted to the cluster owner. Valid values `false`, `true`.
+        """
+        return pulumi.get(self, "is_owner")
+
+    @property
+    @pulumi.getter(name="isRamRole")
+    def is_ram_role(self) -> bool:
+        """
+        Indicates whether the permissions are granted to the RAM role. Valid values `false`, `true`.
+        """
+        return pulumi.get(self, "is_ram_role")
+
+    @property
+    @pulumi.getter(name="resourceId")
+    def resource_id(self) -> str:
+        """
+        The permission settings to manage ACK clusters.
+        """
+        return pulumi.get(self, "resource_id")
+
+    @property
+    @pulumi.getter(name="resourceType")
+    def resource_type(self) -> str:
+        """
+        The authorization type. Valid values `cluster`, `namespace` and `console`.
+        """
+        return pulumi.get(self, "resource_type")
+
+    @property
+    @pulumi.getter(name="roleName")
+    def role_name(self) -> str:
+        """
+        The name of the predefined role. If a custom role is assigned, the value is the name of the assigined custom role.
+        """
+        return pulumi.get(self, "role_name")
+
+    @property
+    @pulumi.getter(name="roleType")
+    def role_type(self) -> str:
+        """
+        The predefined role. Valid values `admin`,`ops`,`dev`,`restricted` and `custom`.
+        """
+        return pulumi.get(self, "role_type")
+
+
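GetKubernetesPermissionPermissionResult entries are produced by the `cs.get_kubernetes_permission` data source. A hedged sketch; the `uid` argument, the `permissions` attribute, and the RAM user ID are assumptions for illustration:

    import pulumi
    import pulumi_alicloud as alicloud

    # Hypothetical RAM user ID whose ACK grants we want to inspect.
    perms = alicloud.cs.get_kubernetes_permission(uid="1234567890123456")

    for p in perms.permissions:
        # resource_id identifies the cluster (or cluster/namespace) the grant applies to.
        pulumi.export(p.resource_id, f"{p.role_type} ({p.resource_type})")
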
+@pulumi.output_type
+class GetKubernetesVersionMetadataResult(dict):
+    def __init__(__self__, *,
+                 runtimes: Sequence['outputs.GetKubernetesVersionMetadataRuntimeResult'],
+                 version: str):
+        """
+        :param Sequence['GetKubernetesVersionMetadataRuntimeArgs'] runtimes: The list of supported runtime.
+        :param str version: The runtime version.
+        """
+        pulumi.set(__self__, "runtimes", runtimes)
+        pulumi.set(__self__, "version", version)
+
+    @property
+    @pulumi.getter
+    def runtimes(self) -> Sequence['outputs.GetKubernetesVersionMetadataRuntimeResult']:
+        """
+        The list of supported runtime.
+        """
+        return pulumi.get(self, "runtimes")
+
+    @property
+    @pulumi.getter
+    def version(self) -> str:
+        """
+        The runtime version.
+        """
+        return pulumi.get(self, "version")
+
+
+@pulumi.output_type
+class GetKubernetesVersionMetadataRuntimeResult(dict):
+    def __init__(__self__, *,
+                 name: str,
+                 version: str):
+        """
+        :param str name: The runtime name.
+        :param str version: The runtime version.
+        """
+        pulumi.set(__self__, "name", name)
+        pulumi.set(__self__, "version", version)
+
+    @property
+    @pulumi.getter
+    def name(self) -> str:
+        """
+        The runtime name.
+        """
+        return pulumi.get(self, "name")
+
+    @property
+    @pulumi.getter
+    def version(self) -> str:
+        """
+        The runtime version.
+        """
+        return pulumi.get(self, "version")
+
+
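The version metadata types back the `cs.get_kubernetes_version` data source. The sketch below assumes its result exposes these entries as a `metadata` list (an illustrative attribute name, not confirmed by this diff):

    import pulumi
    import pulumi_alicloud as alicloud

    versions = alicloud.cs.get_kubernetes_version(cluster_type="ManagedKubernetes")

    for meta in versions.metadata:  # assumed attribute name for the metadata entries
        runtimes = ", ".join(f"{r.name} {r.version}" for r in meta.runtimes)
        pulumi.export(meta.version, runtimes)
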
+@pulumi.output_type
+class GetManagedKubernetesClustersClusterResult(dict):
+    def __init__(__self__, *,
+                 availability_zone: str,
+                 cluster_network_type: str,
+                 connections: 'outputs.GetManagedKubernetesClustersClusterConnectionsResult',
+                 id: str,
+                 image_id: str,
+                 key_name: str,
+                 log_configs: Sequence['outputs.GetManagedKubernetesClustersClusterLogConfigResult'],
+                 name: str,
+                 nat_gateway_id: str,
+                 pod_cidr: str,
+                 rrsa_config: 'outputs.GetManagedKubernetesClustersClusterRrsaConfigResult',
+                 security_group_id: str,
+                 service_cidr: str,
+                 slb_internet_enabled: bool,
+                 state: str,
+                 vpc_id: str,
+                 vswitch_ids: Sequence[str],
+                 worker_auto_renew: bool,
+                 worker_auto_renew_period: int,
+                 worker_data_disk_category: str,
+                 worker_data_disk_size: int,
+                 worker_disk_category: str,
+                 worker_disk_size: int,
+                 worker_instance_charge_type: str,
+                 worker_instance_types: Sequence[str],
+                 worker_nodes: Sequence['outputs.GetManagedKubernetesClustersClusterWorkerNodeResult'],
+                 worker_numbers: Sequence[int],
+                 worker_period: int,
+                 worker_period_unit: str):
+        """
+        :param str availability_zone: The ID of availability zone.
+        :param 'GetManagedKubernetesClustersClusterConnectionsArgs' connections: Map of kubernetes cluster connection information.
+        :param str id: ID of the node.
+        :param str key_name: The keypair of ssh login cluster node, you have to create it first.
+        :param Sequence['GetManagedKubernetesClustersClusterLogConfigArgs'] log_configs: A list of one element containing information about the associated log store. It contains the following attributes:
+        :param str name: Node name.
+        :param str nat_gateway_id: The ID of nat gateway used to launch kubernetes cluster.
+        :param 'GetManagedKubernetesClustersClusterRrsaConfigArgs' rrsa_config: (Available since v1.245.0) Nested attribute containing RRSA related data for your cluster.
+        :param str security_group_id: The ID of security group where the current cluster worker node is located.
+        :param str state: (Available since v1.245.0) The state of cluster.
+        :param str vpc_id: The ID of VPC where the current cluster is located.
+        :param Sequence[str] vswitch_ids: The ID of VSwitches where the current cluster is located.
+        :param Sequence['GetManagedKubernetesClustersClusterWorkerNodeArgs'] worker_nodes: List of cluster worker nodes.
+        :param Sequence[int] worker_numbers: The ECS instance node number in the current container cluster.
+        """
+        pulumi.set(__self__, "availability_zone", availability_zone)
+        pulumi.set(__self__, "cluster_network_type", cluster_network_type)
+        pulumi.set(__self__, "connections", connections)
+        pulumi.set(__self__, "id", id)
+        pulumi.set(__self__, "image_id", image_id)
+        pulumi.set(__self__, "key_name", key_name)
+        pulumi.set(__self__, "log_configs", log_configs)
+        pulumi.set(__self__, "name", name)
+        pulumi.set(__self__, "nat_gateway_id", nat_gateway_id)
+        pulumi.set(__self__, "pod_cidr", pod_cidr)
+        pulumi.set(__self__, "rrsa_config", rrsa_config)
+        pulumi.set(__self__, "security_group_id", security_group_id)
+        pulumi.set(__self__, "service_cidr", service_cidr)
+        pulumi.set(__self__, "slb_internet_enabled", slb_internet_enabled)
+        pulumi.set(__self__, "state", state)
+        pulumi.set(__self__, "vpc_id", vpc_id)
+        pulumi.set(__self__, "vswitch_ids", vswitch_ids)
+        pulumi.set(__self__, "worker_auto_renew", worker_auto_renew)
+        pulumi.set(__self__, "worker_auto_renew_period", worker_auto_renew_period)
+        pulumi.set(__self__, "worker_data_disk_category", worker_data_disk_category)
+        pulumi.set(__self__, "worker_data_disk_size", worker_data_disk_size)
+        pulumi.set(__self__, "worker_disk_category", worker_disk_category)
+        pulumi.set(__self__, "worker_disk_size", worker_disk_size)
+        pulumi.set(__self__, "worker_instance_charge_type", worker_instance_charge_type)
+        pulumi.set(__self__, "worker_instance_types", worker_instance_types)
+        pulumi.set(__self__, "worker_nodes", worker_nodes)
+        pulumi.set(__self__, "worker_numbers", worker_numbers)
+        pulumi.set(__self__, "worker_period", worker_period)
+        pulumi.set(__self__, "worker_period_unit", worker_period_unit)
+
+    @property
+    @pulumi.getter(name="availabilityZone")
+    def availability_zone(self) -> str:
+        """
+        The ID of availability zone.
+        """
+        return pulumi.get(self, "availability_zone")
+
+    @property
+    @pulumi.getter(name="clusterNetworkType")
+    def cluster_network_type(self) -> str:
+        return pulumi.get(self, "cluster_network_type")
+
+    @property
+    @pulumi.getter
+    def connections(self) -> 'outputs.GetManagedKubernetesClustersClusterConnectionsResult':
+        """
+        Map of kubernetes cluster connection information.
+        """
+        return pulumi.get(self, "connections")
+
+    @property
+    @pulumi.getter
+    def id(self) -> str:
+        """
+        ID of the node.
+        """
+        return pulumi.get(self, "id")
+
+    @property
+    @pulumi.getter(name="imageId")
+    def image_id(self) -> str:
+        return pulumi.get(self, "image_id")
+
+    @property
+    @pulumi.getter(name="keyName")
+    def key_name(self) -> str:
+        """
+        The keypair of ssh login cluster node, you have to create it first.
+        """
+        return pulumi.get(self, "key_name")
+
+    @property
+    @pulumi.getter(name="logConfigs")
+    def log_configs(self) -> Sequence['outputs.GetManagedKubernetesClustersClusterLogConfigResult']:
+        """
+        A list of one element containing information about the associated log store. It contains the following attributes:
+        """
+        return pulumi.get(self, "log_configs")
+
+    @property
+    @pulumi.getter
+    def name(self) -> str:
+        """
+        Node name.
+        """
+        return pulumi.get(self, "name")
+
+    @property
+    @pulumi.getter(name="natGatewayId")
+    def nat_gateway_id(self) -> str:
+        """
+        The ID of nat gateway used to launch kubernetes cluster.
+        """
+        return pulumi.get(self, "nat_gateway_id")
+
+    @property
+    @pulumi.getter(name="podCidr")
+    def pod_cidr(self) -> str:
+        return pulumi.get(self, "pod_cidr")
+
+    @property
+    @pulumi.getter(name="rrsaConfig")
+    def rrsa_config(self) -> 'outputs.GetManagedKubernetesClustersClusterRrsaConfigResult':
+        """
+        (Available since v1.245.0) Nested attribute containing RRSA related data for your cluster.
+        """
+        return pulumi.get(self, "rrsa_config")
+
+    @property
+    @pulumi.getter(name="securityGroupId")
+    def security_group_id(self) -> str:
+        """
+        The ID of security group where the current cluster worker node is located.
+        """
+        return pulumi.get(self, "security_group_id")
+
+    @property
+    @pulumi.getter(name="serviceCidr")
+    def service_cidr(self) -> str:
+        return pulumi.get(self, "service_cidr")
+
+    @property
+    @pulumi.getter(name="slbInternetEnabled")
+    def slb_internet_enabled(self) -> bool:
+        return pulumi.get(self, "slb_internet_enabled")
+
+    @property
+    @pulumi.getter
+    def state(self) -> str:
+        """
+        (Available since v1.245.0) The state of cluster.
+        """
+        return pulumi.get(self, "state")
+
+    @property
+    @pulumi.getter(name="vpcId")
+    def vpc_id(self) -> str:
+        """
+        The ID of VPC where the current cluster is located.
+        """
+        return pulumi.get(self, "vpc_id")
+
+    @property
+    @pulumi.getter(name="vswitchIds")
+    def vswitch_ids(self) -> Sequence[str]:
+        """
+        The ID of VSwitches where the current cluster is located.
+        """
+        return pulumi.get(self, "vswitch_ids")
+
+    @property
+    @pulumi.getter(name="workerAutoRenew")
+    def worker_auto_renew(self) -> bool:
+        return pulumi.get(self, "worker_auto_renew")
+
+    @property
+    @pulumi.getter(name="workerAutoRenewPeriod")
+    def worker_auto_renew_period(self) -> int:
+        return pulumi.get(self, "worker_auto_renew_period")

     @property
     @pulumi.getter(name="workerDataDiskCategory")
@@ -4942,7 +6669,7 @@ class GetManagedKubernetesClustersClusterResult(dict):
     @pulumi.getter(name="workerNodes")
     def worker_nodes(self) -> Sequence['outputs.GetManagedKubernetesClustersClusterWorkerNodeResult']:
         """
-        List of cluster worker nodes.
+        List of cluster worker nodes.
         """
         return pulumi.get(self, "worker_nodes")

@@ -5045,6 +6772,57 @@ class GetManagedKubernetesClustersClusterLogConfigResult(dict):
         return pulumi.get(self, "type")


+@pulumi.output_type
+class GetManagedKubernetesClustersClusterRrsaConfigResult(dict):
+    def __init__(__self__, *,
+                 enabled: bool,
+                 ram_oidc_provider_arn: str,
+                 ram_oidc_provider_name: str,
+                 rrsa_oidc_issuer_url: str):
+        """
+        :param bool enabled: Whether the RRSA feature has been enabled.
+        :param str ram_oidc_provider_arn: The arn of OIDC provider that was registered in RAM.
+        :param str ram_oidc_provider_name: The name of OIDC Provider that was registered in RAM.
+        :param str rrsa_oidc_issuer_url: The issuer URL of RRSA OIDC Token.
+        """
+        pulumi.set(__self__, "enabled", enabled)
+        pulumi.set(__self__, "ram_oidc_provider_arn", ram_oidc_provider_arn)
+        pulumi.set(__self__, "ram_oidc_provider_name", ram_oidc_provider_name)
+        pulumi.set(__self__, "rrsa_oidc_issuer_url", rrsa_oidc_issuer_url)
+
+    @property
+    @pulumi.getter
+    def enabled(self) -> bool:
+        """
+        Whether the RRSA feature has been enabled.
+        """
+        return pulumi.get(self, "enabled")
+
+    @property
+    @pulumi.getter(name="ramOidcProviderArn")
+    def ram_oidc_provider_arn(self) -> str:
+        """
+        The arn of OIDC provider that was registered in RAM.
+        """
+        return pulumi.get(self, "ram_oidc_provider_arn")
+
+    @property
+    @pulumi.getter(name="ramOidcProviderName")
+    def ram_oidc_provider_name(self) -> str:
+        """
+        The name of OIDC Provider that was registered in RAM.
+        """
+        return pulumi.get(self, "ram_oidc_provider_name")
+
+    @property
+    @pulumi.getter(name="rrsaOidcIssuerUrl")
+    def rrsa_oidc_issuer_url(self) -> str:
+        """
+        The issuer URL of RRSA OIDC Token.
+        """
+        return pulumi.get(self, "rrsa_oidc_issuer_url")
+
+
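The new RRSA block surfaces on each cluster entry of `cs.get_managed_kubernetes_clusters` through the `rrsa_config` and `state` attributes added above. A minimal sketch, with the name filter as a placeholder:

    import pulumi
    import pulumi_alicloud as alicloud

    clusters = alicloud.cs.get_managed_kubernetes_clusters(name_regex="my-ack-cluster")  # placeholder filter

    for c in clusters.clusters:
        pulumi.export(f"{c.name}-state", c.state)
        if c.rrsa_config.enabled:
            # The OIDC issuer URL is what RAM trust policies reference for RRSA.
            pulumi.export(f"{c.name}-rrsa-issuer", c.rrsa_config.rrsa_oidc_issuer_url)
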
 @pulumi.output_type
 class GetManagedKubernetesClustersClusterWorkerNodeResult(dict):
     def __init__(__self__, *,