pulumi-vsphere 4.14.0a1741997722__py3-none-any.whl → 4.14.0a1746734806__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pulumi-vsphere might be problematic.

Files changed (86)
  1. pulumi_vsphere/__init__.py +1 -0
  2. pulumi_vsphere/_inputs.py +969 -968
  3. pulumi_vsphere/compute_cluster.py +919 -917
  4. pulumi_vsphere/compute_cluster_host_group.py +44 -42
  5. pulumi_vsphere/compute_cluster_vm_affinity_rule.py +72 -70
  6. pulumi_vsphere/compute_cluster_vm_anti_affinity_rule.py +72 -70
  7. pulumi_vsphere/compute_cluster_vm_dependency_rule.py +86 -84
  8. pulumi_vsphere/compute_cluster_vm_group.py +44 -42
  9. pulumi_vsphere/compute_cluster_vm_host_rule.py +100 -98
  10. pulumi_vsphere/config/__init__.py +1 -0
  11. pulumi_vsphere/config/__init__.pyi +1 -0
  12. pulumi_vsphere/config/vars.py +1 -0
  13. pulumi_vsphere/content_library.py +44 -42
  14. pulumi_vsphere/content_library_item.py +86 -84
  15. pulumi_vsphere/custom_attribute.py +30 -28
  16. pulumi_vsphere/datacenter.py +65 -63
  17. pulumi_vsphere/datastore_cluster.py +352 -350
  18. pulumi_vsphere/datastore_cluster_vm_anti_affinity_rule.py +72 -70
  19. pulumi_vsphere/distributed_port_group.py +646 -644
  20. pulumi_vsphere/distributed_virtual_switch.py +1283 -1281
  21. pulumi_vsphere/distributed_virtual_switch_pvlan_mapping.py +58 -56
  22. pulumi_vsphere/dpm_host_override.py +58 -56
  23. pulumi_vsphere/drs_vm_override.py +58 -56
  24. pulumi_vsphere/entity_permissions.py +30 -28
  25. pulumi_vsphere/file.py +96 -94
  26. pulumi_vsphere/folder.py +72 -70
  27. pulumi_vsphere/get_compute_cluster.py +13 -12
  28. pulumi_vsphere/get_compute_cluster_host_group.py +13 -12
  29. pulumi_vsphere/get_content_library.py +7 -6
  30. pulumi_vsphere/get_content_library_item.py +17 -16
  31. pulumi_vsphere/get_custom_attribute.py +8 -7
  32. pulumi_vsphere/get_datacenter.py +8 -7
  33. pulumi_vsphere/get_datastore.py +17 -16
  34. pulumi_vsphere/get_datastore_cluster.py +13 -12
  35. pulumi_vsphere/get_datastore_stats.py +17 -16
  36. pulumi_vsphere/get_distributed_virtual_switch.py +13 -12
  37. pulumi_vsphere/get_dynamic.py +17 -16
  38. pulumi_vsphere/get_folder.py +7 -6
  39. pulumi_vsphere/get_guest_os_customization.py +11 -10
  40. pulumi_vsphere/get_host.py +13 -12
  41. pulumi_vsphere/get_host_base_images.py +3 -2
  42. pulumi_vsphere/get_host_pci_device.py +23 -22
  43. pulumi_vsphere/get_host_thumbprint.py +17 -16
  44. pulumi_vsphere/get_host_vgpu_profile.py +12 -11
  45. pulumi_vsphere/get_license.py +12 -11
  46. pulumi_vsphere/get_network.py +18 -17
  47. pulumi_vsphere/get_ovf_vm_template.py +89 -88
  48. pulumi_vsphere/get_policy.py +7 -6
  49. pulumi_vsphere/get_resource_pool.py +12 -11
  50. pulumi_vsphere/get_role.py +20 -19
  51. pulumi_vsphere/get_tag.py +13 -12
  52. pulumi_vsphere/get_tag_category.py +10 -9
  53. pulumi_vsphere/get_vapp_container.py +12 -11
  54. pulumi_vsphere/get_virtual_machine.py +204 -203
  55. pulumi_vsphere/get_vmfs_disks.py +18 -17
  56. pulumi_vsphere/guest_os_customization.py +58 -56
  57. pulumi_vsphere/ha_vm_override.py +212 -210
  58. pulumi_vsphere/host.py +198 -196
  59. pulumi_vsphere/host_port_group.py +254 -252
  60. pulumi_vsphere/host_virtual_switch.py +296 -294
  61. pulumi_vsphere/license.py +58 -56
  62. pulumi_vsphere/nas_datastore.py +212 -210
  63. pulumi_vsphere/offline_software_depot.py +16 -14
  64. pulumi_vsphere/outputs.py +721 -720
  65. pulumi_vsphere/provider.py +119 -97
  66. pulumi_vsphere/pulumi-plugin.json +1 -1
  67. pulumi_vsphere/resource_pool.py +212 -210
  68. pulumi_vsphere/role.py +37 -35
  69. pulumi_vsphere/storage_drs_vm_override.py +72 -70
  70. pulumi_vsphere/supervisor.py +156 -154
  71. pulumi_vsphere/tag.py +44 -42
  72. pulumi_vsphere/tag_category.py +58 -56
  73. pulumi_vsphere/vapp_container.py +212 -210
  74. pulumi_vsphere/vapp_entity.py +142 -140
  75. pulumi_vsphere/virtual_disk.py +100 -98
  76. pulumi_vsphere/virtual_machine.py +1041 -1039
  77. pulumi_vsphere/virtual_machine_class.py +86 -84
  78. pulumi_vsphere/virtual_machine_snapshot.py +100 -98
  79. pulumi_vsphere/vm_storage_policy.py +30 -28
  80. pulumi_vsphere/vmfs_datastore.py +149 -147
  81. pulumi_vsphere/vnic.py +114 -112
  82. {pulumi_vsphere-4.14.0a1741997722.dist-info → pulumi_vsphere-4.14.0a1746734806.dist-info}/METADATA +4 -4
  83. pulumi_vsphere-4.14.0a1746734806.dist-info/RECORD +87 -0
  84. {pulumi_vsphere-4.14.0a1741997722.dist-info → pulumi_vsphere-4.14.0a1746734806.dist-info}/WHEEL +1 -1
  85. pulumi_vsphere-4.14.0a1741997722.dist-info/RECORD +0 -87
  86. {pulumi_vsphere-4.14.0a1741997722.dist-info → pulumi_vsphere-4.14.0a1746734806.dist-info}/top_level.txt +0 -0
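The per-file churn above appears to follow a single pattern, shown in the compute_cluster.py hunks below: each generated module adds import builtins and qualifies primitive type annotations with it (str becomes builtins.str, bool becomes builtins.bool, int becomes builtins.int). A minimal, illustrative sketch of that annotation pattern (not code taken from the package; only the pulumi SDK is assumed):

from typing import Optional

import builtins

import pulumi


def describe_cluster(name: Optional[pulumi.Input[builtins.str]] = None,
                     drs_enabled: Optional[pulumi.Input[builtins.bool]] = None,
                     dpm_threshold: Optional[pulumi.Input[builtins.int]] = None) -> None:
    # Qualifying primitives through the builtins module keeps these annotations pointing
    # at the real built-in types even if a module-level name shadows str/bool/int.
    pass

At call sites nothing changes: builtins.str is the same object as str, so plain Python values are passed exactly as before.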
@@ -2,6 +2,7 @@
  # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
  # *** Do not edit by hand unless you're certain you know what you are doing! ***

+ import builtins
  import copy
  import warnings
  import sys
@@ -21,193 +22,193 @@ __all__ = ['ComputeClusterArgs', 'ComputeCluster']
  @pulumi.input_type
  class ComputeClusterArgs:
  def __init__(__self__, *,
- datacenter_id: pulumi.Input[str],
- custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
- dpm_automation_level: Optional[pulumi.Input[str]] = None,
- dpm_enabled: Optional[pulumi.Input[bool]] = None,
- dpm_threshold: Optional[pulumi.Input[int]] = None,
- drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
- drs_automation_level: Optional[pulumi.Input[str]] = None,
- drs_enable_predictive_drs: Optional[pulumi.Input[bool]] = None,
- drs_enable_vm_overrides: Optional[pulumi.Input[bool]] = None,
- drs_enabled: Optional[pulumi.Input[bool]] = None,
- drs_migration_threshold: Optional[pulumi.Input[int]] = None,
- drs_scale_descendants_shares: Optional[pulumi.Input[str]] = None,
- folder: Optional[pulumi.Input[str]] = None,
- force_evacuate_on_destroy: Optional[pulumi.Input[bool]] = None,
- ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
- ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[int]] = None,
- ha_admission_control_performance_tolerance: Optional[pulumi.Input[int]] = None,
- ha_admission_control_policy: Optional[pulumi.Input[str]] = None,
- ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[bool]] = None,
- ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[int]] = None,
- ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[int]] = None,
- ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[int]] = None,
- ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[int]] = None,
- ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[bool]] = None,
- ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
- ha_datastore_apd_recovery_action: Optional[pulumi.Input[str]] = None,
- ha_datastore_apd_response: Optional[pulumi.Input[str]] = None,
- ha_datastore_apd_response_delay: Optional[pulumi.Input[int]] = None,
- ha_datastore_pdl_response: Optional[pulumi.Input[str]] = None,
- ha_enabled: Optional[pulumi.Input[bool]] = None,
- ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
- ha_heartbeat_datastore_policy: Optional[pulumi.Input[str]] = None,
- ha_host_isolation_response: Optional[pulumi.Input[str]] = None,
- ha_host_monitoring: Optional[pulumi.Input[str]] = None,
- ha_vm_component_protection: Optional[pulumi.Input[str]] = None,
- ha_vm_dependency_restart_condition: Optional[pulumi.Input[str]] = None,
- ha_vm_failure_interval: Optional[pulumi.Input[int]] = None,
- ha_vm_maximum_failure_window: Optional[pulumi.Input[int]] = None,
- ha_vm_maximum_resets: Optional[pulumi.Input[int]] = None,
- ha_vm_minimum_uptime: Optional[pulumi.Input[int]] = None,
- ha_vm_monitoring: Optional[pulumi.Input[str]] = None,
- ha_vm_restart_additional_delay: Optional[pulumi.Input[int]] = None,
- ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
- ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
- host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
+ datacenter_id: pulumi.Input[builtins.str],
+ custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
+ dpm_automation_level: Optional[pulumi.Input[builtins.str]] = None,
+ dpm_enabled: Optional[pulumi.Input[builtins.bool]] = None,
+ dpm_threshold: Optional[pulumi.Input[builtins.int]] = None,
+ drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
+ drs_automation_level: Optional[pulumi.Input[builtins.str]] = None,
+ drs_enable_predictive_drs: Optional[pulumi.Input[builtins.bool]] = None,
+ drs_enable_vm_overrides: Optional[pulumi.Input[builtins.bool]] = None,
+ drs_enabled: Optional[pulumi.Input[builtins.bool]] = None,
+ drs_migration_threshold: Optional[pulumi.Input[builtins.int]] = None,
+ drs_scale_descendants_shares: Optional[pulumi.Input[builtins.str]] = None,
+ folder: Optional[pulumi.Input[builtins.str]] = None,
+ force_evacuate_on_destroy: Optional[pulumi.Input[builtins.bool]] = None,
+ ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
+ ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[builtins.int]] = None,
+ ha_admission_control_performance_tolerance: Optional[pulumi.Input[builtins.int]] = None,
+ ha_admission_control_policy: Optional[pulumi.Input[builtins.str]] = None,
+ ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[builtins.bool]] = None,
+ ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[builtins.int]] = None,
+ ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[builtins.int]] = None,
+ ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[builtins.int]] = None,
+ ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[builtins.int]] = None,
+ ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[builtins.bool]] = None,
+ ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
+ ha_datastore_apd_recovery_action: Optional[pulumi.Input[builtins.str]] = None,
+ ha_datastore_apd_response: Optional[pulumi.Input[builtins.str]] = None,
+ ha_datastore_apd_response_delay: Optional[pulumi.Input[builtins.int]] = None,
+ ha_datastore_pdl_response: Optional[pulumi.Input[builtins.str]] = None,
+ ha_enabled: Optional[pulumi.Input[builtins.bool]] = None,
+ ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
+ ha_heartbeat_datastore_policy: Optional[pulumi.Input[builtins.str]] = None,
+ ha_host_isolation_response: Optional[pulumi.Input[builtins.str]] = None,
+ ha_host_monitoring: Optional[pulumi.Input[builtins.str]] = None,
+ ha_vm_component_protection: Optional[pulumi.Input[builtins.str]] = None,
+ ha_vm_dependency_restart_condition: Optional[pulumi.Input[builtins.str]] = None,
+ ha_vm_failure_interval: Optional[pulumi.Input[builtins.int]] = None,
+ ha_vm_maximum_failure_window: Optional[pulumi.Input[builtins.int]] = None,
+ ha_vm_maximum_resets: Optional[pulumi.Input[builtins.int]] = None,
+ ha_vm_minimum_uptime: Optional[pulumi.Input[builtins.int]] = None,
+ ha_vm_monitoring: Optional[pulumi.Input[builtins.str]] = None,
+ ha_vm_restart_additional_delay: Optional[pulumi.Input[builtins.int]] = None,
+ ha_vm_restart_priority: Optional[pulumi.Input[builtins.str]] = None,
+ ha_vm_restart_timeout: Optional[pulumi.Input[builtins.int]] = None,
+ host_cluster_exit_timeout: Optional[pulumi.Input[builtins.int]] = None,
  host_image: Optional[pulumi.Input['ComputeClusterHostImageArgs']] = None,
- host_managed: Optional[pulumi.Input[bool]] = None,
- host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
- name: Optional[pulumi.Input[str]] = None,
- proactive_ha_automation_level: Optional[pulumi.Input[str]] = None,
- proactive_ha_enabled: Optional[pulumi.Input[bool]] = None,
- proactive_ha_moderate_remediation: Optional[pulumi.Input[str]] = None,
- proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
- proactive_ha_severe_remediation: Optional[pulumi.Input[str]] = None,
- tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
- vsan_compression_enabled: Optional[pulumi.Input[bool]] = None,
- vsan_dedup_enabled: Optional[pulumi.Input[bool]] = None,
+ host_managed: Optional[pulumi.Input[builtins.bool]] = None,
+ host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
+ name: Optional[pulumi.Input[builtins.str]] = None,
+ proactive_ha_automation_level: Optional[pulumi.Input[builtins.str]] = None,
+ proactive_ha_enabled: Optional[pulumi.Input[builtins.bool]] = None,
+ proactive_ha_moderate_remediation: Optional[pulumi.Input[builtins.str]] = None,
+ proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
+ proactive_ha_severe_remediation: Optional[pulumi.Input[builtins.str]] = None,
+ tags: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
+ vsan_compression_enabled: Optional[pulumi.Input[builtins.bool]] = None,
+ vsan_dedup_enabled: Optional[pulumi.Input[builtins.bool]] = None,
  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]] = None,
- vsan_dit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
- vsan_dit_rekey_interval: Optional[pulumi.Input[int]] = None,
- vsan_enabled: Optional[pulumi.Input[bool]] = None,
- vsan_esa_enabled: Optional[pulumi.Input[bool]] = None,
+ vsan_dit_encryption_enabled: Optional[pulumi.Input[builtins.bool]] = None,
+ vsan_dit_rekey_interval: Optional[pulumi.Input[builtins.int]] = None,
+ vsan_enabled: Optional[pulumi.Input[builtins.bool]] = None,
+ vsan_esa_enabled: Optional[pulumi.Input[builtins.bool]] = None,
  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]] = None,
- vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[bool]] = None,
- vsan_performance_enabled: Optional[pulumi.Input[bool]] = None,
- vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
+ vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[builtins.bool]] = None,
+ vsan_performance_enabled: Optional[pulumi.Input[builtins.bool]] = None,
+ vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
  vsan_stretched_cluster: Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']] = None,
- vsan_unmap_enabled: Optional[pulumi.Input[bool]] = None,
- vsan_verbose_mode_enabled: Optional[pulumi.Input[bool]] = None):
+ vsan_unmap_enabled: Optional[pulumi.Input[builtins.bool]] = None,
+ vsan_verbose_mode_enabled: Optional[pulumi.Input[builtins.bool]] = None):
  """
  The set of arguments for constructing a ComputeCluster resource.
- :param pulumi.Input[str] datacenter_id: The managed object ID of
+ :param pulumi.Input[builtins.str] datacenter_id: The managed object ID of
  the datacenter to create the cluster in. Forces a new resource if changed.
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: A map of custom attribute ids to attribute
+ :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] custom_attributes: A map of custom attribute ids to attribute
  value strings to set for the datastore cluster.

  > **NOTE:** Custom attributes are unsupported on direct ESXi connections
  and require vCenter Server.
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
- :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+ :param pulumi.Input[builtins.str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
+ :param pulumi.Input[builtins.bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
  machines in the cluster. Requires that DRS be enabled.
- :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+ :param pulumi.Input[builtins.int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
  affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
  setting.
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
- :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+ :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
+ :param pulumi.Input[builtins.str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
  fullyAutomated.
- :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
- :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
- :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+ :param pulumi.Input[builtins.bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
+ :param pulumi.Input[builtins.bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
+ :param pulumi.Input[builtins.bool] drs_enabled: Enable DRS for this cluster.
+ :param pulumi.Input[builtins.int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
  more imbalance while a higher setting will tolerate less.
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
- :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
+ :param pulumi.Input[builtins.str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
+ :param pulumi.Input[builtins.str] folder: The relative path to a folder to put this cluster in.
  This is a path relative to the datacenter you are deploying the cluster to.
  Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
  The provider will place a cluster named `compute-cluster-test` in a
  host folder located at `/dc1/host/foo/bar`, with the final inventory path
  being `/dc1/host/foo/bar/datastore-cluster-test`.
- :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
+ :param pulumi.Input[builtins.bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
  for testing and is not recommended in normal use.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
  failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
  will ignore the host when making recommendations.
- :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
+ :param pulumi.Input[builtins.int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
  machine operations. The maximum is one less than the number of hosts in the cluster.
- :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
+ :param pulumi.Input[builtins.int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
  warnings only, whereas a value of 100 disables the setting.
- :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+ :param pulumi.Input[builtins.str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
  permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
  slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
  issues.
- :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+ :param pulumi.Input[builtins.bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
  subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
  from the total amount of resources in the cluster. Disable to supply user-defined values.
- :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+ :param pulumi.Input[builtins.int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
  the cluster to reserve for failover.
- :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+ :param pulumi.Input[builtins.int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
  the cluster to reserve for failover.
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
- :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+ :param pulumi.Input[builtins.int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
+ :param pulumi.Input[builtins.int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
+ :param pulumi.Input[builtins.bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
  to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
  currently in the cluster.
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
- :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
+ :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
+ :param pulumi.Input[builtins.str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
  affected datastore clears in the middle of an APD event. Can be one of none or reset.
- :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ :param pulumi.Input[builtins.str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
  detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
  restartAggressive.
- :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
+ :param pulumi.Input[builtins.int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
  the response action defined in ha_datastore_apd_response.
- :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ :param pulumi.Input[builtins.str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
  detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+ :param pulumi.Input[builtins.bool] ha_enabled: Enable vSphere HA for this cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
  ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+ :param pulumi.Input[builtins.str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
  allFeasibleDsWithUserPreference.
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+ :param pulumi.Input[builtins.str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
  Can be one of none, powerOff, or shutdown.
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
+ :param pulumi.Input[builtins.str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
+ :param pulumi.Input[builtins.str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
  failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
+ :param pulumi.Input[builtins.str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
  on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
- :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
+ :param pulumi.Input[builtins.int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
  failed. The value is in seconds.
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
+ :param pulumi.Input[builtins.int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
  attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
  time is allotted.
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
+ :param pulumi.Input[builtins.int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
+ :param pulumi.Input[builtins.int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
+ :param pulumi.Input[builtins.str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
  vmMonitoringOnly, or vmAndAppMonitoring.
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
+ :param pulumi.Input[builtins.int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
+ :param pulumi.Input[builtins.str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
  high, or highest.
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
+ :param pulumi.Input[builtins.int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
  proceeding with the next priority.
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
+ :param pulumi.Input[builtins.int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
  :param pulumi.Input['ComputeClusterHostImageArgs'] host_image: Details about the host image which should be applied to the cluster.
- :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
- :param pulumi.Input[str] name: The name of the cluster.
- :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
- :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
- :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+ :param pulumi.Input[builtins.bool] host_managed: Must be set if cluster enrollment is managed from host resource.
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
+ :param pulumi.Input[builtins.str] name: The name of the cluster.
+ :param pulumi.Input[builtins.str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
+ :param pulumi.Input[builtins.bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
+ :param pulumi.Input[builtins.str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
  this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
- :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
+ :param pulumi.Input[builtins.str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
  cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
- :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
- :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] tags: The IDs of any tags to attach to this resource.
+ :param pulumi.Input[builtins.bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
+ :param pulumi.Input[builtins.bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
  :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
- :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
- :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
- :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
- :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
+ :param pulumi.Input[builtins.bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
+ :param pulumi.Input[builtins.int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
+ :param pulumi.Input[builtins.bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
+ :param pulumi.Input[builtins.bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
  :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: The configuration for vSAN fault domains.
- :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
- :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
+ :param pulumi.Input[builtins.bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
+ :param pulumi.Input[builtins.bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
  :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: The configuration for stretched cluster.
- :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
- :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
+ :param pulumi.Input[builtins.bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
+ :param pulumi.Input[builtins.bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
  """
  pulumi.set(__self__, "datacenter_id", datacenter_id)
  if custom_attributes is not None:
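For orientation, a minimal usage sketch assembled from the constructor arguments documented in the hunk above; the datacenter name and option values are placeholders, not taken from the package:

import pulumi_vsphere as vsphere

# Look up an existing datacenter by name (placeholder name).
datacenter = vsphere.get_datacenter(name="dc-01")

# Create a cluster using a few of the documented arguments; the builtins-qualified
# annotations still accept plain Python str/bool values.
cluster = vsphere.ComputeCluster(
    "compute-cluster-test",
    datacenter_id=datacenter.id,
    drs_enabled=True,
    drs_automation_level="fullyAutomated",
    ha_enabled=True,
)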
@@ -349,7 +350,7 @@ class ComputeClusterArgs:
349
350
 
350
351
  @property
351
352
  @pulumi.getter(name="datacenterId")
352
- def datacenter_id(self) -> pulumi.Input[str]:
353
+ def datacenter_id(self) -> pulumi.Input[builtins.str]:
353
354
  """
354
355
  The managed object ID of
355
356
  the datacenter to create the cluster in. Forces a new resource if changed.
@@ -357,12 +358,12 @@ class ComputeClusterArgs:
357
358
  return pulumi.get(self, "datacenter_id")
358
359
 
359
360
  @datacenter_id.setter
360
- def datacenter_id(self, value: pulumi.Input[str]):
361
+ def datacenter_id(self, value: pulumi.Input[builtins.str]):
361
362
  pulumi.set(self, "datacenter_id", value)
362
363
 
363
364
  @property
364
365
  @pulumi.getter(name="customAttributes")
365
- def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
366
+ def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
366
367
  """
367
368
  A map of custom attribute ids to attribute
368
369
  value strings to set for the datastore cluster.
@@ -373,24 +374,24 @@ class ComputeClusterArgs:
373
374
  return pulumi.get(self, "custom_attributes")
374
375
 
375
376
  @custom_attributes.setter
376
- def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
377
+ def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
377
378
  pulumi.set(self, "custom_attributes", value)
378
379
 
379
380
  @property
380
381
  @pulumi.getter(name="dpmAutomationLevel")
381
- def dpm_automation_level(self) -> Optional[pulumi.Input[str]]:
382
+ def dpm_automation_level(self) -> Optional[pulumi.Input[builtins.str]]:
382
383
  """
383
384
  The automation level for host power operations in this cluster. Can be one of manual or automated.
384
385
  """
385
386
  return pulumi.get(self, "dpm_automation_level")
386
387
 
387
388
  @dpm_automation_level.setter
388
- def dpm_automation_level(self, value: Optional[pulumi.Input[str]]):
389
+ def dpm_automation_level(self, value: Optional[pulumi.Input[builtins.str]]):
389
390
  pulumi.set(self, "dpm_automation_level", value)
390
391
 
391
392
  @property
392
393
  @pulumi.getter(name="dpmEnabled")
393
- def dpm_enabled(self) -> Optional[pulumi.Input[bool]]:
394
+ def dpm_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
394
395
  """
395
396
  Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
396
397
  machines in the cluster. Requires that DRS be enabled.
@@ -398,12 +399,12 @@ class ComputeClusterArgs:
398
399
  return pulumi.get(self, "dpm_enabled")
399
400
 
400
401
  @dpm_enabled.setter
401
- def dpm_enabled(self, value: Optional[pulumi.Input[bool]]):
402
+ def dpm_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
402
403
  pulumi.set(self, "dpm_enabled", value)
403
404
 
404
405
  @property
405
406
  @pulumi.getter(name="dpmThreshold")
406
- def dpm_threshold(self) -> Optional[pulumi.Input[int]]:
407
+ def dpm_threshold(self) -> Optional[pulumi.Input[builtins.int]]:
407
408
  """
408
409
  A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
409
410
  affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
@@ -412,24 +413,24 @@ class ComputeClusterArgs:
412
413
  return pulumi.get(self, "dpm_threshold")
413
414
 
414
415
  @dpm_threshold.setter
415
- def dpm_threshold(self, value: Optional[pulumi.Input[int]]):
416
+ def dpm_threshold(self, value: Optional[pulumi.Input[builtins.int]]):
416
417
  pulumi.set(self, "dpm_threshold", value)
417
418
 
418
419
  @property
419
420
  @pulumi.getter(name="drsAdvancedOptions")
420
- def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
421
+ def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
421
422
  """
422
423
  Advanced configuration options for DRS and DPM.
423
424
  """
424
425
  return pulumi.get(self, "drs_advanced_options")
425
426
 
426
427
  @drs_advanced_options.setter
427
- def drs_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
428
+ def drs_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
428
429
  pulumi.set(self, "drs_advanced_options", value)
429
430
 
430
431
  @property
431
432
  @pulumi.getter(name="drsAutomationLevel")
432
- def drs_automation_level(self) -> Optional[pulumi.Input[str]]:
433
+ def drs_automation_level(self) -> Optional[pulumi.Input[builtins.str]]:
433
434
  """
434
435
  The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
435
436
  fullyAutomated.
@@ -437,48 +438,48 @@ class ComputeClusterArgs:
437
438
  return pulumi.get(self, "drs_automation_level")
438
439
 
439
440
  @drs_automation_level.setter
440
- def drs_automation_level(self, value: Optional[pulumi.Input[str]]):
441
+ def drs_automation_level(self, value: Optional[pulumi.Input[builtins.str]]):
441
442
  pulumi.set(self, "drs_automation_level", value)
442
443
 
443
444
  @property
444
445
  @pulumi.getter(name="drsEnablePredictiveDrs")
445
- def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[bool]]:
446
+ def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[builtins.bool]]:
446
447
  """
447
448
  When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
448
449
  """
449
450
  return pulumi.get(self, "drs_enable_predictive_drs")
450
451
 
451
452
  @drs_enable_predictive_drs.setter
452
- def drs_enable_predictive_drs(self, value: Optional[pulumi.Input[bool]]):
453
+ def drs_enable_predictive_drs(self, value: Optional[pulumi.Input[builtins.bool]]):
453
454
  pulumi.set(self, "drs_enable_predictive_drs", value)
454
455
 
455
456
  @property
456
457
  @pulumi.getter(name="drsEnableVmOverrides")
457
- def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[bool]]:
458
+ def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[builtins.bool]]:
458
459
  """
459
460
  When true, allows individual VM overrides within this cluster to be set.
460
461
  """
461
462
  return pulumi.get(self, "drs_enable_vm_overrides")
462
463
 
463
464
  @drs_enable_vm_overrides.setter
464
- def drs_enable_vm_overrides(self, value: Optional[pulumi.Input[bool]]):
465
+ def drs_enable_vm_overrides(self, value: Optional[pulumi.Input[builtins.bool]]):
465
466
  pulumi.set(self, "drs_enable_vm_overrides", value)
466
467
 
467
468
  @property
468
469
  @pulumi.getter(name="drsEnabled")
469
- def drs_enabled(self) -> Optional[pulumi.Input[bool]]:
470
+ def drs_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
470
471
  """
471
472
  Enable DRS for this cluster.
472
473
  """
473
474
  return pulumi.get(self, "drs_enabled")
474
475
 
475
476
  @drs_enabled.setter
476
- def drs_enabled(self, value: Optional[pulumi.Input[bool]]):
477
+ def drs_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
477
478
  pulumi.set(self, "drs_enabled", value)
478
479
 
479
480
  @property
480
481
  @pulumi.getter(name="drsMigrationThreshold")
481
- def drs_migration_threshold(self) -> Optional[pulumi.Input[int]]:
482
+ def drs_migration_threshold(self) -> Optional[pulumi.Input[builtins.int]]:
482
483
  """
483
484
  A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
484
485
  more imbalance while a higher setting will tolerate less.
@@ -486,24 +487,24 @@ class ComputeClusterArgs:
486
487
  return pulumi.get(self, "drs_migration_threshold")
487
488
 
488
489
  @drs_migration_threshold.setter
489
- def drs_migration_threshold(self, value: Optional[pulumi.Input[int]]):
490
+ def drs_migration_threshold(self, value: Optional[pulumi.Input[builtins.int]]):
490
491
  pulumi.set(self, "drs_migration_threshold", value)
491
492
 
492
493
  @property
493
494
  @pulumi.getter(name="drsScaleDescendantsShares")
494
- def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[str]]:
495
+ def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[builtins.str]]:
495
496
  """
496
497
  Enable scalable shares for all descendants of this cluster.
497
498
  """
498
499
  return pulumi.get(self, "drs_scale_descendants_shares")
499
500
 
500
501
  @drs_scale_descendants_shares.setter
501
- def drs_scale_descendants_shares(self, value: Optional[pulumi.Input[str]]):
502
+ def drs_scale_descendants_shares(self, value: Optional[pulumi.Input[builtins.str]]):
502
503
  pulumi.set(self, "drs_scale_descendants_shares", value)
503
504
 
504
505
  @property
505
506
  @pulumi.getter
506
- def folder(self) -> Optional[pulumi.Input[str]]:
507
+ def folder(self) -> Optional[pulumi.Input[builtins.str]]:
507
508
  """
508
509
  The relative path to a folder to put this cluster in.
509
510
  This is a path relative to the datacenter you are deploying the cluster to.
@@ -515,12 +516,12 @@ class ComputeClusterArgs:
515
516
  return pulumi.get(self, "folder")
516
517
 
517
518
  @folder.setter
518
- def folder(self, value: Optional[pulumi.Input[str]]):
519
+ def folder(self, value: Optional[pulumi.Input[builtins.str]]):
519
520
  pulumi.set(self, "folder", value)
520
521
 
521
522
  @property
522
523
  @pulumi.getter(name="forceEvacuateOnDestroy")
523
- def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[bool]]:
524
+ def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[builtins.bool]]:
524
525
  """
525
526
  Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
526
527
  for testing and is not recommended in normal use.
@@ -528,12 +529,12 @@ class ComputeClusterArgs:
528
529
  return pulumi.get(self, "force_evacuate_on_destroy")
529
530
 
530
531
  @force_evacuate_on_destroy.setter
531
- def force_evacuate_on_destroy(self, value: Optional[pulumi.Input[bool]]):
532
+ def force_evacuate_on_destroy(self, value: Optional[pulumi.Input[builtins.bool]]):
532
533
  pulumi.set(self, "force_evacuate_on_destroy", value)
533
534
 
534
535
  @property
535
536
  @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
536
- def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
537
+ def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
537
538
  """
538
539
  When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
539
540
  failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
@@ -542,12 +543,12 @@ class ComputeClusterArgs:
542
543
  return pulumi.get(self, "ha_admission_control_failover_host_system_ids")
543
544
 
544
545
  @ha_admission_control_failover_host_system_ids.setter
545
- def ha_admission_control_failover_host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
546
+ def ha_admission_control_failover_host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
546
547
  pulumi.set(self, "ha_admission_control_failover_host_system_ids", value)
547
548
 
548
549
  @property
549
550
  @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
550
- def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[int]]:
551
+ def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[builtins.int]]:
551
552
  """
552
553
  The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
553
554
  machine operations. The maximum is one less than the number of hosts in the cluster.
@@ -555,12 +556,12 @@ class ComputeClusterArgs:
555
556
  return pulumi.get(self, "ha_admission_control_host_failure_tolerance")
556
557
 
557
558
  @ha_admission_control_host_failure_tolerance.setter
558
- def ha_admission_control_host_failure_tolerance(self, value: Optional[pulumi.Input[int]]):
559
+ def ha_admission_control_host_failure_tolerance(self, value: Optional[pulumi.Input[builtins.int]]):
559
560
  pulumi.set(self, "ha_admission_control_host_failure_tolerance", value)
560
561
 
561
562
  @property
562
563
  @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
563
- def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[int]]:
564
+ def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[builtins.int]]:
564
565
  """
565
566
  The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
566
567
  warnings only, whereas a value of 100 disables the setting.
@@ -568,12 +569,12 @@ class ComputeClusterArgs:
568
569
  return pulumi.get(self, "ha_admission_control_performance_tolerance")
569
570
 
570
571
  @ha_admission_control_performance_tolerance.setter
571
- def ha_admission_control_performance_tolerance(self, value: Optional[pulumi.Input[int]]):
572
+ def ha_admission_control_performance_tolerance(self, value: Optional[pulumi.Input[builtins.int]]):
572
573
  pulumi.set(self, "ha_admission_control_performance_tolerance", value)
573
574
 
574
575
  @property
575
576
  @pulumi.getter(name="haAdmissionControlPolicy")
576
- def ha_admission_control_policy(self) -> Optional[pulumi.Input[str]]:
577
+ def ha_admission_control_policy(self) -> Optional[pulumi.Input[builtins.str]]:
577
578
  """
578
579
  The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
579
580
  permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
@@ -583,12 +584,12 @@ class ComputeClusterArgs:
583
584
  return pulumi.get(self, "ha_admission_control_policy")
584
585
 
585
586
  @ha_admission_control_policy.setter
586
- def ha_admission_control_policy(self, value: Optional[pulumi.Input[str]]):
587
+ def ha_admission_control_policy(self, value: Optional[pulumi.Input[builtins.str]]):
587
588
  pulumi.set(self, "ha_admission_control_policy", value)
588
589
 
589
590
  @property
590
591
  @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
591
- def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[bool]]:
592
+ def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[builtins.bool]]:
592
593
  """
593
594
  When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
594
595
  subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
@@ -597,12 +598,12 @@ class ComputeClusterArgs:
597
598
  return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")
598
599
 
599
600
  @ha_admission_control_resource_percentage_auto_compute.setter
600
- def ha_admission_control_resource_percentage_auto_compute(self, value: Optional[pulumi.Input[bool]]):
601
+ def ha_admission_control_resource_percentage_auto_compute(self, value: Optional[pulumi.Input[builtins.bool]]):
601
602
  pulumi.set(self, "ha_admission_control_resource_percentage_auto_compute", value)
602
603
 
603
604
  @property
604
605
  @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
605
- def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[int]]:
606
+ def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[builtins.int]]:
606
607
  """
607
608
  When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
608
609
  the cluster to reserve for failover.
@@ -610,12 +611,12 @@ class ComputeClusterArgs:
610
611
  return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")
611
612
 
612
613
  @ha_admission_control_resource_percentage_cpu.setter
613
- def ha_admission_control_resource_percentage_cpu(self, value: Optional[pulumi.Input[int]]):
614
+ def ha_admission_control_resource_percentage_cpu(self, value: Optional[pulumi.Input[builtins.int]]):
614
615
  pulumi.set(self, "ha_admission_control_resource_percentage_cpu", value)
615
616
 
616
617
  @property
617
618
  @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
618
- def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[int]]:
619
+ def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[builtins.int]]:
619
620
  """
620
621
  When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
621
622
  the cluster to reserve for failover.
@@ -623,36 +624,36 @@ class ComputeClusterArgs:
623
624
  return pulumi.get(self, "ha_admission_control_resource_percentage_memory")
624
625
 
625
626
  @ha_admission_control_resource_percentage_memory.setter
626
- def ha_admission_control_resource_percentage_memory(self, value: Optional[pulumi.Input[int]]):
627
+ def ha_admission_control_resource_percentage_memory(self, value: Optional[pulumi.Input[builtins.int]]):
627
628
  pulumi.set(self, "ha_admission_control_resource_percentage_memory", value)
628
629
 
629
630
  @property
630
631
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
631
- def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[int]]:
632
+ def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[builtins.int]]:
632
633
  """
633
634
  When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
634
635
  """
635
636
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")
636
637
 
637
638
  @ha_admission_control_slot_policy_explicit_cpu.setter
638
- def ha_admission_control_slot_policy_explicit_cpu(self, value: Optional[pulumi.Input[int]]):
639
+ def ha_admission_control_slot_policy_explicit_cpu(self, value: Optional[pulumi.Input[builtins.int]]):
639
640
  pulumi.set(self, "ha_admission_control_slot_policy_explicit_cpu", value)
640
641
 
641
642
  @property
642
643
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
643
- def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[int]]:
644
+ def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[builtins.int]]:
644
645
  """
645
646
  When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
646
647
  """
647
648
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")
648
649
 
649
650
  @ha_admission_control_slot_policy_explicit_memory.setter
650
- def ha_admission_control_slot_policy_explicit_memory(self, value: Optional[pulumi.Input[int]]):
651
+ def ha_admission_control_slot_policy_explicit_memory(self, value: Optional[pulumi.Input[builtins.int]]):
651
652
  pulumi.set(self, "ha_admission_control_slot_policy_explicit_memory", value)
652
653
 
653
654
  @property
654
655
  @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
655
- def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[bool]]:
656
+ def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[builtins.bool]]:
656
657
  """
657
658
  When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
658
659
  to CPU and memory slot sizes. The default is to gather an automatic average based on all powered-on virtual machines
@@ -661,24 +662,24 @@ class ComputeClusterArgs:
661
662
  return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")
662
663
 
663
664
  @ha_admission_control_slot_policy_use_explicit_size.setter
664
- def ha_admission_control_slot_policy_use_explicit_size(self, value: Optional[pulumi.Input[bool]]):
665
+ def ha_admission_control_slot_policy_use_explicit_size(self, value: Optional[pulumi.Input[builtins.bool]]):
665
666
  pulumi.set(self, "ha_admission_control_slot_policy_use_explicit_size", value)
666
667
 
667
668
  @property
668
669
  @pulumi.getter(name="haAdvancedOptions")
669
- def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
670
+ def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
670
671
  """
671
672
  Advanced configuration options for vSphere HA.
672
673
  """
673
674
  return pulumi.get(self, "ha_advanced_options")
674
675
 
675
676
  @ha_advanced_options.setter
676
- def ha_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
677
+ def ha_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
677
678
  pulumi.set(self, "ha_advanced_options", value)
678
679
 
679
680
  @property
680
681
  @pulumi.getter(name="haDatastoreApdRecoveryAction")
681
- def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[str]]:
682
+ def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[builtins.str]]:
682
683
  """
683
684
  When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
684
685
  affected datastore clears in the middle of an APD event. Can be one of none or reset.
@@ -686,12 +687,12 @@ class ComputeClusterArgs:
686
687
  return pulumi.get(self, "ha_datastore_apd_recovery_action")
687
688
 
688
689
  @ha_datastore_apd_recovery_action.setter
689
- def ha_datastore_apd_recovery_action(self, value: Optional[pulumi.Input[str]]):
690
+ def ha_datastore_apd_recovery_action(self, value: Optional[pulumi.Input[builtins.str]]):
690
691
  pulumi.set(self, "ha_datastore_apd_recovery_action", value)
691
692
 
692
693
  @property
693
694
  @pulumi.getter(name="haDatastoreApdResponse")
694
- def ha_datastore_apd_response(self) -> Optional[pulumi.Input[str]]:
695
+ def ha_datastore_apd_response(self) -> Optional[pulumi.Input[builtins.str]]:
695
696
  """
696
697
  When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
697
698
  detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
@@ -700,12 +701,12 @@ class ComputeClusterArgs:
700
701
  return pulumi.get(self, "ha_datastore_apd_response")
701
702
 
702
703
  @ha_datastore_apd_response.setter
703
- def ha_datastore_apd_response(self, value: Optional[pulumi.Input[str]]):
704
+ def ha_datastore_apd_response(self, value: Optional[pulumi.Input[builtins.str]]):
704
705
  pulumi.set(self, "ha_datastore_apd_response", value)
705
706
 
706
707
  @property
707
708
  @pulumi.getter(name="haDatastoreApdResponseDelay")
708
- def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[int]]:
709
+ def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[builtins.int]]:
709
710
  """
710
711
  When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
711
712
  the response action defined in ha_datastore_apd_response.
@@ -713,12 +714,12 @@ class ComputeClusterArgs:
713
714
  return pulumi.get(self, "ha_datastore_apd_response_delay")
714
715
 
715
716
  @ha_datastore_apd_response_delay.setter
716
- def ha_datastore_apd_response_delay(self, value: Optional[pulumi.Input[int]]):
717
+ def ha_datastore_apd_response_delay(self, value: Optional[pulumi.Input[builtins.int]]):
717
718
  pulumi.set(self, "ha_datastore_apd_response_delay", value)
718
719
 
719
720
  @property
720
721
  @pulumi.getter(name="haDatastorePdlResponse")
721
- def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[str]]:
722
+ def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[builtins.str]]:
722
723
  """
723
724
  When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
724
725
  detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
@@ -726,24 +727,24 @@ class ComputeClusterArgs:
726
727
  return pulumi.get(self, "ha_datastore_pdl_response")
727
728
 
728
729
  @ha_datastore_pdl_response.setter
729
- def ha_datastore_pdl_response(self, value: Optional[pulumi.Input[str]]):
730
+ def ha_datastore_pdl_response(self, value: Optional[pulumi.Input[builtins.str]]):
730
731
  pulumi.set(self, "ha_datastore_pdl_response", value)
731
732
 
732
733
  @property
733
734
  @pulumi.getter(name="haEnabled")
734
- def ha_enabled(self) -> Optional[pulumi.Input[bool]]:
735
+ def ha_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
735
736
  """
736
737
  Enable vSphere HA for this cluster.
737
738
  """
738
739
  return pulumi.get(self, "ha_enabled")
739
740
 
740
741
  @ha_enabled.setter
741
- def ha_enabled(self, value: Optional[pulumi.Input[bool]]):
742
+ def ha_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
742
743
  pulumi.set(self, "ha_enabled", value)
743
744
 
744
745
  @property
745
746
  @pulumi.getter(name="haHeartbeatDatastoreIds")
746
- def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
747
+ def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
747
748
  """
748
749
  The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
749
750
  ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
@@ -751,12 +752,12 @@ class ComputeClusterArgs:
751
752
  return pulumi.get(self, "ha_heartbeat_datastore_ids")
752
753
 
753
754
  @ha_heartbeat_datastore_ids.setter
754
- def ha_heartbeat_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
755
+ def ha_heartbeat_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
755
756
  pulumi.set(self, "ha_heartbeat_datastore_ids", value)
756
757
 
757
758
  @property
758
759
  @pulumi.getter(name="haHeartbeatDatastorePolicy")
759
- def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[str]]:
760
+ def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[builtins.str]]:
760
761
  """
761
762
  The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
762
763
  allFeasibleDsWithUserPreference.
@@ -764,12 +765,12 @@ class ComputeClusterArgs:
764
765
  return pulumi.get(self, "ha_heartbeat_datastore_policy")
765
766
 
766
767
  @ha_heartbeat_datastore_policy.setter
767
- def ha_heartbeat_datastore_policy(self, value: Optional[pulumi.Input[str]]):
768
+ def ha_heartbeat_datastore_policy(self, value: Optional[pulumi.Input[builtins.str]]):
768
769
  pulumi.set(self, "ha_heartbeat_datastore_policy", value)
769
770
 
770
771
  @property
771
772
  @pulumi.getter(name="haHostIsolationResponse")
772
- def ha_host_isolation_response(self) -> Optional[pulumi.Input[str]]:
773
+ def ha_host_isolation_response(self) -> Optional[pulumi.Input[builtins.str]]:
773
774
  """
774
775
  The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
775
776
  Can be one of none, powerOff, or shutdown.
@@ -777,24 +778,24 @@ class ComputeClusterArgs:
777
778
  return pulumi.get(self, "ha_host_isolation_response")
778
779
 
779
780
  @ha_host_isolation_response.setter
780
- def ha_host_isolation_response(self, value: Optional[pulumi.Input[str]]):
781
+ def ha_host_isolation_response(self, value: Optional[pulumi.Input[builtins.str]]):
781
782
  pulumi.set(self, "ha_host_isolation_response", value)
782
783
 
783
784
  @property
784
785
  @pulumi.getter(name="haHostMonitoring")
785
- def ha_host_monitoring(self) -> Optional[pulumi.Input[str]]:
786
+ def ha_host_monitoring(self) -> Optional[pulumi.Input[builtins.str]]:
786
787
  """
787
788
  Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
788
789
  """
789
790
  return pulumi.get(self, "ha_host_monitoring")
790
791
 
791
792
  @ha_host_monitoring.setter
792
- def ha_host_monitoring(self, value: Optional[pulumi.Input[str]]):
793
+ def ha_host_monitoring(self, value: Optional[pulumi.Input[builtins.str]]):
793
794
  pulumi.set(self, "ha_host_monitoring", value)
794
795
 
795
796
  @property
796
797
  @pulumi.getter(name="haVmComponentProtection")
797
- def ha_vm_component_protection(self) -> Optional[pulumi.Input[str]]:
798
+ def ha_vm_component_protection(self) -> Optional[pulumi.Input[builtins.str]]:
798
799
  """
799
800
  Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
800
801
  failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
@@ -802,12 +803,12 @@ class ComputeClusterArgs:
802
803
  return pulumi.get(self, "ha_vm_component_protection")
803
804
 
804
805
  @ha_vm_component_protection.setter
805
- def ha_vm_component_protection(self, value: Optional[pulumi.Input[str]]):
806
+ def ha_vm_component_protection(self, value: Optional[pulumi.Input[builtins.str]]):
806
807
  pulumi.set(self, "ha_vm_component_protection", value)
807
808
 
808
809
  @property
809
810
  @pulumi.getter(name="haVmDependencyRestartCondition")
810
- def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[str]]:
811
+ def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[builtins.str]]:
811
812
  """
812
813
  The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
813
814
  on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
@@ -815,12 +816,12 @@ class ComputeClusterArgs:
815
816
  return pulumi.get(self, "ha_vm_dependency_restart_condition")
816
817
 
817
818
  @ha_vm_dependency_restart_condition.setter
818
- def ha_vm_dependency_restart_condition(self, value: Optional[pulumi.Input[str]]):
819
+ def ha_vm_dependency_restart_condition(self, value: Optional[pulumi.Input[builtins.str]]):
819
820
  pulumi.set(self, "ha_vm_dependency_restart_condition", value)
820
821
 
821
822
  @property
822
823
  @pulumi.getter(name="haVmFailureInterval")
823
- def ha_vm_failure_interval(self) -> Optional[pulumi.Input[int]]:
824
+ def ha_vm_failure_interval(self) -> Optional[pulumi.Input[builtins.int]]:
824
825
  """
825
826
  If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
826
827
  failed. The value is in seconds.
@@ -828,12 +829,12 @@ class ComputeClusterArgs:
828
829
  return pulumi.get(self, "ha_vm_failure_interval")
829
830
 
830
831
  @ha_vm_failure_interval.setter
831
- def ha_vm_failure_interval(self, value: Optional[pulumi.Input[int]]):
832
+ def ha_vm_failure_interval(self, value: Optional[pulumi.Input[builtins.int]]):
832
833
  pulumi.set(self, "ha_vm_failure_interval", value)
833
834
 
834
835
  @property
835
836
  @pulumi.getter(name="haVmMaximumFailureWindow")
836
- def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[int]]:
837
+ def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[builtins.int]]:
837
838
  """
838
839
  The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
839
840
  attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
@@ -842,36 +843,36 @@ class ComputeClusterArgs:
842
843
  return pulumi.get(self, "ha_vm_maximum_failure_window")
843
844
 
844
845
  @ha_vm_maximum_failure_window.setter
845
- def ha_vm_maximum_failure_window(self, value: Optional[pulumi.Input[int]]):
846
+ def ha_vm_maximum_failure_window(self, value: Optional[pulumi.Input[builtins.int]]):
846
847
  pulumi.set(self, "ha_vm_maximum_failure_window", value)
847
848
 
848
849
  @property
849
850
  @pulumi.getter(name="haVmMaximumResets")
850
- def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[int]]:
851
+ def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[builtins.int]]:
851
852
  """
852
853
  The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
853
854
  """
854
855
  return pulumi.get(self, "ha_vm_maximum_resets")
855
856
 
856
857
  @ha_vm_maximum_resets.setter
857
- def ha_vm_maximum_resets(self, value: Optional[pulumi.Input[int]]):
858
+ def ha_vm_maximum_resets(self, value: Optional[pulumi.Input[builtins.int]]):
858
859
  pulumi.set(self, "ha_vm_maximum_resets", value)
859
860
 
860
861
  @property
861
862
  @pulumi.getter(name="haVmMinimumUptime")
862
- def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[int]]:
863
+ def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[builtins.int]]:
863
864
  """
864
865
  The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
865
866
  """
866
867
  return pulumi.get(self, "ha_vm_minimum_uptime")
867
868
 
868
869
  @ha_vm_minimum_uptime.setter
869
- def ha_vm_minimum_uptime(self, value: Optional[pulumi.Input[int]]):
870
+ def ha_vm_minimum_uptime(self, value: Optional[pulumi.Input[builtins.int]]):
870
871
  pulumi.set(self, "ha_vm_minimum_uptime", value)
871
872
 
872
873
  @property
873
874
  @pulumi.getter(name="haVmMonitoring")
874
- def ha_vm_monitoring(self) -> Optional[pulumi.Input[str]]:
875
+ def ha_vm_monitoring(self) -> Optional[pulumi.Input[builtins.str]]:
875
876
  """
876
877
  The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
877
878
  vmMonitoringOnly, or vmAndAppMonitoring.
@@ -879,24 +880,24 @@ class ComputeClusterArgs:
879
880
  return pulumi.get(self, "ha_vm_monitoring")
880
881
 
881
882
  @ha_vm_monitoring.setter
882
- def ha_vm_monitoring(self, value: Optional[pulumi.Input[str]]):
883
+ def ha_vm_monitoring(self, value: Optional[pulumi.Input[builtins.str]]):
883
884
  pulumi.set(self, "ha_vm_monitoring", value)
884
885
 
885
886
  @property
886
887
  @pulumi.getter(name="haVmRestartAdditionalDelay")
887
- def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[int]]:
888
+ def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[builtins.int]]:
888
889
  """
889
890
  Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
890
891
  """
891
892
  return pulumi.get(self, "ha_vm_restart_additional_delay")
892
893
 
893
894
  @ha_vm_restart_additional_delay.setter
894
- def ha_vm_restart_additional_delay(self, value: Optional[pulumi.Input[int]]):
895
+ def ha_vm_restart_additional_delay(self, value: Optional[pulumi.Input[builtins.int]]):
895
896
  pulumi.set(self, "ha_vm_restart_additional_delay", value)
896
897
 
897
898
  @property
898
899
  @pulumi.getter(name="haVmRestartPriority")
899
- def ha_vm_restart_priority(self) -> Optional[pulumi.Input[str]]:
900
+ def ha_vm_restart_priority(self) -> Optional[pulumi.Input[builtins.str]]:
900
901
  """
901
902
  The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
902
903
  high, or highest.
@@ -904,12 +905,12 @@ class ComputeClusterArgs:
904
905
  return pulumi.get(self, "ha_vm_restart_priority")
905
906
 
906
907
  @ha_vm_restart_priority.setter
907
- def ha_vm_restart_priority(self, value: Optional[pulumi.Input[str]]):
908
+ def ha_vm_restart_priority(self, value: Optional[pulumi.Input[builtins.str]]):
908
909
  pulumi.set(self, "ha_vm_restart_priority", value)
909
910
 
910
911
  @property
911
912
  @pulumi.getter(name="haVmRestartTimeout")
912
- def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[int]]:
913
+ def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[builtins.int]]:
913
914
  """
914
915
  The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
915
916
  proceeding with the next priority.
@@ -917,19 +918,19 @@ class ComputeClusterArgs:
917
918
  return pulumi.get(self, "ha_vm_restart_timeout")
918
919
 
919
920
  @ha_vm_restart_timeout.setter
920
- def ha_vm_restart_timeout(self, value: Optional[pulumi.Input[int]]):
921
+ def ha_vm_restart_timeout(self, value: Optional[pulumi.Input[builtins.int]]):
921
922
  pulumi.set(self, "ha_vm_restart_timeout", value)
922
923
 
923
924
  @property
924
925
  @pulumi.getter(name="hostClusterExitTimeout")
925
- def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[int]]:
926
+ def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[builtins.int]]:
926
927
  """
927
928
  The timeout for each host maintenance mode operation when removing hosts from a cluster.
928
929
  """
929
930
  return pulumi.get(self, "host_cluster_exit_timeout")
930
931
 
931
932
  @host_cluster_exit_timeout.setter
932
- def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[int]]):
933
+ def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[builtins.int]]):
933
934
  pulumi.set(self, "host_cluster_exit_timeout", value)
934
935
 
935
936
  @property
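As a hedged sketch of how the VM Component Protection, heartbeat-datastore, and VM-monitoring arguments described in this block might be combined (datastore names are placeholders; the string values are the ones listed in the docstrings above):

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")

# Preferred heartbeat datastores (names are placeholders).
datastores = [
    vsphere.get_datastore(name=name, datacenter_id=datacenter.id)
    for name in ["datastore-01", "datastore-02"]
]

cluster = vsphere.ComputeCluster(
    "protected-cluster",
    name="protected-cluster",
    datacenter_id=datacenter.id,
    ha_enabled=True,
    # Pin HA heartbeating to the selected datastores.
    ha_heartbeat_datastore_policy="userSelectedDs",
    ha_heartbeat_datastore_ids=[ds.id for ds in datastores],
    # React to storage failures via VM Component Protection.
    ha_vm_component_protection="enabled",
    ha_datastore_apd_response="restartConservative",
    ha_datastore_apd_response_delay=180,
    ha_datastore_pdl_response="restartAggressive",
    # Restart VMs whose guest heartbeats stop.
    ha_vm_monitoring="vmMonitoringOnly",
    ha_vm_failure_interval=30,
    ha_vm_maximum_resets=3,
)
```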
@@ -946,67 +947,67 @@ class ComputeClusterArgs:
946
947
 
947
948
  @property
948
949
  @pulumi.getter(name="hostManaged")
949
- def host_managed(self) -> Optional[pulumi.Input[bool]]:
950
+ def host_managed(self) -> Optional[pulumi.Input[builtins.bool]]:
950
951
  """
951
952
  Must be set if cluster enrollment is managed from host resource.
952
953
  """
953
954
  return pulumi.get(self, "host_managed")
954
955
 
955
956
  @host_managed.setter
956
- def host_managed(self, value: Optional[pulumi.Input[bool]]):
957
+ def host_managed(self, value: Optional[pulumi.Input[builtins.bool]]):
957
958
  pulumi.set(self, "host_managed", value)
958
959
 
959
960
  @property
960
961
  @pulumi.getter(name="hostSystemIds")
961
- def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
962
+ def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
962
963
  """
963
964
  The managed object IDs of the hosts to put in the cluster.
964
965
  """
965
966
  return pulumi.get(self, "host_system_ids")
966
967
 
967
968
  @host_system_ids.setter
968
- def host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
969
+ def host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
969
970
  pulumi.set(self, "host_system_ids", value)
970
971
 
971
972
  @property
972
973
  @pulumi.getter
973
- def name(self) -> Optional[pulumi.Input[str]]:
974
+ def name(self) -> Optional[pulumi.Input[builtins.str]]:
974
975
  """
975
976
  The name of the cluster.
976
977
  """
977
978
  return pulumi.get(self, "name")
978
979
 
979
980
  @name.setter
980
- def name(self, value: Optional[pulumi.Input[str]]):
981
+ def name(self, value: Optional[pulumi.Input[builtins.str]]):
981
982
  pulumi.set(self, "name", value)
982
983
 
983
984
  @property
984
985
  @pulumi.getter(name="proactiveHaAutomationLevel")
985
- def proactive_ha_automation_level(self) -> Optional[pulumi.Input[str]]:
986
+ def proactive_ha_automation_level(self) -> Optional[pulumi.Input[builtins.str]]:
986
987
  """
987
988
  The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
988
989
  """
989
990
  return pulumi.get(self, "proactive_ha_automation_level")
990
991
 
991
992
  @proactive_ha_automation_level.setter
992
- def proactive_ha_automation_level(self, value: Optional[pulumi.Input[str]]):
993
+ def proactive_ha_automation_level(self, value: Optional[pulumi.Input[builtins.str]]):
993
994
  pulumi.set(self, "proactive_ha_automation_level", value)
994
995
 
995
996
  @property
996
997
  @pulumi.getter(name="proactiveHaEnabled")
997
- def proactive_ha_enabled(self) -> Optional[pulumi.Input[bool]]:
998
+ def proactive_ha_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
998
999
  """
999
1000
  Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
1000
1001
  """
1001
1002
  return pulumi.get(self, "proactive_ha_enabled")
1002
1003
 
1003
1004
  @proactive_ha_enabled.setter
1004
- def proactive_ha_enabled(self, value: Optional[pulumi.Input[bool]]):
1005
+ def proactive_ha_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1005
1006
  pulumi.set(self, "proactive_ha_enabled", value)
1006
1007
 
1007
1008
  @property
1008
1009
  @pulumi.getter(name="proactiveHaModerateRemediation")
1009
- def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[str]]:
1010
+ def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[builtins.str]]:
1010
1011
  """
1011
1012
  The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
1012
1013
  this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
@@ -1014,24 +1015,24 @@ class ComputeClusterArgs:
1014
1015
  return pulumi.get(self, "proactive_ha_moderate_remediation")
1015
1016
 
1016
1017
  @proactive_ha_moderate_remediation.setter
1017
- def proactive_ha_moderate_remediation(self, value: Optional[pulumi.Input[str]]):
1018
+ def proactive_ha_moderate_remediation(self, value: Optional[pulumi.Input[builtins.str]]):
1018
1019
  pulumi.set(self, "proactive_ha_moderate_remediation", value)
1019
1020
 
1020
1021
  @property
1021
1022
  @pulumi.getter(name="proactiveHaProviderIds")
1022
- def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
1023
+ def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
1023
1024
  """
1024
1025
  The list of IDs for health update providers configured for this cluster.
1025
1026
  """
1026
1027
  return pulumi.get(self, "proactive_ha_provider_ids")
1027
1028
 
1028
1029
  @proactive_ha_provider_ids.setter
1029
- def proactive_ha_provider_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
1030
+ def proactive_ha_provider_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
1030
1031
  pulumi.set(self, "proactive_ha_provider_ids", value)
1031
1032
 
1032
1033
  @property
1033
1034
  @pulumi.getter(name="proactiveHaSevereRemediation")
1034
- def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[str]]:
1035
+ def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[builtins.str]]:
1035
1036
  """
1036
1037
  The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
1037
1038
  cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
@@ -1039,43 +1040,43 @@ class ComputeClusterArgs:
1039
1040
  return pulumi.get(self, "proactive_ha_severe_remediation")
1040
1041
 
1041
1042
  @proactive_ha_severe_remediation.setter
1042
- def proactive_ha_severe_remediation(self, value: Optional[pulumi.Input[str]]):
1043
+ def proactive_ha_severe_remediation(self, value: Optional[pulumi.Input[builtins.str]]):
1043
1044
  pulumi.set(self, "proactive_ha_severe_remediation", value)
1044
1045
 
1045
1046
  @property
1046
1047
  @pulumi.getter
1047
- def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
1048
+ def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
1048
1049
  """
1049
1050
  The IDs of any tags to attach to this resource.
1050
1051
  """
1051
1052
  return pulumi.get(self, "tags")
1052
1053
 
1053
1054
  @tags.setter
1054
- def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
1055
+ def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
1055
1056
  pulumi.set(self, "tags", value)
1056
1057
 
1057
1058
  @property
1058
1059
  @pulumi.getter(name="vsanCompressionEnabled")
1059
- def vsan_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
1060
+ def vsan_compression_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
1060
1061
  """
1061
1062
  Whether the vSAN compression service is enabled for the cluster.
1062
1063
  """
1063
1064
  return pulumi.get(self, "vsan_compression_enabled")
1064
1065
 
1065
1066
  @vsan_compression_enabled.setter
1066
- def vsan_compression_enabled(self, value: Optional[pulumi.Input[bool]]):
1067
+ def vsan_compression_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1067
1068
  pulumi.set(self, "vsan_compression_enabled", value)
1068
1069
 
1069
1070
  @property
1070
1071
  @pulumi.getter(name="vsanDedupEnabled")
1071
- def vsan_dedup_enabled(self) -> Optional[pulumi.Input[bool]]:
1072
+ def vsan_dedup_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
1072
1073
  """
1073
1074
  Whether the vSAN deduplication service is enabled for the cluster.
1074
1075
  """
1075
1076
  return pulumi.get(self, "vsan_dedup_enabled")
1076
1077
 
1077
1078
  @vsan_dedup_enabled.setter
1078
- def vsan_dedup_enabled(self, value: Optional[pulumi.Input[bool]]):
1079
+ def vsan_dedup_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1079
1080
  pulumi.set(self, "vsan_dedup_enabled", value)
1080
1081
 
1081
1082
  @property
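One possible way to combine host enrollment (`host_system_ids`) with the `tags` argument shown above, assuming the provider's Tag/TagCategory resources; the host, category, and tag names are illustrative only.

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")

# Hosts that will form the cluster (names are placeholders).
hosts = [
    vsphere.get_host(name=name, datacenter_id=datacenter.id)
    for name in ["esxi-01.example.com", "esxi-02.example.com", "esxi-03.example.com"]
]

# A tag category/tag pair that can be attached to clusters.
category = vsphere.TagCategory(
    "environment",
    name="environment",
    cardinality="SINGLE",
    associable_types=["ClusterComputeResource"],
)
tag = vsphere.Tag("production", name="production", category_id=category.id)

cluster = vsphere.ComputeCluster(
    "prod-cluster",
    name="prod-cluster",
    datacenter_id=datacenter.id,
    # Enroll the hosts directly from the cluster resource.
    host_system_ids=[h.id for h in hosts],
    tags=[tag.id],
)
```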
@@ -1092,50 +1093,50 @@ class ComputeClusterArgs:
1092
1093
 
1093
1094
  @property
1094
1095
  @pulumi.getter(name="vsanDitEncryptionEnabled")
1095
- def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
1096
+ def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
1096
1097
  """
1097
1098
  Whether the vSAN data-in-transit encryption is enabled for the cluster.
1098
1099
  """
1099
1100
  return pulumi.get(self, "vsan_dit_encryption_enabled")
1100
1101
 
1101
1102
  @vsan_dit_encryption_enabled.setter
1102
- def vsan_dit_encryption_enabled(self, value: Optional[pulumi.Input[bool]]):
1103
+ def vsan_dit_encryption_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1103
1104
  pulumi.set(self, "vsan_dit_encryption_enabled", value)
1104
1105
 
1105
1106
  @property
1106
1107
  @pulumi.getter(name="vsanDitRekeyInterval")
1107
- def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[int]]:
1108
+ def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[builtins.int]]:
1108
1109
  """
1109
1110
  When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
1110
1111
  """
1111
1112
  return pulumi.get(self, "vsan_dit_rekey_interval")
1112
1113
 
1113
1114
  @vsan_dit_rekey_interval.setter
1114
- def vsan_dit_rekey_interval(self, value: Optional[pulumi.Input[int]]):
1115
+ def vsan_dit_rekey_interval(self, value: Optional[pulumi.Input[builtins.int]]):
1115
1116
  pulumi.set(self, "vsan_dit_rekey_interval", value)
1116
1117
 
1117
1118
  @property
1118
1119
  @pulumi.getter(name="vsanEnabled")
1119
- def vsan_enabled(self) -> Optional[pulumi.Input[bool]]:
1120
+ def vsan_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
1120
1121
  """
1121
1122
  Whether the vSAN service is enabled for the cluster.
1122
1123
  """
1123
1124
  return pulumi.get(self, "vsan_enabled")
1124
1125
 
1125
1126
  @vsan_enabled.setter
1126
- def vsan_enabled(self, value: Optional[pulumi.Input[bool]]):
1127
+ def vsan_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1127
1128
  pulumi.set(self, "vsan_enabled", value)
1128
1129
 
1129
1130
  @property
1130
1131
  @pulumi.getter(name="vsanEsaEnabled")
1131
- def vsan_esa_enabled(self) -> Optional[pulumi.Input[bool]]:
1132
+ def vsan_esa_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
1132
1133
  """
1133
1134
  Whether the vSAN ESA service is enabled for the cluster.
1134
1135
  """
1135
1136
  return pulumi.get(self, "vsan_esa_enabled")
1136
1137
 
1137
1138
  @vsan_esa_enabled.setter
1138
- def vsan_esa_enabled(self, value: Optional[pulumi.Input[bool]]):
1139
+ def vsan_esa_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1139
1140
  pulumi.set(self, "vsan_esa_enabled", value)
1140
1141
 
1141
1142
  @property
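A minimal vSAN sketch using the space-efficiency and data-in-transit encryption arguments above; the values are illustrative, not defaults, and deduplication generally requires compression to be enabled as well.

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")

cluster = vsphere.ComputeCluster(
    "vsan-cluster",
    name="vsan-cluster",
    datacenter_id=datacenter.id,
    vsan_enabled=True,
    # Space efficiency services.
    vsan_compression_enabled=True,
    vsan_dedup_enabled=True,
    # Encrypt vSAN traffic between hosts and rekey every 24 hours (1440 minutes).
    vsan_dit_encryption_enabled=True,
    vsan_dit_rekey_interval=1440,
)
```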
@@ -1152,38 +1153,38 @@ class ComputeClusterArgs:
1152
1153
 
1153
1154
  @property
1154
1155
  @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
1155
- def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
1156
+ def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
1156
1157
  """
1157
1158
  Whether the vSAN network diagnostic mode is enabled for the cluster.
1158
1159
  """
1159
1160
  return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")
1160
1161
 
1161
1162
  @vsan_network_diagnostic_mode_enabled.setter
1162
- def vsan_network_diagnostic_mode_enabled(self, value: Optional[pulumi.Input[bool]]):
1163
+ def vsan_network_diagnostic_mode_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1163
1164
  pulumi.set(self, "vsan_network_diagnostic_mode_enabled", value)
1164
1165
 
1165
1166
  @property
1166
1167
  @pulumi.getter(name="vsanPerformanceEnabled")
1167
- def vsan_performance_enabled(self) -> Optional[pulumi.Input[bool]]:
1168
+ def vsan_performance_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
1168
1169
  """
1169
1170
  Whether the vSAN performance service is enabled for the cluster.
1170
1171
  """
1171
1172
  return pulumi.get(self, "vsan_performance_enabled")
1172
1173
 
1173
1174
  @vsan_performance_enabled.setter
1174
- def vsan_performance_enabled(self, value: Optional[pulumi.Input[bool]]):
1175
+ def vsan_performance_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1175
1176
  pulumi.set(self, "vsan_performance_enabled", value)
1176
1177
 
1177
1178
  @property
1178
1179
  @pulumi.getter(name="vsanRemoteDatastoreIds")
1179
- def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
1180
+ def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
1180
1181
  """
1181
1182
  The managed object IDs of the vSAN datastore to be mounted on the cluster.
1182
1183
  """
1183
1184
  return pulumi.get(self, "vsan_remote_datastore_ids")
1184
1185
 
1185
1186
  @vsan_remote_datastore_ids.setter
1186
- def vsan_remote_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
1187
+ def vsan_remote_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
1187
1188
  pulumi.set(self, "vsan_remote_datastore_ids", value)
1188
1189
 
1189
1190
  @property
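A sketch of the performance/diagnostic services together with a remote vSAN datastore mount (HCI Mesh); the datastore name is a placeholder and the example assumes that datastore is already served by another vSAN cluster.

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")

# An existing vSAN datastore exported by another cluster (name is a placeholder).
remote_ds = vsphere.get_datastore(name="remote-vsan-datastore", datacenter_id=datacenter.id)

cluster = vsphere.ComputeCluster(
    "client-cluster",
    name="client-cluster",
    datacenter_id=datacenter.id,
    vsan_enabled=True,
    # Metrics collection plus additional network diagnostics.
    vsan_performance_enabled=True,
    vsan_network_diagnostic_mode_enabled=True,
    # Mount the remote vSAN datastore on this cluster.
    vsan_remote_datastore_ids=[remote_ds.id],
)
```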
@@ -1200,225 +1201,225 @@ class ComputeClusterArgs:
1200
1201
 
1201
1202
  @property
1202
1203
  @pulumi.getter(name="vsanUnmapEnabled")
1203
- def vsan_unmap_enabled(self) -> Optional[pulumi.Input[bool]]:
1204
+ def vsan_unmap_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
1204
1205
  """
1205
1206
  Whether the vSAN unmap service is enabled for the cluster.
1206
1207
  """
1207
1208
  return pulumi.get(self, "vsan_unmap_enabled")
1208
1209
 
1209
1210
  @vsan_unmap_enabled.setter
1210
- def vsan_unmap_enabled(self, value: Optional[pulumi.Input[bool]]):
1211
+ def vsan_unmap_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1211
1212
  pulumi.set(self, "vsan_unmap_enabled", value)
1212
1213
 
1213
1214
  @property
1214
1215
  @pulumi.getter(name="vsanVerboseModeEnabled")
1215
- def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
1216
+ def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
1216
1217
  """
1217
1218
  Whether the vSAN verbose mode is enabled for the cluster.
1218
1219
  """
1219
1220
  return pulumi.get(self, "vsan_verbose_mode_enabled")
1220
1221
 
1221
1222
  @vsan_verbose_mode_enabled.setter
1222
- def vsan_verbose_mode_enabled(self, value: Optional[pulumi.Input[bool]]):
1223
+ def vsan_verbose_mode_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1223
1224
  pulumi.set(self, "vsan_verbose_mode_enabled", value)
1224
1225
 
1225
1226
 
1226
1227
  @pulumi.input_type
1227
1228
  class _ComputeClusterState:
1228
1229
  def __init__(__self__, *,
1229
- custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
1230
- datacenter_id: Optional[pulumi.Input[str]] = None,
1231
- dpm_automation_level: Optional[pulumi.Input[str]] = None,
1232
- dpm_enabled: Optional[pulumi.Input[bool]] = None,
1233
- dpm_threshold: Optional[pulumi.Input[int]] = None,
1234
- drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
1235
- drs_automation_level: Optional[pulumi.Input[str]] = None,
1236
- drs_enable_predictive_drs: Optional[pulumi.Input[bool]] = None,
1237
- drs_enable_vm_overrides: Optional[pulumi.Input[bool]] = None,
1238
- drs_enabled: Optional[pulumi.Input[bool]] = None,
1239
- drs_migration_threshold: Optional[pulumi.Input[int]] = None,
1240
- drs_scale_descendants_shares: Optional[pulumi.Input[str]] = None,
1241
- folder: Optional[pulumi.Input[str]] = None,
1242
- force_evacuate_on_destroy: Optional[pulumi.Input[bool]] = None,
1243
- ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1244
- ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[int]] = None,
1245
- ha_admission_control_performance_tolerance: Optional[pulumi.Input[int]] = None,
1246
- ha_admission_control_policy: Optional[pulumi.Input[str]] = None,
1247
- ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[bool]] = None,
1248
- ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[int]] = None,
1249
- ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[int]] = None,
1250
- ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[int]] = None,
1251
- ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[int]] = None,
1252
- ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[bool]] = None,
1253
- ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
1254
- ha_datastore_apd_recovery_action: Optional[pulumi.Input[str]] = None,
1255
- ha_datastore_apd_response: Optional[pulumi.Input[str]] = None,
1256
- ha_datastore_apd_response_delay: Optional[pulumi.Input[int]] = None,
1257
- ha_datastore_pdl_response: Optional[pulumi.Input[str]] = None,
1258
- ha_enabled: Optional[pulumi.Input[bool]] = None,
1259
- ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1260
- ha_heartbeat_datastore_policy: Optional[pulumi.Input[str]] = None,
1261
- ha_host_isolation_response: Optional[pulumi.Input[str]] = None,
1262
- ha_host_monitoring: Optional[pulumi.Input[str]] = None,
1263
- ha_vm_component_protection: Optional[pulumi.Input[str]] = None,
1264
- ha_vm_dependency_restart_condition: Optional[pulumi.Input[str]] = None,
1265
- ha_vm_failure_interval: Optional[pulumi.Input[int]] = None,
1266
- ha_vm_maximum_failure_window: Optional[pulumi.Input[int]] = None,
1267
- ha_vm_maximum_resets: Optional[pulumi.Input[int]] = None,
1268
- ha_vm_minimum_uptime: Optional[pulumi.Input[int]] = None,
1269
- ha_vm_monitoring: Optional[pulumi.Input[str]] = None,
1270
- ha_vm_restart_additional_delay: Optional[pulumi.Input[int]] = None,
1271
- ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
1272
- ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
1273
- host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
1230
+ custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1231
+ datacenter_id: Optional[pulumi.Input[builtins.str]] = None,
1232
+ dpm_automation_level: Optional[pulumi.Input[builtins.str]] = None,
1233
+ dpm_enabled: Optional[pulumi.Input[builtins.bool]] = None,
1234
+ dpm_threshold: Optional[pulumi.Input[builtins.int]] = None,
1235
+ drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1236
+ drs_automation_level: Optional[pulumi.Input[builtins.str]] = None,
1237
+ drs_enable_predictive_drs: Optional[pulumi.Input[builtins.bool]] = None,
1238
+ drs_enable_vm_overrides: Optional[pulumi.Input[builtins.bool]] = None,
1239
+ drs_enabled: Optional[pulumi.Input[builtins.bool]] = None,
1240
+ drs_migration_threshold: Optional[pulumi.Input[builtins.int]] = None,
1241
+ drs_scale_descendants_shares: Optional[pulumi.Input[builtins.str]] = None,
1242
+ folder: Optional[pulumi.Input[builtins.str]] = None,
1243
+ force_evacuate_on_destroy: Optional[pulumi.Input[builtins.bool]] = None,
1244
+ ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
1245
+ ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[builtins.int]] = None,
1246
+ ha_admission_control_performance_tolerance: Optional[pulumi.Input[builtins.int]] = None,
1247
+ ha_admission_control_policy: Optional[pulumi.Input[builtins.str]] = None,
1248
+ ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[builtins.bool]] = None,
1249
+ ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[builtins.int]] = None,
1250
+ ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[builtins.int]] = None,
1251
+ ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[builtins.int]] = None,
1252
+ ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[builtins.int]] = None,
1253
+ ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[builtins.bool]] = None,
1254
+ ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
1255
+ ha_datastore_apd_recovery_action: Optional[pulumi.Input[builtins.str]] = None,
1256
+ ha_datastore_apd_response: Optional[pulumi.Input[builtins.str]] = None,
1257
+ ha_datastore_apd_response_delay: Optional[pulumi.Input[builtins.int]] = None,
1258
+ ha_datastore_pdl_response: Optional[pulumi.Input[builtins.str]] = None,
1259
+ ha_enabled: Optional[pulumi.Input[builtins.bool]] = None,
1260
+ ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
1261
+ ha_heartbeat_datastore_policy: Optional[pulumi.Input[builtins.str]] = None,
1262
+ ha_host_isolation_response: Optional[pulumi.Input[builtins.str]] = None,
1263
+ ha_host_monitoring: Optional[pulumi.Input[builtins.str]] = None,
1264
+ ha_vm_component_protection: Optional[pulumi.Input[builtins.str]] = None,
1265
+ ha_vm_dependency_restart_condition: Optional[pulumi.Input[builtins.str]] = None,
1266
+ ha_vm_failure_interval: Optional[pulumi.Input[builtins.int]] = None,
1267
+ ha_vm_maximum_failure_window: Optional[pulumi.Input[builtins.int]] = None,
1268
+ ha_vm_maximum_resets: Optional[pulumi.Input[builtins.int]] = None,
1269
+ ha_vm_minimum_uptime: Optional[pulumi.Input[builtins.int]] = None,
1270
+ ha_vm_monitoring: Optional[pulumi.Input[builtins.str]] = None,
1271
+ ha_vm_restart_additional_delay: Optional[pulumi.Input[builtins.int]] = None,
1272
+ ha_vm_restart_priority: Optional[pulumi.Input[builtins.str]] = None,
1273
+ ha_vm_restart_timeout: Optional[pulumi.Input[builtins.int]] = None,
1274
+ host_cluster_exit_timeout: Optional[pulumi.Input[builtins.int]] = None,
1274
1275
  host_image: Optional[pulumi.Input['ComputeClusterHostImageArgs']] = None,
1275
- host_managed: Optional[pulumi.Input[bool]] = None,
1276
- host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1277
- name: Optional[pulumi.Input[str]] = None,
1278
- proactive_ha_automation_level: Optional[pulumi.Input[str]] = None,
1279
- proactive_ha_enabled: Optional[pulumi.Input[bool]] = None,
1280
- proactive_ha_moderate_remediation: Optional[pulumi.Input[str]] = None,
1281
- proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1282
- proactive_ha_severe_remediation: Optional[pulumi.Input[str]] = None,
1283
- resource_pool_id: Optional[pulumi.Input[str]] = None,
1284
- tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1285
- vsan_compression_enabled: Optional[pulumi.Input[bool]] = None,
1286
- vsan_dedup_enabled: Optional[pulumi.Input[bool]] = None,
1276
+ host_managed: Optional[pulumi.Input[builtins.bool]] = None,
1277
+ host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
1278
+ name: Optional[pulumi.Input[builtins.str]] = None,
1279
+ proactive_ha_automation_level: Optional[pulumi.Input[builtins.str]] = None,
1280
+ proactive_ha_enabled: Optional[pulumi.Input[builtins.bool]] = None,
1281
+ proactive_ha_moderate_remediation: Optional[pulumi.Input[builtins.str]] = None,
1282
+ proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
1283
+ proactive_ha_severe_remediation: Optional[pulumi.Input[builtins.str]] = None,
1284
+ resource_pool_id: Optional[pulumi.Input[builtins.str]] = None,
1285
+ tags: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
1286
+ vsan_compression_enabled: Optional[pulumi.Input[builtins.bool]] = None,
1287
+ vsan_dedup_enabled: Optional[pulumi.Input[builtins.bool]] = None,
1287
1288
  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]] = None,
1288
- vsan_dit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
1289
- vsan_dit_rekey_interval: Optional[pulumi.Input[int]] = None,
1290
- vsan_enabled: Optional[pulumi.Input[bool]] = None,
1291
- vsan_esa_enabled: Optional[pulumi.Input[bool]] = None,
1289
+ vsan_dit_encryption_enabled: Optional[pulumi.Input[builtins.bool]] = None,
1290
+ vsan_dit_rekey_interval: Optional[pulumi.Input[builtins.int]] = None,
1291
+ vsan_enabled: Optional[pulumi.Input[builtins.bool]] = None,
1292
+ vsan_esa_enabled: Optional[pulumi.Input[builtins.bool]] = None,
1292
1293
  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]] = None,
1293
- vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[bool]] = None,
1294
- vsan_performance_enabled: Optional[pulumi.Input[bool]] = None,
1295
- vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1294
+ vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[builtins.bool]] = None,
1295
+ vsan_performance_enabled: Optional[pulumi.Input[builtins.bool]] = None,
1296
+ vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
1296
1297
  vsan_stretched_cluster: Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']] = None,
1297
- vsan_unmap_enabled: Optional[pulumi.Input[bool]] = None,
1298
- vsan_verbose_mode_enabled: Optional[pulumi.Input[bool]] = None):
1298
+ vsan_unmap_enabled: Optional[pulumi.Input[builtins.bool]] = None,
1299
+ vsan_verbose_mode_enabled: Optional[pulumi.Input[builtins.bool]] = None):
1299
1300
  """
1300
1301
  Input properties used for looking up and filtering ComputeCluster resources.
1301
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: A map of custom attribute ids to attribute
1302
+ :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] custom_attributes: A map of custom attribute ids to attribute
1302
1303
  value strings to set for the datastore cluster.
1303
1304
 
1304
1305
  > **NOTE:** Custom attributes are unsupported on direct ESXi connections
1305
1306
  and require vCenter Server.
1306
- :param pulumi.Input[str] datacenter_id: The managed object ID of
1307
+ :param pulumi.Input[builtins.str] datacenter_id: The managed object ID of
1307
1308
  the datacenter to create the cluster in. Forces a new resource if changed.
1308
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
1309
- :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
1309
+ :param pulumi.Input[builtins.str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
1310
+ :param pulumi.Input[builtins.bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
1310
1311
  machines in the cluster. Requires that DRS be enabled.
1311
- :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
1312
+ :param pulumi.Input[builtins.int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
1312
1313
  affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
1313
1314
  setting.
1314
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
1315
- :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
1315
+ :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
1316
+ :param pulumi.Input[builtins.str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
1316
1317
  fullyAutomated.
1317
- :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
1318
- :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
1319
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
1320
- :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
1318
+ :param pulumi.Input[builtins.bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
1319
+ :param pulumi.Input[builtins.bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
1320
+ :param pulumi.Input[builtins.bool] drs_enabled: Enable DRS for this cluster.
1321
+ :param pulumi.Input[builtins.int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
1321
1322
  more imbalance while a higher setting will tolerate less.
1322
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
1323
- :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
1323
+ :param pulumi.Input[builtins.str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
1324
+ :param pulumi.Input[builtins.str] folder: The relative path to a folder to put this cluster in.
1324
1325
  This is a path relative to the datacenter you are deploying the cluster to.
1325
1326
  Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
1326
1327
  The provider will place a cluster named `compute-cluster-test` in a
1327
1328
  host folder located at `/dc1/host/foo/bar`, with the final inventory path
1328
1329
  being `/dc1/host/foo/bar/compute-cluster-test`.
1329
- :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
1330
+ :param pulumi.Input[builtins.bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
1330
1331
  for testing and is not recommended in normal use.
1331
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
1332
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
1332
1333
  failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
1333
1334
  will ignore the host when making recommendations.
1334
- :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
1335
+ :param pulumi.Input[builtins.int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
1335
1336
  machine operations. The maximum is one less than the number of hosts in the cluster.
1336
- :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
1337
+ :param pulumi.Input[builtins.int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
  warnings only, whereas a value of 100 disables the setting.
- :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+ :param pulumi.Input[builtins.str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
  permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
  slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
  issues.
- :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+ :param pulumi.Input[builtins.bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
  subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
  from the total amount of resources in the cluster. Disable to supply user-defined values.
- :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+ :param pulumi.Input[builtins.int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
  the cluster to reserve for failover.
- :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+ :param pulumi.Input[builtins.int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
  the cluster to reserve for failover.
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
- :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+ :param pulumi.Input[builtins.int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
+ :param pulumi.Input[builtins.int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
+ :param pulumi.Input[builtins.bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
  to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
  currently in the cluster.
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
- :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
+ :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
+ :param pulumi.Input[builtins.str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
  affected datastore clears in the middle of an APD event. Can be one of none or reset.
- :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ :param pulumi.Input[builtins.str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
  detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
  restartAggressive.
- :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
+ :param pulumi.Input[builtins.int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
  the response action defined in ha_datastore_apd_response.
- :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ :param pulumi.Input[builtins.str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
  detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+ :param pulumi.Input[builtins.bool] ha_enabled: Enable vSphere HA for this cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
  ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+ :param pulumi.Input[builtins.str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
  allFeasibleDsWithUserPreference.
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+ :param pulumi.Input[builtins.str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
  Can be one of none, powerOff, or shutdown.
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
+ :param pulumi.Input[builtins.str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
+ :param pulumi.Input[builtins.str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
  failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
+ :param pulumi.Input[builtins.str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
  on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
- :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
+ :param pulumi.Input[builtins.int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
  failed. The value is in seconds.
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
+ :param pulumi.Input[builtins.int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
  attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
  time is allotted.
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
+ :param pulumi.Input[builtins.int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
+ :param pulumi.Input[builtins.int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
+ :param pulumi.Input[builtins.str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
  vmMonitoringOnly, or vmAndAppMonitoring.
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
+ :param pulumi.Input[builtins.int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
+ :param pulumi.Input[builtins.str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
  high, or highest.
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
+ :param pulumi.Input[builtins.int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
  proceeding with the next priority.
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
+ :param pulumi.Input[builtins.int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
  :param pulumi.Input['ComputeClusterHostImageArgs'] host_image: Details about the host image which should be applied to the cluster.
- :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
- :param pulumi.Input[str] name: The name of the cluster.
- :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
- :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
- :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+ :param pulumi.Input[builtins.bool] host_managed: Must be set if cluster enrollment is managed from host resource.
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
+ :param pulumi.Input[builtins.str] name: The name of the cluster.
+ :param pulumi.Input[builtins.str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
+ :param pulumi.Input[builtins.bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
+ :param pulumi.Input[builtins.str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
  this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
- :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
+ :param pulumi.Input[builtins.str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
  cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
- :param pulumi.Input[str] resource_pool_id: The managed object ID of the primary
+ :param pulumi.Input[builtins.str] resource_pool_id: The managed object ID of the primary
  resource pool for this cluster. This can be passed directly to the
  `resource_pool_id`
  attribute of the
  `VirtualMachine` resource.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
- :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
- :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] tags: The IDs of any tags to attach to this resource.
+ :param pulumi.Input[builtins.bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
+ :param pulumi.Input[builtins.bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
  :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
- :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
- :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
- :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
- :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
+ :param pulumi.Input[builtins.bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
+ :param pulumi.Input[builtins.int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
+ :param pulumi.Input[builtins.bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
+ :param pulumi.Input[builtins.bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
  :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: The configuration for vSAN fault domains.
- :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
- :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
+ :param pulumi.Input[builtins.bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
+ :param pulumi.Input[builtins.bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
  :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: The configuration for stretched cluster.
- :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
- :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
+ :param pulumi.Input[builtins.bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
+ :param pulumi.Input[builtins.bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
  """
  if custom_attributes is not None:
  pulumi.set(__self__, "custom_attributes", custom_attributes)
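
The block above documents the full argument surface of the ComputeCluster resource, so a brief usage sketch may help put it in context. This is a minimal illustration only, not code from the package: the datacenter name, host IDs, and chosen values are placeholders.

    import pulumi
    import pulumi_vsphere as vsphere

    # Look up an existing datacenter; the name is a placeholder.
    dc = vsphere.get_datacenter(name="dc-01")

    # A cluster with DRS fully automated and vSphere HA using the
    # resourcePercentage admission control policy described above.
    cluster = vsphere.ComputeCluster(
        "example-cluster",
        datacenter_id=dc.id,
        host_system_ids=["host-1001", "host-1002"],  # illustrative host MOIDs
        drs_enabled=True,
        drs_automation_level="fullyAutomated",
        ha_enabled=True,
        ha_admission_control_policy="resourcePercentage",
        ha_admission_control_resource_percentage_auto_compute=False,
        ha_admission_control_resource_percentage_cpu=25,
        ha_admission_control_resource_percentage_memory=25,
    )

    # The cluster's resource_pool_id can be handed to a VirtualMachine, as noted above.
    pulumi.export("cluster_resource_pool_id", cluster.resource_pool_id)
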
@@ -1563,7 +1564,7 @@ class _ComputeClusterState:

  @property
  @pulumi.getter(name="customAttributes")
- def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
+ def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
  """
  A map of custom attribute ids to attribute
  value strings to set for the datastore cluster.
@@ -1574,12 +1575,12 @@ class _ComputeClusterState:
  return pulumi.get(self, "custom_attributes")

  @custom_attributes.setter
- def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
+ def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
  pulumi.set(self, "custom_attributes", value)

  @property
  @pulumi.getter(name="datacenterId")
- def datacenter_id(self) -> Optional[pulumi.Input[str]]:
+ def datacenter_id(self) -> Optional[pulumi.Input[builtins.str]]:
  """
  The managed object ID of
  the datacenter to create the cluster in. Forces a new resource if changed.
@@ -1587,24 +1588,24 @@ class _ComputeClusterState:
  return pulumi.get(self, "datacenter_id")

  @datacenter_id.setter
- def datacenter_id(self, value: Optional[pulumi.Input[str]]):
+ def datacenter_id(self, value: Optional[pulumi.Input[builtins.str]]):
  pulumi.set(self, "datacenter_id", value)

  @property
  @pulumi.getter(name="dpmAutomationLevel")
- def dpm_automation_level(self) -> Optional[pulumi.Input[str]]:
+ def dpm_automation_level(self) -> Optional[pulumi.Input[builtins.str]]:
  """
  The automation level for host power operations in this cluster. Can be one of manual or automated.
  """
  return pulumi.get(self, "dpm_automation_level")

  @dpm_automation_level.setter
- def dpm_automation_level(self, value: Optional[pulumi.Input[str]]):
+ def dpm_automation_level(self, value: Optional[pulumi.Input[builtins.str]]):
  pulumi.set(self, "dpm_automation_level", value)

  @property
  @pulumi.getter(name="dpmEnabled")
- def dpm_enabled(self) -> Optional[pulumi.Input[bool]]:
+ def dpm_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
  """
  Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
  machines in the cluster. Requires that DRS be enabled.
@@ -1612,12 +1613,12 @@ class _ComputeClusterState:
1612
1613
  return pulumi.get(self, "dpm_enabled")
1613
1614
 
1614
1615
  @dpm_enabled.setter
1615
- def dpm_enabled(self, value: Optional[pulumi.Input[bool]]):
1616
+ def dpm_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1616
1617
  pulumi.set(self, "dpm_enabled", value)
1617
1618
 
1618
1619
  @property
1619
1620
  @pulumi.getter(name="dpmThreshold")
1620
- def dpm_threshold(self) -> Optional[pulumi.Input[int]]:
1621
+ def dpm_threshold(self) -> Optional[pulumi.Input[builtins.int]]:
1621
1622
  """
1622
1623
  A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
1623
1624
  affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
@@ -1626,24 +1627,24 @@ class _ComputeClusterState:
1626
1627
  return pulumi.get(self, "dpm_threshold")
1627
1628
 
1628
1629
  @dpm_threshold.setter
1629
- def dpm_threshold(self, value: Optional[pulumi.Input[int]]):
1630
+ def dpm_threshold(self, value: Optional[pulumi.Input[builtins.int]]):
1630
1631
  pulumi.set(self, "dpm_threshold", value)
1631
1632
 
1632
1633
  @property
1633
1634
  @pulumi.getter(name="drsAdvancedOptions")
1634
- def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
1635
+ def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
1635
1636
  """
1636
1637
  Advanced configuration options for DRS and DPM.
1637
1638
  """
1638
1639
  return pulumi.get(self, "drs_advanced_options")
1639
1640
 
1640
1641
  @drs_advanced_options.setter
1641
- def drs_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
1642
+ def drs_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
1642
1643
  pulumi.set(self, "drs_advanced_options", value)
1643
1644
 
1644
1645
  @property
1645
1646
  @pulumi.getter(name="drsAutomationLevel")
1646
- def drs_automation_level(self) -> Optional[pulumi.Input[str]]:
1647
+ def drs_automation_level(self) -> Optional[pulumi.Input[builtins.str]]:
1647
1648
  """
1648
1649
  The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
1649
1650
  fullyAutomated.
@@ -1651,48 +1652,48 @@ class _ComputeClusterState:
1651
1652
  return pulumi.get(self, "drs_automation_level")
1652
1653
 
1653
1654
  @drs_automation_level.setter
1654
- def drs_automation_level(self, value: Optional[pulumi.Input[str]]):
1655
+ def drs_automation_level(self, value: Optional[pulumi.Input[builtins.str]]):
1655
1656
  pulumi.set(self, "drs_automation_level", value)
1656
1657
 
1657
1658
  @property
1658
1659
  @pulumi.getter(name="drsEnablePredictiveDrs")
1659
- def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[bool]]:
1660
+ def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[builtins.bool]]:
1660
1661
  """
1661
1662
  When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
1662
1663
  """
1663
1664
  return pulumi.get(self, "drs_enable_predictive_drs")
1664
1665
 
1665
1666
  @drs_enable_predictive_drs.setter
1666
- def drs_enable_predictive_drs(self, value: Optional[pulumi.Input[bool]]):
1667
+ def drs_enable_predictive_drs(self, value: Optional[pulumi.Input[builtins.bool]]):
1667
1668
  pulumi.set(self, "drs_enable_predictive_drs", value)
1668
1669
 
1669
1670
  @property
1670
1671
  @pulumi.getter(name="drsEnableVmOverrides")
1671
- def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[bool]]:
1672
+ def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[builtins.bool]]:
1672
1673
  """
1673
1674
  When true, allows individual VM overrides within this cluster to be set.
1674
1675
  """
1675
1676
  return pulumi.get(self, "drs_enable_vm_overrides")
1676
1677
 
1677
1678
  @drs_enable_vm_overrides.setter
1678
- def drs_enable_vm_overrides(self, value: Optional[pulumi.Input[bool]]):
1679
+ def drs_enable_vm_overrides(self, value: Optional[pulumi.Input[builtins.bool]]):
1679
1680
  pulumi.set(self, "drs_enable_vm_overrides", value)
1680
1681
 
1681
1682
  @property
1682
1683
  @pulumi.getter(name="drsEnabled")
1683
- def drs_enabled(self) -> Optional[pulumi.Input[bool]]:
1684
+ def drs_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
1684
1685
  """
1685
1686
  Enable DRS for this cluster.
1686
1687
  """
1687
1688
  return pulumi.get(self, "drs_enabled")
1688
1689
 
1689
1690
  @drs_enabled.setter
1690
- def drs_enabled(self, value: Optional[pulumi.Input[bool]]):
1691
+ def drs_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1691
1692
  pulumi.set(self, "drs_enabled", value)
1692
1693
 
1693
1694
  @property
1694
1695
  @pulumi.getter(name="drsMigrationThreshold")
1695
- def drs_migration_threshold(self) -> Optional[pulumi.Input[int]]:
1696
+ def drs_migration_threshold(self) -> Optional[pulumi.Input[builtins.int]]:
1696
1697
  """
1697
1698
  A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
1698
1699
  more imbalance while a higher setting will tolerate less.
@@ -1700,24 +1701,24 @@ class _ComputeClusterState:
1700
1701
  return pulumi.get(self, "drs_migration_threshold")
1701
1702
 
1702
1703
  @drs_migration_threshold.setter
1703
- def drs_migration_threshold(self, value: Optional[pulumi.Input[int]]):
1704
+ def drs_migration_threshold(self, value: Optional[pulumi.Input[builtins.int]]):
1704
1705
  pulumi.set(self, "drs_migration_threshold", value)
1705
1706
 
1706
1707
  @property
1707
1708
  @pulumi.getter(name="drsScaleDescendantsShares")
1708
- def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[str]]:
1709
+ def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[builtins.str]]:
1709
1710
  """
1710
1711
  Enable scalable shares for all descendants of this cluster.
1711
1712
  """
1712
1713
  return pulumi.get(self, "drs_scale_descendants_shares")
1713
1714
 
1714
1715
  @drs_scale_descendants_shares.setter
1715
- def drs_scale_descendants_shares(self, value: Optional[pulumi.Input[str]]):
1716
+ def drs_scale_descendants_shares(self, value: Optional[pulumi.Input[builtins.str]]):
1716
1717
  pulumi.set(self, "drs_scale_descendants_shares", value)
1717
1718
 
1718
1719
  @property
1719
1720
  @pulumi.getter
1720
- def folder(self) -> Optional[pulumi.Input[str]]:
1721
+ def folder(self) -> Optional[pulumi.Input[builtins.str]]:
1721
1722
  """
1722
1723
  The relative path to a folder to put this cluster in.
1723
1724
  This is a path relative to the datacenter you are deploying the cluster to.
@@ -1729,12 +1730,12 @@ class _ComputeClusterState:
1729
1730
  return pulumi.get(self, "folder")
1730
1731
 
1731
1732
  @folder.setter
1732
- def folder(self, value: Optional[pulumi.Input[str]]):
1733
+ def folder(self, value: Optional[pulumi.Input[builtins.str]]):
1733
1734
  pulumi.set(self, "folder", value)
1734
1735
 
1735
1736
  @property
1736
1737
  @pulumi.getter(name="forceEvacuateOnDestroy")
1737
- def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[bool]]:
1738
+ def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[builtins.bool]]:
1738
1739
  """
1739
1740
  Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
1740
1741
  for testing and is not recommended in normal use.
@@ -1742,12 +1743,12 @@ class _ComputeClusterState:
1742
1743
  return pulumi.get(self, "force_evacuate_on_destroy")
1743
1744
 
1744
1745
  @force_evacuate_on_destroy.setter
1745
- def force_evacuate_on_destroy(self, value: Optional[pulumi.Input[bool]]):
1746
+ def force_evacuate_on_destroy(self, value: Optional[pulumi.Input[builtins.bool]]):
1746
1747
  pulumi.set(self, "force_evacuate_on_destroy", value)
1747
1748
 
1748
1749
  @property
1749
1750
  @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
1750
- def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
1751
+ def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
1751
1752
  """
1752
1753
  When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
1753
1754
  failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
@@ -1756,12 +1757,12 @@ class _ComputeClusterState:
1756
1757
  return pulumi.get(self, "ha_admission_control_failover_host_system_ids")
1757
1758
 
1758
1759
  @ha_admission_control_failover_host_system_ids.setter
1759
- def ha_admission_control_failover_host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
1760
+ def ha_admission_control_failover_host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
1760
1761
  pulumi.set(self, "ha_admission_control_failover_host_system_ids", value)
1761
1762
 
1762
1763
  @property
1763
1764
  @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
1764
- def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[int]]:
1765
+ def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[builtins.int]]:
1765
1766
  """
1766
1767
  The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
1767
1768
  machine operations. The maximum is one less than the number of hosts in the cluster.
@@ -1769,12 +1770,12 @@ class _ComputeClusterState:
1769
1770
  return pulumi.get(self, "ha_admission_control_host_failure_tolerance")
1770
1771
 
1771
1772
  @ha_admission_control_host_failure_tolerance.setter
1772
- def ha_admission_control_host_failure_tolerance(self, value: Optional[pulumi.Input[int]]):
1773
+ def ha_admission_control_host_failure_tolerance(self, value: Optional[pulumi.Input[builtins.int]]):
1773
1774
  pulumi.set(self, "ha_admission_control_host_failure_tolerance", value)
1774
1775
 
1775
1776
  @property
1776
1777
  @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
1777
- def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[int]]:
1778
+ def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[builtins.int]]:
1778
1779
  """
1779
1780
  The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
1780
1781
  warnings only, whereas a value of 100 disables the setting.
@@ -1782,12 +1783,12 @@ class _ComputeClusterState:
1782
1783
  return pulumi.get(self, "ha_admission_control_performance_tolerance")
1783
1784
 
1784
1785
  @ha_admission_control_performance_tolerance.setter
1785
- def ha_admission_control_performance_tolerance(self, value: Optional[pulumi.Input[int]]):
1786
+ def ha_admission_control_performance_tolerance(self, value: Optional[pulumi.Input[builtins.int]]):
1786
1787
  pulumi.set(self, "ha_admission_control_performance_tolerance", value)
1787
1788
 
1788
1789
  @property
1789
1790
  @pulumi.getter(name="haAdmissionControlPolicy")
1790
- def ha_admission_control_policy(self) -> Optional[pulumi.Input[str]]:
1791
+ def ha_admission_control_policy(self) -> Optional[pulumi.Input[builtins.str]]:
1791
1792
  """
1792
1793
  The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
1793
1794
  permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
@@ -1797,12 +1798,12 @@ class _ComputeClusterState:
1797
1798
  return pulumi.get(self, "ha_admission_control_policy")
1798
1799
 
1799
1800
  @ha_admission_control_policy.setter
1800
- def ha_admission_control_policy(self, value: Optional[pulumi.Input[str]]):
1801
+ def ha_admission_control_policy(self, value: Optional[pulumi.Input[builtins.str]]):
1801
1802
  pulumi.set(self, "ha_admission_control_policy", value)
1802
1803
 
1803
1804
  @property
1804
1805
  @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
1805
- def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[bool]]:
1806
+ def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[builtins.bool]]:
1806
1807
  """
1807
1808
  When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
1808
1809
  subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
@@ -1811,12 +1812,12 @@ class _ComputeClusterState:
1811
1812
  return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")
1812
1813
 
1813
1814
  @ha_admission_control_resource_percentage_auto_compute.setter
1814
- def ha_admission_control_resource_percentage_auto_compute(self, value: Optional[pulumi.Input[bool]]):
1815
+ def ha_admission_control_resource_percentage_auto_compute(self, value: Optional[pulumi.Input[builtins.bool]]):
1815
1816
  pulumi.set(self, "ha_admission_control_resource_percentage_auto_compute", value)
1816
1817
 
1817
1818
  @property
1818
1819
  @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
1819
- def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[int]]:
1820
+ def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[builtins.int]]:
1820
1821
  """
1821
1822
  When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
1822
1823
  the cluster to reserve for failover.
@@ -1824,12 +1825,12 @@ class _ComputeClusterState:
1824
1825
  return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")
1825
1826
 
1826
1827
  @ha_admission_control_resource_percentage_cpu.setter
1827
- def ha_admission_control_resource_percentage_cpu(self, value: Optional[pulumi.Input[int]]):
1828
+ def ha_admission_control_resource_percentage_cpu(self, value: Optional[pulumi.Input[builtins.int]]):
1828
1829
  pulumi.set(self, "ha_admission_control_resource_percentage_cpu", value)
1829
1830
 
1830
1831
  @property
1831
1832
  @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
1832
- def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[int]]:
1833
+ def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[builtins.int]]:
1833
1834
  """
1834
1835
  When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
1835
1836
  the cluster to reserve for failover.
@@ -1837,36 +1838,36 @@ class _ComputeClusterState:
1837
1838
  return pulumi.get(self, "ha_admission_control_resource_percentage_memory")
1838
1839
 
1839
1840
  @ha_admission_control_resource_percentage_memory.setter
1840
- def ha_admission_control_resource_percentage_memory(self, value: Optional[pulumi.Input[int]]):
1841
+ def ha_admission_control_resource_percentage_memory(self, value: Optional[pulumi.Input[builtins.int]]):
1841
1842
  pulumi.set(self, "ha_admission_control_resource_percentage_memory", value)
1842
1843
 
1843
1844
  @property
1844
1845
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
1845
- def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[int]]:
1846
+ def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[builtins.int]]:
1846
1847
  """
1847
1848
  When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
1848
1849
  """
1849
1850
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")
1850
1851
 
1851
1852
  @ha_admission_control_slot_policy_explicit_cpu.setter
1852
- def ha_admission_control_slot_policy_explicit_cpu(self, value: Optional[pulumi.Input[int]]):
1853
+ def ha_admission_control_slot_policy_explicit_cpu(self, value: Optional[pulumi.Input[builtins.int]]):
1853
1854
  pulumi.set(self, "ha_admission_control_slot_policy_explicit_cpu", value)
1854
1855
 
1855
1856
  @property
1856
1857
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
1857
- def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[int]]:
1858
+ def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[builtins.int]]:
1858
1859
  """
1859
1860
  When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
1860
1861
  """
1861
1862
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")
1862
1863
 
1863
1864
  @ha_admission_control_slot_policy_explicit_memory.setter
1864
- def ha_admission_control_slot_policy_explicit_memory(self, value: Optional[pulumi.Input[int]]):
1865
+ def ha_admission_control_slot_policy_explicit_memory(self, value: Optional[pulumi.Input[builtins.int]]):
1865
1866
  pulumi.set(self, "ha_admission_control_slot_policy_explicit_memory", value)
1866
1867
 
1867
1868
  @property
1868
1869
  @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
1869
- def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[bool]]:
1870
+ def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[builtins.bool]]:
1870
1871
  """
1871
1872
  When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
1872
1873
  to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
@@ -1875,24 +1876,24 @@ class _ComputeClusterState:
1875
1876
  return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")
1876
1877
 
1877
1878
  @ha_admission_control_slot_policy_use_explicit_size.setter
1878
- def ha_admission_control_slot_policy_use_explicit_size(self, value: Optional[pulumi.Input[bool]]):
1879
+ def ha_admission_control_slot_policy_use_explicit_size(self, value: Optional[pulumi.Input[builtins.bool]]):
1879
1880
  pulumi.set(self, "ha_admission_control_slot_policy_use_explicit_size", value)
1880
1881
 
1881
1882
  @property
1882
1883
  @pulumi.getter(name="haAdvancedOptions")
1883
- def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
1884
+ def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]:
1884
1885
  """
1885
1886
  Advanced configuration options for vSphere HA.
1886
1887
  """
1887
1888
  return pulumi.get(self, "ha_advanced_options")
1888
1889
 
1889
1890
  @ha_advanced_options.setter
1890
- def ha_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
1891
+ def ha_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]]):
1891
1892
  pulumi.set(self, "ha_advanced_options", value)
1892
1893
 
1893
1894
  @property
1894
1895
  @pulumi.getter(name="haDatastoreApdRecoveryAction")
1895
- def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[str]]:
1896
+ def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[builtins.str]]:
1896
1897
  """
1897
1898
  When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
1898
1899
  affected datastore clears in the middle of an APD event. Can be one of none or reset.
@@ -1900,12 +1901,12 @@ class _ComputeClusterState:
1900
1901
  return pulumi.get(self, "ha_datastore_apd_recovery_action")
1901
1902
 
1902
1903
  @ha_datastore_apd_recovery_action.setter
1903
- def ha_datastore_apd_recovery_action(self, value: Optional[pulumi.Input[str]]):
1904
+ def ha_datastore_apd_recovery_action(self, value: Optional[pulumi.Input[builtins.str]]):
1904
1905
  pulumi.set(self, "ha_datastore_apd_recovery_action", value)
1905
1906
 
1906
1907
  @property
1907
1908
  @pulumi.getter(name="haDatastoreApdResponse")
1908
- def ha_datastore_apd_response(self) -> Optional[pulumi.Input[str]]:
1909
+ def ha_datastore_apd_response(self) -> Optional[pulumi.Input[builtins.str]]:
1909
1910
  """
1910
1911
  When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
1911
1912
  detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
@@ -1914,12 +1915,12 @@ class _ComputeClusterState:
1914
1915
  return pulumi.get(self, "ha_datastore_apd_response")
1915
1916
 
1916
1917
  @ha_datastore_apd_response.setter
1917
- def ha_datastore_apd_response(self, value: Optional[pulumi.Input[str]]):
1918
+ def ha_datastore_apd_response(self, value: Optional[pulumi.Input[builtins.str]]):
1918
1919
  pulumi.set(self, "ha_datastore_apd_response", value)
1919
1920
 
1920
1921
  @property
1921
1922
  @pulumi.getter(name="haDatastoreApdResponseDelay")
1922
- def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[int]]:
1923
+ def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[builtins.int]]:
1923
1924
  """
1924
1925
  When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
1925
1926
  the response action defined in ha_datastore_apd_response.
@@ -1927,12 +1928,12 @@ class _ComputeClusterState:
1927
1928
  return pulumi.get(self, "ha_datastore_apd_response_delay")
1928
1929
 
1929
1930
  @ha_datastore_apd_response_delay.setter
1930
- def ha_datastore_apd_response_delay(self, value: Optional[pulumi.Input[int]]):
1931
+ def ha_datastore_apd_response_delay(self, value: Optional[pulumi.Input[builtins.int]]):
1931
1932
  pulumi.set(self, "ha_datastore_apd_response_delay", value)
1932
1933
 
1933
1934
  @property
1934
1935
  @pulumi.getter(name="haDatastorePdlResponse")
1935
- def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[str]]:
1936
+ def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[builtins.str]]:
1936
1937
  """
1937
1938
  When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
1938
1939
  detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
@@ -1940,24 +1941,24 @@ class _ComputeClusterState:
1940
1941
  return pulumi.get(self, "ha_datastore_pdl_response")
1941
1942
 
1942
1943
  @ha_datastore_pdl_response.setter
1943
- def ha_datastore_pdl_response(self, value: Optional[pulumi.Input[str]]):
1944
+ def ha_datastore_pdl_response(self, value: Optional[pulumi.Input[builtins.str]]):
1944
1945
  pulumi.set(self, "ha_datastore_pdl_response", value)
1945
1946
 
1946
1947
  @property
1947
1948
  @pulumi.getter(name="haEnabled")
1948
- def ha_enabled(self) -> Optional[pulumi.Input[bool]]:
1949
+ def ha_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
1949
1950
  """
1950
1951
  Enable vSphere HA for this cluster.
1951
1952
  """
1952
1953
  return pulumi.get(self, "ha_enabled")
1953
1954
 
1954
1955
  @ha_enabled.setter
1955
- def ha_enabled(self, value: Optional[pulumi.Input[bool]]):
1956
+ def ha_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
1956
1957
  pulumi.set(self, "ha_enabled", value)
1957
1958
 
1958
1959
  @property
1959
1960
  @pulumi.getter(name="haHeartbeatDatastoreIds")
1960
- def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
1961
+ def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
1961
1962
  """
1962
1963
  The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
1963
1964
  ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
@@ -1965,12 +1966,12 @@ class _ComputeClusterState:
1965
1966
  return pulumi.get(self, "ha_heartbeat_datastore_ids")
1966
1967
 
1967
1968
  @ha_heartbeat_datastore_ids.setter
1968
- def ha_heartbeat_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
1969
+ def ha_heartbeat_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
1969
1970
  pulumi.set(self, "ha_heartbeat_datastore_ids", value)
1970
1971
 
1971
1972
  @property
1972
1973
  @pulumi.getter(name="haHeartbeatDatastorePolicy")
1973
- def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[str]]:
1974
+ def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[builtins.str]]:
1974
1975
  """
1975
1976
  The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
1976
1977
  allFeasibleDsWithUserPreference.
@@ -1978,12 +1979,12 @@ class _ComputeClusterState:
1978
1979
  return pulumi.get(self, "ha_heartbeat_datastore_policy")
1979
1980
 
1980
1981
  @ha_heartbeat_datastore_policy.setter
1981
- def ha_heartbeat_datastore_policy(self, value: Optional[pulumi.Input[str]]):
1982
+ def ha_heartbeat_datastore_policy(self, value: Optional[pulumi.Input[builtins.str]]):
1982
1983
  pulumi.set(self, "ha_heartbeat_datastore_policy", value)
1983
1984
 
1984
1985
  @property
1985
1986
  @pulumi.getter(name="haHostIsolationResponse")
1986
- def ha_host_isolation_response(self) -> Optional[pulumi.Input[str]]:
1987
+ def ha_host_isolation_response(self) -> Optional[pulumi.Input[builtins.str]]:
1987
1988
  """
1988
1989
  The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
1989
1990
  Can be one of none, powerOff, or shutdown.
@@ -1991,24 +1992,24 @@ class _ComputeClusterState:
1991
1992
  return pulumi.get(self, "ha_host_isolation_response")
1992
1993
 
1993
1994
  @ha_host_isolation_response.setter
1994
- def ha_host_isolation_response(self, value: Optional[pulumi.Input[str]]):
1995
+ def ha_host_isolation_response(self, value: Optional[pulumi.Input[builtins.str]]):
1995
1996
  pulumi.set(self, "ha_host_isolation_response", value)
1996
1997
 
1997
1998
  @property
1998
1999
  @pulumi.getter(name="haHostMonitoring")
1999
- def ha_host_monitoring(self) -> Optional[pulumi.Input[str]]:
2000
+ def ha_host_monitoring(self) -> Optional[pulumi.Input[builtins.str]]:
2000
2001
  """
2001
2002
  Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
2002
2003
  """
2003
2004
  return pulumi.get(self, "ha_host_monitoring")
2004
2005
 
2005
2006
  @ha_host_monitoring.setter
2006
- def ha_host_monitoring(self, value: Optional[pulumi.Input[str]]):
2007
+ def ha_host_monitoring(self, value: Optional[pulumi.Input[builtins.str]]):
2007
2008
  pulumi.set(self, "ha_host_monitoring", value)
2008
2009
 
2009
2010
  @property
2010
2011
  @pulumi.getter(name="haVmComponentProtection")
2011
- def ha_vm_component_protection(self) -> Optional[pulumi.Input[str]]:
2012
+ def ha_vm_component_protection(self) -> Optional[pulumi.Input[builtins.str]]:
2012
2013
  """
2013
2014
  Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
2014
2015
  failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
@@ -2016,12 +2017,12 @@ class _ComputeClusterState:
2016
2017
  return pulumi.get(self, "ha_vm_component_protection")
2017
2018
 
2018
2019
  @ha_vm_component_protection.setter
2019
- def ha_vm_component_protection(self, value: Optional[pulumi.Input[str]]):
2020
+ def ha_vm_component_protection(self, value: Optional[pulumi.Input[builtins.str]]):
2020
2021
  pulumi.set(self, "ha_vm_component_protection", value)
2021
2022
 
2022
2023
  @property
2023
2024
  @pulumi.getter(name="haVmDependencyRestartCondition")
2024
- def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[str]]:
2025
+ def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[builtins.str]]:
2025
2026
  """
2026
2027
  The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
2027
2028
  on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
@@ -2029,12 +2030,12 @@ class _ComputeClusterState:
2029
2030
  return pulumi.get(self, "ha_vm_dependency_restart_condition")
2030
2031
 
2031
2032
  @ha_vm_dependency_restart_condition.setter
2032
- def ha_vm_dependency_restart_condition(self, value: Optional[pulumi.Input[str]]):
2033
+ def ha_vm_dependency_restart_condition(self, value: Optional[pulumi.Input[builtins.str]]):
2033
2034
  pulumi.set(self, "ha_vm_dependency_restart_condition", value)
2034
2035
 
2035
2036
  @property
2036
2037
  @pulumi.getter(name="haVmFailureInterval")
2037
- def ha_vm_failure_interval(self) -> Optional[pulumi.Input[int]]:
2038
+ def ha_vm_failure_interval(self) -> Optional[pulumi.Input[builtins.int]]:
2038
2039
  """
2039
2040
  If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
2040
2041
  failed. The value is in seconds.
@@ -2042,12 +2043,12 @@ class _ComputeClusterState:
2042
2043
  return pulumi.get(self, "ha_vm_failure_interval")
2043
2044
 
2044
2045
  @ha_vm_failure_interval.setter
2045
- def ha_vm_failure_interval(self, value: Optional[pulumi.Input[int]]):
2046
+ def ha_vm_failure_interval(self, value: Optional[pulumi.Input[builtins.int]]):
2046
2047
  pulumi.set(self, "ha_vm_failure_interval", value)
2047
2048
 
2048
2049
  @property
2049
2050
  @pulumi.getter(name="haVmMaximumFailureWindow")
2050
- def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[int]]:
2051
+ def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[builtins.int]]:
2051
2052
  """
2052
2053
  The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
2053
2054
  attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
@@ -2056,36 +2057,36 @@ class _ComputeClusterState:
2056
2057
  return pulumi.get(self, "ha_vm_maximum_failure_window")
2057
2058
 
2058
2059
  @ha_vm_maximum_failure_window.setter
2059
- def ha_vm_maximum_failure_window(self, value: Optional[pulumi.Input[int]]):
2060
+ def ha_vm_maximum_failure_window(self, value: Optional[pulumi.Input[builtins.int]]):
2060
2061
  pulumi.set(self, "ha_vm_maximum_failure_window", value)
2061
2062
 
2062
2063
  @property
2063
2064
  @pulumi.getter(name="haVmMaximumResets")
2064
- def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[int]]:
2065
+ def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[builtins.int]]:
2065
2066
  """
2066
2067
  The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
2067
2068
  """
2068
2069
  return pulumi.get(self, "ha_vm_maximum_resets")
2069
2070
 
2070
2071
  @ha_vm_maximum_resets.setter
2071
- def ha_vm_maximum_resets(self, value: Optional[pulumi.Input[int]]):
2072
+ def ha_vm_maximum_resets(self, value: Optional[pulumi.Input[builtins.int]]):
2072
2073
  pulumi.set(self, "ha_vm_maximum_resets", value)
2073
2074
 
2074
2075
  @property
2075
2076
  @pulumi.getter(name="haVmMinimumUptime")
2076
- def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[int]]:
2077
+ def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[builtins.int]]:
2077
2078
  """
2078
2079
  The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
2079
2080
  """
2080
2081
  return pulumi.get(self, "ha_vm_minimum_uptime")
2081
2082
 
2082
2083
  @ha_vm_minimum_uptime.setter
2083
- def ha_vm_minimum_uptime(self, value: Optional[pulumi.Input[int]]):
2084
+ def ha_vm_minimum_uptime(self, value: Optional[pulumi.Input[builtins.int]]):
2084
2085
  pulumi.set(self, "ha_vm_minimum_uptime", value)
2085
2086
 
2086
2087
  @property
2087
2088
  @pulumi.getter(name="haVmMonitoring")
2088
- def ha_vm_monitoring(self) -> Optional[pulumi.Input[str]]:
2089
+ def ha_vm_monitoring(self) -> Optional[pulumi.Input[builtins.str]]:
2089
2090
  """
2090
2091
  The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
2091
2092
  vmMonitoringOnly, or vmAndAppMonitoring.
@@ -2093,24 +2094,24 @@ class _ComputeClusterState:
2093
2094
  return pulumi.get(self, "ha_vm_monitoring")
2094
2095
 
2095
2096
  @ha_vm_monitoring.setter
2096
- def ha_vm_monitoring(self, value: Optional[pulumi.Input[str]]):
2097
+ def ha_vm_monitoring(self, value: Optional[pulumi.Input[builtins.str]]):
2097
2098
  pulumi.set(self, "ha_vm_monitoring", value)
2098
2099
 
2099
2100
  @property
2100
2101
  @pulumi.getter(name="haVmRestartAdditionalDelay")
2101
- def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[int]]:
2102
+ def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[builtins.int]]:
2102
2103
  """
2103
2104
  Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
2104
2105
  """
2105
2106
  return pulumi.get(self, "ha_vm_restart_additional_delay")
2106
2107
 
2107
2108
  @ha_vm_restart_additional_delay.setter
2108
- def ha_vm_restart_additional_delay(self, value: Optional[pulumi.Input[int]]):
2109
+ def ha_vm_restart_additional_delay(self, value: Optional[pulumi.Input[builtins.int]]):
2109
2110
  pulumi.set(self, "ha_vm_restart_additional_delay", value)
2110
2111
 
2111
2112
  @property
2112
2113
  @pulumi.getter(name="haVmRestartPriority")
2113
- def ha_vm_restart_priority(self) -> Optional[pulumi.Input[str]]:
2114
+ def ha_vm_restart_priority(self) -> Optional[pulumi.Input[builtins.str]]:
2114
2115
  """
2115
2116
  The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
2116
2117
  high, or highest.
@@ -2118,12 +2119,12 @@ class _ComputeClusterState:
2118
2119
  return pulumi.get(self, "ha_vm_restart_priority")
2119
2120
 
2120
2121
  @ha_vm_restart_priority.setter
2121
- def ha_vm_restart_priority(self, value: Optional[pulumi.Input[str]]):
2122
+ def ha_vm_restart_priority(self, value: Optional[pulumi.Input[builtins.str]]):
2122
2123
  pulumi.set(self, "ha_vm_restart_priority", value)
2123
2124
 
2124
2125
  @property
2125
2126
  @pulumi.getter(name="haVmRestartTimeout")
2126
- def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[int]]:
2127
+ def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[builtins.int]]:
2127
2128
  """
2128
2129
  The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
2129
2130
  proceeding with the next priority.
@@ -2131,19 +2132,19 @@ class _ComputeClusterState:
2131
2132
  return pulumi.get(self, "ha_vm_restart_timeout")
2132
2133
 
2133
2134
  @ha_vm_restart_timeout.setter
2134
- def ha_vm_restart_timeout(self, value: Optional[pulumi.Input[int]]):
2135
+ def ha_vm_restart_timeout(self, value: Optional[pulumi.Input[builtins.int]]):
2135
2136
  pulumi.set(self, "ha_vm_restart_timeout", value)
2136
2137
 
2137
2138
  @property
2138
2139
  @pulumi.getter(name="hostClusterExitTimeout")
2139
- def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[int]]:
2140
+ def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[builtins.int]]:
2140
2141
  """
2141
2142
  The timeout for each host maintenance mode operation when removing hosts from a cluster.
2142
2143
  """
2143
2144
  return pulumi.get(self, "host_cluster_exit_timeout")
2144
2145
 
2145
2146
  @host_cluster_exit_timeout.setter
2146
- def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[int]]):
2147
+ def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[builtins.int]]):
2147
2148
  pulumi.set(self, "host_cluster_exit_timeout", value)
2148
2149
 
2149
2150
  @property
@@ -2160,67 +2161,67 @@ class _ComputeClusterState:
2160
2161
 
2161
2162
  @property
2162
2163
  @pulumi.getter(name="hostManaged")
2163
- def host_managed(self) -> Optional[pulumi.Input[bool]]:
2164
+ def host_managed(self) -> Optional[pulumi.Input[builtins.bool]]:
2164
2165
  """
2165
2166
  Must be set if cluster enrollment is managed from host resource.
2166
2167
  """
2167
2168
  return pulumi.get(self, "host_managed")
2168
2169
 
2169
2170
  @host_managed.setter
2170
- def host_managed(self, value: Optional[pulumi.Input[bool]]):
2171
+ def host_managed(self, value: Optional[pulumi.Input[builtins.bool]]):
2171
2172
  pulumi.set(self, "host_managed", value)
2172
2173
 
2173
2174
  @property
2174
2175
  @pulumi.getter(name="hostSystemIds")
2175
- def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
2176
+ def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
2176
2177
  """
2177
2178
  The managed object IDs of the hosts to put in the cluster.
2178
2179
  """
2179
2180
  return pulumi.get(self, "host_system_ids")
2180
2181
 
2181
2182
  @host_system_ids.setter
2182
- def host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
2183
+ def host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
2183
2184
  pulumi.set(self, "host_system_ids", value)
2184
2185
 
2185
2186
  @property
2186
2187
  @pulumi.getter
2187
- def name(self) -> Optional[pulumi.Input[str]]:
2188
+ def name(self) -> Optional[pulumi.Input[builtins.str]]:
2188
2189
  """
2189
2190
  The name of the cluster.
2190
2191
  """
2191
2192
  return pulumi.get(self, "name")
2192
2193
 
2193
2194
  @name.setter
2194
- def name(self, value: Optional[pulumi.Input[str]]):
2195
+ def name(self, value: Optional[pulumi.Input[builtins.str]]):
2195
2196
  pulumi.set(self, "name", value)
2196
2197
 
2197
2198
  @property
2198
2199
  @pulumi.getter(name="proactiveHaAutomationLevel")
2199
- def proactive_ha_automation_level(self) -> Optional[pulumi.Input[str]]:
2200
+ def proactive_ha_automation_level(self) -> Optional[pulumi.Input[builtins.str]]:
2200
2201
  """
2201
2202
  The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
2202
2203
  """
2203
2204
  return pulumi.get(self, "proactive_ha_automation_level")
2204
2205
 
2205
2206
  @proactive_ha_automation_level.setter
2206
- def proactive_ha_automation_level(self, value: Optional[pulumi.Input[str]]):
2207
+ def proactive_ha_automation_level(self, value: Optional[pulumi.Input[builtins.str]]):
2207
2208
  pulumi.set(self, "proactive_ha_automation_level", value)
2208
2209
 
2209
2210
  @property
2210
2211
  @pulumi.getter(name="proactiveHaEnabled")
2211
- def proactive_ha_enabled(self) -> Optional[pulumi.Input[bool]]:
2212
+ def proactive_ha_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
2212
2213
  """
2213
2214
  Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
2214
2215
  """
2215
2216
  return pulumi.get(self, "proactive_ha_enabled")
2216
2217
 
2217
2218
  @proactive_ha_enabled.setter
2218
- def proactive_ha_enabled(self, value: Optional[pulumi.Input[bool]]):
2219
+ def proactive_ha_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
2219
2220
  pulumi.set(self, "proactive_ha_enabled", value)
2220
2221
 
2221
2222
  @property
2222
2223
  @pulumi.getter(name="proactiveHaModerateRemediation")
2223
- def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[str]]:
2224
+ def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[builtins.str]]:
2224
2225
  """
2225
2226
  The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
2226
2227
  this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
@@ -2228,24 +2229,24 @@ class _ComputeClusterState:
2228
2229
  return pulumi.get(self, "proactive_ha_moderate_remediation")
2229
2230
 
2230
2231
  @proactive_ha_moderate_remediation.setter
2231
- def proactive_ha_moderate_remediation(self, value: Optional[pulumi.Input[str]]):
2232
+ def proactive_ha_moderate_remediation(self, value: Optional[pulumi.Input[builtins.str]]):
2232
2233
  pulumi.set(self, "proactive_ha_moderate_remediation", value)
2233
2234
 
2234
2235
  @property
2235
2236
  @pulumi.getter(name="proactiveHaProviderIds")
2236
- def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
2237
+ def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
2237
2238
  """
2238
2239
  The list of IDs for health update providers configured for this cluster.
2239
2240
  """
2240
2241
  return pulumi.get(self, "proactive_ha_provider_ids")
2241
2242
 
2242
2243
  @proactive_ha_provider_ids.setter
2243
- def proactive_ha_provider_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
2244
+ def proactive_ha_provider_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
2244
2245
  pulumi.set(self, "proactive_ha_provider_ids", value)
2245
2246
 
2246
2247
  @property
2247
2248
  @pulumi.getter(name="proactiveHaSevereRemediation")
2248
- def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[str]]:
2249
+ def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[builtins.str]]:
2249
2250
  """
2250
2251
  The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
2251
2252
  cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
@@ -2253,12 +2254,12 @@ class _ComputeClusterState:
2253
2254
  return pulumi.get(self, "proactive_ha_severe_remediation")
2254
2255
 
2255
2256
  @proactive_ha_severe_remediation.setter
2256
- def proactive_ha_severe_remediation(self, value: Optional[pulumi.Input[str]]):
2257
+ def proactive_ha_severe_remediation(self, value: Optional[pulumi.Input[builtins.str]]):
2257
2258
  pulumi.set(self, "proactive_ha_severe_remediation", value)
2258
2259
 
2259
2260
  @property
2260
2261
  @pulumi.getter(name="resourcePoolId")
2261
- def resource_pool_id(self) -> Optional[pulumi.Input[str]]:
2262
+ def resource_pool_id(self) -> Optional[pulumi.Input[builtins.str]]:
2262
2263
  """
2263
2264
  The managed object ID of the primary
2264
2265
  resource pool for this cluster. This can be passed directly to the
@@ -2269,43 +2270,43 @@ class _ComputeClusterState:
2269
2270
  return pulumi.get(self, "resource_pool_id")
2270
2271
 
2271
2272
  @resource_pool_id.setter
2272
- def resource_pool_id(self, value: Optional[pulumi.Input[str]]):
2273
+ def resource_pool_id(self, value: Optional[pulumi.Input[builtins.str]]):
2273
2274
  pulumi.set(self, "resource_pool_id", value)
2274
2275
 
2275
2276
  @property
2276
2277
  @pulumi.getter
2277
- def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
2278
+ def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
2278
2279
  """
2279
2280
  The IDs of any tags to attach to this resource.
2280
2281
  """
2281
2282
  return pulumi.get(self, "tags")
2282
2283
 
2283
2284
  @tags.setter
2284
- def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
2285
+ def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
2285
2286
  pulumi.set(self, "tags", value)
2286
2287
 
2287
2288
  @property
2288
2289
  @pulumi.getter(name="vsanCompressionEnabled")
2289
- def vsan_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
2290
+ def vsan_compression_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
2290
2291
  """
2291
2292
  Whether the vSAN compression service is enabled for the cluster.
2292
2293
  """
2293
2294
  return pulumi.get(self, "vsan_compression_enabled")
2294
2295
 
2295
2296
  @vsan_compression_enabled.setter
2296
- def vsan_compression_enabled(self, value: Optional[pulumi.Input[bool]]):
2297
+ def vsan_compression_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
2297
2298
  pulumi.set(self, "vsan_compression_enabled", value)
2298
2299
 
2299
2300
  @property
2300
2301
  @pulumi.getter(name="vsanDedupEnabled")
2301
- def vsan_dedup_enabled(self) -> Optional[pulumi.Input[bool]]:
2302
+ def vsan_dedup_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
2302
2303
  """
2303
2304
  Whether the vSAN deduplication service is enabled for the cluster.
2304
2305
  """
2305
2306
  return pulumi.get(self, "vsan_dedup_enabled")
2306
2307
 
2307
2308
  @vsan_dedup_enabled.setter
2308
- def vsan_dedup_enabled(self, value: Optional[pulumi.Input[bool]]):
2309
+ def vsan_dedup_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
2309
2310
  pulumi.set(self, "vsan_dedup_enabled", value)
2310
2311
 
2311
2312
  @property
@@ -2322,50 +2323,50 @@ class _ComputeClusterState:
2322
2323
 
2323
2324
  @property
2324
2325
  @pulumi.getter(name="vsanDitEncryptionEnabled")
2325
- def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
2326
+ def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
2326
2327
  """
2327
2328
  Whether the vSAN data-in-transit encryption is enabled for the cluster.
2328
2329
  """
2329
2330
  return pulumi.get(self, "vsan_dit_encryption_enabled")
2330
2331
 
2331
2332
  @vsan_dit_encryption_enabled.setter
2332
- def vsan_dit_encryption_enabled(self, value: Optional[pulumi.Input[bool]]):
2333
+ def vsan_dit_encryption_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
2333
2334
  pulumi.set(self, "vsan_dit_encryption_enabled", value)
2334
2335
 
2335
2336
  @property
2336
2337
  @pulumi.getter(name="vsanDitRekeyInterval")
2337
- def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[int]]:
2338
+ def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[builtins.int]]:
2338
2339
  """
2339
2340
  When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
2340
2341
  """
2341
2342
  return pulumi.get(self, "vsan_dit_rekey_interval")
2342
2343
 
2343
2344
  @vsan_dit_rekey_interval.setter
2344
- def vsan_dit_rekey_interval(self, value: Optional[pulumi.Input[int]]):
2345
+ def vsan_dit_rekey_interval(self, value: Optional[pulumi.Input[builtins.int]]):
2345
2346
  pulumi.set(self, "vsan_dit_rekey_interval", value)
2346
2347
 
2347
2348
  @property
2348
2349
  @pulumi.getter(name="vsanEnabled")
2349
- def vsan_enabled(self) -> Optional[pulumi.Input[bool]]:
2350
+ def vsan_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
2350
2351
  """
2351
2352
  Whether the vSAN service is enabled for the cluster.
2352
2353
  """
2353
2354
  return pulumi.get(self, "vsan_enabled")
2354
2355
 
2355
2356
  @vsan_enabled.setter
2356
- def vsan_enabled(self, value: Optional[pulumi.Input[bool]]):
2357
+ def vsan_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
2357
2358
  pulumi.set(self, "vsan_enabled", value)
2358
2359
 
2359
2360
  @property
2360
2361
  @pulumi.getter(name="vsanEsaEnabled")
2361
- def vsan_esa_enabled(self) -> Optional[pulumi.Input[bool]]:
2362
+ def vsan_esa_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
2362
2363
  """
2363
2364
  Whether the vSAN ESA service is enabled for the cluster.
2364
2365
  """
2365
2366
  return pulumi.get(self, "vsan_esa_enabled")
2366
2367
 
2367
2368
  @vsan_esa_enabled.setter
2368
- def vsan_esa_enabled(self, value: Optional[pulumi.Input[bool]]):
2369
+ def vsan_esa_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
2369
2370
  pulumi.set(self, "vsan_esa_enabled", value)
2370
2371
 
2371
2372
  @property
@@ -2382,38 +2383,38 @@ class _ComputeClusterState:
2382
2383
 
2383
2384
  @property
2384
2385
  @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
2385
- def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
2386
+ def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
2386
2387
  """
2387
2388
  Whether the vSAN network diagnostic mode is enabled for the cluster.
2388
2389
  """
2389
2390
  return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")
2390
2391
 
2391
2392
  @vsan_network_diagnostic_mode_enabled.setter
2392
- def vsan_network_diagnostic_mode_enabled(self, value: Optional[pulumi.Input[bool]]):
2393
+ def vsan_network_diagnostic_mode_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
2393
2394
  pulumi.set(self, "vsan_network_diagnostic_mode_enabled", value)
2394
2395
 
2395
2396
  @property
2396
2397
  @pulumi.getter(name="vsanPerformanceEnabled")
2397
- def vsan_performance_enabled(self) -> Optional[pulumi.Input[bool]]:
2398
+ def vsan_performance_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
2398
2399
  """
2399
2400
  Whether the vSAN performance service is enabled for the cluster.
2400
2401
  """
2401
2402
  return pulumi.get(self, "vsan_performance_enabled")
2402
2403
 
2403
2404
  @vsan_performance_enabled.setter
2404
- def vsan_performance_enabled(self, value: Optional[pulumi.Input[bool]]):
2405
+ def vsan_performance_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
2405
2406
  pulumi.set(self, "vsan_performance_enabled", value)
2406
2407
 
2407
2408
  @property
2408
2409
  @pulumi.getter(name="vsanRemoteDatastoreIds")
2409
- def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
2410
+ def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]:
2410
2411
  """
2411
2412
  The managed object IDs of the vSAN datastore to be mounted on the cluster.
2412
2413
  """
2413
2414
  return pulumi.get(self, "vsan_remote_datastore_ids")
2414
2415
 
2415
2416
  @vsan_remote_datastore_ids.setter
2416
- def vsan_remote_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
2417
+ def vsan_remote_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]]):
2417
2418
  pulumi.set(self, "vsan_remote_datastore_ids", value)
2418
2419
 
2419
2420
  @property
@@ -2430,103 +2431,104 @@ class _ComputeClusterState:
2430
2431
 
2431
2432
  @property
2432
2433
  @pulumi.getter(name="vsanUnmapEnabled")
2433
- def vsan_unmap_enabled(self) -> Optional[pulumi.Input[bool]]:
2434
+ def vsan_unmap_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
2434
2435
  """
2435
2436
  Whether the vSAN unmap service is enabled for the cluster.
2436
2437
  """
2437
2438
  return pulumi.get(self, "vsan_unmap_enabled")
2438
2439
 
2439
2440
  @vsan_unmap_enabled.setter
2440
- def vsan_unmap_enabled(self, value: Optional[pulumi.Input[bool]]):
2441
+ def vsan_unmap_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
2441
2442
  pulumi.set(self, "vsan_unmap_enabled", value)
2442
2443
 
2443
2444
  @property
2444
2445
  @pulumi.getter(name="vsanVerboseModeEnabled")
2445
- def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
2446
+ def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[builtins.bool]]:
2446
2447
  """
2447
2448
  Whether the vSAN verbose mode is enabled for the cluster.
2448
2449
  """
2449
2450
  return pulumi.get(self, "vsan_verbose_mode_enabled")
2450
2451
 
2451
2452
  @vsan_verbose_mode_enabled.setter
2452
- def vsan_verbose_mode_enabled(self, value: Optional[pulumi.Input[bool]]):
2453
+ def vsan_verbose_mode_enabled(self, value: Optional[pulumi.Input[builtins.bool]]):
2453
2454
  pulumi.set(self, "vsan_verbose_mode_enabled", value)
2454
2455
 
2455
2456
 
2457
+ @pulumi.type_token("vsphere:index/computeCluster:ComputeCluster")
2456
2458
  class ComputeCluster(pulumi.CustomResource):
2457
2459
  @overload
2458
2460
  def __init__(__self__,
2459
2461
  resource_name: str,
2460
2462
  opts: Optional[pulumi.ResourceOptions] = None,
2461
- custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
2462
- datacenter_id: Optional[pulumi.Input[str]] = None,
2463
- dpm_automation_level: Optional[pulumi.Input[str]] = None,
2464
- dpm_enabled: Optional[pulumi.Input[bool]] = None,
2465
- dpm_threshold: Optional[pulumi.Input[int]] = None,
2466
- drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
2467
- drs_automation_level: Optional[pulumi.Input[str]] = None,
2468
- drs_enable_predictive_drs: Optional[pulumi.Input[bool]] = None,
2469
- drs_enable_vm_overrides: Optional[pulumi.Input[bool]] = None,
2470
- drs_enabled: Optional[pulumi.Input[bool]] = None,
2471
- drs_migration_threshold: Optional[pulumi.Input[int]] = None,
2472
- drs_scale_descendants_shares: Optional[pulumi.Input[str]] = None,
2473
- folder: Optional[pulumi.Input[str]] = None,
2474
- force_evacuate_on_destroy: Optional[pulumi.Input[bool]] = None,
2475
- ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2476
- ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[int]] = None,
2477
- ha_admission_control_performance_tolerance: Optional[pulumi.Input[int]] = None,
2478
- ha_admission_control_policy: Optional[pulumi.Input[str]] = None,
2479
- ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[bool]] = None,
2480
- ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[int]] = None,
2481
- ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[int]] = None,
2482
- ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[int]] = None,
2483
- ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[int]] = None,
2484
- ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[bool]] = None,
2485
- ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
2486
- ha_datastore_apd_recovery_action: Optional[pulumi.Input[str]] = None,
2487
- ha_datastore_apd_response: Optional[pulumi.Input[str]] = None,
2488
- ha_datastore_apd_response_delay: Optional[pulumi.Input[int]] = None,
2489
- ha_datastore_pdl_response: Optional[pulumi.Input[str]] = None,
2490
- ha_enabled: Optional[pulumi.Input[bool]] = None,
2491
- ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2492
- ha_heartbeat_datastore_policy: Optional[pulumi.Input[str]] = None,
2493
- ha_host_isolation_response: Optional[pulumi.Input[str]] = None,
2494
- ha_host_monitoring: Optional[pulumi.Input[str]] = None,
2495
- ha_vm_component_protection: Optional[pulumi.Input[str]] = None,
2496
- ha_vm_dependency_restart_condition: Optional[pulumi.Input[str]] = None,
2497
- ha_vm_failure_interval: Optional[pulumi.Input[int]] = None,
2498
- ha_vm_maximum_failure_window: Optional[pulumi.Input[int]] = None,
2499
- ha_vm_maximum_resets: Optional[pulumi.Input[int]] = None,
2500
- ha_vm_minimum_uptime: Optional[pulumi.Input[int]] = None,
2501
- ha_vm_monitoring: Optional[pulumi.Input[str]] = None,
2502
- ha_vm_restart_additional_delay: Optional[pulumi.Input[int]] = None,
2503
- ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
2504
- ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
2505
- host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
2463
+ custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
2464
+ datacenter_id: Optional[pulumi.Input[builtins.str]] = None,
2465
+ dpm_automation_level: Optional[pulumi.Input[builtins.str]] = None,
2466
+ dpm_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2467
+ dpm_threshold: Optional[pulumi.Input[builtins.int]] = None,
2468
+ drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
2469
+ drs_automation_level: Optional[pulumi.Input[builtins.str]] = None,
2470
+ drs_enable_predictive_drs: Optional[pulumi.Input[builtins.bool]] = None,
2471
+ drs_enable_vm_overrides: Optional[pulumi.Input[builtins.bool]] = None,
2472
+ drs_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2473
+ drs_migration_threshold: Optional[pulumi.Input[builtins.int]] = None,
2474
+ drs_scale_descendants_shares: Optional[pulumi.Input[builtins.str]] = None,
2475
+ folder: Optional[pulumi.Input[builtins.str]] = None,
2476
+ force_evacuate_on_destroy: Optional[pulumi.Input[builtins.bool]] = None,
2477
+ ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
2478
+ ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[builtins.int]] = None,
2479
+ ha_admission_control_performance_tolerance: Optional[pulumi.Input[builtins.int]] = None,
2480
+ ha_admission_control_policy: Optional[pulumi.Input[builtins.str]] = None,
2481
+ ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[builtins.bool]] = None,
2482
+ ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[builtins.int]] = None,
2483
+ ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[builtins.int]] = None,
2484
+ ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[builtins.int]] = None,
2485
+ ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[builtins.int]] = None,
2486
+ ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[builtins.bool]] = None,
2487
+ ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
2488
+ ha_datastore_apd_recovery_action: Optional[pulumi.Input[builtins.str]] = None,
2489
+ ha_datastore_apd_response: Optional[pulumi.Input[builtins.str]] = None,
2490
+ ha_datastore_apd_response_delay: Optional[pulumi.Input[builtins.int]] = None,
2491
+ ha_datastore_pdl_response: Optional[pulumi.Input[builtins.str]] = None,
2492
+ ha_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2493
+ ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
2494
+ ha_heartbeat_datastore_policy: Optional[pulumi.Input[builtins.str]] = None,
2495
+ ha_host_isolation_response: Optional[pulumi.Input[builtins.str]] = None,
2496
+ ha_host_monitoring: Optional[pulumi.Input[builtins.str]] = None,
2497
+ ha_vm_component_protection: Optional[pulumi.Input[builtins.str]] = None,
2498
+ ha_vm_dependency_restart_condition: Optional[pulumi.Input[builtins.str]] = None,
2499
+ ha_vm_failure_interval: Optional[pulumi.Input[builtins.int]] = None,
2500
+ ha_vm_maximum_failure_window: Optional[pulumi.Input[builtins.int]] = None,
2501
+ ha_vm_maximum_resets: Optional[pulumi.Input[builtins.int]] = None,
2502
+ ha_vm_minimum_uptime: Optional[pulumi.Input[builtins.int]] = None,
2503
+ ha_vm_monitoring: Optional[pulumi.Input[builtins.str]] = None,
2504
+ ha_vm_restart_additional_delay: Optional[pulumi.Input[builtins.int]] = None,
2505
+ ha_vm_restart_priority: Optional[pulumi.Input[builtins.str]] = None,
2506
+ ha_vm_restart_timeout: Optional[pulumi.Input[builtins.int]] = None,
2507
+ host_cluster_exit_timeout: Optional[pulumi.Input[builtins.int]] = None,
2506
2508
  host_image: Optional[pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']]] = None,
2507
- host_managed: Optional[pulumi.Input[bool]] = None,
2508
- host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2509
- name: Optional[pulumi.Input[str]] = None,
2510
- proactive_ha_automation_level: Optional[pulumi.Input[str]] = None,
2511
- proactive_ha_enabled: Optional[pulumi.Input[bool]] = None,
2512
- proactive_ha_moderate_remediation: Optional[pulumi.Input[str]] = None,
2513
- proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2514
- proactive_ha_severe_remediation: Optional[pulumi.Input[str]] = None,
2515
- tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2516
- vsan_compression_enabled: Optional[pulumi.Input[bool]] = None,
2517
- vsan_dedup_enabled: Optional[pulumi.Input[bool]] = None,
2509
+ host_managed: Optional[pulumi.Input[builtins.bool]] = None,
2510
+ host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
2511
+ name: Optional[pulumi.Input[builtins.str]] = None,
2512
+ proactive_ha_automation_level: Optional[pulumi.Input[builtins.str]] = None,
2513
+ proactive_ha_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2514
+ proactive_ha_moderate_remediation: Optional[pulumi.Input[builtins.str]] = None,
2515
+ proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
2516
+ proactive_ha_severe_remediation: Optional[pulumi.Input[builtins.str]] = None,
2517
+ tags: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
2518
+ vsan_compression_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2519
+ vsan_dedup_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2518
2520
  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]]] = None,
2519
- vsan_dit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
2520
- vsan_dit_rekey_interval: Optional[pulumi.Input[int]] = None,
2521
- vsan_enabled: Optional[pulumi.Input[bool]] = None,
2522
- vsan_esa_enabled: Optional[pulumi.Input[bool]] = None,
2521
+ vsan_dit_encryption_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2522
+ vsan_dit_rekey_interval: Optional[pulumi.Input[builtins.int]] = None,
2523
+ vsan_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2524
+ vsan_esa_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2523
2525
  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]]] = None,
2524
- vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[bool]] = None,
2525
- vsan_performance_enabled: Optional[pulumi.Input[bool]] = None,
2526
- vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2526
+ vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2527
+ vsan_performance_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2528
+ vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
2527
2529
  vsan_stretched_cluster: Optional[pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']]] = None,
2528
- vsan_unmap_enabled: Optional[pulumi.Input[bool]] = None,
2529
- vsan_verbose_mode_enabled: Optional[pulumi.Input[bool]] = None,
2530
+ vsan_unmap_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2531
+ vsan_verbose_mode_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2530
2532
  __props__=None):
2531
2533
  """
2532
2534
  > **A note on the naming of this resource:** VMware refers to clusters of
@@ -2619,122 +2621,122 @@ class ComputeCluster(pulumi.CustomResource):
2619
2621
 
2620
2622
  :param str resource_name: The name of the resource.
2621
2623
  :param pulumi.ResourceOptions opts: Options for the resource.
2622
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: A map of custom attribute ids to attribute
2624
+ :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] custom_attributes: A map of custom attribute ids to attribute
2623
2625
  value strings to set for the datastore cluster.
2624
2626
 
2625
2627
  > **NOTE:** Custom attributes are unsupported on direct ESXi connections
2626
2628
  and require vCenter Server.
2627
- :param pulumi.Input[str] datacenter_id: The managed object ID of
2629
+ :param pulumi.Input[builtins.str] datacenter_id: The managed object ID of
2628
2630
  the datacenter to create the cluster in. Forces a new resource if changed.
2629
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
2630
- :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
2631
+ :param pulumi.Input[builtins.str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
2632
+ :param pulumi.Input[builtins.bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
2631
2633
  machines in the cluster. Requires that DRS be enabled.
2632
- :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
2634
+ :param pulumi.Input[builtins.int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
2633
2635
  affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
2634
2636
  setting.
2635
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
2636
- :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
2637
+ :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
2638
+ :param pulumi.Input[builtins.str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
2637
2639
  fullyAutomated.
2638
- :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
2639
- :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
2640
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
2641
- :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
2640
+ :param pulumi.Input[builtins.bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
2641
+ :param pulumi.Input[builtins.bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
2642
+ :param pulumi.Input[builtins.bool] drs_enabled: Enable DRS for this cluster.
2643
+ :param pulumi.Input[builtins.int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
2642
2644
  more imbalance while a higher setting will tolerate less.
2643
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
2644
- :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
2645
+ :param pulumi.Input[builtins.str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
2646
+ :param pulumi.Input[builtins.str] folder: The relative path to a folder to put this cluster in.
2645
2647
  This is a path relative to the datacenter you are deploying the cluster to.
2646
2648
  Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
2647
2649
  The provider will place a cluster named `compute-cluster-test` in a
2648
2650
  host folder located at `/dc1/host/foo/bar`, with the final inventory path
2649
2651
  being `/dc1/host/foo/bar/datastore-cluster-test`.
2650
- :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
2652
+ :param pulumi.Input[builtins.bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
2651
2653
  for testing and is not recommended in normal use.
2652
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
2654
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
2653
2655
  failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
2654
2656
  will ignore the host when making recommendations.
2655
- :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
2657
+ :param pulumi.Input[builtins.int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
2656
2658
  machine operations. The maximum is one less than the number of hosts in the cluster.
2657
- :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
2659
+ :param pulumi.Input[builtins.int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
2658
2660
  warnings only, whereas a value of 100 disables the setting.
2659
- :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
2661
+ :param pulumi.Input[builtins.str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
2660
2662
  permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
2661
2663
  slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
2662
2664
  issues.
2663
- :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
2665
+ :param pulumi.Input[builtins.bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
2664
2666
  subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
2665
2667
  from the total amount of resources in the cluster. Disable to supply user-defined values.
2666
- :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
2668
+ :param pulumi.Input[builtins.int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
2667
2669
  the cluster to reserve for failover.
2668
- :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
2670
+ :param pulumi.Input[builtins.int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
2669
2671
  the cluster to reserve for failover.
2670
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
2671
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
2672
- :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
2672
+ :param pulumi.Input[builtins.int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
2673
+ :param pulumi.Input[builtins.int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
2674
+ :param pulumi.Input[builtins.bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
2673
2675
  to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
2674
2676
  currently in the cluster.
2675
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
2676
- :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
2677
+ :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
2678
+ :param pulumi.Input[builtins.str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
2677
2679
  affected datastore clears in the middle of an APD event. Can be one of none or reset.
2678
- :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
2680
+ :param pulumi.Input[builtins.str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
2679
2681
  detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
2680
2682
  restartAggressive.
2681
- :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
2683
+ :param pulumi.Input[builtins.int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
2682
2684
  the response action defined in ha_datastore_apd_response.
2683
- :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
2685
+ :param pulumi.Input[builtins.str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
2684
2686
  detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
2685
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
2686
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
2687
+ :param pulumi.Input[builtins.bool] ha_enabled: Enable vSphere HA for this cluster.
2688
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
2687
2689
  ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
2688
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
2690
+ :param pulumi.Input[builtins.str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
2689
2691
  allFeasibleDsWithUserPreference.
2690
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
2692
+ :param pulumi.Input[builtins.str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
2691
2693
  Can be one of none, powerOff, or shutdown.
2692
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
2693
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
2694
+ :param pulumi.Input[builtins.str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
2695
+ :param pulumi.Input[builtins.str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
2694
2696
  failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
2695
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
2697
+ :param pulumi.Input[builtins.str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
2696
2698
  on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
2697
- :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
2699
+ :param pulumi.Input[builtins.int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
2698
2700
  failed. The value is in seconds.
2699
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
2701
+ :param pulumi.Input[builtins.int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
2700
2702
  attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
2701
2703
  time is allotted.
2702
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
2703
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
2704
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
2704
+ :param pulumi.Input[builtins.int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
2705
+ :param pulumi.Input[builtins.int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
2706
+ :param pulumi.Input[builtins.str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
2705
2707
  vmMonitoringOnly, or vmAndAppMonitoring.
2706
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
2707
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
2708
+ :param pulumi.Input[builtins.int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
2709
+ :param pulumi.Input[builtins.str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
2708
2710
  high, or highest.
2709
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
2711
+ :param pulumi.Input[builtins.int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
2710
2712
  proceeding with the next priority.
2711
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
2713
+ :param pulumi.Input[builtins.int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
2712
2714
  :param pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']] host_image: Details about the host image which should be applied to the cluster.
2713
- :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
2714
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
2715
- :param pulumi.Input[str] name: The name of the cluster.
2716
- :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
2717
- :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
2718
- :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
2715
+ :param pulumi.Input[builtins.bool] host_managed: Must be set if cluster enrollment is managed from host resource.
2716
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
2717
+ :param pulumi.Input[builtins.str] name: The name of the cluster.
2718
+ :param pulumi.Input[builtins.str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
2719
+ :param pulumi.Input[builtins.bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
2720
+ :param pulumi.Input[builtins.str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
2719
2721
  this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
2720
- :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
2721
- :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
2722
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
2723
+ :param pulumi.Input[builtins.str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
2722
2724
  cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
2723
- :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
2724
- :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
2725
- :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
2725
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] tags: The IDs of any tags to attach to this resource.
2726
+ :param pulumi.Input[builtins.bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
2727
+ :param pulumi.Input[builtins.bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
2726
2728
  :param pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
2727
- :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
2728
- :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
2729
- :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
2730
- :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
2729
+ :param pulumi.Input[builtins.bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
2730
+ :param pulumi.Input[builtins.int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
2731
+ :param pulumi.Input[builtins.bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
2732
+ :param pulumi.Input[builtins.bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
2731
2733
  :param pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]] vsan_fault_domains: The configuration for vSAN fault domains.
2732
- :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
2733
- :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
2734
- :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
2734
+ :param pulumi.Input[builtins.bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
2735
+ :param pulumi.Input[builtins.bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
2736
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
2735
2737
  :param pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']] vsan_stretched_cluster: The configuration for stretched cluster.
2736
- :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
2737
- :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
2738
+ :param pulumi.Input[builtins.bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
2739
+ :param pulumi.Input[builtins.bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
2738
2740
  """
2739
2741
  ...
2740
2742
  @overload
@@ -2846,75 +2848,75 @@ class ComputeCluster(pulumi.CustomResource):
2846
2848
  def _internal_init(__self__,
2847
2849
  resource_name: str,
2848
2850
  opts: Optional[pulumi.ResourceOptions] = None,
2849
- custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
2850
- datacenter_id: Optional[pulumi.Input[str]] = None,
2851
- dpm_automation_level: Optional[pulumi.Input[str]] = None,
2852
- dpm_enabled: Optional[pulumi.Input[bool]] = None,
2853
- dpm_threshold: Optional[pulumi.Input[int]] = None,
2854
- drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
2855
- drs_automation_level: Optional[pulumi.Input[str]] = None,
2856
- drs_enable_predictive_drs: Optional[pulumi.Input[bool]] = None,
2857
- drs_enable_vm_overrides: Optional[pulumi.Input[bool]] = None,
2858
- drs_enabled: Optional[pulumi.Input[bool]] = None,
2859
- drs_migration_threshold: Optional[pulumi.Input[int]] = None,
2860
- drs_scale_descendants_shares: Optional[pulumi.Input[str]] = None,
2861
- folder: Optional[pulumi.Input[str]] = None,
2862
- force_evacuate_on_destroy: Optional[pulumi.Input[bool]] = None,
2863
- ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2864
- ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[int]] = None,
2865
- ha_admission_control_performance_tolerance: Optional[pulumi.Input[int]] = None,
2866
- ha_admission_control_policy: Optional[pulumi.Input[str]] = None,
2867
- ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[bool]] = None,
2868
- ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[int]] = None,
2869
- ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[int]] = None,
2870
- ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[int]] = None,
2871
- ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[int]] = None,
2872
- ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[bool]] = None,
2873
- ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
2874
- ha_datastore_apd_recovery_action: Optional[pulumi.Input[str]] = None,
2875
- ha_datastore_apd_response: Optional[pulumi.Input[str]] = None,
2876
- ha_datastore_apd_response_delay: Optional[pulumi.Input[int]] = None,
2877
- ha_datastore_pdl_response: Optional[pulumi.Input[str]] = None,
2878
- ha_enabled: Optional[pulumi.Input[bool]] = None,
2879
- ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2880
- ha_heartbeat_datastore_policy: Optional[pulumi.Input[str]] = None,
2881
- ha_host_isolation_response: Optional[pulumi.Input[str]] = None,
2882
- ha_host_monitoring: Optional[pulumi.Input[str]] = None,
2883
- ha_vm_component_protection: Optional[pulumi.Input[str]] = None,
2884
- ha_vm_dependency_restart_condition: Optional[pulumi.Input[str]] = None,
2885
- ha_vm_failure_interval: Optional[pulumi.Input[int]] = None,
2886
- ha_vm_maximum_failure_window: Optional[pulumi.Input[int]] = None,
2887
- ha_vm_maximum_resets: Optional[pulumi.Input[int]] = None,
2888
- ha_vm_minimum_uptime: Optional[pulumi.Input[int]] = None,
2889
- ha_vm_monitoring: Optional[pulumi.Input[str]] = None,
2890
- ha_vm_restart_additional_delay: Optional[pulumi.Input[int]] = None,
2891
- ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
2892
- ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
2893
- host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
2851
+ custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
2852
+ datacenter_id: Optional[pulumi.Input[builtins.str]] = None,
2853
+ dpm_automation_level: Optional[pulumi.Input[builtins.str]] = None,
2854
+ dpm_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2855
+ dpm_threshold: Optional[pulumi.Input[builtins.int]] = None,
2856
+ drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
2857
+ drs_automation_level: Optional[pulumi.Input[builtins.str]] = None,
2858
+ drs_enable_predictive_drs: Optional[pulumi.Input[builtins.bool]] = None,
2859
+ drs_enable_vm_overrides: Optional[pulumi.Input[builtins.bool]] = None,
2860
+ drs_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2861
+ drs_migration_threshold: Optional[pulumi.Input[builtins.int]] = None,
2862
+ drs_scale_descendants_shares: Optional[pulumi.Input[builtins.str]] = None,
2863
+ folder: Optional[pulumi.Input[builtins.str]] = None,
2864
+ force_evacuate_on_destroy: Optional[pulumi.Input[builtins.bool]] = None,
2865
+ ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
2866
+ ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[builtins.int]] = None,
2867
+ ha_admission_control_performance_tolerance: Optional[pulumi.Input[builtins.int]] = None,
2868
+ ha_admission_control_policy: Optional[pulumi.Input[builtins.str]] = None,
2869
+ ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[builtins.bool]] = None,
2870
+ ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[builtins.int]] = None,
2871
+ ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[builtins.int]] = None,
2872
+ ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[builtins.int]] = None,
2873
+ ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[builtins.int]] = None,
2874
+ ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[builtins.bool]] = None,
2875
+ ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
2876
+ ha_datastore_apd_recovery_action: Optional[pulumi.Input[builtins.str]] = None,
2877
+ ha_datastore_apd_response: Optional[pulumi.Input[builtins.str]] = None,
2878
+ ha_datastore_apd_response_delay: Optional[pulumi.Input[builtins.int]] = None,
2879
+ ha_datastore_pdl_response: Optional[pulumi.Input[builtins.str]] = None,
2880
+ ha_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2881
+ ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
2882
+ ha_heartbeat_datastore_policy: Optional[pulumi.Input[builtins.str]] = None,
2883
+ ha_host_isolation_response: Optional[pulumi.Input[builtins.str]] = None,
2884
+ ha_host_monitoring: Optional[pulumi.Input[builtins.str]] = None,
2885
+ ha_vm_component_protection: Optional[pulumi.Input[builtins.str]] = None,
2886
+ ha_vm_dependency_restart_condition: Optional[pulumi.Input[builtins.str]] = None,
2887
+ ha_vm_failure_interval: Optional[pulumi.Input[builtins.int]] = None,
2888
+ ha_vm_maximum_failure_window: Optional[pulumi.Input[builtins.int]] = None,
2889
+ ha_vm_maximum_resets: Optional[pulumi.Input[builtins.int]] = None,
2890
+ ha_vm_minimum_uptime: Optional[pulumi.Input[builtins.int]] = None,
2891
+ ha_vm_monitoring: Optional[pulumi.Input[builtins.str]] = None,
2892
+ ha_vm_restart_additional_delay: Optional[pulumi.Input[builtins.int]] = None,
2893
+ ha_vm_restart_priority: Optional[pulumi.Input[builtins.str]] = None,
2894
+ ha_vm_restart_timeout: Optional[pulumi.Input[builtins.int]] = None,
2895
+ host_cluster_exit_timeout: Optional[pulumi.Input[builtins.int]] = None,
2894
2896
  host_image: Optional[pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']]] = None,
2895
- host_managed: Optional[pulumi.Input[bool]] = None,
2896
- host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2897
- name: Optional[pulumi.Input[str]] = None,
2898
- proactive_ha_automation_level: Optional[pulumi.Input[str]] = None,
2899
- proactive_ha_enabled: Optional[pulumi.Input[bool]] = None,
2900
- proactive_ha_moderate_remediation: Optional[pulumi.Input[str]] = None,
2901
- proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2902
- proactive_ha_severe_remediation: Optional[pulumi.Input[str]] = None,
2903
- tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2904
- vsan_compression_enabled: Optional[pulumi.Input[bool]] = None,
2905
- vsan_dedup_enabled: Optional[pulumi.Input[bool]] = None,
2897
+ host_managed: Optional[pulumi.Input[builtins.bool]] = None,
2898
+ host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
2899
+ name: Optional[pulumi.Input[builtins.str]] = None,
2900
+ proactive_ha_automation_level: Optional[pulumi.Input[builtins.str]] = None,
2901
+ proactive_ha_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2902
+ proactive_ha_moderate_remediation: Optional[pulumi.Input[builtins.str]] = None,
2903
+ proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
2904
+ proactive_ha_severe_remediation: Optional[pulumi.Input[builtins.str]] = None,
2905
+ tags: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
2906
+ vsan_compression_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2907
+ vsan_dedup_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2906
2908
  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]]] = None,
2907
- vsan_dit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
2908
- vsan_dit_rekey_interval: Optional[pulumi.Input[int]] = None,
2909
- vsan_enabled: Optional[pulumi.Input[bool]] = None,
2910
- vsan_esa_enabled: Optional[pulumi.Input[bool]] = None,
2909
+ vsan_dit_encryption_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2910
+ vsan_dit_rekey_interval: Optional[pulumi.Input[builtins.int]] = None,
2911
+ vsan_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2912
+ vsan_esa_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2911
2913
  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]]] = None,
2912
- vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[bool]] = None,
2913
- vsan_performance_enabled: Optional[pulumi.Input[bool]] = None,
2914
- vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2914
+ vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2915
+ vsan_performance_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2916
+ vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
2915
2917
  vsan_stretched_cluster: Optional[pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']]] = None,
2916
- vsan_unmap_enabled: Optional[pulumi.Input[bool]] = None,
2917
- vsan_verbose_mode_enabled: Optional[pulumi.Input[bool]] = None,
2918
+ vsan_unmap_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2919
+ vsan_verbose_mode_enabled: Optional[pulumi.Input[builtins.bool]] = None,
2918
2920
  __props__=None):
2919
2921
  opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
2920
2922
  if not isinstance(opts, pulumi.ResourceOptions):
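The change in this hunk swaps the bare `str`, `int`, and `bool` annotations for `builtins.str`, `builtins.int`, and `builtins.bool`; those names refer to the same built-in types, so call sites keep passing plain Python values. A minimal sketch of constructing the resource with a few of the arguments from this signature — the datacenter name, resource name, and host IDs below are placeholders for illustration and are not part of this diff:

import pulumi
import pulumi_vsphere as vsphere

# Placeholder lookup; "dc-01" is illustrative only.
datacenter = vsphere.get_datacenter(name="dc-01")

cluster = vsphere.ComputeCluster(
    "compute-cluster-example",
    name="compute-cluster-example",
    datacenter_id=datacenter.id,
    drs_enabled=True,                       # pulumi.Input[builtins.bool] accepts a plain bool
    drs_automation_level="fullyAutomated",  # pulumi.Input[builtins.str] accepts a plain str
    ha_enabled=True,
    host_system_ids=["host-1234", "host-5678"],  # placeholder managed object IDs
)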
@@ -3006,76 +3008,76 @@ class ComputeCluster(pulumi.CustomResource):
3006
3008
  def get(resource_name: str,
3007
3009
  id: pulumi.Input[str],
3008
3010
  opts: Optional[pulumi.ResourceOptions] = None,
3009
- custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
3010
- datacenter_id: Optional[pulumi.Input[str]] = None,
3011
- dpm_automation_level: Optional[pulumi.Input[str]] = None,
3012
- dpm_enabled: Optional[pulumi.Input[bool]] = None,
3013
- dpm_threshold: Optional[pulumi.Input[int]] = None,
3014
- drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
3015
- drs_automation_level: Optional[pulumi.Input[str]] = None,
3016
- drs_enable_predictive_drs: Optional[pulumi.Input[bool]] = None,
3017
- drs_enable_vm_overrides: Optional[pulumi.Input[bool]] = None,
3018
- drs_enabled: Optional[pulumi.Input[bool]] = None,
3019
- drs_migration_threshold: Optional[pulumi.Input[int]] = None,
3020
- drs_scale_descendants_shares: Optional[pulumi.Input[str]] = None,
3021
- folder: Optional[pulumi.Input[str]] = None,
3022
- force_evacuate_on_destroy: Optional[pulumi.Input[bool]] = None,
3023
- ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
3024
- ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[int]] = None,
3025
- ha_admission_control_performance_tolerance: Optional[pulumi.Input[int]] = None,
3026
- ha_admission_control_policy: Optional[pulumi.Input[str]] = None,
3027
- ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[bool]] = None,
3028
- ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[int]] = None,
3029
- ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[int]] = None,
3030
- ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[int]] = None,
3031
- ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[int]] = None,
3032
- ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[bool]] = None,
3033
- ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
3034
- ha_datastore_apd_recovery_action: Optional[pulumi.Input[str]] = None,
3035
- ha_datastore_apd_response: Optional[pulumi.Input[str]] = None,
3036
- ha_datastore_apd_response_delay: Optional[pulumi.Input[int]] = None,
3037
- ha_datastore_pdl_response: Optional[pulumi.Input[str]] = None,
3038
- ha_enabled: Optional[pulumi.Input[bool]] = None,
3039
- ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
3040
- ha_heartbeat_datastore_policy: Optional[pulumi.Input[str]] = None,
3041
- ha_host_isolation_response: Optional[pulumi.Input[str]] = None,
3042
- ha_host_monitoring: Optional[pulumi.Input[str]] = None,
3043
- ha_vm_component_protection: Optional[pulumi.Input[str]] = None,
3044
- ha_vm_dependency_restart_condition: Optional[pulumi.Input[str]] = None,
3045
- ha_vm_failure_interval: Optional[pulumi.Input[int]] = None,
3046
- ha_vm_maximum_failure_window: Optional[pulumi.Input[int]] = None,
3047
- ha_vm_maximum_resets: Optional[pulumi.Input[int]] = None,
3048
- ha_vm_minimum_uptime: Optional[pulumi.Input[int]] = None,
3049
- ha_vm_monitoring: Optional[pulumi.Input[str]] = None,
3050
- ha_vm_restart_additional_delay: Optional[pulumi.Input[int]] = None,
3051
- ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
3052
- ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
3053
- host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
3011
+ custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
3012
+ datacenter_id: Optional[pulumi.Input[builtins.str]] = None,
3013
+ dpm_automation_level: Optional[pulumi.Input[builtins.str]] = None,
3014
+ dpm_enabled: Optional[pulumi.Input[builtins.bool]] = None,
3015
+ dpm_threshold: Optional[pulumi.Input[builtins.int]] = None,
3016
+ drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
3017
+ drs_automation_level: Optional[pulumi.Input[builtins.str]] = None,
3018
+ drs_enable_predictive_drs: Optional[pulumi.Input[builtins.bool]] = None,
3019
+ drs_enable_vm_overrides: Optional[pulumi.Input[builtins.bool]] = None,
3020
+ drs_enabled: Optional[pulumi.Input[builtins.bool]] = None,
3021
+ drs_migration_threshold: Optional[pulumi.Input[builtins.int]] = None,
3022
+ drs_scale_descendants_shares: Optional[pulumi.Input[builtins.str]] = None,
3023
+ folder: Optional[pulumi.Input[builtins.str]] = None,
3024
+ force_evacuate_on_destroy: Optional[pulumi.Input[builtins.bool]] = None,
3025
+ ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
3026
+ ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[builtins.int]] = None,
3027
+ ha_admission_control_performance_tolerance: Optional[pulumi.Input[builtins.int]] = None,
3028
+ ha_admission_control_policy: Optional[pulumi.Input[builtins.str]] = None,
3029
+ ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[builtins.bool]] = None,
3030
+ ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[builtins.int]] = None,
3031
+ ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[builtins.int]] = None,
3032
+ ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[builtins.int]] = None,
3033
+ ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[builtins.int]] = None,
3034
+ ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[builtins.bool]] = None,
3035
+ ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]]] = None,
3036
+ ha_datastore_apd_recovery_action: Optional[pulumi.Input[builtins.str]] = None,
3037
+ ha_datastore_apd_response: Optional[pulumi.Input[builtins.str]] = None,
3038
+ ha_datastore_apd_response_delay: Optional[pulumi.Input[builtins.int]] = None,
3039
+ ha_datastore_pdl_response: Optional[pulumi.Input[builtins.str]] = None,
3040
+ ha_enabled: Optional[pulumi.Input[builtins.bool]] = None,
3041
+ ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
3042
+ ha_heartbeat_datastore_policy: Optional[pulumi.Input[builtins.str]] = None,
3043
+ ha_host_isolation_response: Optional[pulumi.Input[builtins.str]] = None,
3044
+ ha_host_monitoring: Optional[pulumi.Input[builtins.str]] = None,
3045
+ ha_vm_component_protection: Optional[pulumi.Input[builtins.str]] = None,
3046
+ ha_vm_dependency_restart_condition: Optional[pulumi.Input[builtins.str]] = None,
3047
+ ha_vm_failure_interval: Optional[pulumi.Input[builtins.int]] = None,
3048
+ ha_vm_maximum_failure_window: Optional[pulumi.Input[builtins.int]] = None,
3049
+ ha_vm_maximum_resets: Optional[pulumi.Input[builtins.int]] = None,
3050
+ ha_vm_minimum_uptime: Optional[pulumi.Input[builtins.int]] = None,
3051
+ ha_vm_monitoring: Optional[pulumi.Input[builtins.str]] = None,
3052
+ ha_vm_restart_additional_delay: Optional[pulumi.Input[builtins.int]] = None,
3053
+ ha_vm_restart_priority: Optional[pulumi.Input[builtins.str]] = None,
3054
+ ha_vm_restart_timeout: Optional[pulumi.Input[builtins.int]] = None,
3055
+ host_cluster_exit_timeout: Optional[pulumi.Input[builtins.int]] = None,
3054
3056
  host_image: Optional[pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']]] = None,
3055
- host_managed: Optional[pulumi.Input[bool]] = None,
3056
- host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
3057
- name: Optional[pulumi.Input[str]] = None,
3058
- proactive_ha_automation_level: Optional[pulumi.Input[str]] = None,
3059
- proactive_ha_enabled: Optional[pulumi.Input[bool]] = None,
3060
- proactive_ha_moderate_remediation: Optional[pulumi.Input[str]] = None,
3061
- proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
3062
- proactive_ha_severe_remediation: Optional[pulumi.Input[str]] = None,
3063
- resource_pool_id: Optional[pulumi.Input[str]] = None,
3064
- tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
3065
- vsan_compression_enabled: Optional[pulumi.Input[bool]] = None,
3066
- vsan_dedup_enabled: Optional[pulumi.Input[bool]] = None,
3057
+ host_managed: Optional[pulumi.Input[builtins.bool]] = None,
3058
+ host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
3059
+ name: Optional[pulumi.Input[builtins.str]] = None,
3060
+ proactive_ha_automation_level: Optional[pulumi.Input[builtins.str]] = None,
3061
+ proactive_ha_enabled: Optional[pulumi.Input[builtins.bool]] = None,
3062
+ proactive_ha_moderate_remediation: Optional[pulumi.Input[builtins.str]] = None,
3063
+ proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
3064
+ proactive_ha_severe_remediation: Optional[pulumi.Input[builtins.str]] = None,
3065
+ resource_pool_id: Optional[pulumi.Input[builtins.str]] = None,
3066
+ tags: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
3067
+ vsan_compression_enabled: Optional[pulumi.Input[builtins.bool]] = None,
3068
+ vsan_dedup_enabled: Optional[pulumi.Input[builtins.bool]] = None,
3067
3069
  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]]] = None,
3068
- vsan_dit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
3069
- vsan_dit_rekey_interval: Optional[pulumi.Input[int]] = None,
3070
- vsan_enabled: Optional[pulumi.Input[bool]] = None,
3071
- vsan_esa_enabled: Optional[pulumi.Input[bool]] = None,
3070
+ vsan_dit_encryption_enabled: Optional[pulumi.Input[builtins.bool]] = None,
3071
+ vsan_dit_rekey_interval: Optional[pulumi.Input[builtins.int]] = None,
3072
+ vsan_enabled: Optional[pulumi.Input[builtins.bool]] = None,
3073
+ vsan_esa_enabled: Optional[pulumi.Input[builtins.bool]] = None,
3072
3074
  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]]] = None,
3073
- vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[bool]] = None,
3074
- vsan_performance_enabled: Optional[pulumi.Input[bool]] = None,
3075
- vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
3075
+ vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[builtins.bool]] = None,
3076
+ vsan_performance_enabled: Optional[pulumi.Input[builtins.bool]] = None,
3077
+ vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[builtins.str]]]] = None,
3076
3078
  vsan_stretched_cluster: Optional[pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']]] = None,
3077
- vsan_unmap_enabled: Optional[pulumi.Input[bool]] = None,
3078
- vsan_verbose_mode_enabled: Optional[pulumi.Input[bool]] = None) -> 'ComputeCluster':
3079
+ vsan_unmap_enabled: Optional[pulumi.Input[builtins.bool]] = None,
3080
+ vsan_verbose_mode_enabled: Optional[pulumi.Input[builtins.bool]] = None) -> 'ComputeCluster':
3079
3081
  """
3080
3082
  Get an existing ComputeCluster resource's state with the given name, id, and optional extra
3081
3083
  properties used to qualify the lookup.
@@ -3083,127 +3085,127 @@ class ComputeCluster(pulumi.CustomResource):
3083
3085
  :param str resource_name: The unique name of the resulting resource.
3084
3086
  :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
3085
3087
  :param pulumi.ResourceOptions opts: Options for the resource.
3086
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: A map of custom attribute ids to attribute
3088
+ :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] custom_attributes: A map of custom attribute ids to attribute
3087
3089
  value strings to set for the datastore cluster.
3088
3090
 
3089
3091
  > **NOTE:** Custom attributes are unsupported on direct ESXi connections
3090
3092
  and require vCenter Server.
3091
- :param pulumi.Input[str] datacenter_id: The managed object ID of
3093
+ :param pulumi.Input[builtins.str] datacenter_id: The managed object ID of
3092
3094
  the datacenter to create the cluster in. Forces a new resource if changed.
3093
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
3094
- :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
3095
+ :param pulumi.Input[builtins.str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
3096
+ :param pulumi.Input[builtins.bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
3095
3097
  machines in the cluster. Requires that DRS be enabled.
3096
- :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
3098
+ :param pulumi.Input[builtins.int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
3097
3099
  affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
3098
3100
  setting.
3099
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
3100
- :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
3101
+ :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
3102
+ :param pulumi.Input[builtins.str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
3101
3103
  fullyAutomated.
3102
- :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
3103
- :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
3104
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
3105
- :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
3104
+ :param pulumi.Input[builtins.bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
3105
+ :param pulumi.Input[builtins.bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
3106
+ :param pulumi.Input[builtins.bool] drs_enabled: Enable DRS for this cluster.
3107
+ :param pulumi.Input[builtins.int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
3106
3108
  more imbalance while a higher setting will tolerate less.
3107
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
3108
- :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
3109
+ :param pulumi.Input[builtins.str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
3110
+ :param pulumi.Input[builtins.str] folder: The relative path to a folder to put this cluster in.
3109
3111
  This is a path relative to the datacenter you are deploying the cluster to.
3110
3112
  Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
3111
3113
  The provider will place a cluster named `compute-cluster-test` in a
3112
3114
  host folder located at `/dc1/host/foo/bar`, with the final inventory path
3113
3115
  being `/dc1/host/foo/bar/datastore-cluster-test`.
3114
- :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
3116
+ :param pulumi.Input[builtins.bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
3115
3117
  for testing and is not recommended in normal use.
3116
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
3118
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
3117
3119
  failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
3118
3120
  will ignore the host when making recommendations.
3119
- :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
3121
+ :param pulumi.Input[builtins.int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
3120
3122
  machine operations. The maximum is one less than the number of hosts in the cluster.
3121
- :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
3123
+ :param pulumi.Input[builtins.int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
3122
3124
  warnings only, whereas a value of 100 disables the setting.
3123
- :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
3125
+ :param pulumi.Input[builtins.str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
3124
3126
  permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
3125
3127
  slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
3126
3128
  issues.
3127
- :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
3129
+ :param pulumi.Input[builtins.bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
3128
3130
  subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
3129
3131
  from the total amount of resources in the cluster. Disable to supply user-defined values.
3130
- :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
3132
+ :param pulumi.Input[builtins.int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
3131
3133
  the cluster to reserve for failover.
3132
- :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
3134
+ :param pulumi.Input[builtins.int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
3133
3135
  the cluster to reserve for failover.
3134
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
3135
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
3136
- :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
3136
+ :param pulumi.Input[builtins.int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
3137
+ :param pulumi.Input[builtins.int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
3138
+ :param pulumi.Input[builtins.bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
3137
3139
  to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
3138
3140
  currently in the cluster.
3139
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
3140
- :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
3141
+ :param pulumi.Input[Mapping[str, pulumi.Input[builtins.str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
3142
+ :param pulumi.Input[builtins.str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
3141
3143
  affected datastore clears in the middle of an APD event. Can be one of none or reset.
3142
- :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
3144
+ :param pulumi.Input[builtins.str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
3143
3145
  detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
3144
3146
  restartAggressive.
3145
- :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
3147
+ :param pulumi.Input[builtins.int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
3146
3148
  the response action defined in ha_datastore_apd_response.
3147
- :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
3149
+ :param pulumi.Input[builtins.str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
3148
3150
  detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
3149
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
3150
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
3151
+ :param pulumi.Input[builtins.bool] ha_enabled: Enable vSphere HA for this cluster.
3152
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
3151
3153
  ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
3152
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
3154
+ :param pulumi.Input[builtins.str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
3153
3155
  allFeasibleDsWithUserPreference.
3154
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
3156
+ :param pulumi.Input[builtins.str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
3155
3157
  Can be one of none, powerOff, or shutdown.
3156
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
3157
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
3158
+ :param pulumi.Input[builtins.str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
3159
+ :param pulumi.Input[builtins.str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
3158
3160
  failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
3159
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
3161
+ :param pulumi.Input[builtins.str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
3160
3162
  on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
3161
- :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
3163
+ :param pulumi.Input[builtins.int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
3162
3164
  failed. The value is in seconds.
3163
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
3165
+ :param pulumi.Input[builtins.int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
3164
3166
  attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
3165
3167
  time is allotted.
3166
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
3167
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
3168
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
3168
+ :param pulumi.Input[builtins.int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
3169
+ :param pulumi.Input[builtins.int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
3170
+ :param pulumi.Input[builtins.str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
3169
3171
  vmMonitoringOnly, or vmAndAppMonitoring.
3170
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
3171
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
3172
+ :param pulumi.Input[builtins.int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
3173
+ :param pulumi.Input[builtins.str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
3172
3174
  high, or highest.
3173
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
3175
+ :param pulumi.Input[builtins.int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
3174
3176
  proceeding with the next priority.
3175
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
3177
+ :param pulumi.Input[builtins.int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
3176
3178
  :param pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']] host_image: Details about the host image which should be applied to the cluster.
3177
- :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
3178
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
3179
- :param pulumi.Input[str] name: The name of the cluster.
3180
- :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
3181
- :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
3182
- :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
3179
+ :param pulumi.Input[builtins.bool] host_managed: Must be set if cluster enrollment is managed from host resource.
3180
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
3181
+ :param pulumi.Input[builtins.str] name: The name of the cluster.
3182
+ :param pulumi.Input[builtins.str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
3183
+ :param pulumi.Input[builtins.bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
3184
+ :param pulumi.Input[builtins.str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
3183
3185
  this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
3184
- :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
3185
- :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
3186
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
3187
+ :param pulumi.Input[builtins.str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
3186
3188
  cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
3187
- :param pulumi.Input[str] resource_pool_id: The managed object ID of the primary
3189
+ :param pulumi.Input[builtins.str] resource_pool_id: The managed object ID of the primary
3188
3190
  resource pool for this cluster. This can be passed directly to the
3189
3191
  `resource_pool_id`
3190
3192
  attribute of the
3191
3193
  `VirtualMachine` resource.
3192
- :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
3193
- :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
3194
- :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
3194
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] tags: The IDs of any tags to attach to this resource.
3195
+ :param pulumi.Input[builtins.bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
3196
+ :param pulumi.Input[builtins.bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
3195
3197
  :param pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
3196
- :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
3197
- :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
3198
- :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
3199
- :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
3198
+ :param pulumi.Input[builtins.bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
3199
+ :param pulumi.Input[builtins.int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
3200
+ :param pulumi.Input[builtins.bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
3201
+ :param pulumi.Input[builtins.bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
3200
3202
  :param pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]] vsan_fault_domains: The configuration for vSAN fault domains.
3201
- :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
3202
- :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
3203
- :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
3203
+ :param pulumi.Input[builtins.bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
3204
+ :param pulumi.Input[builtins.bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
3205
+ :param pulumi.Input[Sequence[pulumi.Input[builtins.str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
3204
3206
  :param pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']] vsan_stretched_cluster: The configuration for stretched cluster.
3205
- :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
3206
- :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
3207
+ :param pulumi.Input[builtins.bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
3208
+ :param pulumi.Input[builtins.bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
3207
3209
  """
3208
3210
  opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
3209
3211
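A minimal sketch of the `get` lookup documented above; the managed object ID is a placeholder, and any of the state arguments listed in the signature may be supplied as additional keyword overrides:

import pulumi_vsphere as vsphere

# Adopt an existing cluster into state by its managed object ID (placeholder value).
existing = vsphere.ComputeCluster.get(
    "imported-cluster",
    id="domain-c123",
)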
 
@@ -3283,7 +3285,7 @@ class ComputeCluster(pulumi.CustomResource):
3283
3285
 
3284
3286
  @property
3285
3287
  @pulumi.getter(name="customAttributes")
3286
- def custom_attributes(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
3288
+ def custom_attributes(self) -> pulumi.Output[Optional[Mapping[str, builtins.str]]]:
3287
3289
  """
3288
3290
  A map of custom attribute ids to attribute
3289
3291
  value strings to set for the datastore cluster.
@@ -3295,7 +3297,7 @@ class ComputeCluster(pulumi.CustomResource):
3295
3297
 
3296
3298
  @property
3297
3299
  @pulumi.getter(name="datacenterId")
3298
- def datacenter_id(self) -> pulumi.Output[str]:
3300
+ def datacenter_id(self) -> pulumi.Output[builtins.str]:
3299
3301
  """
3300
3302
  The managed object ID of
3301
3303
  the datacenter to create the cluster in. Forces a new resource if changed.
@@ -3304,7 +3306,7 @@ class ComputeCluster(pulumi.CustomResource):
3304
3306
 
3305
3307
  @property
3306
3308
  @pulumi.getter(name="dpmAutomationLevel")
3307
- def dpm_automation_level(self) -> pulumi.Output[Optional[str]]:
3309
+ def dpm_automation_level(self) -> pulumi.Output[Optional[builtins.str]]:
3308
3310
  """
3309
3311
  The automation level for host power operations in this cluster. Can be one of manual or automated.
3310
3312
  """
@@ -3312,7 +3314,7 @@ class ComputeCluster(pulumi.CustomResource):
3312
3314
 
3313
3315
  @property
3314
3316
  @pulumi.getter(name="dpmEnabled")
3315
- def dpm_enabled(self) -> pulumi.Output[Optional[bool]]:
3317
+ def dpm_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
3316
3318
  """
3317
3319
  Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
3318
3320
  machines in the cluster. Requires that DRS be enabled.
@@ -3321,7 +3323,7 @@ class ComputeCluster(pulumi.CustomResource):
3321
3323
 
3322
3324
  @property
3323
3325
  @pulumi.getter(name="dpmThreshold")
3324
- def dpm_threshold(self) -> pulumi.Output[Optional[int]]:
3326
+ def dpm_threshold(self) -> pulumi.Output[Optional[builtins.int]]:
3325
3327
  """
3326
3328
  A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
3327
3329
  affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
@@ -3331,7 +3333,7 @@ class ComputeCluster(pulumi.CustomResource):
3331
3333
 
3332
3334
  @property
3333
3335
  @pulumi.getter(name="drsAdvancedOptions")
3334
- def drs_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
3336
+ def drs_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, builtins.str]]]:
3335
3337
  """
3336
3338
  Advanced configuration options for DRS and DPM.
3337
3339
  """
@@ -3339,7 +3341,7 @@ class ComputeCluster(pulumi.CustomResource):
3339
3341
 
3340
3342
  @property
3341
3343
  @pulumi.getter(name="drsAutomationLevel")
3342
- def drs_automation_level(self) -> pulumi.Output[Optional[str]]:
3344
+ def drs_automation_level(self) -> pulumi.Output[Optional[builtins.str]]:
3343
3345
  """
3344
3346
  The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
3345
3347
  fullyAutomated.
@@ -3348,7 +3350,7 @@ class ComputeCluster(pulumi.CustomResource):
3348
3350
 
3349
3351
  @property
3350
3352
  @pulumi.getter(name="drsEnablePredictiveDrs")
3351
- def drs_enable_predictive_drs(self) -> pulumi.Output[Optional[bool]]:
3353
+ def drs_enable_predictive_drs(self) -> pulumi.Output[Optional[builtins.bool]]:
3352
3354
  """
3353
3355
  When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
3354
3356
  """
@@ -3356,7 +3358,7 @@ class ComputeCluster(pulumi.CustomResource):
3356
3358
 
3357
3359
  @property
3358
3360
  @pulumi.getter(name="drsEnableVmOverrides")
3359
- def drs_enable_vm_overrides(self) -> pulumi.Output[Optional[bool]]:
3361
+ def drs_enable_vm_overrides(self) -> pulumi.Output[Optional[builtins.bool]]:
3360
3362
  """
3361
3363
  When true, allows individual VM overrides within this cluster to be set.
3362
3364
  """
@@ -3364,7 +3366,7 @@ class ComputeCluster(pulumi.CustomResource):
3364
3366
 
3365
3367
  @property
3366
3368
  @pulumi.getter(name="drsEnabled")
3367
- def drs_enabled(self) -> pulumi.Output[Optional[bool]]:
3369
+ def drs_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
3368
3370
  """
3369
3371
  Enable DRS for this cluster.
3370
3372
  """
@@ -3372,7 +3374,7 @@ class ComputeCluster(pulumi.CustomResource):
3372
3374
 
3373
3375
  @property
3374
3376
  @pulumi.getter(name="drsMigrationThreshold")
3375
- def drs_migration_threshold(self) -> pulumi.Output[Optional[int]]:
3377
+ def drs_migration_threshold(self) -> pulumi.Output[Optional[builtins.int]]:
3376
3378
  """
3377
3379
  A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
3378
3380
  more imbalance while a higher setting will tolerate less.
@@ -3381,7 +3383,7 @@ class ComputeCluster(pulumi.CustomResource):
3381
3383
 
3382
3384
  @property
3383
3385
  @pulumi.getter(name="drsScaleDescendantsShares")
3384
- def drs_scale_descendants_shares(self) -> pulumi.Output[Optional[str]]:
3386
+ def drs_scale_descendants_shares(self) -> pulumi.Output[Optional[builtins.str]]:
3385
3387
  """
3386
3388
  Enable scalable shares for all descendants of this cluster.
3387
3389
  """
@@ -3389,7 +3391,7 @@ class ComputeCluster(pulumi.CustomResource):
3389
3391
 
3390
3392
  @property
3391
3393
  @pulumi.getter
3392
- def folder(self) -> pulumi.Output[Optional[str]]:
3394
+ def folder(self) -> pulumi.Output[Optional[builtins.str]]:
3393
3395
  """
3394
3396
  The relative path to a folder to put this cluster in.
3395
3397
  This is a path relative to the datacenter you are deploying the cluster to.
@@ -3402,7 +3404,7 @@ class ComputeCluster(pulumi.CustomResource):
3402
3404
 
3403
3405
  @property
3404
3406
  @pulumi.getter(name="forceEvacuateOnDestroy")
3405
- def force_evacuate_on_destroy(self) -> pulumi.Output[Optional[bool]]:
3407
+ def force_evacuate_on_destroy(self) -> pulumi.Output[Optional[builtins.bool]]:
3406
3408
  """
3407
3409
  Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
3408
3410
  for testing and is not recommended in normal use.
@@ -3411,7 +3413,7 @@ class ComputeCluster(pulumi.CustomResource):
3411
3413
 
3412
3414
  @property
3413
3415
  @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
3414
- def ha_admission_control_failover_host_system_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
3416
+ def ha_admission_control_failover_host_system_ids(self) -> pulumi.Output[Optional[Sequence[builtins.str]]]:
3415
3417
  """
3416
3418
  When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
3417
3419
  failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
@@ -3421,7 +3423,7 @@ class ComputeCluster(pulumi.CustomResource):
3421
3423
 
3422
3424
  @property
3423
3425
  @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
3424
- def ha_admission_control_host_failure_tolerance(self) -> pulumi.Output[Optional[int]]:
3426
+ def ha_admission_control_host_failure_tolerance(self) -> pulumi.Output[Optional[builtins.int]]:
3425
3427
  """
3426
3428
  The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
3427
3429
  machine operations. The maximum is one less than the number of hosts in the cluster.
@@ -3430,7 +3432,7 @@ class ComputeCluster(pulumi.CustomResource):
3430
3432
 
3431
3433
  @property
3432
3434
  @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
3433
- def ha_admission_control_performance_tolerance(self) -> pulumi.Output[Optional[int]]:
3435
+ def ha_admission_control_performance_tolerance(self) -> pulumi.Output[Optional[builtins.int]]:
3434
3436
  """
3435
3437
  The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
3436
3438
  warnings only, whereas a value of 100 disables the setting.
@@ -3439,7 +3441,7 @@ class ComputeCluster(pulumi.CustomResource):
3439
3441
 
3440
3442
  @property
3441
3443
  @pulumi.getter(name="haAdmissionControlPolicy")
3442
- def ha_admission_control_policy(self) -> pulumi.Output[Optional[str]]:
3444
+ def ha_admission_control_policy(self) -> pulumi.Output[Optional[builtins.str]]:
3443
3445
  """
3444
3446
  The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
3445
3447
  permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
@@ -3450,7 +3452,7 @@ class ComputeCluster(pulumi.CustomResource):
3450
3452
 
3451
3453
  @property
3452
3454
  @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
3453
- def ha_admission_control_resource_percentage_auto_compute(self) -> pulumi.Output[Optional[bool]]:
3455
+ def ha_admission_control_resource_percentage_auto_compute(self) -> pulumi.Output[Optional[builtins.bool]]:
3454
3456
  """
3455
3457
  When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
3456
3458
  subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
@@ -3460,7 +3462,7 @@ class ComputeCluster(pulumi.CustomResource):
3460
3462
 
3461
3463
  @property
3462
3464
  @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
3463
- def ha_admission_control_resource_percentage_cpu(self) -> pulumi.Output[Optional[int]]:
3465
+ def ha_admission_control_resource_percentage_cpu(self) -> pulumi.Output[Optional[builtins.int]]:
3464
3466
  """
3465
3467
  When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
3466
3468
  the cluster to reserve for failover.
@@ -3469,7 +3471,7 @@ class ComputeCluster(pulumi.CustomResource):
3469
3471
 
3470
3472
  @property
3471
3473
  @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
3472
- def ha_admission_control_resource_percentage_memory(self) -> pulumi.Output[Optional[int]]:
3474
+ def ha_admission_control_resource_percentage_memory(self) -> pulumi.Output[Optional[builtins.int]]:
3473
3475
  """
3474
3476
  When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
3475
3477
  the cluster to reserve for failover.
@@ -3478,7 +3480,7 @@ class ComputeCluster(pulumi.CustomResource):
3478
3480
 
3479
3481
  @property
3480
3482
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
3481
- def ha_admission_control_slot_policy_explicit_cpu(self) -> pulumi.Output[Optional[int]]:
3483
+ def ha_admission_control_slot_policy_explicit_cpu(self) -> pulumi.Output[Optional[builtins.int]]:
3482
3484
  """
3483
3485
  When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
3484
3486
  """
@@ -3486,7 +3488,7 @@ class ComputeCluster(pulumi.CustomResource):
3486
3488
 
3487
3489
  @property
3488
3490
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
3489
- def ha_admission_control_slot_policy_explicit_memory(self) -> pulumi.Output[Optional[int]]:
3491
+ def ha_admission_control_slot_policy_explicit_memory(self) -> pulumi.Output[Optional[builtins.int]]:
3490
3492
  """
3491
3493
  When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
3492
3494
  """
@@ -3494,7 +3496,7 @@ class ComputeCluster(pulumi.CustomResource):
3494
3496
 
3495
3497
  @property
3496
3498
  @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
3497
- def ha_admission_control_slot_policy_use_explicit_size(self) -> pulumi.Output[Optional[bool]]:
3499
+ def ha_admission_control_slot_policy_use_explicit_size(self) -> pulumi.Output[Optional[builtins.bool]]:
3498
3500
  """
3499
3501
  When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
3500
3502
  to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
@@ -3504,7 +3506,7 @@ class ComputeCluster(pulumi.CustomResource):
3504
3506
 
3505
3507
  @property
3506
3508
  @pulumi.getter(name="haAdvancedOptions")
3507
- def ha_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
3509
+ def ha_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, builtins.str]]]:
3508
3510
  """
3509
3511
  Advanced configuration options for vSphere HA.
3510
3512
  """
@@ -3512,7 +3514,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haDatastoreApdRecoveryAction")
-    def ha_datastore_apd_recovery_action(self) -> pulumi.Output[Optional[str]]:
+    def ha_datastore_apd_recovery_action(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
         affected datastore clears in the middle of an APD event. Can be one of none or reset.
@@ -3521,7 +3523,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haDatastoreApdResponse")
-    def ha_datastore_apd_response(self) -> pulumi.Output[Optional[str]]:
+    def ha_datastore_apd_response(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
         detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
@@ -3531,7 +3533,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haDatastoreApdResponseDelay")
-    def ha_datastore_apd_response_delay(self) -> pulumi.Output[Optional[int]]:
+    def ha_datastore_apd_response_delay(self) -> pulumi.Output[Optional[builtins.int]]:
         """
         When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
         the response action defined in ha_datastore_apd_response.
@@ -3540,7 +3542,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haDatastorePdlResponse")
-    def ha_datastore_pdl_response(self) -> pulumi.Output[Optional[str]]:
+    def ha_datastore_pdl_response(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
         detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
@@ -3549,7 +3551,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haEnabled")
-    def ha_enabled(self) -> pulumi.Output[Optional[bool]]:
+    def ha_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
         """
         Enable vSphere HA for this cluster.
         """
@@ -3557,7 +3559,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haHeartbeatDatastoreIds")
-    def ha_heartbeat_datastore_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
+    def ha_heartbeat_datastore_ids(self) -> pulumi.Output[Optional[Sequence[builtins.str]]]:
         """
         The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
         ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
@@ -3566,7 +3568,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haHeartbeatDatastorePolicy")
-    def ha_heartbeat_datastore_policy(self) -> pulumi.Output[Optional[str]]:
+    def ha_heartbeat_datastore_policy(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
         allFeasibleDsWithUserPreference.
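A hedged sketch, not part of this diff, of the heartbeat-datastore settings above: the preferred datastore IDs only take effect with the userSelectedDs or allFeasibleDsWithUserPreference policy. All managed object IDs are placeholders.

import pulumi_vsphere as vsphere

# Sketch only: all managed object IDs are placeholders.
cluster = vsphere.ComputeCluster(
    "cluster-heartbeat",
    name="cluster-01",
    datacenter_id="datacenter-123",
    ha_enabled=True,
    ha_heartbeat_datastore_policy="userSelectedDs",
    ha_heartbeat_datastore_ids=["datastore-111", "datastore-112"],
)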
@@ -3575,7 +3577,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haHostIsolationResponse")
-    def ha_host_isolation_response(self) -> pulumi.Output[Optional[str]]:
+    def ha_host_isolation_response(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
         Can be one of none, powerOff, or shutdown.
@@ -3584,7 +3586,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haHostMonitoring")
-    def ha_host_monitoring(self) -> pulumi.Output[Optional[str]]:
+    def ha_host_monitoring(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
         """
@@ -3592,7 +3594,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haVmComponentProtection")
-    def ha_vm_component_protection(self) -> pulumi.Output[Optional[str]]:
+    def ha_vm_component_protection(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
         failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
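A hedged sketch, not part of this diff, that combines VM Component Protection with the APD/PDL responses documented in the hunks above. The values chosen are just one valid combination and the datacenter MOID is a placeholder.

import pulumi_vsphere as vsphere

# Sketch only: one valid combination of the documented values.
cluster = vsphere.ComputeCluster(
    "cluster-vmcp",
    name="cluster-01",
    datacenter_id="datacenter-123",            # placeholder MOID
    ha_enabled=True,
    ha_host_monitoring="enabled",
    ha_host_isolation_response="powerOff",     # none | powerOff | shutdown
    ha_vm_component_protection="enabled",      # gates the datastore responses below
    ha_datastore_apd_response="restartConservative",
    ha_datastore_apd_response_delay=180,       # seconds after the APD timeout
    ha_datastore_apd_recovery_action="reset",  # none | reset
    ha_datastore_pdl_response="restartAggressive",
)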
@@ -3601,7 +3603,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haVmDependencyRestartCondition")
-    def ha_vm_dependency_restart_condition(self) -> pulumi.Output[Optional[str]]:
+    def ha_vm_dependency_restart_condition(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
         on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
@@ -3610,7 +3612,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haVmFailureInterval")
-    def ha_vm_failure_interval(self) -> pulumi.Output[Optional[int]]:
+    def ha_vm_failure_interval(self) -> pulumi.Output[Optional[builtins.int]]:
         """
         If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
         failed. The value is in seconds.
@@ -3619,7 +3621,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haVmMaximumFailureWindow")
-    def ha_vm_maximum_failure_window(self) -> pulumi.Output[Optional[int]]:
+    def ha_vm_maximum_failure_window(self) -> pulumi.Output[Optional[builtins.int]]:
         """
         The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
         attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
@@ -3629,7 +3631,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haVmMaximumResets")
-    def ha_vm_maximum_resets(self) -> pulumi.Output[Optional[int]]:
+    def ha_vm_maximum_resets(self) -> pulumi.Output[Optional[builtins.int]]:
         """
         The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
         """
@@ -3637,7 +3639,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haVmMinimumUptime")
-    def ha_vm_minimum_uptime(self) -> pulumi.Output[Optional[int]]:
+    def ha_vm_minimum_uptime(self) -> pulumi.Output[Optional[builtins.int]]:
         """
         The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
         """
@@ -3645,7 +3647,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haVmMonitoring")
-    def ha_vm_monitoring(self) -> pulumi.Output[Optional[str]]:
+    def ha_vm_monitoring(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
         vmMonitoringOnly, or vmAndAppMonitoring.
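A hedged sketch, not part of this diff, of the VM monitoring thresholds above; the numbers are illustrative rather than recommendations, and the datacenter MOID is a placeholder.

import pulumi_vsphere as vsphere

# Sketch only: threshold values are illustrative.
cluster = vsphere.ComputeCluster(
    "cluster-vm-monitoring",
    name="cluster-01",
    datacenter_id="datacenter-123",       # placeholder MOID
    ha_enabled=True,
    ha_vm_monitoring="vmMonitoringOnly",  # vmMonitoringDisabled | vmMonitoringOnly | vmAndAppMonitoring
    ha_vm_failure_interval=30,            # seconds without a guest heartbeat
    ha_vm_minimum_uptime=120,             # seconds of uptime before monitoring starts
    ha_vm_maximum_resets=3,
    ha_vm_maximum_failure_window=3600,    # seconds; -1 means no window
)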
@@ -3654,7 +3656,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haVmRestartAdditionalDelay")
-    def ha_vm_restart_additional_delay(self) -> pulumi.Output[Optional[int]]:
+    def ha_vm_restart_additional_delay(self) -> pulumi.Output[Optional[builtins.int]]:
         """
         Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
         """
@@ -3662,7 +3664,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haVmRestartPriority")
-    def ha_vm_restart_priority(self) -> pulumi.Output[Optional[str]]:
+    def ha_vm_restart_priority(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
         high, or highest.
@@ -3671,7 +3673,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="haVmRestartTimeout")
-    def ha_vm_restart_timeout(self) -> pulumi.Output[Optional[int]]:
+    def ha_vm_restart_timeout(self) -> pulumi.Output[Optional[builtins.int]]:
         """
         The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
         proceeding with the next priority.
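A hedged sketch, not part of this diff, of the restart-ordering settings above, together with the dependency restart condition from an earlier hunk; all values are illustrative and the datacenter MOID is a placeholder.

import pulumi_vsphere as vsphere

# Sketch only: restart ordering for HA-recovered VMs.
cluster = vsphere.ComputeCluster(
    "cluster-restart-order",
    name="cluster-01",
    datacenter_id="datacenter-123",     # placeholder MOID
    ha_enabled=True,
    ha_vm_restart_priority="high",      # lowest | low | medium | high | highest
    ha_vm_dependency_restart_condition="guestHbStatusGreen",
    ha_vm_restart_timeout=600,          # seconds HA waits per priority class
    ha_vm_restart_additional_delay=30,  # extra seconds after the ready condition
)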
@@ -3680,7 +3682,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="hostClusterExitTimeout")
-    def host_cluster_exit_timeout(self) -> pulumi.Output[Optional[int]]:
+    def host_cluster_exit_timeout(self) -> pulumi.Output[Optional[builtins.int]]:
         """
         The timeout for each host maintenance mode operation when removing hosts from a cluster.
         """
@@ -3696,7 +3698,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="hostManaged")
-    def host_managed(self) -> pulumi.Output[Optional[bool]]:
+    def host_managed(self) -> pulumi.Output[Optional[builtins.bool]]:
         """
         Must be set if cluster enrollment is managed from host resource.
         """
@@ -3704,7 +3706,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="hostSystemIds")
-    def host_system_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
+    def host_system_ids(self) -> pulumi.Output[Optional[Sequence[builtins.str]]]:
         """
         The managed object IDs of the hosts to put in the cluster.
         """
@@ -3712,7 +3714,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter
-    def name(self) -> pulumi.Output[str]:
+    def name(self) -> pulumi.Output[builtins.str]:
         """
         The name of the cluster.
         """
@@ -3720,7 +3722,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="proactiveHaAutomationLevel")
-    def proactive_ha_automation_level(self) -> pulumi.Output[Optional[str]]:
+    def proactive_ha_automation_level(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
         """
@@ -3728,7 +3730,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="proactiveHaEnabled")
-    def proactive_ha_enabled(self) -> pulumi.Output[Optional[bool]]:
+    def proactive_ha_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
         """
         Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
         """
@@ -3736,7 +3738,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="proactiveHaModerateRemediation")
-    def proactive_ha_moderate_remediation(self) -> pulumi.Output[Optional[str]]:
+    def proactive_ha_moderate_remediation(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
         this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
@@ -3745,7 +3747,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="proactiveHaProviderIds")
-    def proactive_ha_provider_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
+    def proactive_ha_provider_ids(self) -> pulumi.Output[Optional[Sequence[builtins.str]]]:
         """
         The list of IDs for health update providers configured for this cluster.
         """
@@ -3753,7 +3755,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="proactiveHaSevereRemediation")
-    def proactive_ha_severe_remediation(self) -> pulumi.Output[Optional[str]]:
+    def proactive_ha_severe_remediation(self) -> pulumi.Output[Optional[builtins.str]]:
         """
         The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
         cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
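A hedged sketch, not part of this diff, of the Proactive HA settings above. The provider ID is a placeholder for a registered health-update provider, and the remediation pair respects the documented constraint that the moderate remediation must not be stricter than the severe one.

import pulumi_vsphere as vsphere

# Sketch only: the provider ID and datacenter MOID are placeholders.
cluster = vsphere.ComputeCluster(
    "cluster-proactive-ha",
    name="cluster-01",
    datacenter_id="datacenter-123",             # placeholder MOID
    drs_enabled=True,                           # remediation is performed through DRS
    proactive_ha_enabled=True,
    proactive_ha_automation_level="Automated",  # Automated | Manual
    proactive_ha_moderate_remediation="QuarantineMode",
    proactive_ha_severe_remediation="MaintenanceMode",
    proactive_ha_provider_ids=["com.example.health-provider"],
)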
@@ -3762,7 +3764,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="resourcePoolId")
-    def resource_pool_id(self) -> pulumi.Output[str]:
+    def resource_pool_id(self) -> pulumi.Output[builtins.str]:
         """
         The managed object ID of the primary
         resource pool for this cluster. This can be passed directly to the
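resource_pool_id is read-only on the resource; below is a minimal sketch, not part of this diff, of exporting it so that other stacks or virtual machine definitions can consume the root resource pool MOID. The datacenter MOID is a placeholder.

import pulumi
import pulumi_vsphere as vsphere

# Sketch only: resource_pool_id is an output, not an input.
cluster = vsphere.ComputeCluster(
    "cluster",
    name="cluster-01",
    datacenter_id="datacenter-123",  # placeholder MOID
)

pulumi.export("cluster_resource_pool_id", cluster.resource_pool_id)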
@@ -3774,7 +3776,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter
-    def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
+    def tags(self) -> pulumi.Output[Optional[Sequence[builtins.str]]]:
         """
         The IDs of any tags to attach to this resource.
         """
@@ -3782,7 +3784,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="vsanCompressionEnabled")
-    def vsan_compression_enabled(self) -> pulumi.Output[Optional[bool]]:
+    def vsan_compression_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
         """
         Whether the vSAN compression service is enabled for the cluster.
         """
@@ -3790,7 +3792,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="vsanDedupEnabled")
-    def vsan_dedup_enabled(self) -> pulumi.Output[Optional[bool]]:
+    def vsan_dedup_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
         """
         Whether the vSAN deduplication service is enabled for the cluster.
         """
@@ -3806,7 +3808,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="vsanDitEncryptionEnabled")
-    def vsan_dit_encryption_enabled(self) -> pulumi.Output[Optional[bool]]:
+    def vsan_dit_encryption_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
         """
         Whether the vSAN data-in-transit encryption is enabled for the cluster.
         """
@@ -3814,7 +3816,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="vsanDitRekeyInterval")
-    def vsan_dit_rekey_interval(self) -> pulumi.Output[int]:
+    def vsan_dit_rekey_interval(self) -> pulumi.Output[builtins.int]:
         """
         When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
         """
@@ -3822,7 +3824,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="vsanEnabled")
-    def vsan_enabled(self) -> pulumi.Output[Optional[bool]]:
+    def vsan_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
         """
         Whether the vSAN service is enabled for the cluster.
         """
@@ -3830,7 +3832,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="vsanEsaEnabled")
-    def vsan_esa_enabled(self) -> pulumi.Output[Optional[bool]]:
+    def vsan_esa_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
         """
         Whether the vSAN ESA service is enabled for the cluster.
         """
@@ -3846,7 +3848,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
-    def vsan_network_diagnostic_mode_enabled(self) -> pulumi.Output[Optional[bool]]:
+    def vsan_network_diagnostic_mode_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
         """
         Whether the vSAN network diagnostic mode is enabled for the cluster.
         """
@@ -3854,7 +3856,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="vsanPerformanceEnabled")
-    def vsan_performance_enabled(self) -> pulumi.Output[Optional[bool]]:
+    def vsan_performance_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
         """
         Whether the vSAN performance service is enabled for the cluster.
         """
@@ -3862,7 +3864,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="vsanRemoteDatastoreIds")
-    def vsan_remote_datastore_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
+    def vsan_remote_datastore_ids(self) -> pulumi.Output[Optional[Sequence[builtins.str]]]:
         """
         The managed object IDs of the vSAN datastore to be mounted on the cluster.
         """
@@ -3878,7 +3880,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="vsanUnmapEnabled")
-    def vsan_unmap_enabled(self) -> pulumi.Output[Optional[bool]]:
+    def vsan_unmap_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
         """
         Whether the vSAN unmap service is enabled for the cluster.
         """
@@ -3886,7 +3888,7 @@ class ComputeCluster(pulumi.CustomResource):
 
     @property
     @pulumi.getter(name="vsanVerboseModeEnabled")
-    def vsan_verbose_mode_enabled(self) -> pulumi.Output[Optional[bool]]:
+    def vsan_verbose_mode_enabled(self) -> pulumi.Output[Optional[builtins.bool]]:
         """
         Whether the vSAN verbose mode is enabled for the cluster.
         """