pulumi-vsphere 4.14.0a1739946909__py3-none-any.whl → 4.17.0a1763710194__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of pulumi-vsphere might be problematic.
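Before acting on that warning, it can help to confirm which of the two versions named in the title is actually installed in a given environment. A minimal standard-library check is enough; the distribution name comes from the wheel filenames above, the rest is generic Python:

# Report the installed pulumi-vsphere version, if any (Python 3.8+).
from importlib.metadata import PackageNotFoundError, version

try:
    print("pulumi-vsphere", version("pulumi-vsphere"))
except PackageNotFoundError:
    print("pulumi-vsphere is not installed in this environment")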
Files changed (89)
  1. pulumi_vsphere/__init__.py +12 -1
  2. pulumi_vsphere/_inputs.py +1172 -1172
  3. pulumi_vsphere/_utilities.py +1 -1
  4. pulumi_vsphere/compute_cluster.py +1226 -1484
  5. pulumi_vsphere/compute_cluster_host_group.py +124 -53
  6. pulumi_vsphere/compute_cluster_vm_affinity_rule.py +88 -87
  7. pulumi_vsphere/compute_cluster_vm_anti_affinity_rule.py +92 -87
  8. pulumi_vsphere/compute_cluster_vm_dependency_rule.py +109 -104
  9. pulumi_vsphere/compute_cluster_vm_group.py +58 -53
  10. pulumi_vsphere/compute_cluster_vm_host_rule.py +126 -121
  11. pulumi_vsphere/config/__init__.py +2 -1
  12. pulumi_vsphere/config/__init__.pyi +2 -2
  13. pulumi_vsphere/config/vars.py +15 -15
  14. pulumi_vsphere/configuration_profile.py +286 -0
  15. pulumi_vsphere/content_library.py +64 -59
  16. pulumi_vsphere/content_library_item.py +109 -104
  17. pulumi_vsphere/custom_attribute.py +41 -36
  18. pulumi_vsphere/datacenter.py +80 -79
  19. pulumi_vsphere/datastore_cluster.py +531 -477
  20. pulumi_vsphere/datastore_cluster_vm_anti_affinity_rule.py +92 -87
  21. pulumi_vsphere/distributed_port_group.py +802 -818
  22. pulumi_vsphere/distributed_virtual_switch.py +1590 -1620
  23. pulumi_vsphere/distributed_virtual_switch_pvlan_mapping.py +71 -70
  24. pulumi_vsphere/dpm_host_override.py +159 -70
  25. pulumi_vsphere/drs_vm_override.py +75 -70
  26. pulumi_vsphere/entity_permissions.py +40 -39
  27. pulumi_vsphere/file.py +148 -162
  28. pulumi_vsphere/folder.py +104 -99
  29. pulumi_vsphere/get_compute_cluster.py +18 -18
  30. pulumi_vsphere/get_compute_cluster_host_group.py +18 -22
  31. pulumi_vsphere/get_configuration_profile.py +145 -0
  32. pulumi_vsphere/get_content_library.py +10 -10
  33. pulumi_vsphere/get_content_library_item.py +22 -22
  34. pulumi_vsphere/get_custom_attribute.py +12 -12
  35. pulumi_vsphere/get_datacenter.py +12 -12
  36. pulumi_vsphere/get_datastore.py +22 -22
  37. pulumi_vsphere/get_datastore_cluster.py +19 -19
  38. pulumi_vsphere/get_datastore_stats.py +22 -26
  39. pulumi_vsphere/get_distributed_virtual_switch.py +18 -18
  40. pulumi_vsphere/get_dynamic.py +22 -26
  41. pulumi_vsphere/get_folder.py +140 -18
  42. pulumi_vsphere/get_guest_os_customization.py +74 -21
  43. pulumi_vsphere/get_host.py +18 -18
  44. pulumi_vsphere/get_host_base_images.py +6 -6
  45. pulumi_vsphere/get_host_pci_device.py +30 -30
  46. pulumi_vsphere/get_host_thumbprint.py +22 -22
  47. pulumi_vsphere/get_host_vgpu_profile.py +17 -17
  48. pulumi_vsphere/get_license.py +26 -24
  49. pulumi_vsphere/get_network.py +80 -29
  50. pulumi_vsphere/get_ovf_vm_template.py +126 -126
  51. pulumi_vsphere/get_policy.py +10 -10
  52. pulumi_vsphere/get_resource_pool.py +77 -26
  53. pulumi_vsphere/get_role.py +26 -26
  54. pulumi_vsphere/get_tag.py +18 -18
  55. pulumi_vsphere/get_tag_category.py +16 -16
  56. pulumi_vsphere/get_vapp_container.py +16 -16
  57. pulumi_vsphere/get_virtual_machine.py +273 -273
  58. pulumi_vsphere/get_vmfs_disks.py +24 -24
  59. pulumi_vsphere/guest_os_customization.py +75 -74
  60. pulumi_vsphere/ha_vm_override.py +295 -374
  61. pulumi_vsphere/host.py +304 -249
  62. pulumi_vsphere/host_port_group.py +321 -341
  63. pulumi_vsphere/host_virtual_switch.py +373 -389
  64. pulumi_vsphere/license.py +125 -84
  65. pulumi_vsphere/nas_datastore.py +324 -261
  66. pulumi_vsphere/offline_software_depot.py +26 -21
  67. pulumi_vsphere/outputs.py +1003 -970
  68. pulumi_vsphere/provider.py +165 -146
  69. pulumi_vsphere/pulumi-plugin.json +1 -1
  70. pulumi_vsphere/resource_pool.py +691 -364
  71. pulumi_vsphere/role.py +46 -45
  72. pulumi_vsphere/storage_drs_vm_override.py +92 -87
  73. pulumi_vsphere/supervisor.py +210 -209
  74. pulumi_vsphere/tag.py +58 -53
  75. pulumi_vsphere/tag_category.py +75 -70
  76. pulumi_vsphere/vapp_container.py +262 -257
  77. pulumi_vsphere/vapp_entity.py +177 -172
  78. pulumi_vsphere/virtual_disk.py +130 -125
  79. pulumi_vsphere/virtual_machine.py +1339 -1443
  80. pulumi_vsphere/virtual_machine_class.py +109 -108
  81. pulumi_vsphere/virtual_machine_snapshot.py +122 -121
  82. pulumi_vsphere/vm_storage_policy.py +40 -39
  83. pulumi_vsphere/vmfs_datastore.py +189 -184
  84. pulumi_vsphere/vnic.py +145 -144
  85. {pulumi_vsphere-4.14.0a1739946909.dist-info → pulumi_vsphere-4.17.0a1763710194.dist-info}/METADATA +4 -4
  86. pulumi_vsphere-4.17.0a1763710194.dist-info/RECORD +89 -0
  87. {pulumi_vsphere-4.14.0a1739946909.dist-info → pulumi_vsphere-4.17.0a1763710194.dist-info}/WHEEL +1 -1
  88. pulumi_vsphere-4.14.0a1739946909.dist-info/RECORD +0 -87
  89. {pulumi_vsphere-4.14.0a1739946909.dist-info → pulumi_vsphere-4.17.0a1763710194.dist-info}/top_level.txt +0 -0
@@ -1,8 +1,8 @@
  # coding=utf-8
- # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
+ # *** WARNING: this file was generated by pulumi-language-python. ***
  # *** Do not edit by hand unless you're certain you know what you are doing! ***

- import copy
+ import builtins as _builtins
  import warnings
  import sys
  import pulumi
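The first hunk reflects a change of code generator (the file header now credits pulumi-language-python instead of the Pulumi Terraform Bridge tfgen tool) and swaps `import copy` for `import builtins as _builtins`. The alias only changes how the generated annotations are spelled; the `_builtins` names are the ordinary built-in types, as this small sketch (not part of the package) illustrates:

import builtins as _builtins

# The aliased names are the same objects as the plain built-ins, so an
# annotation such as pulumi.Input[_builtins.str] is equivalent at runtime
# to the previous pulumi.Input[str] spelling.
assert _builtins.str is str
assert _builtins.int is int
assert _builtins.bool is bool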
@@ -21,193 +21,156 @@ __all__ = ['ComputeClusterArgs', 'ComputeCluster']
  @pulumi.input_type
  class ComputeClusterArgs:
  def __init__(__self__, *,
- datacenter_id: pulumi.Input[str],
- custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
- dpm_automation_level: Optional[pulumi.Input[str]] = None,
- dpm_enabled: Optional[pulumi.Input[bool]] = None,
- dpm_threshold: Optional[pulumi.Input[int]] = None,
- drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
- drs_automation_level: Optional[pulumi.Input[str]] = None,
- drs_enable_predictive_drs: Optional[pulumi.Input[bool]] = None,
- drs_enable_vm_overrides: Optional[pulumi.Input[bool]] = None,
- drs_enabled: Optional[pulumi.Input[bool]] = None,
- drs_migration_threshold: Optional[pulumi.Input[int]] = None,
- drs_scale_descendants_shares: Optional[pulumi.Input[str]] = None,
- folder: Optional[pulumi.Input[str]] = None,
- force_evacuate_on_destroy: Optional[pulumi.Input[bool]] = None,
- ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
- ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[int]] = None,
- ha_admission_control_performance_tolerance: Optional[pulumi.Input[int]] = None,
- ha_admission_control_policy: Optional[pulumi.Input[str]] = None,
- ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[bool]] = None,
- ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[int]] = None,
- ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[int]] = None,
- ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[int]] = None,
- ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[int]] = None,
- ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[bool]] = None,
- ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
- ha_datastore_apd_recovery_action: Optional[pulumi.Input[str]] = None,
- ha_datastore_apd_response: Optional[pulumi.Input[str]] = None,
- ha_datastore_apd_response_delay: Optional[pulumi.Input[int]] = None,
- ha_datastore_pdl_response: Optional[pulumi.Input[str]] = None,
- ha_enabled: Optional[pulumi.Input[bool]] = None,
- ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
- ha_heartbeat_datastore_policy: Optional[pulumi.Input[str]] = None,
- ha_host_isolation_response: Optional[pulumi.Input[str]] = None,
- ha_host_monitoring: Optional[pulumi.Input[str]] = None,
- ha_vm_component_protection: Optional[pulumi.Input[str]] = None,
- ha_vm_dependency_restart_condition: Optional[pulumi.Input[str]] = None,
- ha_vm_failure_interval: Optional[pulumi.Input[int]] = None,
- ha_vm_maximum_failure_window: Optional[pulumi.Input[int]] = None,
- ha_vm_maximum_resets: Optional[pulumi.Input[int]] = None,
- ha_vm_minimum_uptime: Optional[pulumi.Input[int]] = None,
- ha_vm_monitoring: Optional[pulumi.Input[str]] = None,
- ha_vm_restart_additional_delay: Optional[pulumi.Input[int]] = None,
- ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
- ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
- host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
+ datacenter_id: pulumi.Input[_builtins.str],
+ custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+ dpm_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+ dpm_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+ dpm_threshold: Optional[pulumi.Input[_builtins.int]] = None,
+ drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+ drs_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+ drs_enable_predictive_drs: Optional[pulumi.Input[_builtins.bool]] = None,
+ drs_enable_vm_overrides: Optional[pulumi.Input[_builtins.bool]] = None,
+ drs_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+ drs_migration_threshold: Optional[pulumi.Input[_builtins.int]] = None,
+ drs_scale_descendants_shares: Optional[pulumi.Input[_builtins.str]] = None,
+ folder: Optional[pulumi.Input[_builtins.str]] = None,
+ force_evacuate_on_destroy: Optional[pulumi.Input[_builtins.bool]] = None,
+ ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+ ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
+ ha_admission_control_performance_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
+ ha_admission_control_policy: Optional[pulumi.Input[_builtins.str]] = None,
+ ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[_builtins.bool]] = None,
+ ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[_builtins.int]] = None,
+ ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[_builtins.int]] = None,
+ ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[_builtins.int]] = None,
+ ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[_builtins.int]] = None,
+ ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[_builtins.bool]] = None,
+ ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+ ha_datastore_apd_recovery_action: Optional[pulumi.Input[_builtins.str]] = None,
+ ha_datastore_apd_response: Optional[pulumi.Input[_builtins.str]] = None,
+ ha_datastore_apd_response_delay: Optional[pulumi.Input[_builtins.int]] = None,
+ ha_datastore_pdl_response: Optional[pulumi.Input[_builtins.str]] = None,
+ ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+ ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+ ha_heartbeat_datastore_policy: Optional[pulumi.Input[_builtins.str]] = None,
+ ha_host_isolation_response: Optional[pulumi.Input[_builtins.str]] = None,
+ ha_host_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
+ ha_vm_component_protection: Optional[pulumi.Input[_builtins.str]] = None,
+ ha_vm_dependency_restart_condition: Optional[pulumi.Input[_builtins.str]] = None,
+ ha_vm_failure_interval: Optional[pulumi.Input[_builtins.int]] = None,
+ ha_vm_maximum_failure_window: Optional[pulumi.Input[_builtins.int]] = None,
+ ha_vm_maximum_resets: Optional[pulumi.Input[_builtins.int]] = None,
+ ha_vm_minimum_uptime: Optional[pulumi.Input[_builtins.int]] = None,
+ ha_vm_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
+ ha_vm_restart_additional_delay: Optional[pulumi.Input[_builtins.int]] = None,
+ ha_vm_restart_priority: Optional[pulumi.Input[_builtins.str]] = None,
+ ha_vm_restart_timeout: Optional[pulumi.Input[_builtins.int]] = None,
+ host_cluster_exit_timeout: Optional[pulumi.Input[_builtins.int]] = None,
  host_image: Optional[pulumi.Input['ComputeClusterHostImageArgs']] = None,
- host_managed: Optional[pulumi.Input[bool]] = None,
- host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
- name: Optional[pulumi.Input[str]] = None,
- proactive_ha_automation_level: Optional[pulumi.Input[str]] = None,
- proactive_ha_enabled: Optional[pulumi.Input[bool]] = None,
- proactive_ha_moderate_remediation: Optional[pulumi.Input[str]] = None,
- proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
- proactive_ha_severe_remediation: Optional[pulumi.Input[str]] = None,
- tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
- vsan_compression_enabled: Optional[pulumi.Input[bool]] = None,
- vsan_dedup_enabled: Optional[pulumi.Input[bool]] = None,
+ host_managed: Optional[pulumi.Input[_builtins.bool]] = None,
+ host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+ name: Optional[pulumi.Input[_builtins.str]] = None,
+ proactive_ha_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+ proactive_ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+ proactive_ha_moderate_remediation: Optional[pulumi.Input[_builtins.str]] = None,
+ proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+ proactive_ha_severe_remediation: Optional[pulumi.Input[_builtins.str]] = None,
+ tags: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+ vsan_compression_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+ vsan_dedup_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]] = None,
- vsan_dit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
- vsan_dit_rekey_interval: Optional[pulumi.Input[int]] = None,
- vsan_enabled: Optional[pulumi.Input[bool]] = None,
- vsan_esa_enabled: Optional[pulumi.Input[bool]] = None,
+ vsan_dit_encryption_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+ vsan_dit_rekey_interval: Optional[pulumi.Input[_builtins.int]] = None,
+ vsan_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+ vsan_esa_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]] = None,
- vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[bool]] = None,
- vsan_performance_enabled: Optional[pulumi.Input[bool]] = None,
- vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
+ vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+ vsan_performance_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+ vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
  vsan_stretched_cluster: Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']] = None,
- vsan_unmap_enabled: Optional[pulumi.Input[bool]] = None,
- vsan_verbose_mode_enabled: Optional[pulumi.Input[bool]] = None):
+ vsan_unmap_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+ vsan_verbose_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None):
  """
  The set of arguments for constructing a ComputeCluster resource.
- :param pulumi.Input[str] datacenter_id: The managed object ID of
+ :param pulumi.Input[_builtins.str] datacenter_id: The managed object ID of
  the datacenter to create the cluster in. Forces a new resource if changed.
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: A map of custom attribute ids to attribute
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] custom_attributes: A map of custom attribute ids to attribute
  value strings to set for the datastore cluster.

  > **NOTE:** Custom attributes are unsupported on direct ESXi connections
  and require vCenter Server.
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
- :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
- machines in the cluster. Requires that DRS be enabled.
- :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
- affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
- setting.
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
- :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
- fullyAutomated.
- :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
- :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
- :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
- more imbalance while a higher setting will tolerate less.
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
- :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
+ :param pulumi.Input[_builtins.str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
+ :param pulumi.Input[_builtins.bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual machines in the cluster. Requires that DRS be enabled.
+ :param pulumi.Input[_builtins.int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting.
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
+ :param pulumi.Input[_builtins.str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated.
+ :param pulumi.Input[_builtins.bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
+ :param pulumi.Input[_builtins.bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
+ :param pulumi.Input[_builtins.bool] drs_enabled: Enable DRS for this cluster.
+ :param pulumi.Input[_builtins.int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less.
+ :param pulumi.Input[_builtins.str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
+ :param pulumi.Input[_builtins.str] folder: The relative path to a folder to put this cluster in.
  This is a path relative to the datacenter you are deploying the cluster to.
  Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
  The provider will place a cluster named `compute-cluster-test` in a
  host folder located at `/dc1/host/foo/bar`, with the final inventory path
  being `/dc1/host/foo/bar/datastore-cluster-test`.
- :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
- for testing and is not recommended in normal use.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
- failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
- will ignore the host when making recommendations.
- :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
- machine operations. The maximum is one less than the number of hosts in the cluster.
- :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
- warnings only, whereas a value of 100 disables the setting.
- :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
- permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
- slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
- issues.
- :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
- subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
- from the total amount of resources in the cluster. Disable to supply user-defined values.
- :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
- the cluster to reserve for failover.
- :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
- the cluster to reserve for failover.
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
- :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
- to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
- currently in the cluster.
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
- :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
- affected datastore clears in the middle of an APD event. Can be one of none or reset.
- :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
- detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
- restartAggressive.
- :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
- the response action defined in ha_datastore_apd_response.
- :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
- detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
- ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
- allFeasibleDsWithUserPreference.
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
- Can be one of none, powerOff, or shutdown.
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
- failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
- on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
- :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
- failed. The value is in seconds.
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
- attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
- time is allotted.
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
- vmMonitoringOnly, or vmAndAppMonitoring.
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
- high, or highest.
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
- proceeding with the next priority.
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
+ :param pulumi.Input[_builtins.bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists for testing and is not recommended in normal use.
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
+ :param pulumi.Input[_builtins.int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster.
+ :param pulumi.Input[_builtins.int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting.
+ :param pulumi.Input[_builtins.str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service issues.
+ :param pulumi.Input[_builtins.bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values.
+ :param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in the cluster to reserve for failover.
+ :param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in the cluster to reserve for failover.
+ :param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
+ :param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
+ :param pulumi.Input[_builtins.bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines currently in the cluster.
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
+ :param pulumi.Input[_builtins.str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset.
+ :param pulumi.Input[_builtins.str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
+ :param pulumi.Input[_builtins.int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the response action defined in ha_datastore_apd_response.
+ :param pulumi.Input[_builtins.str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
+ :param pulumi.Input[_builtins.bool] ha_enabled: Enable vSphere HA for this cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
+ :param pulumi.Input[_builtins.str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference.
+ :param pulumi.Input[_builtins.str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown.
+ :param pulumi.Input[_builtins.str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
+ :param pulumi.Input[_builtins.str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
+ :param pulumi.Input[_builtins.str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
+ :param pulumi.Input[_builtins.int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. The value is in seconds.
+ :param pulumi.Input[_builtins.int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted.
+ :param pulumi.Input[_builtins.int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
+ :param pulumi.Input[_builtins.int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
+ :param pulumi.Input[_builtins.str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring.
+ :param pulumi.Input[_builtins.int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
+ :param pulumi.Input[_builtins.str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest.
+ :param pulumi.Input[_builtins.int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority.
+ :param pulumi.Input[_builtins.int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
  :param pulumi.Input['ComputeClusterHostImageArgs'] host_image: Details about the host image which should be applied to the cluster.
- :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
- :param pulumi.Input[str] name: The name of the cluster.
- :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
- :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
- :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
- this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
- :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
- cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
- :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
- :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
+ :param pulumi.Input[_builtins.bool] host_managed: Must be set if cluster enrollment is managed from host resource.
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
+ :param pulumi.Input[_builtins.str] name: The name of the cluster.
+ :param pulumi.Input[_builtins.str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
+ :param pulumi.Input[_builtins.bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
+ :param pulumi.Input[_builtins.str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
+ :param pulumi.Input[_builtins.str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] tags: The IDs of any tags to attach to this resource.
+ :param pulumi.Input[_builtins.bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
+ :param pulumi.Input[_builtins.bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
  :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
- :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
- :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
- :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
- :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
+ :param pulumi.Input[_builtins.bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
+ :param pulumi.Input[_builtins.int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
+ :param pulumi.Input[_builtins.bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
+ :param pulumi.Input[_builtins.bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
  :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: The configuration for vSAN fault domains.
- :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
- :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
+ :param pulumi.Input[_builtins.bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
+ :param pulumi.Input[_builtins.bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
  :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: The configuration for stretched cluster.
- :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
- :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
+ :param pulumi.Input[_builtins.bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
+ :param pulumi.Input[_builtins.bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
  """
  pulumi.set(__self__, "datacenter_id", datacenter_id)
  if custom_attributes is not None:
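Everything in the hunk above is a change to type annotations and docstring wrapping; the `ComputeClusterArgs` argument names and their documented meanings are the same in both versions. For orientation, here is a minimal usage sketch passing a few of the arguments documented above to the `ComputeCluster` resource. The argument names (`name`, `datacenter_id`, `host_system_ids`, `drs_enabled`, `drs_automation_level`, `ha_enabled`) come from the diff; the program structure and the placeholder IDs are illustrative assumptions, not taken from this package:

import pulumi
import pulumi_vsphere as vsphere

# Placeholder managed object IDs; in a real program these would come from
# data-source lookups (e.g. vsphere.get_datacenter) or existing resources.
datacenter_id = "datacenter-123"
host_ids = ["host-1001", "host-1002"]

cluster = vsphere.ComputeCluster(
    "compute-cluster",
    name="compute-cluster-test",            # the name of the cluster
    datacenter_id=datacenter_id,            # required: managed object ID of the datacenter
    host_system_ids=host_ids,               # managed object IDs of the hosts to put in the cluster
    drs_enabled=True,                       # enable DRS for this cluster
    drs_automation_level="fullyAutomated",  # one of manual, partiallyAutomated, or fullyAutomated
    ha_enabled=True,                        # enable vSphere HA for this cluster
)

pulumi.export("cluster_id", cluster.id)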
@@ -347,9 +310,9 @@ class ComputeClusterArgs:
  if vsan_verbose_mode_enabled is not None:
  pulumi.set(__self__, "vsan_verbose_mode_enabled", vsan_verbose_mode_enabled)

- @property
+ @_builtins.property
  @pulumi.getter(name="datacenterId")
- def datacenter_id(self) -> pulumi.Input[str]:
+ def datacenter_id(self) -> pulumi.Input[_builtins.str]:
  """
  The managed object ID of
  the datacenter to create the cluster in. Forces a new resource if changed.
@@ -357,12 +320,12 @@ class ComputeClusterArgs:
  return pulumi.get(self, "datacenter_id")

  @datacenter_id.setter
- def datacenter_id(self, value: pulumi.Input[str]):
+ def datacenter_id(self, value: pulumi.Input[_builtins.str]):
  pulumi.set(self, "datacenter_id", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="customAttributes")
- def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
+ def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
  """
  A map of custom attribute ids to attribute
  value strings to set for the datastore cluster.
@@ -373,137 +336,132 @@ class ComputeClusterArgs:
  return pulumi.get(self, "custom_attributes")

  @custom_attributes.setter
- def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
+ def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
  pulumi.set(self, "custom_attributes", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="dpmAutomationLevel")
- def dpm_automation_level(self) -> Optional[pulumi.Input[str]]:
+ def dpm_automation_level(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  The automation level for host power operations in this cluster. Can be one of manual or automated.
  """
  return pulumi.get(self, "dpm_automation_level")

  @dpm_automation_level.setter
- def dpm_automation_level(self, value: Optional[pulumi.Input[str]]):
+ def dpm_automation_level(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "dpm_automation_level", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="dpmEnabled")
- def dpm_enabled(self) -> Optional[pulumi.Input[bool]]:
+ def dpm_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
  """
- Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
- machines in the cluster. Requires that DRS be enabled.
+ Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual machines in the cluster. Requires that DRS be enabled.
  """
  return pulumi.get(self, "dpm_enabled")

  @dpm_enabled.setter
- def dpm_enabled(self, value: Optional[pulumi.Input[bool]]):
+ def dpm_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
  pulumi.set(self, "dpm_enabled", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="dpmThreshold")
- def dpm_threshold(self) -> Optional[pulumi.Input[int]]:
+ def dpm_threshold(self) -> Optional[pulumi.Input[_builtins.int]]:
  """
- A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
- affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
- setting.
+ A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting.
  """
  return pulumi.get(self, "dpm_threshold")

  @dpm_threshold.setter
- def dpm_threshold(self, value: Optional[pulumi.Input[int]]):
+ def dpm_threshold(self, value: Optional[pulumi.Input[_builtins.int]]):
  pulumi.set(self, "dpm_threshold", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="drsAdvancedOptions")
- def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
+ def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
  """
  Advanced configuration options for DRS and DPM.
  """
  return pulumi.get(self, "drs_advanced_options")

  @drs_advanced_options.setter
- def drs_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
+ def drs_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
  pulumi.set(self, "drs_advanced_options", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="drsAutomationLevel")
- def drs_automation_level(self) -> Optional[pulumi.Input[str]]:
+ def drs_automation_level(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
- The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
- fullyAutomated.
+ The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated.
  """
  return pulumi.get(self, "drs_automation_level")

  @drs_automation_level.setter
- def drs_automation_level(self, value: Optional[pulumi.Input[str]]):
+ def drs_automation_level(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "drs_automation_level", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="drsEnablePredictiveDrs")
- def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[bool]]:
+ def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[_builtins.bool]]:
  """
  When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
  """
  return pulumi.get(self, "drs_enable_predictive_drs")

  @drs_enable_predictive_drs.setter
- def drs_enable_predictive_drs(self, value: Optional[pulumi.Input[bool]]):
+ def drs_enable_predictive_drs(self, value: Optional[pulumi.Input[_builtins.bool]]):
  pulumi.set(self, "drs_enable_predictive_drs", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="drsEnableVmOverrides")
- def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[bool]]:
+ def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[_builtins.bool]]:
  """
  When true, allows individual VM overrides within this cluster to be set.
  """
  return pulumi.get(self, "drs_enable_vm_overrides")

  @drs_enable_vm_overrides.setter
- def drs_enable_vm_overrides(self, value: Optional[pulumi.Input[bool]]):
+ def drs_enable_vm_overrides(self, value: Optional[pulumi.Input[_builtins.bool]]):
  pulumi.set(self, "drs_enable_vm_overrides", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="drsEnabled")
- def drs_enabled(self) -> Optional[pulumi.Input[bool]]:
+ def drs_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
  """
  Enable DRS for this cluster.
  """
  return pulumi.get(self, "drs_enabled")

  @drs_enabled.setter
- def drs_enabled(self, value: Optional[pulumi.Input[bool]]):
+ def drs_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
  pulumi.set(self, "drs_enabled", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="drsMigrationThreshold")
- def drs_migration_threshold(self) -> Optional[pulumi.Input[int]]:
+ def drs_migration_threshold(self) -> Optional[pulumi.Input[_builtins.int]]:
  """
- A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
- more imbalance while a higher setting will tolerate less.
+ A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less.
  """
  return pulumi.get(self, "drs_migration_threshold")

  @drs_migration_threshold.setter
- def drs_migration_threshold(self, value: Optional[pulumi.Input[int]]):
+ def drs_migration_threshold(self, value: Optional[pulumi.Input[_builtins.int]]):
  pulumi.set(self, "drs_migration_threshold", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="drsScaleDescendantsShares")
- def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[str]]:
+ def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  Enable scalable shares for all descendants of this cluster.
  """
  return pulumi.get(self, "drs_scale_descendants_shares")

  @drs_scale_descendants_shares.setter
- def drs_scale_descendants_shares(self, value: Optional[pulumi.Input[str]]):
+ def drs_scale_descendants_shares(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "drs_scale_descendants_shares", value)

- @property
+ @_builtins.property
  @pulumi.getter
- def folder(self) -> Optional[pulumi.Input[str]]:
+ def folder(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
  The relative path to a folder to put this cluster in.
  This is a path relative to the datacenter you are deploying the cluster to.
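The hunks that follow repeat one mechanical substitution for every remaining property: `@property` becomes `@_builtins.property`, and the `str`/`int`/`bool` annotations gain the `_builtins.` prefix, while the `pulumi.get`/`pulumi.set` bodies stay the same. A condensed sketch of that generated getter/setter pattern, reduced to a single field (an illustration of the pattern, not code from the package):

import builtins as _builtins
from typing import Optional

import pulumi


@pulumi.input_type
class ExampleArgs:
    """A stripped-down stand-in for a generated *Args class."""

    def __init__(__self__, *,
                 drs_enabled: Optional[pulumi.Input[_builtins.bool]] = None):
        if drs_enabled is not None:
            pulumi.set(__self__, "drs_enabled", drs_enabled)

    @_builtins.property
    @pulumi.getter(name="drsEnabled")  # maps the snake_case Python name to the camelCase wire name
    def drs_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
        """Enable DRS for this cluster."""
        return pulumi.get(self, "drs_enabled")

    @drs_enabled.setter
    def drs_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
        pulumi.set(self, "drs_enabled", value)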
@@ -515,424 +473,394 @@ class ComputeClusterArgs:
  return pulumi.get(self, "folder")

  @folder.setter
- def folder(self, value: Optional[pulumi.Input[str]]):
+ def folder(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "folder", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="forceEvacuateOnDestroy")
- def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[bool]]:
+ def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[_builtins.bool]]:
  """
- Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
- for testing and is not recommended in normal use.
+ Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists for testing and is not recommended in normal use.
  """
  return pulumi.get(self, "force_evacuate_on_destroy")

  @force_evacuate_on_destroy.setter
- def force_evacuate_on_destroy(self, value: Optional[pulumi.Input[bool]]):
+ def force_evacuate_on_destroy(self, value: Optional[pulumi.Input[_builtins.bool]]):
  pulumi.set(self, "force_evacuate_on_destroy", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
- def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+ def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
  """
- When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
- failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
- will ignore the host when making recommendations.
+ When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
  """
  return pulumi.get(self, "ha_admission_control_failover_host_system_ids")

  @ha_admission_control_failover_host_system_ids.setter
- def ha_admission_control_failover_host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
+ def ha_admission_control_failover_host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
  pulumi.set(self, "ha_admission_control_failover_host_system_ids", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
- def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[int]]:
+ def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[_builtins.int]]:
  """
- The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
- machine operations. The maximum is one less than the number of hosts in the cluster.
+ The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster.
  """
  return pulumi.get(self, "ha_admission_control_host_failure_tolerance")

  @ha_admission_control_host_failure_tolerance.setter
- def ha_admission_control_host_failure_tolerance(self, value: Optional[pulumi.Input[int]]):
+ def ha_admission_control_host_failure_tolerance(self, value: Optional[pulumi.Input[_builtins.int]]):
  pulumi.set(self, "ha_admission_control_host_failure_tolerance", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
- def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[int]]:
+ def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[_builtins.int]]:
  """
- The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
- warnings only, whereas a value of 100 disables the setting.
+ The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting.
  """
  return pulumi.get(self, "ha_admission_control_performance_tolerance")

  @ha_admission_control_performance_tolerance.setter
- def ha_admission_control_performance_tolerance(self, value: Optional[pulumi.Input[int]]):
+ def ha_admission_control_performance_tolerance(self, value: Optional[pulumi.Input[_builtins.int]]):
  pulumi.set(self, "ha_admission_control_performance_tolerance", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="haAdmissionControlPolicy")
- def ha_admission_control_policy(self) -> Optional[pulumi.Input[str]]:
+ def ha_admission_control_policy(self) -> Optional[pulumi.Input[_builtins.str]]:
  """
- The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
- permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
- slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
- issues.
+ The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service issues.
  """
  return pulumi.get(self, "ha_admission_control_policy")

  @ha_admission_control_policy.setter
- def ha_admission_control_policy(self, value: Optional[pulumi.Input[str]]):
+ def ha_admission_control_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
  pulumi.set(self, "ha_admission_control_policy", value)

- @property
+ @_builtins.property
  @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
- def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[bool]]:
+ def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[_builtins.bool]]:
592
542
  """
593
- When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
594
- subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
595
- from the total amount of resources in the cluster. Disable to supply user-defined values.
543
+ When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values.
596
544
  """
597
545
  return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")
598
546
 
599
547
  @ha_admission_control_resource_percentage_auto_compute.setter
600
- def ha_admission_control_resource_percentage_auto_compute(self, value: Optional[pulumi.Input[bool]]):
548
+ def ha_admission_control_resource_percentage_auto_compute(self, value: Optional[pulumi.Input[_builtins.bool]]):
601
549
  pulumi.set(self, "ha_admission_control_resource_percentage_auto_compute", value)
602
550
 
603
- @property
551
+ @_builtins.property
604
552
  @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
605
- def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[int]]:
553
+ def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[_builtins.int]]:
606
554
  """
607
- When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
608
- the cluster to reserve for failover.
555
+ When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in the cluster to reserve for failover.
609
556
  """
610
557
  return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")
611
558
 
612
559
  @ha_admission_control_resource_percentage_cpu.setter
613
- def ha_admission_control_resource_percentage_cpu(self, value: Optional[pulumi.Input[int]]):
560
+ def ha_admission_control_resource_percentage_cpu(self, value: Optional[pulumi.Input[_builtins.int]]):
614
561
  pulumi.set(self, "ha_admission_control_resource_percentage_cpu", value)
615
562
 
616
- @property
563
+ @_builtins.property
617
564
  @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
618
- def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[int]]:
565
+ def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[_builtins.int]]:
619
566
  """
620
- When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
621
- the cluster to reserve for failover.
567
+ When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in the cluster to reserve for failover.
622
568
  """
623
569
  return pulumi.get(self, "ha_admission_control_resource_percentage_memory")
624
570
 
625
571
  @ha_admission_control_resource_percentage_memory.setter
626
- def ha_admission_control_resource_percentage_memory(self, value: Optional[pulumi.Input[int]]):
572
+ def ha_admission_control_resource_percentage_memory(self, value: Optional[pulumi.Input[_builtins.int]]):
627
573
  pulumi.set(self, "ha_admission_control_resource_percentage_memory", value)
628
574
 
629
- @property
575
+ @_builtins.property
630
576
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
631
- def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[int]]:
577
+ def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[_builtins.int]]:
632
578
  """
633
579
  When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
634
580
  """
635
581
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")
636
582
 
637
583
  @ha_admission_control_slot_policy_explicit_cpu.setter
638
- def ha_admission_control_slot_policy_explicit_cpu(self, value: Optional[pulumi.Input[int]]):
584
+ def ha_admission_control_slot_policy_explicit_cpu(self, value: Optional[pulumi.Input[_builtins.int]]):
639
585
  pulumi.set(self, "ha_admission_control_slot_policy_explicit_cpu", value)
640
586
 
641
- @property
587
+ @_builtins.property
642
588
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
643
- def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[int]]:
589
+ def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[_builtins.int]]:
644
590
  """
645
591
  When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
646
592
  """
647
593
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")
648
594
 
649
595
  @ha_admission_control_slot_policy_explicit_memory.setter
650
- def ha_admission_control_slot_policy_explicit_memory(self, value: Optional[pulumi.Input[int]]):
596
+ def ha_admission_control_slot_policy_explicit_memory(self, value: Optional[pulumi.Input[_builtins.int]]):
651
597
  pulumi.set(self, "ha_admission_control_slot_policy_explicit_memory", value)
652
598
 
653
- @property
599
+ @_builtins.property
654
600
  @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
655
- def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[bool]]:
601
+ def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[_builtins.bool]]:
656
602
  """
657
- When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
658
- to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
659
- currently in the cluster.
603
+ When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is to gather an automatic average based on all powered-on virtual machines currently in the cluster.
660
604
  """
661
605
  return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")
662
606
 
663
607
  @ha_admission_control_slot_policy_use_explicit_size.setter
664
- def ha_admission_control_slot_policy_use_explicit_size(self, value: Optional[pulumi.Input[bool]]):
608
+ def ha_admission_control_slot_policy_use_explicit_size(self, value: Optional[pulumi.Input[_builtins.bool]]):
665
609
  pulumi.set(self, "ha_admission_control_slot_policy_use_explicit_size", value)
666
610
 
667
- @property
611
+ @_builtins.property
668
612
  @pulumi.getter(name="haAdvancedOptions")
669
- def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
613
+ def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
670
614
  """
671
615
  Advanced configuration options for vSphere HA.
672
616
  """
673
617
  return pulumi.get(self, "ha_advanced_options")
674
618
 
675
619
  @ha_advanced_options.setter
676
- def ha_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
620
+ def ha_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
677
621
  pulumi.set(self, "ha_advanced_options", value)
678
622
 
679
- @property
623
+ @_builtins.property
680
624
  @pulumi.getter(name="haDatastoreApdRecoveryAction")
681
- def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[str]]:
625
+ def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[_builtins.str]]:
682
626
  """
683
- When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
684
- affected datastore clears in the middle of an APD event. Can be one of none or reset.
627
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset.
685
628
  """
686
629
  return pulumi.get(self, "ha_datastore_apd_recovery_action")
687
630
 
688
631
  @ha_datastore_apd_recovery_action.setter
689
- def ha_datastore_apd_recovery_action(self, value: Optional[pulumi.Input[str]]):
632
+ def ha_datastore_apd_recovery_action(self, value: Optional[pulumi.Input[_builtins.str]]):
690
633
  pulumi.set(self, "ha_datastore_apd_recovery_action", value)
691
634
 
692
- @property
635
+ @_builtins.property
693
636
  @pulumi.getter(name="haDatastoreApdResponse")
694
- def ha_datastore_apd_response(self) -> Optional[pulumi.Input[str]]:
637
+ def ha_datastore_apd_response(self) -> Optional[pulumi.Input[_builtins.str]]:
695
638
  """
696
- When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
697
- detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
698
- restartAggressive.
639
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
699
640
  """
700
641
  return pulumi.get(self, "ha_datastore_apd_response")
701
642
 
702
643
  @ha_datastore_apd_response.setter
703
- def ha_datastore_apd_response(self, value: Optional[pulumi.Input[str]]):
644
+ def ha_datastore_apd_response(self, value: Optional[pulumi.Input[_builtins.str]]):
704
645
  pulumi.set(self, "ha_datastore_apd_response", value)
705
646
 
706
- @property
647
+ @_builtins.property
707
648
  @pulumi.getter(name="haDatastoreApdResponseDelay")
708
- def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[int]]:
649
+ def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[_builtins.int]]:
709
650
  """
710
- When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
711
- the response action defined in ha_datastore_apd_response.
651
+ When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the response action defined in ha_datastore_apd_response.
712
652
  """
713
653
  return pulumi.get(self, "ha_datastore_apd_response_delay")
714
654
 
715
655
  @ha_datastore_apd_response_delay.setter
716
- def ha_datastore_apd_response_delay(self, value: Optional[pulumi.Input[int]]):
656
+ def ha_datastore_apd_response_delay(self, value: Optional[pulumi.Input[_builtins.int]]):
717
657
  pulumi.set(self, "ha_datastore_apd_response_delay", value)
718
658
 
719
- @property
659
+ @_builtins.property
720
660
  @pulumi.getter(name="haDatastorePdlResponse")
721
- def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[str]]:
661
+ def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[_builtins.str]]:
722
662
  """
723
- When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
724
- detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
663
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
725
664
  """
726
665
  return pulumi.get(self, "ha_datastore_pdl_response")
727
666
 
728
667
  @ha_datastore_pdl_response.setter
729
- def ha_datastore_pdl_response(self, value: Optional[pulumi.Input[str]]):
668
+ def ha_datastore_pdl_response(self, value: Optional[pulumi.Input[_builtins.str]]):
730
669
  pulumi.set(self, "ha_datastore_pdl_response", value)
731
670
 
732
- @property
671
+ @_builtins.property
733
672
  @pulumi.getter(name="haEnabled")
734
- def ha_enabled(self) -> Optional[pulumi.Input[bool]]:
673
+ def ha_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
735
674
  """
736
675
  Enable vSphere HA for this cluster.
737
676
  """
738
677
  return pulumi.get(self, "ha_enabled")
739
678
 
740
679
  @ha_enabled.setter
741
- def ha_enabled(self, value: Optional[pulumi.Input[bool]]):
680
+ def ha_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
742
681
  pulumi.set(self, "ha_enabled", value)
743
682
 
744
- @property
683
+ @_builtins.property
745
684
  @pulumi.getter(name="haHeartbeatDatastoreIds")
746
- def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
685
+ def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
747
686
  """
748
- The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
749
- ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
687
+ The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
750
688
  """
751
689
  return pulumi.get(self, "ha_heartbeat_datastore_ids")
752
690
 
753
691
  @ha_heartbeat_datastore_ids.setter
754
- def ha_heartbeat_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
692
+ def ha_heartbeat_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
755
693
  pulumi.set(self, "ha_heartbeat_datastore_ids", value)
756
694
 
757
- @property
695
+ @_builtins.property
758
696
  @pulumi.getter(name="haHeartbeatDatastorePolicy")
759
- def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[str]]:
697
+ def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[_builtins.str]]:
760
698
  """
761
- The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
762
- allFeasibleDsWithUserPreference.
699
+ The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference.
763
700
  """
764
701
  return pulumi.get(self, "ha_heartbeat_datastore_policy")
765
702
 
766
703
  @ha_heartbeat_datastore_policy.setter
767
- def ha_heartbeat_datastore_policy(self, value: Optional[pulumi.Input[str]]):
704
+ def ha_heartbeat_datastore_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
768
705
  pulumi.set(self, "ha_heartbeat_datastore_policy", value)
769
706
 
770
- @property
707
+ @_builtins.property
771
708
  @pulumi.getter(name="haHostIsolationResponse")
772
- def ha_host_isolation_response(self) -> Optional[pulumi.Input[str]]:
709
+ def ha_host_isolation_response(self) -> Optional[pulumi.Input[_builtins.str]]:
773
710
  """
774
- The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
775
- Can be one of none, powerOff, or shutdown.
711
+ The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown.
776
712
  """
777
713
  return pulumi.get(self, "ha_host_isolation_response")
778
714
 
779
715
  @ha_host_isolation_response.setter
780
- def ha_host_isolation_response(self, value: Optional[pulumi.Input[str]]):
716
+ def ha_host_isolation_response(self, value: Optional[pulumi.Input[_builtins.str]]):
781
717
  pulumi.set(self, "ha_host_isolation_response", value)
782
718
 
783
- @property
719
+ @_builtins.property
784
720
  @pulumi.getter(name="haHostMonitoring")
785
- def ha_host_monitoring(self) -> Optional[pulumi.Input[str]]:
721
+ def ha_host_monitoring(self) -> Optional[pulumi.Input[_builtins.str]]:
786
722
  """
787
723
  Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
788
724
  """
789
725
  return pulumi.get(self, "ha_host_monitoring")
790
726
 
791
727
  @ha_host_monitoring.setter
792
- def ha_host_monitoring(self, value: Optional[pulumi.Input[str]]):
728
+ def ha_host_monitoring(self, value: Optional[pulumi.Input[_builtins.str]]):
793
729
  pulumi.set(self, "ha_host_monitoring", value)
794
730
 
795
- @property
731
+ @_builtins.property
796
732
  @pulumi.getter(name="haVmComponentProtection")
797
- def ha_vm_component_protection(self) -> Optional[pulumi.Input[str]]:
733
+ def ha_vm_component_protection(self) -> Optional[pulumi.Input[_builtins.str]]:
798
734
  """
799
- Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
800
- failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
735
+ Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
801
736
  """
802
737
  return pulumi.get(self, "ha_vm_component_protection")
803
738
 
804
739
  @ha_vm_component_protection.setter
805
- def ha_vm_component_protection(self, value: Optional[pulumi.Input[str]]):
740
+ def ha_vm_component_protection(self, value: Optional[pulumi.Input[_builtins.str]]):
806
741
  pulumi.set(self, "ha_vm_component_protection", value)
807
742
 
808
- @property
743
+ @_builtins.property
809
744
  @pulumi.getter(name="haVmDependencyRestartCondition")
810
- def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[str]]:
745
+ def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[_builtins.str]]:
811
746
  """
812
- The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
813
- on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
747
+ The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
814
748
  """
815
749
  return pulumi.get(self, "ha_vm_dependency_restart_condition")
816
750
 
817
751
  @ha_vm_dependency_restart_condition.setter
818
- def ha_vm_dependency_restart_condition(self, value: Optional[pulumi.Input[str]]):
752
+ def ha_vm_dependency_restart_condition(self, value: Optional[pulumi.Input[_builtins.str]]):
819
753
  pulumi.set(self, "ha_vm_dependency_restart_condition", value)
820
754
 
821
- @property
755
+ @_builtins.property
822
756
  @pulumi.getter(name="haVmFailureInterval")
823
- def ha_vm_failure_interval(self) -> Optional[pulumi.Input[int]]:
757
+ def ha_vm_failure_interval(self) -> Optional[pulumi.Input[_builtins.int]]:
824
758
  """
825
- If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
826
- failed. The value is in seconds.
759
+ If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. The value is in seconds.
827
760
  """
828
761
  return pulumi.get(self, "ha_vm_failure_interval")
829
762
 
830
763
  @ha_vm_failure_interval.setter
831
- def ha_vm_failure_interval(self, value: Optional[pulumi.Input[int]]):
764
+ def ha_vm_failure_interval(self, value: Optional[pulumi.Input[_builtins.int]]):
832
765
  pulumi.set(self, "ha_vm_failure_interval", value)
833
766
 
834
- @property
767
+ @_builtins.property
835
768
  @pulumi.getter(name="haVmMaximumFailureWindow")
836
- def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[int]]:
769
+ def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[_builtins.int]]:
837
770
  """
838
- The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
839
- attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
840
- time is allotted.
771
+ The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. A value of -1 means no window, allowing an unlimited reset time.
841
772
  """
842
773
  return pulumi.get(self, "ha_vm_maximum_failure_window")
843
774
 
844
775
  @ha_vm_maximum_failure_window.setter
845
- def ha_vm_maximum_failure_window(self, value: Optional[pulumi.Input[int]]):
776
+ def ha_vm_maximum_failure_window(self, value: Optional[pulumi.Input[_builtins.int]]):
846
777
  pulumi.set(self, "ha_vm_maximum_failure_window", value)
847
778
 
848
- @property
779
+ @_builtins.property
849
780
  @pulumi.getter(name="haVmMaximumResets")
850
- def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[int]]:
781
+ def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[_builtins.int]]:
851
782
  """
852
783
  The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
853
784
  """
854
785
  return pulumi.get(self, "ha_vm_maximum_resets")
855
786
 
856
787
  @ha_vm_maximum_resets.setter
857
- def ha_vm_maximum_resets(self, value: Optional[pulumi.Input[int]]):
788
+ def ha_vm_maximum_resets(self, value: Optional[pulumi.Input[_builtins.int]]):
858
789
  pulumi.set(self, "ha_vm_maximum_resets", value)
859
790
 
860
- @property
791
+ @_builtins.property
861
792
  @pulumi.getter(name="haVmMinimumUptime")
862
- def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[int]]:
793
+ def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[_builtins.int]]:
863
794
  """
864
795
  The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
865
796
  """
866
797
  return pulumi.get(self, "ha_vm_minimum_uptime")
867
798
 
868
799
  @ha_vm_minimum_uptime.setter
869
- def ha_vm_minimum_uptime(self, value: Optional[pulumi.Input[int]]):
800
+ def ha_vm_minimum_uptime(self, value: Optional[pulumi.Input[_builtins.int]]):
870
801
  pulumi.set(self, "ha_vm_minimum_uptime", value)
871
802
 
872
- @property
803
+ @_builtins.property
873
804
  @pulumi.getter(name="haVmMonitoring")
874
- def ha_vm_monitoring(self) -> Optional[pulumi.Input[str]]:
805
+ def ha_vm_monitoring(self) -> Optional[pulumi.Input[_builtins.str]]:
875
806
  """
876
- The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
877
- vmMonitoringOnly, or vmAndAppMonitoring.
807
+ The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring.
878
808
  """
879
809
  return pulumi.get(self, "ha_vm_monitoring")
880
810
 
881
811
  @ha_vm_monitoring.setter
882
- def ha_vm_monitoring(self, value: Optional[pulumi.Input[str]]):
812
+ def ha_vm_monitoring(self, value: Optional[pulumi.Input[_builtins.str]]):
883
813
  pulumi.set(self, "ha_vm_monitoring", value)
884
814
 
885
- @property
815
+ @_builtins.property
886
816
  @pulumi.getter(name="haVmRestartAdditionalDelay")
887
- def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[int]]:
817
+ def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[_builtins.int]]:
888
818
  """
889
819
  Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
890
820
  """
891
821
  return pulumi.get(self, "ha_vm_restart_additional_delay")
892
822
 
893
823
  @ha_vm_restart_additional_delay.setter
894
- def ha_vm_restart_additional_delay(self, value: Optional[pulumi.Input[int]]):
824
+ def ha_vm_restart_additional_delay(self, value: Optional[pulumi.Input[_builtins.int]]):
895
825
  pulumi.set(self, "ha_vm_restart_additional_delay", value)
896
826
 
897
- @property
827
+ @_builtins.property
898
828
  @pulumi.getter(name="haVmRestartPriority")
899
- def ha_vm_restart_priority(self) -> Optional[pulumi.Input[str]]:
829
+ def ha_vm_restart_priority(self) -> Optional[pulumi.Input[_builtins.str]]:
900
830
  """
901
- The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
902
- high, or highest.
831
+ The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest.
903
832
  """
904
833
  return pulumi.get(self, "ha_vm_restart_priority")
905
834
 
906
835
  @ha_vm_restart_priority.setter
907
- def ha_vm_restart_priority(self, value: Optional[pulumi.Input[str]]):
836
+ def ha_vm_restart_priority(self, value: Optional[pulumi.Input[_builtins.str]]):
908
837
  pulumi.set(self, "ha_vm_restart_priority", value)
909
838
 
910
- @property
839
+ @_builtins.property
911
840
  @pulumi.getter(name="haVmRestartTimeout")
912
- def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[int]]:
841
+ def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[_builtins.int]]:
913
842
  """
914
- The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
915
- proceeding with the next priority.
843
+ The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority.
916
844
  """
917
845
  return pulumi.get(self, "ha_vm_restart_timeout")
918
846
 
919
847
  @ha_vm_restart_timeout.setter
920
- def ha_vm_restart_timeout(self, value: Optional[pulumi.Input[int]]):
848
+ def ha_vm_restart_timeout(self, value: Optional[pulumi.Input[_builtins.int]]):
921
849
  pulumi.set(self, "ha_vm_restart_timeout", value)
922
850
 
923
- @property
851
+ @_builtins.property
924
852
  @pulumi.getter(name="hostClusterExitTimeout")
925
- def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[int]]:
853
+ def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[_builtins.int]]:
926
854
  """
927
855
  The timeout for each host maintenance mode operation when removing hosts from a cluster.
928
856
  """
929
857
  return pulumi.get(self, "host_cluster_exit_timeout")
930
858
 
931
859
  @host_cluster_exit_timeout.setter
932
- def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[int]]):
860
+ def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[_builtins.int]]):
933
861
  pulumi.set(self, "host_cluster_exit_timeout", value)
934
862
 
935
- @property
863
+ @_builtins.property
936
864
  @pulumi.getter(name="hostImage")
937
865
  def host_image(self) -> Optional[pulumi.Input['ComputeClusterHostImageArgs']]:
938
866
  """
@@ -944,141 +872,139 @@ class ComputeClusterArgs:
944
872
  def host_image(self, value: Optional[pulumi.Input['ComputeClusterHostImageArgs']]):
945
873
  pulumi.set(self, "host_image", value)
946
874
 
947
- @property
875
+ @_builtins.property
948
876
  @pulumi.getter(name="hostManaged")
949
- def host_managed(self) -> Optional[pulumi.Input[bool]]:
877
+ def host_managed(self) -> Optional[pulumi.Input[_builtins.bool]]:
950
878
  """
951
879
  Must be set if cluster enrollment is managed from host resource.
952
880
  """
953
881
  return pulumi.get(self, "host_managed")
954
882
 
955
883
  @host_managed.setter
956
- def host_managed(self, value: Optional[pulumi.Input[bool]]):
884
+ def host_managed(self, value: Optional[pulumi.Input[_builtins.bool]]):
957
885
  pulumi.set(self, "host_managed", value)
958
886
 
959
- @property
887
+ @_builtins.property
960
888
  @pulumi.getter(name="hostSystemIds")
961
- def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
889
+ def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
962
890
  """
963
891
  The managed object IDs of the hosts to put in the cluster.
964
892
  """
965
893
  return pulumi.get(self, "host_system_ids")
966
894
 
967
895
  @host_system_ids.setter
968
- def host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
896
+ def host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
969
897
  pulumi.set(self, "host_system_ids", value)
970
898
 
971
- @property
899
+ @_builtins.property
972
900
  @pulumi.getter
973
- def name(self) -> Optional[pulumi.Input[str]]:
901
+ def name(self) -> Optional[pulumi.Input[_builtins.str]]:
974
902
  """
975
903
  The name of the cluster.
976
904
  """
977
905
  return pulumi.get(self, "name")
978
906
 
979
907
  @name.setter
980
- def name(self, value: Optional[pulumi.Input[str]]):
908
+ def name(self, value: Optional[pulumi.Input[_builtins.str]]):
981
909
  pulumi.set(self, "name", value)
982
910
 
983
- @property
911
+ @_builtins.property
984
912
  @pulumi.getter(name="proactiveHaAutomationLevel")
985
- def proactive_ha_automation_level(self) -> Optional[pulumi.Input[str]]:
913
+ def proactive_ha_automation_level(self) -> Optional[pulumi.Input[_builtins.str]]:
986
914
  """
987
915
  The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
988
916
  """
989
917
  return pulumi.get(self, "proactive_ha_automation_level")
990
918
 
991
919
  @proactive_ha_automation_level.setter
992
- def proactive_ha_automation_level(self, value: Optional[pulumi.Input[str]]):
920
+ def proactive_ha_automation_level(self, value: Optional[pulumi.Input[_builtins.str]]):
993
921
  pulumi.set(self, "proactive_ha_automation_level", value)
994
922
 
995
- @property
923
+ @_builtins.property
996
924
  @pulumi.getter(name="proactiveHaEnabled")
997
- def proactive_ha_enabled(self) -> Optional[pulumi.Input[bool]]:
925
+ def proactive_ha_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
998
926
  """
999
927
  Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
1000
928
  """
1001
929
  return pulumi.get(self, "proactive_ha_enabled")
1002
930
 
1003
931
  @proactive_ha_enabled.setter
1004
- def proactive_ha_enabled(self, value: Optional[pulumi.Input[bool]]):
932
+ def proactive_ha_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1005
933
  pulumi.set(self, "proactive_ha_enabled", value)
1006
934
 
1007
- @property
935
+ @_builtins.property
1008
936
  @pulumi.getter(name="proactiveHaModerateRemediation")
1009
- def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[str]]:
937
+ def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[_builtins.str]]:
1010
938
  """
1011
- The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
1012
- this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
939
+ The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
1013
940
  """
1014
941
  return pulumi.get(self, "proactive_ha_moderate_remediation")
1015
942
 
1016
943
  @proactive_ha_moderate_remediation.setter
1017
- def proactive_ha_moderate_remediation(self, value: Optional[pulumi.Input[str]]):
944
+ def proactive_ha_moderate_remediation(self, value: Optional[pulumi.Input[_builtins.str]]):
1018
945
  pulumi.set(self, "proactive_ha_moderate_remediation", value)
1019
946
 
1020
- @property
947
+ @_builtins.property
1021
948
  @pulumi.getter(name="proactiveHaProviderIds")
1022
- def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
949
+ def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
1023
950
  """
1024
951
  The list of IDs for health update providers configured for this cluster.
1025
952
  """
1026
953
  return pulumi.get(self, "proactive_ha_provider_ids")
1027
954
 
1028
955
  @proactive_ha_provider_ids.setter
1029
- def proactive_ha_provider_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
956
+ def proactive_ha_provider_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
1030
957
  pulumi.set(self, "proactive_ha_provider_ids", value)
1031
958
 
1032
- @property
959
+ @_builtins.property
1033
960
  @pulumi.getter(name="proactiveHaSevereRemediation")
1034
- def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[str]]:
961
+ def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[_builtins.str]]:
1035
962
  """
1036
- The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
1037
- cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
963
+ The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
1038
964
  """
1039
965
  return pulumi.get(self, "proactive_ha_severe_remediation")
1040
966
 
1041
967
  @proactive_ha_severe_remediation.setter
1042
- def proactive_ha_severe_remediation(self, value: Optional[pulumi.Input[str]]):
968
+ def proactive_ha_severe_remediation(self, value: Optional[pulumi.Input[_builtins.str]]):
1043
969
  pulumi.set(self, "proactive_ha_severe_remediation", value)
1044
970
 
1045
- @property
971
+ @_builtins.property
1046
972
  @pulumi.getter
1047
- def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
973
+ def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
1048
974
  """
1049
975
  The IDs of any tags to attach to this resource.
1050
976
  """
1051
977
  return pulumi.get(self, "tags")
1052
978
 
1053
979
  @tags.setter
1054
- def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
980
+ def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
1055
981
  pulumi.set(self, "tags", value)
1056
982
 
1057
- @property
983
+ @_builtins.property
1058
984
  @pulumi.getter(name="vsanCompressionEnabled")
1059
- def vsan_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
985
+ def vsan_compression_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
1060
986
  """
1061
987
  Whether the vSAN compression service is enabled for the cluster.
1062
988
  """
1063
989
  return pulumi.get(self, "vsan_compression_enabled")
1064
990
 
1065
991
  @vsan_compression_enabled.setter
1066
- def vsan_compression_enabled(self, value: Optional[pulumi.Input[bool]]):
992
+ def vsan_compression_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1067
993
  pulumi.set(self, "vsan_compression_enabled", value)
1068
994
 
1069
- @property
995
+ @_builtins.property
1070
996
  @pulumi.getter(name="vsanDedupEnabled")
1071
- def vsan_dedup_enabled(self) -> Optional[pulumi.Input[bool]]:
997
+ def vsan_dedup_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
1072
998
  """
1073
999
  Whether the vSAN deduplication service is enabled for the cluster.
1074
1000
  """
1075
1001
  return pulumi.get(self, "vsan_dedup_enabled")
1076
1002
 
1077
1003
  @vsan_dedup_enabled.setter
1078
- def vsan_dedup_enabled(self, value: Optional[pulumi.Input[bool]]):
1004
+ def vsan_dedup_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1079
1005
  pulumi.set(self, "vsan_dedup_enabled", value)
1080
1006
 
1081
- @property
1007
+ @_builtins.property
1082
1008
  @pulumi.getter(name="vsanDiskGroups")
1083
1009
  def vsan_disk_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]:
1084
1010
  """
@@ -1090,55 +1016,55 @@ class ComputeClusterArgs:
1090
1016
  def vsan_disk_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]):
1091
1017
  pulumi.set(self, "vsan_disk_groups", value)
1092
1018
 
1093
- @property
1019
+ @_builtins.property
1094
1020
  @pulumi.getter(name="vsanDitEncryptionEnabled")
1095
- def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
1021
+ def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
1096
1022
  """
1097
1023
  Whether the vSAN data-in-transit encryption is enabled for the cluster.
1098
1024
  """
1099
1025
  return pulumi.get(self, "vsan_dit_encryption_enabled")
1100
1026
 
1101
1027
  @vsan_dit_encryption_enabled.setter
1102
- def vsan_dit_encryption_enabled(self, value: Optional[pulumi.Input[bool]]):
1028
+ def vsan_dit_encryption_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1103
1029
  pulumi.set(self, "vsan_dit_encryption_enabled", value)
1104
1030
 
1105
- @property
1031
+ @_builtins.property
1106
1032
  @pulumi.getter(name="vsanDitRekeyInterval")
1107
- def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[int]]:
1033
+ def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[_builtins.int]]:
1108
1034
  """
1109
1035
  When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
1110
1036
  """
1111
1037
  return pulumi.get(self, "vsan_dit_rekey_interval")
1112
1038
 
1113
1039
  @vsan_dit_rekey_interval.setter
1114
- def vsan_dit_rekey_interval(self, value: Optional[pulumi.Input[int]]):
1040
+ def vsan_dit_rekey_interval(self, value: Optional[pulumi.Input[_builtins.int]]):
1115
1041
  pulumi.set(self, "vsan_dit_rekey_interval", value)
1116
1042
 
1117
- @property
1043
+ @_builtins.property
1118
1044
  @pulumi.getter(name="vsanEnabled")
1119
- def vsan_enabled(self) -> Optional[pulumi.Input[bool]]:
1045
+ def vsan_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
1120
1046
  """
1121
1047
  Whether the vSAN service is enabled for the cluster.
1122
1048
  """
1123
1049
  return pulumi.get(self, "vsan_enabled")
1124
1050
 
1125
1051
  @vsan_enabled.setter
1126
- def vsan_enabled(self, value: Optional[pulumi.Input[bool]]):
1052
+ def vsan_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1127
1053
  pulumi.set(self, "vsan_enabled", value)
1128
1054
 
1129
- @property
1055
+ @_builtins.property
1130
1056
  @pulumi.getter(name="vsanEsaEnabled")
1131
- def vsan_esa_enabled(self) -> Optional[pulumi.Input[bool]]:
1057
+ def vsan_esa_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
1132
1058
  """
1133
1059
  Whether the vSAN ESA service is enabled for the cluster.
1134
1060
  """
1135
1061
  return pulumi.get(self, "vsan_esa_enabled")
1136
1062
 
1137
1063
  @vsan_esa_enabled.setter
1138
- def vsan_esa_enabled(self, value: Optional[pulumi.Input[bool]]):
1064
+ def vsan_esa_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1139
1065
  pulumi.set(self, "vsan_esa_enabled", value)
1140
1066
 
1141
- @property
1067
+ @_builtins.property
1142
1068
  @pulumi.getter(name="vsanFaultDomains")
1143
1069
  def vsan_fault_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]:
1144
1070
  """
@@ -1150,43 +1076,43 @@ class ComputeClusterArgs:
1150
1076
  def vsan_fault_domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]):
1151
1077
  pulumi.set(self, "vsan_fault_domains", value)
1152
1078
 
1153
- @property
1079
+ @_builtins.property
1154
1080
  @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
1155
- def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
1081
+ def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
1156
1082
  """
1157
1083
  Whether the vSAN network diagnostic mode is enabled for the cluster.
1158
1084
  """
1159
1085
  return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")
1160
1086
 
1161
1087
  @vsan_network_diagnostic_mode_enabled.setter
1162
- def vsan_network_diagnostic_mode_enabled(self, value: Optional[pulumi.Input[bool]]):
1088
+ def vsan_network_diagnostic_mode_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1163
1089
  pulumi.set(self, "vsan_network_diagnostic_mode_enabled", value)
1164
1090
 
1165
- @property
1091
+ @_builtins.property
1166
1092
  @pulumi.getter(name="vsanPerformanceEnabled")
1167
- def vsan_performance_enabled(self) -> Optional[pulumi.Input[bool]]:
1093
+ def vsan_performance_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
1168
1094
  """
1169
1095
  Whether the vSAN performance service is enabled for the cluster.
1170
1096
  """
1171
1097
  return pulumi.get(self, "vsan_performance_enabled")
1172
1098
 
1173
1099
  @vsan_performance_enabled.setter
1174
- def vsan_performance_enabled(self, value: Optional[pulumi.Input[bool]]):
1100
+ def vsan_performance_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1175
1101
  pulumi.set(self, "vsan_performance_enabled", value)
1176
1102
 
1177
- @property
1103
+ @_builtins.property
1178
1104
  @pulumi.getter(name="vsanRemoteDatastoreIds")
1179
- def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
1105
+ def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
1180
1106
  """
1181
1107
  The managed object IDs of the vSAN datastore to be mounted on the cluster.
1182
1108
  """
1183
1109
  return pulumi.get(self, "vsan_remote_datastore_ids")
1184
1110
 
1185
1111
  @vsan_remote_datastore_ids.setter
1186
- def vsan_remote_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
1112
+ def vsan_remote_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
1187
1113
  pulumi.set(self, "vsan_remote_datastore_ids", value)
1188
1114
 
1189
- @property
1115
+ @_builtins.property
1190
1116
  @pulumi.getter(name="vsanStretchedCluster")
1191
1117
  def vsan_stretched_cluster(self) -> Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]:
1192
1118
  """
@@ -1198,227 +1124,190 @@ class ComputeClusterArgs:
1198
1124
  def vsan_stretched_cluster(self, value: Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]):
1199
1125
  pulumi.set(self, "vsan_stretched_cluster", value)
1200
1126
 
1201
- @property
1127
+ @_builtins.property
1202
1128
  @pulumi.getter(name="vsanUnmapEnabled")
1203
- def vsan_unmap_enabled(self) -> Optional[pulumi.Input[bool]]:
1129
+ def vsan_unmap_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
1204
1130
  """
1205
1131
  Whether the vSAN unmap service is enabled for the cluster.
1206
1132
  """
1207
1133
  return pulumi.get(self, "vsan_unmap_enabled")
1208
1134
 
1209
1135
  @vsan_unmap_enabled.setter
1210
- def vsan_unmap_enabled(self, value: Optional[pulumi.Input[bool]]):
1136
+ def vsan_unmap_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1211
1137
  pulumi.set(self, "vsan_unmap_enabled", value)
1212
1138
 
1213
- @property
1139
+ @_builtins.property
1214
1140
  @pulumi.getter(name="vsanVerboseModeEnabled")
1215
- def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
1141
+ def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
1216
1142
  """
1217
1143
  Whether the vSAN verbose mode is enabled for the cluster.
1218
1144
  """
1219
1145
  return pulumi.get(self, "vsan_verbose_mode_enabled")
1220
1146
 
1221
1147
  @vsan_verbose_mode_enabled.setter
1222
- def vsan_verbose_mode_enabled(self, value: Optional[pulumi.Input[bool]]):
1148
+ def vsan_verbose_mode_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1223
1149
  pulumi.set(self, "vsan_verbose_mode_enabled", value)
1224
1150
 
1225
1151
 
1226
1152
  @pulumi.input_type
1227
1153
  class _ComputeClusterState:
1228
1154
  def __init__(__self__, *,
1229
- custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
1230
- datacenter_id: Optional[pulumi.Input[str]] = None,
1231
- dpm_automation_level: Optional[pulumi.Input[str]] = None,
1232
- dpm_enabled: Optional[pulumi.Input[bool]] = None,
1233
- dpm_threshold: Optional[pulumi.Input[int]] = None,
1234
- drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
1235
- drs_automation_level: Optional[pulumi.Input[str]] = None,
1236
- drs_enable_predictive_drs: Optional[pulumi.Input[bool]] = None,
1237
- drs_enable_vm_overrides: Optional[pulumi.Input[bool]] = None,
1238
- drs_enabled: Optional[pulumi.Input[bool]] = None,
1239
- drs_migration_threshold: Optional[pulumi.Input[int]] = None,
1240
- drs_scale_descendants_shares: Optional[pulumi.Input[str]] = None,
1241
- folder: Optional[pulumi.Input[str]] = None,
1242
- force_evacuate_on_destroy: Optional[pulumi.Input[bool]] = None,
1243
- ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1244
- ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[int]] = None,
1245
- ha_admission_control_performance_tolerance: Optional[pulumi.Input[int]] = None,
1246
- ha_admission_control_policy: Optional[pulumi.Input[str]] = None,
1247
- ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[bool]] = None,
1248
- ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[int]] = None,
1249
- ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[int]] = None,
1250
- ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[int]] = None,
1251
- ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[int]] = None,
1252
- ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[bool]] = None,
1253
- ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
1254
- ha_datastore_apd_recovery_action: Optional[pulumi.Input[str]] = None,
1255
- ha_datastore_apd_response: Optional[pulumi.Input[str]] = None,
1256
- ha_datastore_apd_response_delay: Optional[pulumi.Input[int]] = None,
1257
- ha_datastore_pdl_response: Optional[pulumi.Input[str]] = None,
1258
- ha_enabled: Optional[pulumi.Input[bool]] = None,
1259
- ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1260
- ha_heartbeat_datastore_policy: Optional[pulumi.Input[str]] = None,
1261
- ha_host_isolation_response: Optional[pulumi.Input[str]] = None,
1262
- ha_host_monitoring: Optional[pulumi.Input[str]] = None,
1263
- ha_vm_component_protection: Optional[pulumi.Input[str]] = None,
1264
- ha_vm_dependency_restart_condition: Optional[pulumi.Input[str]] = None,
1265
- ha_vm_failure_interval: Optional[pulumi.Input[int]] = None,
1266
- ha_vm_maximum_failure_window: Optional[pulumi.Input[int]] = None,
1267
- ha_vm_maximum_resets: Optional[pulumi.Input[int]] = None,
1268
- ha_vm_minimum_uptime: Optional[pulumi.Input[int]] = None,
1269
- ha_vm_monitoring: Optional[pulumi.Input[str]] = None,
1270
- ha_vm_restart_additional_delay: Optional[pulumi.Input[int]] = None,
1271
- ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
1272
- ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
1273
- host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
1155
+ custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1156
+ datacenter_id: Optional[pulumi.Input[_builtins.str]] = None,
1157
+ dpm_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
1158
+ dpm_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
1159
+ dpm_threshold: Optional[pulumi.Input[_builtins.int]] = None,
1160
+ drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1161
+ drs_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
1162
+ drs_enable_predictive_drs: Optional[pulumi.Input[_builtins.bool]] = None,
1163
+ drs_enable_vm_overrides: Optional[pulumi.Input[_builtins.bool]] = None,
1164
+ drs_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
1165
+ drs_migration_threshold: Optional[pulumi.Input[_builtins.int]] = None,
1166
+ drs_scale_descendants_shares: Optional[pulumi.Input[_builtins.str]] = None,
1167
+ folder: Optional[pulumi.Input[_builtins.str]] = None,
1168
+ force_evacuate_on_destroy: Optional[pulumi.Input[_builtins.bool]] = None,
1169
+ ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
1170
+ ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
1171
+ ha_admission_control_performance_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
1172
+ ha_admission_control_policy: Optional[pulumi.Input[_builtins.str]] = None,
1173
+ ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[_builtins.bool]] = None,
1174
+ ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[_builtins.int]] = None,
1175
+ ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[_builtins.int]] = None,
1176
+ ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[_builtins.int]] = None,
1177
+ ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[_builtins.int]] = None,
1178
+ ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[_builtins.bool]] = None,
1179
+ ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
1180
+ ha_datastore_apd_recovery_action: Optional[pulumi.Input[_builtins.str]] = None,
1181
+ ha_datastore_apd_response: Optional[pulumi.Input[_builtins.str]] = None,
1182
+ ha_datastore_apd_response_delay: Optional[pulumi.Input[_builtins.int]] = None,
1183
+ ha_datastore_pdl_response: Optional[pulumi.Input[_builtins.str]] = None,
1184
+ ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
1185
+ ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
1186
+ ha_heartbeat_datastore_policy: Optional[pulumi.Input[_builtins.str]] = None,
1187
+ ha_host_isolation_response: Optional[pulumi.Input[_builtins.str]] = None,
1188
+ ha_host_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
1189
+ ha_vm_component_protection: Optional[pulumi.Input[_builtins.str]] = None,
1190
+ ha_vm_dependency_restart_condition: Optional[pulumi.Input[_builtins.str]] = None,
1191
+ ha_vm_failure_interval: Optional[pulumi.Input[_builtins.int]] = None,
1192
+ ha_vm_maximum_failure_window: Optional[pulumi.Input[_builtins.int]] = None,
1193
+ ha_vm_maximum_resets: Optional[pulumi.Input[_builtins.int]] = None,
1194
+ ha_vm_minimum_uptime: Optional[pulumi.Input[_builtins.int]] = None,
1195
+ ha_vm_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
1196
+ ha_vm_restart_additional_delay: Optional[pulumi.Input[_builtins.int]] = None,
1197
+ ha_vm_restart_priority: Optional[pulumi.Input[_builtins.str]] = None,
1198
+ ha_vm_restart_timeout: Optional[pulumi.Input[_builtins.int]] = None,
1199
+ host_cluster_exit_timeout: Optional[pulumi.Input[_builtins.int]] = None,
1274
1200
  host_image: Optional[pulumi.Input['ComputeClusterHostImageArgs']] = None,
1275
- host_managed: Optional[pulumi.Input[bool]] = None,
1276
- host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1277
- name: Optional[pulumi.Input[str]] = None,
1278
- proactive_ha_automation_level: Optional[pulumi.Input[str]] = None,
1279
- proactive_ha_enabled: Optional[pulumi.Input[bool]] = None,
1280
- proactive_ha_moderate_remediation: Optional[pulumi.Input[str]] = None,
1281
- proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1282
- proactive_ha_severe_remediation: Optional[pulumi.Input[str]] = None,
1283
- resource_pool_id: Optional[pulumi.Input[str]] = None,
1284
- tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1285
- vsan_compression_enabled: Optional[pulumi.Input[bool]] = None,
1286
- vsan_dedup_enabled: Optional[pulumi.Input[bool]] = None,
1201
+ host_managed: Optional[pulumi.Input[_builtins.bool]] = None,
1202
+ host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
1203
+ name: Optional[pulumi.Input[_builtins.str]] = None,
1204
+ proactive_ha_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
1205
+ proactive_ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
1206
+ proactive_ha_moderate_remediation: Optional[pulumi.Input[_builtins.str]] = None,
1207
+ proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
1208
+ proactive_ha_severe_remediation: Optional[pulumi.Input[_builtins.str]] = None,
1209
+ resource_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
1210
+ tags: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
1211
+ vsan_compression_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
1212
+ vsan_dedup_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
1287
1213
  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]] = None,
1288
- vsan_dit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
1289
- vsan_dit_rekey_interval: Optional[pulumi.Input[int]] = None,
1290
- vsan_enabled: Optional[pulumi.Input[bool]] = None,
1291
- vsan_esa_enabled: Optional[pulumi.Input[bool]] = None,
1214
+ vsan_dit_encryption_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
1215
+ vsan_dit_rekey_interval: Optional[pulumi.Input[_builtins.int]] = None,
1216
+ vsan_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
1217
+ vsan_esa_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
1292
1218
  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]] = None,
1293
- vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[bool]] = None,
1294
- vsan_performance_enabled: Optional[pulumi.Input[bool]] = None,
1295
- vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
1219
+ vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
1220
+ vsan_performance_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
1221
+ vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
1296
1222
  vsan_stretched_cluster: Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']] = None,
1297
- vsan_unmap_enabled: Optional[pulumi.Input[bool]] = None,
1298
- vsan_verbose_mode_enabled: Optional[pulumi.Input[bool]] = None):
1223
+ vsan_unmap_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
1224
+ vsan_verbose_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None):
1299
1225
  """
1300
1226
  Input properties used for looking up and filtering ComputeCluster resources.
1301
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: A map of custom attribute ids to attribute
1227
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] custom_attributes: A map of custom attribute ids to attribute
1302
1228
  value strings to set for the datastore cluster.
1303
1229
 
1304
1230
  > **NOTE:** Custom attributes are unsupported on direct ESXi connections
1305
1231
  and require vCenter Server.
1306
- :param pulumi.Input[str] datacenter_id: The managed object ID of
1232
+ :param pulumi.Input[_builtins.str] datacenter_id: The managed object ID of
1307
1233
  the datacenter to create the cluster in. Forces a new resource if changed.
1308
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
1309
- :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
1310
- machines in the cluster. Requires that DRS be enabled.
1311
- :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
1312
- affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
1313
- setting.
1314
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
1315
- :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
1316
- fullyAutomated.
1317
- :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
1318
- :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
1319
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
1320
- :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
1321
- more imbalance while a higher setting will tolerate less.
1322
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
1323
- :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
1234
+ :param pulumi.Input[_builtins.str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
1235
+ :param pulumi.Input[_builtins.bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual machines in the cluster. Requires that DRS be enabled.
1236
+ :param pulumi.Input[_builtins.int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting.
1237
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
1238
+ :param pulumi.Input[_builtins.str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated.
1239
+ :param pulumi.Input[_builtins.bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
1240
+ :param pulumi.Input[_builtins.bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
1241
+ :param pulumi.Input[_builtins.bool] drs_enabled: Enable DRS for this cluster.
1242
+ :param pulumi.Input[_builtins.int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less.
1243
+ :param pulumi.Input[_builtins.str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
1244
+ :param pulumi.Input[_builtins.str] folder: The relative path to a folder to put this cluster in.
1324
1245
  This is a path relative to the datacenter you are deploying the cluster to.
1325
1246
  Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
1326
1247
  The provider will place a cluster named `compute-cluster-test` in a
1327
1248
  host folder located at `/dc1/host/foo/bar`, with the final inventory path
1328
1249
  being `/dc1/host/foo/bar/datastore-cluster-test`.
1329
- :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
1330
- for testing and is not recommended in normal use.
1331
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
1332
- failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
1333
- will ignore the host when making recommendations.
1334
- :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
1335
- machine operations. The maximum is one less than the number of hosts in the cluster.
1336
- :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
1337
- warnings only, whereas a value of 100 disables the setting.
1338
- :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
1339
- permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
1340
- slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
1341
- issues.
1342
- :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
1343
- subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
1344
- from the total amount of resources in the cluster. Disable to supply user-defined values.
1345
- :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
1346
- the cluster to reserve for failover.
1347
- :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
1348
- the cluster to reserve for failover.
1349
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
1350
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
1351
- :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
1352
- to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
1353
- currently in the cluster.
1354
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
1355
- :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
1356
- affected datastore clears in the middle of an APD event. Can be one of none or reset.
1357
- :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
1358
- detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
1359
- restartAggressive.
1360
- :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
1361
- the response action defined in ha_datastore_apd_response.
1362
- :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
1363
- detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
1364
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
1365
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
1366
- ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
1367
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
1368
- allFeasibleDsWithUserPreference.
1369
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
1370
- Can be one of none, powerOff, or shutdown.
1371
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
1372
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
1373
- failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
1374
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
1375
- on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
1376
- :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
1377
- failed. The value is in seconds.
1378
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
1379
- attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
1380
- time is allotted.
1381
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
1382
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
1383
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
1384
- vmMonitoringOnly, or vmAndAppMonitoring.
1385
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
1386
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
1387
- high, or highest.
1388
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
1389
- proceeding with the next priority.
1390
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
1250
+ :param pulumi.Input[_builtins.bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists for testing and is not recommended in normal use.
1251
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
1252
+ :param pulumi.Input[_builtins.int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster.
1253
+ :param pulumi.Input[_builtins.int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting.
1254
+ :param pulumi.Input[_builtins.str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service issues.
1255
+ :param pulumi.Input[_builtins.bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values.
1256
+ :param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in the cluster to reserve for failover.
1257
+ :param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in the cluster to reserve for failover.
1258
+ :param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
1259
+ :param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
1260
+ :param pulumi.Input[_builtins.bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is to gather an automatic average based on all powered-on virtual machines currently in the cluster.
1261
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
1262
+ :param pulumi.Input[_builtins.str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset.
1263
+ :param pulumi.Input[_builtins.str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
1264
+ :param pulumi.Input[_builtins.int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the response action defined in ha_datastore_apd_response.
1265
+ :param pulumi.Input[_builtins.str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
1266
+ :param pulumi.Input[_builtins.bool] ha_enabled: Enable vSphere HA for this cluster.
1267
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
1268
+ :param pulumi.Input[_builtins.str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference.
1269
+ :param pulumi.Input[_builtins.str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown.
1270
+ :param pulumi.Input[_builtins.str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
1271
+ :param pulumi.Input[_builtins.str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
1272
+ :param pulumi.Input[_builtins.str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
1273
+ :param pulumi.Input[_builtins.int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. The value is in seconds.
1274
+ :param pulumi.Input[_builtins.int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted.
1275
+ :param pulumi.Input[_builtins.int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
1276
+ :param pulumi.Input[_builtins.int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
1277
+ :param pulumi.Input[_builtins.str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring.
1278
+ :param pulumi.Input[_builtins.int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
1279
+ :param pulumi.Input[_builtins.str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest.
1280
+ :param pulumi.Input[_builtins.int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority.
1281
+ :param pulumi.Input[_builtins.int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
1391
1282
  :param pulumi.Input['ComputeClusterHostImageArgs'] host_image: Details about the host image which should be applied to the cluster.
1392
- :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
1393
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
1394
- :param pulumi.Input[str] name: The name of the cluster.
1395
- :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
1396
- :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
1397
- :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
1398
- this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
1399
- :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
1400
- :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
1401
- cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
1402
- :param pulumi.Input[str] resource_pool_id: The managed object ID of the primary
1283
+ :param pulumi.Input[_builtins.bool] host_managed: Must be set if cluster enrollment is managed from host resource.
1284
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
1285
+ :param pulumi.Input[_builtins.str] name: The name of the cluster.
1286
+ :param pulumi.Input[_builtins.str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
1287
+ :param pulumi.Input[_builtins.bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
1288
+ :param pulumi.Input[_builtins.str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
1289
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
1290
+ :param pulumi.Input[_builtins.str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
1291
+ :param pulumi.Input[_builtins.str] resource_pool_id: The managed object ID of the primary
1403
1292
  resource pool for this cluster. This can be passed directly to the
1404
1293
  `resource_pool_id`
1405
1294
  attribute of the
1406
1295
  `VirtualMachine` resource.
1407
- :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
1408
- :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
1409
- :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
1296
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] tags: The IDs of any tags to attach to this resource.
1297
+ :param pulumi.Input[_builtins.bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
1298
+ :param pulumi.Input[_builtins.bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
1410
1299
  :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
1411
- :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
1412
- :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
1413
- :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
1414
- :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
1300
+ :param pulumi.Input[_builtins.bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
1301
+ :param pulumi.Input[_builtins.int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
1302
+ :param pulumi.Input[_builtins.bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
1303
+ :param pulumi.Input[_builtins.bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
1415
1304
  :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: The configuration for vSAN fault domains.
1416
- :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
1417
- :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
1418
- :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
1305
+ :param pulumi.Input[_builtins.bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
1306
+ :param pulumi.Input[_builtins.bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
1307
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
1419
1308
  :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: The configuration for stretched cluster.
1420
- :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
1421
- :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
1309
+ :param pulumi.Input[_builtins.bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
1310
+ :param pulumi.Input[_builtins.bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
1422
1311
  """
1423
1312
  if custom_attributes is not None:
1424
1313
  pulumi.set(__self__, "custom_attributes", custom_attributes)
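Most of the state fields above correspond to constructor arguments of the same name on the ComputeCluster resource. A minimal sketch of a cluster definition, using placeholder datacenter, host, and folder names rather than values taken from this package:

    import pulumi
    import pulumi_vsphere as vsphere

    # Look up the datacenter and a host to enroll (names are placeholders).
    datacenter = vsphere.get_datacenter(name="dc-01")
    host = vsphere.get_host(name="esxi-01.example.com",
                            datacenter_id=datacenter.id)

    cluster = vsphere.ComputeCluster(
        "compute-cluster-test",
        datacenter_id=datacenter.id,
        host_system_ids=[host.id],
        folder="foo/bar",          # host folder relative to the datacenter
    )

    # The cluster's primary resource pool can be passed to VirtualMachine resources.
    pulumi.export("cluster_resource_pool_id", cluster.resource_pool_id)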
@@ -1561,9 +1450,9 @@ class _ComputeClusterState:
1561
1450
  if vsan_verbose_mode_enabled is not None:
1562
1451
  pulumi.set(__self__, "vsan_verbose_mode_enabled", vsan_verbose_mode_enabled)
1563
1452
 
1564
- @property
1453
+ @_builtins.property
1565
1454
  @pulumi.getter(name="customAttributes")
1566
- def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
1455
+ def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
1567
1456
  """
1568
1457
  A map of custom attribute ids to attribute
1569
1458
  value strings to set for the datastore cluster.
@@ -1574,12 +1463,12 @@ class _ComputeClusterState:
1574
1463
  return pulumi.get(self, "custom_attributes")
1575
1464
 
1576
1465
  @custom_attributes.setter
1577
- def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
1466
+ def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
1578
1467
  pulumi.set(self, "custom_attributes", value)
1579
1468
 
1580
- @property
1469
+ @_builtins.property
1581
1470
  @pulumi.getter(name="datacenterId")
1582
- def datacenter_id(self) -> Optional[pulumi.Input[str]]:
1471
+ def datacenter_id(self) -> Optional[pulumi.Input[_builtins.str]]:
1583
1472
  """
1584
1473
  The managed object ID of
1585
1474
  the datacenter to create the cluster in. Forces a new resource if changed.
@@ -1587,137 +1476,132 @@ class _ComputeClusterState:
1587
1476
  return pulumi.get(self, "datacenter_id")
1588
1477
 
1589
1478
  @datacenter_id.setter
1590
- def datacenter_id(self, value: Optional[pulumi.Input[str]]):
1479
+ def datacenter_id(self, value: Optional[pulumi.Input[_builtins.str]]):
1591
1480
  pulumi.set(self, "datacenter_id", value)
1592
1481
 
1593
- @property
1482
+ @_builtins.property
1594
1483
  @pulumi.getter(name="dpmAutomationLevel")
1595
- def dpm_automation_level(self) -> Optional[pulumi.Input[str]]:
1484
+ def dpm_automation_level(self) -> Optional[pulumi.Input[_builtins.str]]:
1596
1485
  """
1597
1486
  The automation level for host power operations in this cluster. Can be one of manual or automated.
1598
1487
  """
1599
1488
  return pulumi.get(self, "dpm_automation_level")
1600
1489
 
1601
1490
  @dpm_automation_level.setter
1602
- def dpm_automation_level(self, value: Optional[pulumi.Input[str]]):
1491
+ def dpm_automation_level(self, value: Optional[pulumi.Input[_builtins.str]]):
1603
1492
  pulumi.set(self, "dpm_automation_level", value)
1604
1493
 
1605
- @property
1494
+ @_builtins.property
1606
1495
  @pulumi.getter(name="dpmEnabled")
1607
- def dpm_enabled(self) -> Optional[pulumi.Input[bool]]:
1496
+ def dpm_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
1608
1497
  """
1609
- Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
1610
- machines in the cluster. Requires that DRS be enabled.
1498
+ Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual machines in the cluster. Requires that DRS be enabled.
1611
1499
  """
1612
1500
  return pulumi.get(self, "dpm_enabled")
1613
1501
 
1614
1502
  @dpm_enabled.setter
1615
- def dpm_enabled(self, value: Optional[pulumi.Input[bool]]):
1503
+ def dpm_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1616
1504
  pulumi.set(self, "dpm_enabled", value)
1617
1505
 
1618
- @property
1506
+ @_builtins.property
1619
1507
  @pulumi.getter(name="dpmThreshold")
1620
- def dpm_threshold(self) -> Optional[pulumi.Input[int]]:
1508
+ def dpm_threshold(self) -> Optional[pulumi.Input[_builtins.int]]:
1621
1509
  """
1622
- A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
1623
- affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
1624
- setting.
1510
+ A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting.
1625
1511
  """
1626
1512
  return pulumi.get(self, "dpm_threshold")
1627
1513
 
1628
1514
  @dpm_threshold.setter
1629
- def dpm_threshold(self, value: Optional[pulumi.Input[int]]):
1515
+ def dpm_threshold(self, value: Optional[pulumi.Input[_builtins.int]]):
1630
1516
  pulumi.set(self, "dpm_threshold", value)
1631
1517
 
1632
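The DPM settings documented above only take effect when DRS is enabled on the cluster. A small sketch, with a placeholder datacenter ID:

    import pulumi_vsphere as vsphere

    cluster = vsphere.ComputeCluster(
        "dpm-cluster",
        datacenter_id="datacenter-123",     # placeholder managed object ID
        drs_enabled=True,                   # DPM requires DRS to be enabled
        dpm_enabled=True,
        dpm_automation_level="automated",   # "manual" or "automated"
        dpm_threshold=3,                    # 1-5; a lower value tolerates more surplus/deficit
    )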
- @property
1518
+ @_builtins.property
1633
1519
  @pulumi.getter(name="drsAdvancedOptions")
1634
- def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
1520
+ def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
1635
1521
  """
1636
1522
  Advanced configuration options for DRS and DPM.
1637
1523
  """
1638
1524
  return pulumi.get(self, "drs_advanced_options")
1639
1525
 
1640
1526
  @drs_advanced_options.setter
1641
- def drs_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
1527
+ def drs_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
1642
1528
  pulumi.set(self, "drs_advanced_options", value)
1643
1529
 
1644
- @property
1530
+ @_builtins.property
1645
1531
  @pulumi.getter(name="drsAutomationLevel")
1646
- def drs_automation_level(self) -> Optional[pulumi.Input[str]]:
1532
+ def drs_automation_level(self) -> Optional[pulumi.Input[_builtins.str]]:
1647
1533
  """
1648
- The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
1649
- fullyAutomated.
1534
+ The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated.
1650
1535
  """
1651
1536
  return pulumi.get(self, "drs_automation_level")
1652
1537
 
1653
1538
  @drs_automation_level.setter
1654
- def drs_automation_level(self, value: Optional[pulumi.Input[str]]):
1539
+ def drs_automation_level(self, value: Optional[pulumi.Input[_builtins.str]]):
1655
1540
  pulumi.set(self, "drs_automation_level", value)
1656
1541
 
1657
- @property
1542
+ @_builtins.property
1658
1543
  @pulumi.getter(name="drsEnablePredictiveDrs")
1659
- def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[bool]]:
1544
+ def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[_builtins.bool]]:
1660
1545
  """
1661
1546
  When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
1662
1547
  """
1663
1548
  return pulumi.get(self, "drs_enable_predictive_drs")
1664
1549
 
1665
1550
  @drs_enable_predictive_drs.setter
1666
- def drs_enable_predictive_drs(self, value: Optional[pulumi.Input[bool]]):
1551
+ def drs_enable_predictive_drs(self, value: Optional[pulumi.Input[_builtins.bool]]):
1667
1552
  pulumi.set(self, "drs_enable_predictive_drs", value)
1668
1553
 
1669
- @property
1554
+ @_builtins.property
1670
1555
  @pulumi.getter(name="drsEnableVmOverrides")
1671
- def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[bool]]:
1556
+ def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[_builtins.bool]]:
1672
1557
  """
1673
1558
  When true, allows individual VM overrides within this cluster to be set.
1674
1559
  """
1675
1560
  return pulumi.get(self, "drs_enable_vm_overrides")
1676
1561
 
1677
1562
  @drs_enable_vm_overrides.setter
1678
- def drs_enable_vm_overrides(self, value: Optional[pulumi.Input[bool]]):
1563
+ def drs_enable_vm_overrides(self, value: Optional[pulumi.Input[_builtins.bool]]):
1679
1564
  pulumi.set(self, "drs_enable_vm_overrides", value)
1680
1565
 
1681
- @property
1566
+ @_builtins.property
1682
1567
  @pulumi.getter(name="drsEnabled")
1683
- def drs_enabled(self) -> Optional[pulumi.Input[bool]]:
1568
+ def drs_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
1684
1569
  """
1685
1570
  Enable DRS for this cluster.
1686
1571
  """
1687
1572
  return pulumi.get(self, "drs_enabled")
1688
1573
 
1689
1574
  @drs_enabled.setter
1690
- def drs_enabled(self, value: Optional[pulumi.Input[bool]]):
1575
+ def drs_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1691
1576
  pulumi.set(self, "drs_enabled", value)
1692
1577
 
1693
- @property
1578
+ @_builtins.property
1694
1579
  @pulumi.getter(name="drsMigrationThreshold")
1695
- def drs_migration_threshold(self) -> Optional[pulumi.Input[int]]:
1580
+ def drs_migration_threshold(self) -> Optional[pulumi.Input[_builtins.int]]:
1696
1581
  """
1697
- A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
1698
- more imbalance while a higher setting will tolerate less.
1582
+ A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less.
1699
1583
  """
1700
1584
  return pulumi.get(self, "drs_migration_threshold")
1701
1585
 
1702
1586
  @drs_migration_threshold.setter
1703
- def drs_migration_threshold(self, value: Optional[pulumi.Input[int]]):
1587
+ def drs_migration_threshold(self, value: Optional[pulumi.Input[_builtins.int]]):
1704
1588
  pulumi.set(self, "drs_migration_threshold", value)
1705
1589
 
1706
- @property
1590
+ @_builtins.property
1707
1591
  @pulumi.getter(name="drsScaleDescendantsShares")
1708
- def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[str]]:
1592
+ def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[_builtins.str]]:
1709
1593
  """
1710
1594
  Enable scalable shares for all descendants of this cluster.
1711
1595
  """
1712
1596
  return pulumi.get(self, "drs_scale_descendants_shares")
1713
1597
 
1714
1598
  @drs_scale_descendants_shares.setter
1715
- def drs_scale_descendants_shares(self, value: Optional[pulumi.Input[str]]):
1599
+ def drs_scale_descendants_shares(self, value: Optional[pulumi.Input[_builtins.str]]):
1716
1600
  pulumi.set(self, "drs_scale_descendants_shares", value)
1717
1601
 
1718
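Taken together, the DRS settings above look roughly like the following when fully automated; the datacenter ID is a placeholder:

    import pulumi_vsphere as vsphere

    cluster = vsphere.ComputeCluster(
        "drs-cluster",
        datacenter_id="datacenter-123",           # placeholder
        drs_enabled=True,
        drs_automation_level="fullyAutomated",    # manual | partiallyAutomated | fullyAutomated
        drs_migration_threshold=3,                # 1-5; a lower value tolerates more imbalance
        drs_enable_vm_overrides=True,
        drs_enable_predictive_drs=False,          # needs vRealize Operations Manager data when True
    )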
- @property
1602
+ @_builtins.property
1719
1603
  @pulumi.getter
1720
- def folder(self) -> Optional[pulumi.Input[str]]:
1604
+ def folder(self) -> Optional[pulumi.Input[_builtins.str]]:
1721
1605
  """
1722
1606
  The relative path to a folder to put this cluster in.
1723
1607
  This is a path relative to the datacenter you are deploying the cluster to.
@@ -1729,424 +1613,394 @@ class _ComputeClusterState:
1729
1613
  return pulumi.get(self, "folder")
1730
1614
 
1731
1615
  @folder.setter
1732
- def folder(self, value: Optional[pulumi.Input[str]]):
1616
+ def folder(self, value: Optional[pulumi.Input[_builtins.str]]):
1733
1617
  pulumi.set(self, "folder", value)
1734
1618
 
1735
- @property
1619
+ @_builtins.property
1736
1620
  @pulumi.getter(name="forceEvacuateOnDestroy")
1737
- def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[bool]]:
1621
+ def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[_builtins.bool]]:
1738
1622
  """
1739
- Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
1740
- for testing and is not recommended in normal use.
1623
+ Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists for testing and is not recommended in normal use.
1741
1624
  """
1742
1625
  return pulumi.get(self, "force_evacuate_on_destroy")
1743
1626
 
1744
1627
  @force_evacuate_on_destroy.setter
1745
- def force_evacuate_on_destroy(self, value: Optional[pulumi.Input[bool]]):
1628
+ def force_evacuate_on_destroy(self, value: Optional[pulumi.Input[_builtins.bool]]):
1746
1629
  pulumi.set(self, "force_evacuate_on_destroy", value)
1747
1630
 
1748
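The vsan_* flags listed in the parameter documentation above enable individual vSAN services on the cluster. A sketch with placeholder datacenter and host IDs:

    import pulumi_vsphere as vsphere

    cluster = vsphere.ComputeCluster(
        "vsan-cluster",
        datacenter_id="datacenter-123",                          # placeholder
        host_system_ids=["host-111", "host-112", "host-113"],    # placeholders
        vsan_enabled=True,
        vsan_compression_enabled=True,
        vsan_dedup_enabled=True,          # deduplication is normally paired with compression
        vsan_performance_enabled=True,
        vsan_unmap_enabled=True,
    )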
- @property
1631
+ @_builtins.property
1749
1632
  @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
1750
- def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
1633
+ def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
1751
1634
  """
1752
- When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
1753
- failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
1754
- will ignore the host when making recommendations.
1635
+ When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
1755
1636
  """
1756
1637
  return pulumi.get(self, "ha_admission_control_failover_host_system_ids")
1757
1638
 
1758
1639
  @ha_admission_control_failover_host_system_ids.setter
1759
- def ha_admission_control_failover_host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
1640
+ def ha_admission_control_failover_host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
1760
1641
  pulumi.set(self, "ha_admission_control_failover_host_system_ids", value)
1761
1642
 
1762
- @property
1643
+ @_builtins.property
1763
1644
  @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
1764
- def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[int]]:
1645
+ def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[_builtins.int]]:
1765
1646
  """
1766
- The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
1767
- machine operations. The maximum is one less than the number of hosts in the cluster.
1647
+ The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster.
1768
1648
  """
1769
1649
  return pulumi.get(self, "ha_admission_control_host_failure_tolerance")
1770
1650
 
1771
1651
  @ha_admission_control_host_failure_tolerance.setter
1772
- def ha_admission_control_host_failure_tolerance(self, value: Optional[pulumi.Input[int]]):
1652
+ def ha_admission_control_host_failure_tolerance(self, value: Optional[pulumi.Input[_builtins.int]]):
1773
1653
  pulumi.set(self, "ha_admission_control_host_failure_tolerance", value)
1774
1654
 
1775
- @property
1655
+ @_builtins.property
1776
1656
  @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
1777
- def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[int]]:
1657
+ def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[_builtins.int]]:
1778
1658
  """
1779
- The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
1780
- warnings only, whereas a value of 100 disables the setting.
1659
+ The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting.
1781
1660
  """
1782
1661
  return pulumi.get(self, "ha_admission_control_performance_tolerance")
1783
1662
 
1784
1663
  @ha_admission_control_performance_tolerance.setter
1785
- def ha_admission_control_performance_tolerance(self, value: Optional[pulumi.Input[int]]):
1664
+ def ha_admission_control_performance_tolerance(self, value: Optional[pulumi.Input[_builtins.int]]):
1786
1665
  pulumi.set(self, "ha_admission_control_performance_tolerance", value)
1787
1666
 
1788
- @property
1667
+ @_builtins.property
1789
1668
  @pulumi.getter(name="haAdmissionControlPolicy")
1790
- def ha_admission_control_policy(self) -> Optional[pulumi.Input[str]]:
1669
+ def ha_admission_control_policy(self) -> Optional[pulumi.Input[_builtins.str]]:
1791
1670
  """
1792
- The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
1793
- permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
1794
- slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
1795
- issues.
1671
+ The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service issues.
1796
1672
  """
1797
1673
  return pulumi.get(self, "ha_admission_control_policy")
1798
1674
 
1799
1675
  @ha_admission_control_policy.setter
1800
- def ha_admission_control_policy(self, value: Optional[pulumi.Input[str]]):
1676
+ def ha_admission_control_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
1801
1677
  pulumi.set(self, "ha_admission_control_policy", value)
1802
1678
 
1803
- @property
1679
+ @_builtins.property
1804
1680
  @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
1805
- def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[bool]]:
1681
+ def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[_builtins.bool]]:
1806
1682
  """
1807
- When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
1808
- subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
1809
- from the total amount of resources in the cluster. Disable to supply user-defined values.
1683
+ When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values.
1810
1684
  """
1811
1685
  return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")
1812
1686
 
1813
1687
  @ha_admission_control_resource_percentage_auto_compute.setter
1814
- def ha_admission_control_resource_percentage_auto_compute(self, value: Optional[pulumi.Input[bool]]):
1688
+ def ha_admission_control_resource_percentage_auto_compute(self, value: Optional[pulumi.Input[_builtins.bool]]):
1815
1689
  pulumi.set(self, "ha_admission_control_resource_percentage_auto_compute", value)
1816
1690
 
1817
- @property
1691
+ @_builtins.property
1818
1692
  @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
1819
- def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[int]]:
1693
+ def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[_builtins.int]]:
1820
1694
  """
1821
- When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
1822
- the cluster to reserve for failover.
1695
+ When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in the cluster to reserve for failover.
1823
1696
  """
1824
1697
  return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")
1825
1698
 
1826
1699
  @ha_admission_control_resource_percentage_cpu.setter
1827
- def ha_admission_control_resource_percentage_cpu(self, value: Optional[pulumi.Input[int]]):
1700
+ def ha_admission_control_resource_percentage_cpu(self, value: Optional[pulumi.Input[_builtins.int]]):
1828
1701
  pulumi.set(self, "ha_admission_control_resource_percentage_cpu", value)
1829
1702
 
1830
- @property
1703
+ @_builtins.property
1831
1704
  @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
1832
- def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[int]]:
1705
+ def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[_builtins.int]]:
1833
1706
  """
1834
- When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
1835
- the cluster to reserve for failover.
1707
+ When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in the cluster to reserve for failover.
1836
1708
  """
1837
1709
  return pulumi.get(self, "ha_admission_control_resource_percentage_memory")
1838
1710
 
1839
1711
  @ha_admission_control_resource_percentage_memory.setter
1840
- def ha_admission_control_resource_percentage_memory(self, value: Optional[pulumi.Input[int]]):
1712
+ def ha_admission_control_resource_percentage_memory(self, value: Optional[pulumi.Input[_builtins.int]]):
1841
1713
  pulumi.set(self, "ha_admission_control_resource_percentage_memory", value)
1842
1714
 
1843
- @property
1715
+ @_builtins.property
1844
1716
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
1845
- def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[int]]:
1717
+ def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[_builtins.int]]:
1846
1718
  """
1847
1719
  When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
1848
1720
  """
1849
1721
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")
1850
1722
 
1851
1723
  @ha_admission_control_slot_policy_explicit_cpu.setter
1852
- def ha_admission_control_slot_policy_explicit_cpu(self, value: Optional[pulumi.Input[int]]):
1724
+ def ha_admission_control_slot_policy_explicit_cpu(self, value: Optional[pulumi.Input[_builtins.int]]):
1853
1725
  pulumi.set(self, "ha_admission_control_slot_policy_explicit_cpu", value)
1854
1726
 
1855
- @property
1727
+ @_builtins.property
1856
1728
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
1857
- def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[int]]:
1729
+ def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[_builtins.int]]:
1858
1730
  """
1859
1731
  When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
1860
1732
  """
1861
1733
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")
1862
1734
 
1863
1735
  @ha_admission_control_slot_policy_explicit_memory.setter
1864
- def ha_admission_control_slot_policy_explicit_memory(self, value: Optional[pulumi.Input[int]]):
1736
+ def ha_admission_control_slot_policy_explicit_memory(self, value: Optional[pulumi.Input[_builtins.int]]):
1865
1737
  pulumi.set(self, "ha_admission_control_slot_policy_explicit_memory", value)
1866
1738
 
1867
- @property
1739
+ @_builtins.property
1868
1740
  @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
1869
- def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[bool]]:
1741
+ def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[_builtins.bool]]:
1870
1742
  """
1871
- When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
1872
- to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
1873
- currently in the cluster.
1743
+ When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is to gather an automatic average based on all powered-on virtual machines currently in the cluster.
1874
1744
  """
1875
1745
  return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")
1876
1746
 
1877
1747
  @ha_admission_control_slot_policy_use_explicit_size.setter
1878
- def ha_admission_control_slot_policy_use_explicit_size(self, value: Optional[pulumi.Input[bool]]):
1748
+ def ha_admission_control_slot_policy_use_explicit_size(self, value: Optional[pulumi.Input[_builtins.bool]]):
1879
1749
  pulumi.set(self, "ha_admission_control_slot_policy_use_explicit_size", value)
1880
1750
 
1881
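Under the resourcePercentage policy, the admission-control settings above combine as in the following sketch; the percentages and IDs are illustrative only:

    import pulumi_vsphere as vsphere

    cluster = vsphere.ComputeCluster(
        "ha-cluster",
        datacenter_id="datacenter-123",                 # placeholder
        ha_enabled=True,
        ha_admission_control_policy="resourcePercentage",
        ha_admission_control_host_failure_tolerance=1,
        # Turn off auto-compute to supply explicit CPU/memory percentages.
        ha_admission_control_resource_percentage_auto_compute=False,
        ha_admission_control_resource_percentage_cpu=30,
        ha_admission_control_resource_percentage_memory=30,
    )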
- @property
1751
+ @_builtins.property
1882
1752
  @pulumi.getter(name="haAdvancedOptions")
1883
- def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
1753
+ def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
1884
1754
  """
1885
1755
  Advanced configuration options for vSphere HA.
1886
1756
  """
1887
1757
  return pulumi.get(self, "ha_advanced_options")
1888
1758
 
1889
1759
  @ha_advanced_options.setter
1890
- def ha_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
1760
+ def ha_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
1891
1761
  pulumi.set(self, "ha_advanced_options", value)
1892
1762
 
1893
- @property
1763
+ @_builtins.property
1894
1764
  @pulumi.getter(name="haDatastoreApdRecoveryAction")
1895
- def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[str]]:
1765
+ def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[_builtins.str]]:
1896
1766
  """
1897
- When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
1898
- affected datastore clears in the middle of an APD event. Can be one of none or reset.
1767
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset.
1899
1768
  """
1900
1769
  return pulumi.get(self, "ha_datastore_apd_recovery_action")
1901
1770
 
1902
1771
  @ha_datastore_apd_recovery_action.setter
1903
- def ha_datastore_apd_recovery_action(self, value: Optional[pulumi.Input[str]]):
1772
+ def ha_datastore_apd_recovery_action(self, value: Optional[pulumi.Input[_builtins.str]]):
1904
1773
  pulumi.set(self, "ha_datastore_apd_recovery_action", value)
1905
1774
 
1906
- @property
1775
+ @_builtins.property
1907
1776
  @pulumi.getter(name="haDatastoreApdResponse")
1908
- def ha_datastore_apd_response(self) -> Optional[pulumi.Input[str]]:
1777
+ def ha_datastore_apd_response(self) -> Optional[pulumi.Input[_builtins.str]]:
1909
1778
  """
1910
- When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
1911
- detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
1912
- restartAggressive.
1779
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
1913
1780
  """
1914
1781
  return pulumi.get(self, "ha_datastore_apd_response")
1915
1782
 
1916
1783
  @ha_datastore_apd_response.setter
1917
- def ha_datastore_apd_response(self, value: Optional[pulumi.Input[str]]):
1784
+ def ha_datastore_apd_response(self, value: Optional[pulumi.Input[_builtins.str]]):
1918
1785
  pulumi.set(self, "ha_datastore_apd_response", value)
1919
1786
 
1920
- @property
1787
+ @_builtins.property
1921
1788
  @pulumi.getter(name="haDatastoreApdResponseDelay")
1922
- def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[int]]:
1789
+ def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[_builtins.int]]:
1923
1790
  """
1924
- When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
1925
- the response action defined in ha_datastore_apd_response.
1791
+ When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the response action defined in ha_datastore_apd_response.
1926
1792
  """
1927
1793
  return pulumi.get(self, "ha_datastore_apd_response_delay")
1928
1794
 
1929
1795
  @ha_datastore_apd_response_delay.setter
1930
- def ha_datastore_apd_response_delay(self, value: Optional[pulumi.Input[int]]):
1796
+ def ha_datastore_apd_response_delay(self, value: Optional[pulumi.Input[_builtins.int]]):
1931
1797
  pulumi.set(self, "ha_datastore_apd_response_delay", value)
1932
1798
 
1933
- @property
1799
+ @_builtins.property
1934
1800
  @pulumi.getter(name="haDatastorePdlResponse")
1935
- def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[str]]:
1801
+ def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[_builtins.str]]:
1936
1802
  """
1937
- When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
1938
- detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
1803
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
1939
1804
  """
1940
1805
  return pulumi.get(self, "ha_datastore_pdl_response")
1941
1806
 
1942
1807
  @ha_datastore_pdl_response.setter
1943
- def ha_datastore_pdl_response(self, value: Optional[pulumi.Input[str]]):
1808
+ def ha_datastore_pdl_response(self, value: Optional[pulumi.Input[_builtins.str]]):
1944
1809
  pulumi.set(self, "ha_datastore_pdl_response", value)
1945
1810
 
1946
- @property
1811
+ @_builtins.property
1947
1812
  @pulumi.getter(name="haEnabled")
1948
- def ha_enabled(self) -> Optional[pulumi.Input[bool]]:
1813
+ def ha_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
1949
1814
  """
1950
1815
  Enable vSphere HA for this cluster.
1951
1816
  """
1952
1817
  return pulumi.get(self, "ha_enabled")
1953
1818
 
1954
1819
  @ha_enabled.setter
1955
- def ha_enabled(self, value: Optional[pulumi.Input[bool]]):
1820
+ def ha_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
1956
1821
  pulumi.set(self, "ha_enabled", value)
1957
1822
 
1958
- @property
1823
+ @_builtins.property
1959
1824
  @pulumi.getter(name="haHeartbeatDatastoreIds")
1960
- def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
1825
+ def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
1961
1826
  """
1962
- The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
1963
- ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
1827
+ The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
1964
1828
  """
1965
1829
  return pulumi.get(self, "ha_heartbeat_datastore_ids")
1966
1830
 
1967
1831
  @ha_heartbeat_datastore_ids.setter
1968
- def ha_heartbeat_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
1832
+ def ha_heartbeat_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
1969
1833
  pulumi.set(self, "ha_heartbeat_datastore_ids", value)
1970
1834
 
1971
- @property
1835
+ @_builtins.property
1972
1836
  @pulumi.getter(name="haHeartbeatDatastorePolicy")
1973
- def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[str]]:
1837
+ def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[_builtins.str]]:
1974
1838
  """
1975
- The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
1976
- allFeasibleDsWithUserPreference.
1839
+ The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference.
1977
1840
  """
1978
1841
  return pulumi.get(self, "ha_heartbeat_datastore_policy")
1979
1842
 
1980
1843
  @ha_heartbeat_datastore_policy.setter
1981
- def ha_heartbeat_datastore_policy(self, value: Optional[pulumi.Input[str]]):
1844
+ def ha_heartbeat_datastore_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
1982
1845
  pulumi.set(self, "ha_heartbeat_datastore_policy", value)
1983
1846
 
1984
- @property
1847
+ @_builtins.property
1985
1848
  @pulumi.getter(name="haHostIsolationResponse")
1986
- def ha_host_isolation_response(self) -> Optional[pulumi.Input[str]]:
1849
+ def ha_host_isolation_response(self) -> Optional[pulumi.Input[_builtins.str]]:
1987
1850
  """
1988
- The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
1989
- Can be one of none, powerOff, or shutdown.
1851
+ The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown.
1990
1852
  """
1991
1853
  return pulumi.get(self, "ha_host_isolation_response")
1992
1854
 
1993
1855
  @ha_host_isolation_response.setter
1994
- def ha_host_isolation_response(self, value: Optional[pulumi.Input[str]]):
1856
+ def ha_host_isolation_response(self, value: Optional[pulumi.Input[_builtins.str]]):
1995
1857
  pulumi.set(self, "ha_host_isolation_response", value)
1996
1858
 
1997
- @property
1859
+ @_builtins.property
1998
1860
  @pulumi.getter(name="haHostMonitoring")
1999
- def ha_host_monitoring(self) -> Optional[pulumi.Input[str]]:
1861
+ def ha_host_monitoring(self) -> Optional[pulumi.Input[_builtins.str]]:
2000
1862
  """
2001
1863
  Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
2002
1864
  """
2003
1865
  return pulumi.get(self, "ha_host_monitoring")
2004
1866
 
2005
1867
  @ha_host_monitoring.setter
2006
- def ha_host_monitoring(self, value: Optional[pulumi.Input[str]]):
1868
+ def ha_host_monitoring(self, value: Optional[pulumi.Input[_builtins.str]]):
2007
1869
  pulumi.set(self, "ha_host_monitoring", value)
2008
1870
 
2009
- @property
1871
+ @_builtins.property
2010
1872
  @pulumi.getter(name="haVmComponentProtection")
2011
- def ha_vm_component_protection(self) -> Optional[pulumi.Input[str]]:
1873
+ def ha_vm_component_protection(self) -> Optional[pulumi.Input[_builtins.str]]:
2012
1874
  """
2013
- Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
2014
- failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
1875
+ Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
2015
1876
  """
2016
1877
  return pulumi.get(self, "ha_vm_component_protection")
2017
1878
 
2018
1879
  @ha_vm_component_protection.setter
2019
- def ha_vm_component_protection(self, value: Optional[pulumi.Input[str]]):
1880
+ def ha_vm_component_protection(self, value: Optional[pulumi.Input[_builtins.str]]):
2020
1881
  pulumi.set(self, "ha_vm_component_protection", value)
2021
1882
 
2022
- @property
1883
+ @_builtins.property
2023
1884
  @pulumi.getter(name="haVmDependencyRestartCondition")
2024
- def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[str]]:
1885
+ def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[_builtins.str]]:
2025
1886
  """
2026
- The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
2027
- on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
1887
+ The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
2028
1888
  """
2029
1889
  return pulumi.get(self, "ha_vm_dependency_restart_condition")
2030
1890
 
2031
1891
  @ha_vm_dependency_restart_condition.setter
2032
- def ha_vm_dependency_restart_condition(self, value: Optional[pulumi.Input[str]]):
1892
+ def ha_vm_dependency_restart_condition(self, value: Optional[pulumi.Input[_builtins.str]]):
2033
1893
  pulumi.set(self, "ha_vm_dependency_restart_condition", value)
2034
1894
 
2035
- @property
1895
+ @_builtins.property
2036
1896
  @pulumi.getter(name="haVmFailureInterval")
2037
- def ha_vm_failure_interval(self) -> Optional[pulumi.Input[int]]:
1897
+ def ha_vm_failure_interval(self) -> Optional[pulumi.Input[_builtins.int]]:
2038
1898
  """
2039
- If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
2040
- failed. The value is in seconds.
1899
+ If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. The value is in seconds.
2041
1900
  """
2042
1901
  return pulumi.get(self, "ha_vm_failure_interval")
2043
1902
 
2044
1903
  @ha_vm_failure_interval.setter
2045
- def ha_vm_failure_interval(self, value: Optional[pulumi.Input[int]]):
1904
+ def ha_vm_failure_interval(self, value: Optional[pulumi.Input[_builtins.int]]):
2046
1905
  pulumi.set(self, "ha_vm_failure_interval", value)
2047
1906
 
2048
- @property
1907
+ @_builtins.property
2049
1908
  @pulumi.getter(name="haVmMaximumFailureWindow")
2050
- def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[int]]:
1909
+ def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[_builtins.int]]:
2051
1910
  """
2052
- The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
2053
- attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
2054
- time is allotted.
1911
+ The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted.
2055
1912
  """
2056
1913
  return pulumi.get(self, "ha_vm_maximum_failure_window")
2057
1914
 
2058
1915
  @ha_vm_maximum_failure_window.setter
2059
- def ha_vm_maximum_failure_window(self, value: Optional[pulumi.Input[int]]):
1916
+ def ha_vm_maximum_failure_window(self, value: Optional[pulumi.Input[_builtins.int]]):
2060
1917
  pulumi.set(self, "ha_vm_maximum_failure_window", value)
2061
1918
 
2062
- @property
1919
+ @_builtins.property
2063
1920
  @pulumi.getter(name="haVmMaximumResets")
2064
- def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[int]]:
1921
+ def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[_builtins.int]]:
2065
1922
  """
2066
1923
  The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
2067
1924
  """
2068
1925
  return pulumi.get(self, "ha_vm_maximum_resets")
2069
1926
 
2070
1927
  @ha_vm_maximum_resets.setter
2071
- def ha_vm_maximum_resets(self, value: Optional[pulumi.Input[int]]):
1928
+ def ha_vm_maximum_resets(self, value: Optional[pulumi.Input[_builtins.int]]):
2072
1929
  pulumi.set(self, "ha_vm_maximum_resets", value)
2073
1930
 
2074
- @property
1931
+ @_builtins.property
2075
1932
  @pulumi.getter(name="haVmMinimumUptime")
2076
- def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[int]]:
1933
+ def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[_builtins.int]]:
2077
1934
  """
2078
1935
  The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
2079
1936
  """
2080
1937
  return pulumi.get(self, "ha_vm_minimum_uptime")
2081
1938
 
2082
1939
  @ha_vm_minimum_uptime.setter
2083
- def ha_vm_minimum_uptime(self, value: Optional[pulumi.Input[int]]):
1940
+ def ha_vm_minimum_uptime(self, value: Optional[pulumi.Input[_builtins.int]]):
2084
1941
  pulumi.set(self, "ha_vm_minimum_uptime", value)
2085
1942
 
2086
- @property
1943
+ @_builtins.property
2087
1944
  @pulumi.getter(name="haVmMonitoring")
2088
- def ha_vm_monitoring(self) -> Optional[pulumi.Input[str]]:
1945
+ def ha_vm_monitoring(self) -> Optional[pulumi.Input[_builtins.str]]:
2089
1946
  """
2090
- The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
2091
- vmMonitoringOnly, or vmAndAppMonitoring.
1947
+ The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring.
2092
1948
  """
2093
1949
  return pulumi.get(self, "ha_vm_monitoring")
2094
1950
 
2095
1951
  @ha_vm_monitoring.setter
2096
- def ha_vm_monitoring(self, value: Optional[pulumi.Input[str]]):
1952
+ def ha_vm_monitoring(self, value: Optional[pulumi.Input[_builtins.str]]):
2097
1953
  pulumi.set(self, "ha_vm_monitoring", value)
2098
1954
 
2099
- @property
1955
+ @_builtins.property
2100
1956
  @pulumi.getter(name="haVmRestartAdditionalDelay")
2101
- def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[int]]:
1957
+ def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[_builtins.int]]:
2102
1958
  """
2103
1959
  Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
2104
1960
  """
2105
1961
  return pulumi.get(self, "ha_vm_restart_additional_delay")
2106
1962
 
2107
1963
  @ha_vm_restart_additional_delay.setter
2108
- def ha_vm_restart_additional_delay(self, value: Optional[pulumi.Input[int]]):
1964
+ def ha_vm_restart_additional_delay(self, value: Optional[pulumi.Input[_builtins.int]]):
2109
1965
  pulumi.set(self, "ha_vm_restart_additional_delay", value)
2110
1966
 
2111
- @property
1967
+ @_builtins.property
2112
1968
  @pulumi.getter(name="haVmRestartPriority")
2113
- def ha_vm_restart_priority(self) -> Optional[pulumi.Input[str]]:
1969
+ def ha_vm_restart_priority(self) -> Optional[pulumi.Input[_builtins.str]]:
2114
1970
  """
2115
- The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
2116
- high, or highest.
1971
+ The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest.
2117
1972
  """
2118
1973
  return pulumi.get(self, "ha_vm_restart_priority")
2119
1974
 
2120
1975
  @ha_vm_restart_priority.setter
2121
- def ha_vm_restart_priority(self, value: Optional[pulumi.Input[str]]):
1976
+ def ha_vm_restart_priority(self, value: Optional[pulumi.Input[_builtins.str]]):
2122
1977
  pulumi.set(self, "ha_vm_restart_priority", value)
2123
1978
 
2124
- @property
1979
+ @_builtins.property
2125
1980
  @pulumi.getter(name="haVmRestartTimeout")
2126
- def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[int]]:
1981
+ def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[_builtins.int]]:
2127
1982
  """
2128
- The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
2129
- proceeding with the next priority.
1983
+ The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority.
2130
1984
  """
2131
1985
  return pulumi.get(self, "ha_vm_restart_timeout")
2132
1986
 
2133
1987
  @ha_vm_restart_timeout.setter
2134
- def ha_vm_restart_timeout(self, value: Optional[pulumi.Input[int]]):
1988
+ def ha_vm_restart_timeout(self, value: Optional[pulumi.Input[_builtins.int]]):
2135
1989
  pulumi.set(self, "ha_vm_restart_timeout", value)
2136
1990
 
2137
- @property
1991
+ @_builtins.property
2138
1992
  @pulumi.getter(name="hostClusterExitTimeout")
2139
- def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[int]]:
1993
+ def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[_builtins.int]]:
2140
1994
  """
2141
1995
  The timeout for each host maintenance mode operation when removing hosts from a cluster.
2142
1996
  """
2143
1997
  return pulumi.get(self, "host_cluster_exit_timeout")
2144
1998
 
2145
1999
  @host_cluster_exit_timeout.setter
2146
- def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[int]]):
2000
+ def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[_builtins.int]]):
2147
2001
  pulumi.set(self, "host_cluster_exit_timeout", value)
2148
2002
 
2149
- @property
2003
+ @_builtins.property
2150
2004
  @pulumi.getter(name="hostImage")
2151
2005
  def host_image(self) -> Optional[pulumi.Input['ComputeClusterHostImageArgs']]:
2152
2006
  """
@@ -2158,107 +2012,105 @@ class _ComputeClusterState:
2158
2012
  def host_image(self, value: Optional[pulumi.Input['ComputeClusterHostImageArgs']]):
2159
2013
  pulumi.set(self, "host_image", value)
2160
2014
 
2161
- @property
2015
+ @_builtins.property
2162
2016
  @pulumi.getter(name="hostManaged")
2163
- def host_managed(self) -> Optional[pulumi.Input[bool]]:
2017
+ def host_managed(self) -> Optional[pulumi.Input[_builtins.bool]]:
2164
2018
  """
2165
2019
  Must be set if cluster enrollment is managed from host resource.
2166
2020
  """
2167
2021
  return pulumi.get(self, "host_managed")
2168
2022
 
2169
2023
  @host_managed.setter
2170
- def host_managed(self, value: Optional[pulumi.Input[bool]]):
2024
+ def host_managed(self, value: Optional[pulumi.Input[_builtins.bool]]):
2171
2025
  pulumi.set(self, "host_managed", value)
2172
2026
 
2173
- @property
2027
+ @_builtins.property
2174
2028
  @pulumi.getter(name="hostSystemIds")
2175
- def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
2029
+ def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
2176
2030
  """
2177
2031
  The managed object IDs of the hosts to put in the cluster.
2178
2032
  """
2179
2033
  return pulumi.get(self, "host_system_ids")
2180
2034
 
2181
2035
  @host_system_ids.setter
2182
- def host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
2036
+ def host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
2183
2037
  pulumi.set(self, "host_system_ids", value)
2184
2038
 
2185
- @property
2039
+ @_builtins.property
2186
2040
  @pulumi.getter
2187
- def name(self) -> Optional[pulumi.Input[str]]:
2041
+ def name(self) -> Optional[pulumi.Input[_builtins.str]]:
2188
2042
  """
2189
2043
  The name of the cluster.
2190
2044
  """
2191
2045
  return pulumi.get(self, "name")
2192
2046
 
2193
2047
  @name.setter
2194
- def name(self, value: Optional[pulumi.Input[str]]):
2048
+ def name(self, value: Optional[pulumi.Input[_builtins.str]]):
2195
2049
  pulumi.set(self, "name", value)
2196
2050
 
2197
- @property
2051
+ @_builtins.property
2198
2052
  @pulumi.getter(name="proactiveHaAutomationLevel")
2199
- def proactive_ha_automation_level(self) -> Optional[pulumi.Input[str]]:
2053
+ def proactive_ha_automation_level(self) -> Optional[pulumi.Input[_builtins.str]]:
2200
2054
  """
2201
2055
  The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
2202
2056
  """
2203
2057
  return pulumi.get(self, "proactive_ha_automation_level")
2204
2058
 
2205
2059
  @proactive_ha_automation_level.setter
2206
- def proactive_ha_automation_level(self, value: Optional[pulumi.Input[str]]):
2060
+ def proactive_ha_automation_level(self, value: Optional[pulumi.Input[_builtins.str]]):
2207
2061
  pulumi.set(self, "proactive_ha_automation_level", value)
2208
2062
 
2209
- @property
2063
+ @_builtins.property
2210
2064
  @pulumi.getter(name="proactiveHaEnabled")
2211
- def proactive_ha_enabled(self) -> Optional[pulumi.Input[bool]]:
2065
+ def proactive_ha_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
2212
2066
  """
2213
2067
  Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
2214
2068
  """
2215
2069
  return pulumi.get(self, "proactive_ha_enabled")
2216
2070
 
2217
2071
  @proactive_ha_enabled.setter
2218
- def proactive_ha_enabled(self, value: Optional[pulumi.Input[bool]]):
2072
+ def proactive_ha_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
2219
2073
  pulumi.set(self, "proactive_ha_enabled", value)
2220
2074
 
2221
- @property
2075
+ @_builtins.property
2222
2076
  @pulumi.getter(name="proactiveHaModerateRemediation")
2223
- def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[str]]:
2077
+ def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[_builtins.str]]:
2224
2078
  """
2225
- The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
2226
- this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
2079
+ The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
2227
2080
  """
2228
2081
  return pulumi.get(self, "proactive_ha_moderate_remediation")
2229
2082
 
2230
2083
  @proactive_ha_moderate_remediation.setter
2231
- def proactive_ha_moderate_remediation(self, value: Optional[pulumi.Input[str]]):
2084
+ def proactive_ha_moderate_remediation(self, value: Optional[pulumi.Input[_builtins.str]]):
2232
2085
  pulumi.set(self, "proactive_ha_moderate_remediation", value)
2233
2086
 
2234
- @property
2087
+ @_builtins.property
2235
2088
  @pulumi.getter(name="proactiveHaProviderIds")
2236
- def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
2089
+ def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
2237
2090
  """
2238
2091
  The list of IDs for health update providers configured for this cluster.
2239
2092
  """
2240
2093
  return pulumi.get(self, "proactive_ha_provider_ids")
2241
2094
 
2242
2095
  @proactive_ha_provider_ids.setter
2243
- def proactive_ha_provider_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
2096
+ def proactive_ha_provider_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
2244
2097
  pulumi.set(self, "proactive_ha_provider_ids", value)
2245
2098
 
2246
- @property
2099
+ @_builtins.property
2247
2100
  @pulumi.getter(name="proactiveHaSevereRemediation")
2248
- def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[str]]:
2101
+ def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[_builtins.str]]:
2249
2102
  """
2250
- The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
2251
- cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
2103
+ The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
2252
2104
  """
2253
2105
  return pulumi.get(self, "proactive_ha_severe_remediation")
2254
2106
 
2255
2107
  @proactive_ha_severe_remediation.setter
2256
- def proactive_ha_severe_remediation(self, value: Optional[pulumi.Input[str]]):
2108
+ def proactive_ha_severe_remediation(self, value: Optional[pulumi.Input[_builtins.str]]):
2257
2109
  pulumi.set(self, "proactive_ha_severe_remediation", value)
2258
2110
 
2259
- @property
2111
+ @_builtins.property
2260
2112
  @pulumi.getter(name="resourcePoolId")
2261
- def resource_pool_id(self) -> Optional[pulumi.Input[str]]:
2113
+ def resource_pool_id(self) -> Optional[pulumi.Input[_builtins.str]]:
2262
2114
  """
2263
2115
  The managed object ID of the primary
2264
2116
  resource pool for this cluster. This can be passed directly to the
@@ -2269,46 +2121,46 @@ class _ComputeClusterState:
2269
2121
  return pulumi.get(self, "resource_pool_id")
2270
2122
 
2271
2123
  @resource_pool_id.setter
2272
- def resource_pool_id(self, value: Optional[pulumi.Input[str]]):
2124
+ def resource_pool_id(self, value: Optional[pulumi.Input[_builtins.str]]):
2273
2125
  pulumi.set(self, "resource_pool_id", value)
2274
2126
 
2275
- @property
2127
+ @_builtins.property
2276
2128
  @pulumi.getter
2277
- def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
2129
+ def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
2278
2130
  """
2279
2131
  The IDs of any tags to attach to this resource.
2280
2132
  """
2281
2133
  return pulumi.get(self, "tags")
2282
2134
 
2283
2135
  @tags.setter
2284
- def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
2136
+ def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
2285
2137
  pulumi.set(self, "tags", value)
2286
2138
 
2287
- @property
2139
+ @_builtins.property
2288
2140
  @pulumi.getter(name="vsanCompressionEnabled")
2289
- def vsan_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
2141
+ def vsan_compression_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
2290
2142
  """
2291
2143
  Whether the vSAN compression service is enabled for the cluster.
2292
2144
  """
2293
2145
  return pulumi.get(self, "vsan_compression_enabled")
2294
2146
 
2295
2147
  @vsan_compression_enabled.setter
2296
- def vsan_compression_enabled(self, value: Optional[pulumi.Input[bool]]):
2148
+ def vsan_compression_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
2297
2149
  pulumi.set(self, "vsan_compression_enabled", value)
2298
2150
 
2299
- @property
2151
+ @_builtins.property
2300
2152
  @pulumi.getter(name="vsanDedupEnabled")
2301
- def vsan_dedup_enabled(self) -> Optional[pulumi.Input[bool]]:
2153
+ def vsan_dedup_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
2302
2154
  """
2303
2155
  Whether the vSAN deduplication service is enabled for the cluster.
2304
2156
  """
2305
2157
  return pulumi.get(self, "vsan_dedup_enabled")
2306
2158
 
2307
2159
  @vsan_dedup_enabled.setter
2308
- def vsan_dedup_enabled(self, value: Optional[pulumi.Input[bool]]):
2160
+ def vsan_dedup_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
2309
2161
  pulumi.set(self, "vsan_dedup_enabled", value)
2310
2162
 
2311
- @property
2163
+ @_builtins.property
2312
2164
  @pulumi.getter(name="vsanDiskGroups")
2313
2165
  def vsan_disk_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]:
2314
2166
  """
@@ -2320,55 +2172,55 @@ class _ComputeClusterState:
2320
2172
  def vsan_disk_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]):
2321
2173
  pulumi.set(self, "vsan_disk_groups", value)
2322
2174
 
2323
- @property
2175
+ @_builtins.property
2324
2176
  @pulumi.getter(name="vsanDitEncryptionEnabled")
2325
- def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
2177
+ def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
2326
2178
  """
2327
2179
  Whether the vSAN data-in-transit encryption is enabled for the cluster.
2328
2180
  """
2329
2181
  return pulumi.get(self, "vsan_dit_encryption_enabled")
2330
2182
 
2331
2183
  @vsan_dit_encryption_enabled.setter
2332
- def vsan_dit_encryption_enabled(self, value: Optional[pulumi.Input[bool]]):
2184
+ def vsan_dit_encryption_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
2333
2185
  pulumi.set(self, "vsan_dit_encryption_enabled", value)
2334
2186
 
2335
- @property
2187
+ @_builtins.property
2336
2188
  @pulumi.getter(name="vsanDitRekeyInterval")
2337
- def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[int]]:
2189
+ def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[_builtins.int]]:
2338
2190
  """
2339
2191
  When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
2340
2192
  """
2341
2193
  return pulumi.get(self, "vsan_dit_rekey_interval")
2342
2194
 
2343
2195
  @vsan_dit_rekey_interval.setter
2344
- def vsan_dit_rekey_interval(self, value: Optional[pulumi.Input[int]]):
2196
+ def vsan_dit_rekey_interval(self, value: Optional[pulumi.Input[_builtins.int]]):
2345
2197
  pulumi.set(self, "vsan_dit_rekey_interval", value)
2346
2198
 
2347
- @property
2199
+ @_builtins.property
2348
2200
  @pulumi.getter(name="vsanEnabled")
2349
- def vsan_enabled(self) -> Optional[pulumi.Input[bool]]:
2201
+ def vsan_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
2350
2202
  """
2351
2203
  Whether the vSAN service is enabled for the cluster.
2352
2204
  """
2353
2205
  return pulumi.get(self, "vsan_enabled")
2354
2206
 
2355
2207
  @vsan_enabled.setter
2356
- def vsan_enabled(self, value: Optional[pulumi.Input[bool]]):
2208
+ def vsan_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
2357
2209
  pulumi.set(self, "vsan_enabled", value)
2358
2210
 
2359
- @property
2211
+ @_builtins.property
2360
2212
  @pulumi.getter(name="vsanEsaEnabled")
2361
- def vsan_esa_enabled(self) -> Optional[pulumi.Input[bool]]:
2213
+ def vsan_esa_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
2362
2214
  """
2363
2215
  Whether the vSAN ESA service is enabled for the cluster.
2364
2216
  """
2365
2217
  return pulumi.get(self, "vsan_esa_enabled")
2366
2218
 
2367
2219
  @vsan_esa_enabled.setter
2368
- def vsan_esa_enabled(self, value: Optional[pulumi.Input[bool]]):
2220
+ def vsan_esa_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
2369
2221
  pulumi.set(self, "vsan_esa_enabled", value)
2370
2222
 
2371
- @property
2223
+ @_builtins.property
2372
2224
  @pulumi.getter(name="vsanFaultDomains")
2373
2225
  def vsan_fault_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]:
2374
2226
  """
@@ -2380,43 +2232,43 @@ class _ComputeClusterState:
2380
2232
  def vsan_fault_domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]):
2381
2233
  pulumi.set(self, "vsan_fault_domains", value)
2382
2234
 
2383
- @property
2235
+ @_builtins.property
2384
2236
  @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
2385
- def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
2237
+ def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
2386
2238
  """
2387
2239
  Whether the vSAN network diagnostic mode is enabled for the cluster.
2388
2240
  """
2389
2241
  return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")
2390
2242
 
2391
2243
  @vsan_network_diagnostic_mode_enabled.setter
2392
- def vsan_network_diagnostic_mode_enabled(self, value: Optional[pulumi.Input[bool]]):
2244
+ def vsan_network_diagnostic_mode_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
2393
2245
  pulumi.set(self, "vsan_network_diagnostic_mode_enabled", value)
2394
2246
 
2395
- @property
2247
+ @_builtins.property
2396
2248
  @pulumi.getter(name="vsanPerformanceEnabled")
2397
- def vsan_performance_enabled(self) -> Optional[pulumi.Input[bool]]:
2249
+ def vsan_performance_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
2398
2250
  """
2399
2251
  Whether the vSAN performance service is enabled for the cluster.
2400
2252
  """
2401
2253
  return pulumi.get(self, "vsan_performance_enabled")
2402
2254
 
2403
2255
  @vsan_performance_enabled.setter
2404
- def vsan_performance_enabled(self, value: Optional[pulumi.Input[bool]]):
2256
+ def vsan_performance_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
2405
2257
  pulumi.set(self, "vsan_performance_enabled", value)
2406
2258
 
2407
- @property
2259
+ @_builtins.property
2408
2260
  @pulumi.getter(name="vsanRemoteDatastoreIds")
2409
- def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
2261
+ def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
2410
2262
  """
2411
2263
  The managed object IDs of the vSAN datastore to be mounted on the cluster.
2412
2264
  """
2413
2265
  return pulumi.get(self, "vsan_remote_datastore_ids")
2414
2266
 
2415
2267
  @vsan_remote_datastore_ids.setter
2416
- def vsan_remote_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
2268
+ def vsan_remote_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
2417
2269
  pulumi.set(self, "vsan_remote_datastore_ids", value)
2418
2270
 
2419
- @property
2271
+ @_builtins.property
2420
2272
  @pulumi.getter(name="vsanStretchedCluster")
2421
2273
  def vsan_stretched_cluster(self) -> Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]:
2422
2274
  """
@@ -2428,105 +2280,106 @@ class _ComputeClusterState:
2428
2280
  def vsan_stretched_cluster(self, value: Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]):
2429
2281
  pulumi.set(self, "vsan_stretched_cluster", value)
2430
2282
 
2431
- @property
2283
+ @_builtins.property
2432
2284
  @pulumi.getter(name="vsanUnmapEnabled")
2433
- def vsan_unmap_enabled(self) -> Optional[pulumi.Input[bool]]:
2285
+ def vsan_unmap_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
2434
2286
  """
2435
2287
  Whether the vSAN unmap service is enabled for the cluster.
2436
2288
  """
2437
2289
  return pulumi.get(self, "vsan_unmap_enabled")
2438
2290
 
2439
2291
  @vsan_unmap_enabled.setter
2440
- def vsan_unmap_enabled(self, value: Optional[pulumi.Input[bool]]):
2292
+ def vsan_unmap_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
2441
2293
  pulumi.set(self, "vsan_unmap_enabled", value)
2442
2294
 
2443
- @property
2295
+ @_builtins.property
2444
2296
  @pulumi.getter(name="vsanVerboseModeEnabled")
2445
- def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
2297
+ def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
2446
2298
  """
2447
2299
  Whether the vSAN verbose mode is enabled for the cluster.
2448
2300
  """
2449
2301
  return pulumi.get(self, "vsan_verbose_mode_enabled")
2450
2302
 
2451
2303
  @vsan_verbose_mode_enabled.setter
2452
- def vsan_verbose_mode_enabled(self, value: Optional[pulumi.Input[bool]]):
2304
+ def vsan_verbose_mode_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
2453
2305
  pulumi.set(self, "vsan_verbose_mode_enabled", value)
2454
2306
 
2455
2307
 
2308
+ @pulumi.type_token("vsphere:index/computeCluster:ComputeCluster")
2456
2309
  class ComputeCluster(pulumi.CustomResource):
2457
2310
  @overload
2458
2311
  def __init__(__self__,
2459
2312
  resource_name: str,
2460
2313
  opts: Optional[pulumi.ResourceOptions] = None,
2461
- custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
2462
- datacenter_id: Optional[pulumi.Input[str]] = None,
2463
- dpm_automation_level: Optional[pulumi.Input[str]] = None,
2464
- dpm_enabled: Optional[pulumi.Input[bool]] = None,
2465
- dpm_threshold: Optional[pulumi.Input[int]] = None,
2466
- drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
2467
- drs_automation_level: Optional[pulumi.Input[str]] = None,
2468
- drs_enable_predictive_drs: Optional[pulumi.Input[bool]] = None,
2469
- drs_enable_vm_overrides: Optional[pulumi.Input[bool]] = None,
2470
- drs_enabled: Optional[pulumi.Input[bool]] = None,
2471
- drs_migration_threshold: Optional[pulumi.Input[int]] = None,
2472
- drs_scale_descendants_shares: Optional[pulumi.Input[str]] = None,
2473
- folder: Optional[pulumi.Input[str]] = None,
2474
- force_evacuate_on_destroy: Optional[pulumi.Input[bool]] = None,
2475
- ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2476
- ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[int]] = None,
2477
- ha_admission_control_performance_tolerance: Optional[pulumi.Input[int]] = None,
2478
- ha_admission_control_policy: Optional[pulumi.Input[str]] = None,
2479
- ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[bool]] = None,
2480
- ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[int]] = None,
2481
- ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[int]] = None,
2482
- ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[int]] = None,
2483
- ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[int]] = None,
2484
- ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[bool]] = None,
2485
- ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
2486
- ha_datastore_apd_recovery_action: Optional[pulumi.Input[str]] = None,
2487
- ha_datastore_apd_response: Optional[pulumi.Input[str]] = None,
2488
- ha_datastore_apd_response_delay: Optional[pulumi.Input[int]] = None,
2489
- ha_datastore_pdl_response: Optional[pulumi.Input[str]] = None,
2490
- ha_enabled: Optional[pulumi.Input[bool]] = None,
2491
- ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2492
- ha_heartbeat_datastore_policy: Optional[pulumi.Input[str]] = None,
2493
- ha_host_isolation_response: Optional[pulumi.Input[str]] = None,
2494
- ha_host_monitoring: Optional[pulumi.Input[str]] = None,
2495
- ha_vm_component_protection: Optional[pulumi.Input[str]] = None,
2496
- ha_vm_dependency_restart_condition: Optional[pulumi.Input[str]] = None,
2497
- ha_vm_failure_interval: Optional[pulumi.Input[int]] = None,
2498
- ha_vm_maximum_failure_window: Optional[pulumi.Input[int]] = None,
2499
- ha_vm_maximum_resets: Optional[pulumi.Input[int]] = None,
2500
- ha_vm_minimum_uptime: Optional[pulumi.Input[int]] = None,
2501
- ha_vm_monitoring: Optional[pulumi.Input[str]] = None,
2502
- ha_vm_restart_additional_delay: Optional[pulumi.Input[int]] = None,
2503
- ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
2504
- ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
2505
- host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
2314
+ custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
2315
+ datacenter_id: Optional[pulumi.Input[_builtins.str]] = None,
2316
+ dpm_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
2317
+ dpm_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2318
+ dpm_threshold: Optional[pulumi.Input[_builtins.int]] = None,
2319
+ drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
2320
+ drs_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
2321
+ drs_enable_predictive_drs: Optional[pulumi.Input[_builtins.bool]] = None,
2322
+ drs_enable_vm_overrides: Optional[pulumi.Input[_builtins.bool]] = None,
2323
+ drs_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2324
+ drs_migration_threshold: Optional[pulumi.Input[_builtins.int]] = None,
2325
+ drs_scale_descendants_shares: Optional[pulumi.Input[_builtins.str]] = None,
2326
+ folder: Optional[pulumi.Input[_builtins.str]] = None,
2327
+ force_evacuate_on_destroy: Optional[pulumi.Input[_builtins.bool]] = None,
2328
+ ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2329
+ ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
2330
+ ha_admission_control_performance_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
2331
+ ha_admission_control_policy: Optional[pulumi.Input[_builtins.str]] = None,
2332
+ ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[_builtins.bool]] = None,
2333
+ ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[_builtins.int]] = None,
2334
+ ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[_builtins.int]] = None,
2335
+ ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[_builtins.int]] = None,
2336
+ ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[_builtins.int]] = None,
2337
+ ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[_builtins.bool]] = None,
2338
+ ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
2339
+ ha_datastore_apd_recovery_action: Optional[pulumi.Input[_builtins.str]] = None,
2340
+ ha_datastore_apd_response: Optional[pulumi.Input[_builtins.str]] = None,
2341
+ ha_datastore_apd_response_delay: Optional[pulumi.Input[_builtins.int]] = None,
2342
+ ha_datastore_pdl_response: Optional[pulumi.Input[_builtins.str]] = None,
2343
+ ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2344
+ ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2345
+ ha_heartbeat_datastore_policy: Optional[pulumi.Input[_builtins.str]] = None,
2346
+ ha_host_isolation_response: Optional[pulumi.Input[_builtins.str]] = None,
2347
+ ha_host_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
2348
+ ha_vm_component_protection: Optional[pulumi.Input[_builtins.str]] = None,
2349
+ ha_vm_dependency_restart_condition: Optional[pulumi.Input[_builtins.str]] = None,
2350
+ ha_vm_failure_interval: Optional[pulumi.Input[_builtins.int]] = None,
2351
+ ha_vm_maximum_failure_window: Optional[pulumi.Input[_builtins.int]] = None,
2352
+ ha_vm_maximum_resets: Optional[pulumi.Input[_builtins.int]] = None,
2353
+ ha_vm_minimum_uptime: Optional[pulumi.Input[_builtins.int]] = None,
2354
+ ha_vm_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
2355
+ ha_vm_restart_additional_delay: Optional[pulumi.Input[_builtins.int]] = None,
2356
+ ha_vm_restart_priority: Optional[pulumi.Input[_builtins.str]] = None,
2357
+ ha_vm_restart_timeout: Optional[pulumi.Input[_builtins.int]] = None,
2358
+ host_cluster_exit_timeout: Optional[pulumi.Input[_builtins.int]] = None,
2506
2359
  host_image: Optional[pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']]] = None,
2507
- host_managed: Optional[pulumi.Input[bool]] = None,
2508
- host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2509
- name: Optional[pulumi.Input[str]] = None,
2510
- proactive_ha_automation_level: Optional[pulumi.Input[str]] = None,
2511
- proactive_ha_enabled: Optional[pulumi.Input[bool]] = None,
2512
- proactive_ha_moderate_remediation: Optional[pulumi.Input[str]] = None,
2513
- proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2514
- proactive_ha_severe_remediation: Optional[pulumi.Input[str]] = None,
2515
- tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2516
- vsan_compression_enabled: Optional[pulumi.Input[bool]] = None,
2517
- vsan_dedup_enabled: Optional[pulumi.Input[bool]] = None,
2360
+ host_managed: Optional[pulumi.Input[_builtins.bool]] = None,
2361
+ host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2362
+ name: Optional[pulumi.Input[_builtins.str]] = None,
2363
+ proactive_ha_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
2364
+ proactive_ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2365
+ proactive_ha_moderate_remediation: Optional[pulumi.Input[_builtins.str]] = None,
2366
+ proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2367
+ proactive_ha_severe_remediation: Optional[pulumi.Input[_builtins.str]] = None,
2368
+ tags: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2369
+ vsan_compression_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2370
+ vsan_dedup_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2518
2371
  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]]] = None,
2519
- vsan_dit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
2520
- vsan_dit_rekey_interval: Optional[pulumi.Input[int]] = None,
2521
- vsan_enabled: Optional[pulumi.Input[bool]] = None,
2522
- vsan_esa_enabled: Optional[pulumi.Input[bool]] = None,
2372
+ vsan_dit_encryption_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2373
+ vsan_dit_rekey_interval: Optional[pulumi.Input[_builtins.int]] = None,
2374
+ vsan_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2375
+ vsan_esa_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2523
2376
  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]]] = None,
2524
- vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[bool]] = None,
2525
- vsan_performance_enabled: Optional[pulumi.Input[bool]] = None,
2526
- vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2377
+ vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2378
+ vsan_performance_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2379
+ vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2527
2380
  vsan_stretched_cluster: Optional[pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']]] = None,
2528
- vsan_unmap_enabled: Optional[pulumi.Input[bool]] = None,
2529
- vsan_verbose_mode_enabled: Optional[pulumi.Input[bool]] = None,
2381
+ vsan_unmap_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2382
+ vsan_verbose_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2530
2383
  __props__=None):
2531
2384
  """
2532
2385
  > **A note on the naming of this resource:** VMware refers to clusters of
@@ -2559,6 +2412,8 @@ class ComputeCluster(pulumi.CustomResource):
 
  path to the cluster, via the following command:
 
+ [docs-import]: https://developer.hashicorp.com/terraform/cli/import
+
  hcl
 
  variable "datacenter" {
@@ -2575,9 +2430,9 @@ class ComputeCluster(pulumi.CustomResource):
 
  resource "vsphere_compute_cluster" "compute_cluster" {
 
- name = "cluster-01"
+ name = "cluster-01"
 
- datacenter_id = data.vsphere_datacenter.datacenter.id
+ datacenter_id = data.vsphere_datacenter.datacenter.id
 
  }
 
  }
@@ -2605,9 +2460,7 @@ class ComputeCluster(pulumi.CustomResource):
 
  ha_datastore_pdl_response = "restartAggressive"
 
- ... etc.
-
- console
+ }
 
  ```sh
  $ pulumi import vsphere:index/computeCluster:ComputeCluster compute_cluster /dc-01/host/cluster-01
@@ -2619,122 +2472,85 @@ class ComputeCluster(pulumi.CustomResource):
2619
2472
 
2620
2473
  :param str resource_name: The name of the resource.
2621
2474
  :param pulumi.ResourceOptions opts: Options for the resource.
2622
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: A map of custom attribute ids to attribute
2475
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] custom_attributes: A map of custom attribute ids to attribute
2623
2476
  value strings to set for the datastore cluster.
2624
2477
 
2625
2478
  > **NOTE:** Custom attributes are unsupported on direct ESXi connections
2626
2479
  and require vCenter Server.
2627
- :param pulumi.Input[str] datacenter_id: The managed object ID of
2480
+ :param pulumi.Input[_builtins.str] datacenter_id: The managed object ID of
2628
2481
  the datacenter to create the cluster in. Forces a new resource if changed.
2629
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
2630
- :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
2631
- machines in the cluster. Requires that DRS be enabled.
2632
- :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
2633
- affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
2634
- setting.
2635
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
2636
- :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
2637
- fullyAutomated.
2638
- :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
2639
- :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
2640
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
2641
- :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
2642
- more imbalance while a higher setting will tolerate less.
2643
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
2644
- :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
2482
+ :param pulumi.Input[_builtins.str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
2483
+ :param pulumi.Input[_builtins.bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual machines in the cluster. Requires that DRS be enabled.
2484
+ :param pulumi.Input[_builtins.int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting.
2485
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
2486
+ :param pulumi.Input[_builtins.str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated.
2487
+ :param pulumi.Input[_builtins.bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
2488
+ :param pulumi.Input[_builtins.bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
2489
+ :param pulumi.Input[_builtins.bool] drs_enabled: Enable DRS for this cluster.
2490
+ :param pulumi.Input[_builtins.int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less.
2491
+ :param pulumi.Input[_builtins.str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
2492
+ :param pulumi.Input[_builtins.str] folder: The relative path to a folder to put this cluster in.
2645
2493
  This is a path relative to the datacenter you are deploying the cluster to.
2646
2494
  Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
2647
2495
  The provider will place a cluster named `compute-cluster-test` in a
2648
2496
  host folder located at `/dc1/host/foo/bar`, with the final inventory path
2649
2497
  being `/dc1/host/foo/bar/datastore-cluster-test`.
2650
- :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
2651
- for testing and is not recommended in normal use.
2652
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
2653
- failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
2654
- will ignore the host when making recommendations.
2655
- :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
2656
- machine operations. The maximum is one less than the number of hosts in the cluster.
2657
- :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
2658
- warnings only, whereas a value of 100 disables the setting.
2659
- :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
2660
- permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
2661
- slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
2662
- issues.
2663
- :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
2664
- subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
2665
- from the total amount of resources in the cluster. Disable to supply user-defined values.
2666
- :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
2667
- the cluster to reserve for failover.
2668
- :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
2669
- the cluster to reserve for failover.
2670
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
2671
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
2672
- :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
2673
- to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
2674
- currently in the cluster.
2675
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
2676
- :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
2677
- affected datastore clears in the middle of an APD event. Can be one of none or reset.
2678
- :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
2679
- detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
2680
- restartAggressive.
2681
- :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
2682
- the response action defined in ha_datastore_apd_response.
2683
- :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
2684
- detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
2685
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
2686
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
2687
- ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
2688
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
2689
- allFeasibleDsWithUserPreference.
2690
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
2691
- Can be one of none, powerOff, or shutdown.
2692
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
2693
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
2694
- failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
2695
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
2696
- on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
2697
- :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
2698
- failed. The value is in seconds.
2699
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
2700
- attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
2701
- time is allotted.
2702
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
2703
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
2704
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
2705
- vmMonitoringOnly, or vmAndAppMonitoring.
2706
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
2707
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
2708
- high, or highest.
2709
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
2710
- proceeding with the next priority.
2711
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
2498
+ :param pulumi.Input[_builtins.bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. This flag exists mainly for testing and is not recommended in normal use.
2499
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
2500
+ :param pulumi.Input[_builtins.int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster.
2501
+ :param pulumi.Input[_builtins.int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting.
2502
+ :param pulumi.Input[_builtins.str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service issues.
2503
+ :param pulumi.Input[_builtins.bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values.
2504
+ :param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in the cluster to reserve for failover.
2505
+ :param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in the cluster to reserve for failover.
2506
+ :param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
2507
+ :param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
2508
+ :param pulumi.Input[_builtins.bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values for CPU and memory slot sizes. The default is to gather an automatic average based on all powered-on virtual machines currently in the cluster.
2509
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
2510
+ :param pulumi.Input[_builtins.str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset.
2511
+ :param pulumi.Input[_builtins.str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
2512
+ :param pulumi.Input[_builtins.int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the response action defined in ha_datastore_apd_response.
2513
+ :param pulumi.Input[_builtins.str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
2514
+ :param pulumi.Input[_builtins.bool] ha_enabled: Enable vSphere HA for this cluster.
2515
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
2516
+ :param pulumi.Input[_builtins.str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference.
2517
+ :param pulumi.Input[_builtins.str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown.
2518
+ :param pulumi.Input[_builtins.str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
2519
+ :param pulumi.Input[_builtins.str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
2520
+ :param pulumi.Input[_builtins.str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
2521
+ :param pulumi.Input[_builtins.int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. The value is in seconds.
2522
+ :param pulumi.Input[_builtins.int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted.
2523
+ :param pulumi.Input[_builtins.int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
2524
+ :param pulumi.Input[_builtins.int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
2525
+ :param pulumi.Input[_builtins.str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring.
2526
+ :param pulumi.Input[_builtins.int] ha_vm_restart_additional_delay: Additional delay, in seconds, after the ready condition is met. A VM is considered ready at this point.
2527
+ :param pulumi.Input[_builtins.str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest.
2528
+ :param pulumi.Input[_builtins.int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority.
2529
+ :param pulumi.Input[_builtins.int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
2712
2530
  :param pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']] host_image: Details about the host image which should be applied to the cluster.
2713
- :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
2714
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
2715
- :param pulumi.Input[str] name: The name of the cluster.
2716
- :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
2717
- :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
2718
- :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
2719
- this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
2720
- :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
2721
- :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
2722
- cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
2723
- :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
2724
- :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
2725
- :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
2531
+ :param pulumi.Input[_builtins.bool] host_managed: Must be set if cluster enrollment is managed from the host resource.
2532
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
2533
+ :param pulumi.Input[_builtins.str] name: The name of the cluster.
2534
+ :param pulumi.Input[_builtins.str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
2535
+ :param pulumi.Input[_builtins.bool] proactive_ha_enabled: Enables proactive HA, allowing vSphere to get HA data from external providers and use DRS to perform remediation.
2536
+ :param pulumi.Input[_builtins.str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
2537
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
2538
+ :param pulumi.Input[_builtins.str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
2539
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] tags: The IDs of any tags to attach to this resource.
2540
+ :param pulumi.Input[_builtins.bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
2541
+ :param pulumi.Input[_builtins.bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
2726
2542
  :param pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
2727
- :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
2728
- :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
2729
- :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
2730
- :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
2543
+ :param pulumi.Input[_builtins.bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
2544
+ :param pulumi.Input[_builtins.int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
2545
+ :param pulumi.Input[_builtins.bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
2546
+ :param pulumi.Input[_builtins.bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
2731
2547
  :param pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]] vsan_fault_domains: The configuration for vSAN fault domains.
2732
- :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
2733
- :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
2734
- :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
2548
+ :param pulumi.Input[_builtins.bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
2549
+ :param pulumi.Input[_builtins.bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
2550
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
2735
2551
  :param pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']] vsan_stretched_cluster: The configuration for stretched cluster.
2736
- :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
2737
- :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
2552
+ :param pulumi.Input[_builtins.bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
2553
+ :param pulumi.Input[_builtins.bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
2738
2554
  """
2739
2555
  ...
2740
2556
  @overload
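
The constructor arguments documented in the parameter list above map one-to-one onto keyword arguments of `ComputeCluster`. A minimal Pulumi Python sketch follows; the datacenter name, resource names, and the specific HA/vSAN values are illustrative assumptions, not values taken from this diff:

```python
import pulumi_vsphere as vsphere

# Assumed lookup; any existing datacenter name works here.
datacenter = vsphere.get_datacenter(name="dc-01")

cluster = vsphere.ComputeCluster(
    "compute_cluster",
    name="cluster-01",
    datacenter_id=datacenter.id,
    # DRS / HA toggles described in the parameter list above.
    drs_enabled=True,
    drs_automation_level="fullyAutomated",
    ha_enabled=True,
    # resourcePercentage admission control with user-defined reservations.
    ha_admission_control_policy="resourcePercentage",
    ha_admission_control_resource_percentage_auto_compute=False,
    ha_admission_control_resource_percentage_cpu=25,
    ha_admission_control_resource_percentage_memory=25,
    # Optional vSAN services controlled by the vsan_* flags above.
    vsan_enabled=True,
    vsan_compression_enabled=True,
)
```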
@@ -2773,6 +2589,8 @@ class ComputeCluster(pulumi.CustomResource):
2773
2589
 
2774
2590
  path to the cluster, via the following command:
2775
2591
 
2592
+ [docs-import]: https://developer.hashicorp.com/terraform/cli/import
2593
+
2776
2594
  hcl
2777
2595
 
2778
2596
  variable "datacenter" {
@@ -2789,9 +2607,9 @@ class ComputeCluster(pulumi.CustomResource):
2789
2607
 
2790
2608
  resource "vsphere_compute_cluster" "compute_cluster" {
2791
2609
 
2792
- name = "cluster-01"
2610
+ name = "cluster-01"
2793
2611
 
2794
- datacenter_id = data.vsphere_datacenter.datacenter.id
2612
+ datacenter_id = data.vsphere_datacenter.datacenter.id
2795
2613
 
2796
2614
  }
2797
2615
 
@@ -2819,9 +2637,7 @@ class ComputeCluster(pulumi.CustomResource):
2819
2637
 
2820
2638
  ha_datastore_pdl_response = "restartAggressive"
2821
2639
 
2822
- ... etc.
2823
-
2824
- console
2640
+ }
2825
2641
 
2826
2642
  ```sh
2827
2643
  $ pulumi import vsphere:index/computeCluster:ComputeCluster compute_cluster /dc-01/host/cluster-01
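
After the CLI import shown above, the cluster still has to be declared in the program so later runs manage it. The same adoption can also be expressed in code with the `import_` resource option; a sketch, assuming the datacenter and cluster names from the inventory path `/dc-01/host/cluster-01`:

```python
import pulumi
import pulumi_vsphere as vsphere

# Assumed lookup; "dc-01" matches the inventory path used in the import command.
datacenter = vsphere.get_datacenter(name="dc-01")

# In-program adoption of the existing cluster via the `import_` resource option,
# equivalent to the CLI command above.
compute_cluster = vsphere.ComputeCluster(
    "compute_cluster",
    name="cluster-01",
    datacenter_id=datacenter.id,
    opts=pulumi.ResourceOptions(import_="/dc-01/host/cluster-01"),
)
```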
@@ -2846,75 +2662,75 @@ class ComputeCluster(pulumi.CustomResource):
2846
2662
  def _internal_init(__self__,
2847
2663
  resource_name: str,
2848
2664
  opts: Optional[pulumi.ResourceOptions] = None,
2849
- custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
2850
- datacenter_id: Optional[pulumi.Input[str]] = None,
2851
- dpm_automation_level: Optional[pulumi.Input[str]] = None,
2852
- dpm_enabled: Optional[pulumi.Input[bool]] = None,
2853
- dpm_threshold: Optional[pulumi.Input[int]] = None,
2854
- drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
2855
- drs_automation_level: Optional[pulumi.Input[str]] = None,
2856
- drs_enable_predictive_drs: Optional[pulumi.Input[bool]] = None,
2857
- drs_enable_vm_overrides: Optional[pulumi.Input[bool]] = None,
2858
- drs_enabled: Optional[pulumi.Input[bool]] = None,
2859
- drs_migration_threshold: Optional[pulumi.Input[int]] = None,
2860
- drs_scale_descendants_shares: Optional[pulumi.Input[str]] = None,
2861
- folder: Optional[pulumi.Input[str]] = None,
2862
- force_evacuate_on_destroy: Optional[pulumi.Input[bool]] = None,
2863
- ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2864
- ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[int]] = None,
2865
- ha_admission_control_performance_tolerance: Optional[pulumi.Input[int]] = None,
2866
- ha_admission_control_policy: Optional[pulumi.Input[str]] = None,
2867
- ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[bool]] = None,
2868
- ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[int]] = None,
2869
- ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[int]] = None,
2870
- ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[int]] = None,
2871
- ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[int]] = None,
2872
- ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[bool]] = None,
2873
- ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
2874
- ha_datastore_apd_recovery_action: Optional[pulumi.Input[str]] = None,
2875
- ha_datastore_apd_response: Optional[pulumi.Input[str]] = None,
2876
- ha_datastore_apd_response_delay: Optional[pulumi.Input[int]] = None,
2877
- ha_datastore_pdl_response: Optional[pulumi.Input[str]] = None,
2878
- ha_enabled: Optional[pulumi.Input[bool]] = None,
2879
- ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2880
- ha_heartbeat_datastore_policy: Optional[pulumi.Input[str]] = None,
2881
- ha_host_isolation_response: Optional[pulumi.Input[str]] = None,
2882
- ha_host_monitoring: Optional[pulumi.Input[str]] = None,
2883
- ha_vm_component_protection: Optional[pulumi.Input[str]] = None,
2884
- ha_vm_dependency_restart_condition: Optional[pulumi.Input[str]] = None,
2885
- ha_vm_failure_interval: Optional[pulumi.Input[int]] = None,
2886
- ha_vm_maximum_failure_window: Optional[pulumi.Input[int]] = None,
2887
- ha_vm_maximum_resets: Optional[pulumi.Input[int]] = None,
2888
- ha_vm_minimum_uptime: Optional[pulumi.Input[int]] = None,
2889
- ha_vm_monitoring: Optional[pulumi.Input[str]] = None,
2890
- ha_vm_restart_additional_delay: Optional[pulumi.Input[int]] = None,
2891
- ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
2892
- ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
2893
- host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
2665
+ custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
2666
+ datacenter_id: Optional[pulumi.Input[_builtins.str]] = None,
2667
+ dpm_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
2668
+ dpm_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2669
+ dpm_threshold: Optional[pulumi.Input[_builtins.int]] = None,
2670
+ drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
2671
+ drs_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
2672
+ drs_enable_predictive_drs: Optional[pulumi.Input[_builtins.bool]] = None,
2673
+ drs_enable_vm_overrides: Optional[pulumi.Input[_builtins.bool]] = None,
2674
+ drs_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2675
+ drs_migration_threshold: Optional[pulumi.Input[_builtins.int]] = None,
2676
+ drs_scale_descendants_shares: Optional[pulumi.Input[_builtins.str]] = None,
2677
+ folder: Optional[pulumi.Input[_builtins.str]] = None,
2678
+ force_evacuate_on_destroy: Optional[pulumi.Input[_builtins.bool]] = None,
2679
+ ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2680
+ ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
2681
+ ha_admission_control_performance_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
2682
+ ha_admission_control_policy: Optional[pulumi.Input[_builtins.str]] = None,
2683
+ ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[_builtins.bool]] = None,
2684
+ ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[_builtins.int]] = None,
2685
+ ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[_builtins.int]] = None,
2686
+ ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[_builtins.int]] = None,
2687
+ ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[_builtins.int]] = None,
2688
+ ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[_builtins.bool]] = None,
2689
+ ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
2690
+ ha_datastore_apd_recovery_action: Optional[pulumi.Input[_builtins.str]] = None,
2691
+ ha_datastore_apd_response: Optional[pulumi.Input[_builtins.str]] = None,
2692
+ ha_datastore_apd_response_delay: Optional[pulumi.Input[_builtins.int]] = None,
2693
+ ha_datastore_pdl_response: Optional[pulumi.Input[_builtins.str]] = None,
2694
+ ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2695
+ ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2696
+ ha_heartbeat_datastore_policy: Optional[pulumi.Input[_builtins.str]] = None,
2697
+ ha_host_isolation_response: Optional[pulumi.Input[_builtins.str]] = None,
2698
+ ha_host_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
2699
+ ha_vm_component_protection: Optional[pulumi.Input[_builtins.str]] = None,
2700
+ ha_vm_dependency_restart_condition: Optional[pulumi.Input[_builtins.str]] = None,
2701
+ ha_vm_failure_interval: Optional[pulumi.Input[_builtins.int]] = None,
2702
+ ha_vm_maximum_failure_window: Optional[pulumi.Input[_builtins.int]] = None,
2703
+ ha_vm_maximum_resets: Optional[pulumi.Input[_builtins.int]] = None,
2704
+ ha_vm_minimum_uptime: Optional[pulumi.Input[_builtins.int]] = None,
2705
+ ha_vm_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
2706
+ ha_vm_restart_additional_delay: Optional[pulumi.Input[_builtins.int]] = None,
2707
+ ha_vm_restart_priority: Optional[pulumi.Input[_builtins.str]] = None,
2708
+ ha_vm_restart_timeout: Optional[pulumi.Input[_builtins.int]] = None,
2709
+ host_cluster_exit_timeout: Optional[pulumi.Input[_builtins.int]] = None,
2894
2710
  host_image: Optional[pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']]] = None,
2895
- host_managed: Optional[pulumi.Input[bool]] = None,
2896
- host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2897
- name: Optional[pulumi.Input[str]] = None,
2898
- proactive_ha_automation_level: Optional[pulumi.Input[str]] = None,
2899
- proactive_ha_enabled: Optional[pulumi.Input[bool]] = None,
2900
- proactive_ha_moderate_remediation: Optional[pulumi.Input[str]] = None,
2901
- proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2902
- proactive_ha_severe_remediation: Optional[pulumi.Input[str]] = None,
2903
- tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2904
- vsan_compression_enabled: Optional[pulumi.Input[bool]] = None,
2905
- vsan_dedup_enabled: Optional[pulumi.Input[bool]] = None,
2711
+ host_managed: Optional[pulumi.Input[_builtins.bool]] = None,
2712
+ host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2713
+ name: Optional[pulumi.Input[_builtins.str]] = None,
2714
+ proactive_ha_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
2715
+ proactive_ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2716
+ proactive_ha_moderate_remediation: Optional[pulumi.Input[_builtins.str]] = None,
2717
+ proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2718
+ proactive_ha_severe_remediation: Optional[pulumi.Input[_builtins.str]] = None,
2719
+ tags: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2720
+ vsan_compression_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2721
+ vsan_dedup_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2906
2722
  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]]] = None,
2907
- vsan_dit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
2908
- vsan_dit_rekey_interval: Optional[pulumi.Input[int]] = None,
2909
- vsan_enabled: Optional[pulumi.Input[bool]] = None,
2910
- vsan_esa_enabled: Optional[pulumi.Input[bool]] = None,
2723
+ vsan_dit_encryption_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2724
+ vsan_dit_rekey_interval: Optional[pulumi.Input[_builtins.int]] = None,
2725
+ vsan_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2726
+ vsan_esa_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2911
2727
  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]]] = None,
2912
- vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[bool]] = None,
2913
- vsan_performance_enabled: Optional[pulumi.Input[bool]] = None,
2914
- vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2728
+ vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2729
+ vsan_performance_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2730
+ vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2915
2731
  vsan_stretched_cluster: Optional[pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']]] = None,
2916
- vsan_unmap_enabled: Optional[pulumi.Input[bool]] = None,
2917
- vsan_verbose_mode_enabled: Optional[pulumi.Input[bool]] = None,
2732
+ vsan_unmap_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2733
+ vsan_verbose_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2918
2734
  __props__=None):
2919
2735
  opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
2920
2736
  if not isinstance(opts, pulumi.ResourceOptions):
@@ -3006,76 +2822,76 @@ class ComputeCluster(pulumi.CustomResource):
3006
2822
  def get(resource_name: str,
3007
2823
  id: pulumi.Input[str],
3008
2824
  opts: Optional[pulumi.ResourceOptions] = None,
3009
- custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
3010
- datacenter_id: Optional[pulumi.Input[str]] = None,
3011
- dpm_automation_level: Optional[pulumi.Input[str]] = None,
3012
- dpm_enabled: Optional[pulumi.Input[bool]] = None,
3013
- dpm_threshold: Optional[pulumi.Input[int]] = None,
3014
- drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
3015
- drs_automation_level: Optional[pulumi.Input[str]] = None,
3016
- drs_enable_predictive_drs: Optional[pulumi.Input[bool]] = None,
3017
- drs_enable_vm_overrides: Optional[pulumi.Input[bool]] = None,
3018
- drs_enabled: Optional[pulumi.Input[bool]] = None,
3019
- drs_migration_threshold: Optional[pulumi.Input[int]] = None,
3020
- drs_scale_descendants_shares: Optional[pulumi.Input[str]] = None,
3021
- folder: Optional[pulumi.Input[str]] = None,
3022
- force_evacuate_on_destroy: Optional[pulumi.Input[bool]] = None,
3023
- ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
3024
- ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[int]] = None,
3025
- ha_admission_control_performance_tolerance: Optional[pulumi.Input[int]] = None,
3026
- ha_admission_control_policy: Optional[pulumi.Input[str]] = None,
3027
- ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[bool]] = None,
3028
- ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[int]] = None,
3029
- ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[int]] = None,
3030
- ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[int]] = None,
3031
- ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[int]] = None,
3032
- ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[bool]] = None,
3033
- ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
3034
- ha_datastore_apd_recovery_action: Optional[pulumi.Input[str]] = None,
3035
- ha_datastore_apd_response: Optional[pulumi.Input[str]] = None,
3036
- ha_datastore_apd_response_delay: Optional[pulumi.Input[int]] = None,
3037
- ha_datastore_pdl_response: Optional[pulumi.Input[str]] = None,
3038
- ha_enabled: Optional[pulumi.Input[bool]] = None,
3039
- ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
3040
- ha_heartbeat_datastore_policy: Optional[pulumi.Input[str]] = None,
3041
- ha_host_isolation_response: Optional[pulumi.Input[str]] = None,
3042
- ha_host_monitoring: Optional[pulumi.Input[str]] = None,
3043
- ha_vm_component_protection: Optional[pulumi.Input[str]] = None,
3044
- ha_vm_dependency_restart_condition: Optional[pulumi.Input[str]] = None,
3045
- ha_vm_failure_interval: Optional[pulumi.Input[int]] = None,
3046
- ha_vm_maximum_failure_window: Optional[pulumi.Input[int]] = None,
3047
- ha_vm_maximum_resets: Optional[pulumi.Input[int]] = None,
3048
- ha_vm_minimum_uptime: Optional[pulumi.Input[int]] = None,
3049
- ha_vm_monitoring: Optional[pulumi.Input[str]] = None,
3050
- ha_vm_restart_additional_delay: Optional[pulumi.Input[int]] = None,
3051
- ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
3052
- ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
3053
- host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
2825
+ custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
2826
+ datacenter_id: Optional[pulumi.Input[_builtins.str]] = None,
2827
+ dpm_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
2828
+ dpm_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2829
+ dpm_threshold: Optional[pulumi.Input[_builtins.int]] = None,
2830
+ drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
2831
+ drs_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
2832
+ drs_enable_predictive_drs: Optional[pulumi.Input[_builtins.bool]] = None,
2833
+ drs_enable_vm_overrides: Optional[pulumi.Input[_builtins.bool]] = None,
2834
+ drs_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2835
+ drs_migration_threshold: Optional[pulumi.Input[_builtins.int]] = None,
2836
+ drs_scale_descendants_shares: Optional[pulumi.Input[_builtins.str]] = None,
2837
+ folder: Optional[pulumi.Input[_builtins.str]] = None,
2838
+ force_evacuate_on_destroy: Optional[pulumi.Input[_builtins.bool]] = None,
2839
+ ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2840
+ ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
2841
+ ha_admission_control_performance_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
2842
+ ha_admission_control_policy: Optional[pulumi.Input[_builtins.str]] = None,
2843
+ ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[_builtins.bool]] = None,
2844
+ ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[_builtins.int]] = None,
2845
+ ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[_builtins.int]] = None,
2846
+ ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[_builtins.int]] = None,
2847
+ ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[_builtins.int]] = None,
2848
+ ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[_builtins.bool]] = None,
2849
+ ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
2850
+ ha_datastore_apd_recovery_action: Optional[pulumi.Input[_builtins.str]] = None,
2851
+ ha_datastore_apd_response: Optional[pulumi.Input[_builtins.str]] = None,
2852
+ ha_datastore_apd_response_delay: Optional[pulumi.Input[_builtins.int]] = None,
2853
+ ha_datastore_pdl_response: Optional[pulumi.Input[_builtins.str]] = None,
2854
+ ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2855
+ ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2856
+ ha_heartbeat_datastore_policy: Optional[pulumi.Input[_builtins.str]] = None,
2857
+ ha_host_isolation_response: Optional[pulumi.Input[_builtins.str]] = None,
2858
+ ha_host_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
2859
+ ha_vm_component_protection: Optional[pulumi.Input[_builtins.str]] = None,
2860
+ ha_vm_dependency_restart_condition: Optional[pulumi.Input[_builtins.str]] = None,
2861
+ ha_vm_failure_interval: Optional[pulumi.Input[_builtins.int]] = None,
2862
+ ha_vm_maximum_failure_window: Optional[pulumi.Input[_builtins.int]] = None,
2863
+ ha_vm_maximum_resets: Optional[pulumi.Input[_builtins.int]] = None,
2864
+ ha_vm_minimum_uptime: Optional[pulumi.Input[_builtins.int]] = None,
2865
+ ha_vm_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
2866
+ ha_vm_restart_additional_delay: Optional[pulumi.Input[_builtins.int]] = None,
2867
+ ha_vm_restart_priority: Optional[pulumi.Input[_builtins.str]] = None,
2868
+ ha_vm_restart_timeout: Optional[pulumi.Input[_builtins.int]] = None,
2869
+ host_cluster_exit_timeout: Optional[pulumi.Input[_builtins.int]] = None,
3054
2870
  host_image: Optional[pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']]] = None,
3055
- host_managed: Optional[pulumi.Input[bool]] = None,
3056
- host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
3057
- name: Optional[pulumi.Input[str]] = None,
3058
- proactive_ha_automation_level: Optional[pulumi.Input[str]] = None,
3059
- proactive_ha_enabled: Optional[pulumi.Input[bool]] = None,
3060
- proactive_ha_moderate_remediation: Optional[pulumi.Input[str]] = None,
3061
- proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
3062
- proactive_ha_severe_remediation: Optional[pulumi.Input[str]] = None,
3063
- resource_pool_id: Optional[pulumi.Input[str]] = None,
3064
- tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
3065
- vsan_compression_enabled: Optional[pulumi.Input[bool]] = None,
3066
- vsan_dedup_enabled: Optional[pulumi.Input[bool]] = None,
2871
+ host_managed: Optional[pulumi.Input[_builtins.bool]] = None,
2872
+ host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2873
+ name: Optional[pulumi.Input[_builtins.str]] = None,
2874
+ proactive_ha_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
2875
+ proactive_ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2876
+ proactive_ha_moderate_remediation: Optional[pulumi.Input[_builtins.str]] = None,
2877
+ proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2878
+ proactive_ha_severe_remediation: Optional[pulumi.Input[_builtins.str]] = None,
2879
+ resource_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
2880
+ tags: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
2881
+ vsan_compression_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2882
+ vsan_dedup_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
3067
2883
  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]]] = None,
3068
- vsan_dit_encryption_enabled: Optional[pulumi.Input[bool]] = None,
3069
- vsan_dit_rekey_interval: Optional[pulumi.Input[int]] = None,
3070
- vsan_enabled: Optional[pulumi.Input[bool]] = None,
3071
- vsan_esa_enabled: Optional[pulumi.Input[bool]] = None,
2884
+ vsan_dit_encryption_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2885
+ vsan_dit_rekey_interval: Optional[pulumi.Input[_builtins.int]] = None,
2886
+ vsan_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2887
+ vsan_esa_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
3072
2888
  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]]] = None,
3073
- vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[bool]] = None,
3074
- vsan_performance_enabled: Optional[pulumi.Input[bool]] = None,
3075
- vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
2889
+ vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2890
+ vsan_performance_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2891
+ vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
3076
2892
  vsan_stretched_cluster: Optional[pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']]] = None,
3077
- vsan_unmap_enabled: Optional[pulumi.Input[bool]] = None,
3078
- vsan_verbose_mode_enabled: Optional[pulumi.Input[bool]] = None) -> 'ComputeCluster':
2893
+ vsan_unmap_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
2894
+ vsan_verbose_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None) -> 'ComputeCluster':
3079
2895
  """
3080
2896
  Get an existing ComputeCluster resource's state with the given name, id, and optional extra
3081
2897
  properties used to qualify the lookup.
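
The `get()` lookup whose signature changes in the hunk above adopts an existing cluster's state by provider ID without managing it. A short sketch; the managed object ID below is a placeholder assumption:

```python
import pulumi
import pulumi_vsphere as vsphere

# Placeholder managed object ID; the real value comes from vCenter or a prior stack export.
existing = vsphere.ComputeCluster.get("existing-cluster", id="domain-c123")

pulumi.export("cluster_name", existing.name)
```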
@@ -3083,127 +2899,90 @@ class ComputeCluster(pulumi.CustomResource):
3083
2899
  :param str resource_name: The unique name of the resulting resource.
3084
2900
  :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
3085
2901
  :param pulumi.ResourceOptions opts: Options for the resource.
3086
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: A map of custom attribute ids to attribute
2902
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] custom_attributes: A map of custom attribute ids to attribute
3087
2903
  value strings to set for the datastore cluster.
3088
2904
 
3089
2905
  > **NOTE:** Custom attributes are unsupported on direct ESXi connections
3090
2906
  and require vCenter Server.
3091
- :param pulumi.Input[str] datacenter_id: The managed object ID of
2907
+ :param pulumi.Input[_builtins.str] datacenter_id: The managed object ID of
3092
2908
  the datacenter to create the cluster in. Forces a new resource if changed.
3093
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
3094
- :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
3095
- machines in the cluster. Requires that DRS be enabled.
3096
- :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
3097
- affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
3098
- setting.
3099
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
3100
- :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
3101
- fullyAutomated.
3102
- :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
3103
- :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
3104
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
3105
- :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
3106
- more imbalance while a higher setting will tolerate less.
3107
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
3108
- :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
2909
+ :param pulumi.Input[_builtins.str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
2910
+ :param pulumi.Input[_builtins.bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual machines in the cluster. Requires that DRS be enabled.
2911
+ :param pulumi.Input[_builtins.int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting.
2912
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
2913
+ :param pulumi.Input[_builtins.str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated.
2914
+ :param pulumi.Input[_builtins.bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
2915
+ :param pulumi.Input[_builtins.bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
2916
+ :param pulumi.Input[_builtins.bool] drs_enabled: Enable DRS for this cluster.
2917
+ :param pulumi.Input[_builtins.int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less.
2918
+ :param pulumi.Input[_builtins.str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
2919
+ :param pulumi.Input[_builtins.str] folder: The relative path to a folder to put this cluster in.
3109
2920
  This is a path relative to the datacenter you are deploying the cluster to.
3110
2921
  Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
3111
2922
  The provider will place a cluster named `compute-cluster-test` in a
3112
2923
  host folder located at `/dc1/host/foo/bar`, with the final inventory path
3113
2924
  being `/dc1/host/foo/bar/datastore-cluster-test`.
3114
- :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
3115
- for testing and is not recommended in normal use.
3116
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
3117
- failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
3118
- will ignore the host when making recommendations.
3119
- :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
3120
- machine operations. The maximum is one less than the number of hosts in the cluster.
3121
- :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
3122
- warnings only, whereas a value of 100 disables the setting.
3123
- :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
3124
- permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
3125
- slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
3126
- issues.
3127
- :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
3128
- subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
3129
- from the total amount of resources in the cluster. Disable to supply user-defined values.
3130
- :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
3131
- the cluster to reserve for failover.
3132
- :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
3133
- the cluster to reserve for failover.
3134
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
3135
- :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
3136
- :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
3137
- to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
3138
- currently in the cluster.
3139
- :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
3140
- :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
3141
- affected datastore clears in the middle of an APD event. Can be one of none or reset.
3142
- :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
3143
- detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
3144
- restartAggressive.
3145
- :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
3146
- the response action defined in ha_datastore_apd_response.
3147
- :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
3148
- detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
3149
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
3150
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
3151
- ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
3152
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
3153
- allFeasibleDsWithUserPreference.
3154
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
3155
- Can be one of none, powerOff, or shutdown.
3156
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
3157
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
3158
- failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
3159
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
3160
- on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
3161
- :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
3162
- failed. The value is in seconds.
3163
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
3164
- attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
3165
- time is allotted.
3166
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
3167
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
3168
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
3169
- vmMonitoringOnly, or vmAndAppMonitoring.
3170
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
3171
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
3172
- high, or highest.
3173
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
3174
- proceeding with the next priority.
3175
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
2925
+ :param pulumi.Input[_builtins.bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists for testing and is not recommended in normal use.
2926
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
2927
+ :param pulumi.Input[_builtins.int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster.
2928
+ :param pulumi.Input[_builtins.int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting.
2929
+ :param pulumi.Input[_builtins.str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service issues.
2930
+ :param pulumi.Input[_builtins.bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values.
2931
+ :param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in the cluster to reserve for failover.
2932
+ :param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in the cluster to reserve for failover.
2933
+ :param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
2934
+ :param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
2935
+ :param pulumi.Input[_builtins.bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines currently in the cluster.
2936
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
2937
+ :param pulumi.Input[_builtins.str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset.
2938
+ :param pulumi.Input[_builtins.str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
2939
+ :param pulumi.Input[_builtins.int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the response action defined in ha_datastore_apd_response.
2940
+ :param pulumi.Input[_builtins.str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
2941
+ :param pulumi.Input[_builtins.bool] ha_enabled: Enable vSphere HA for this cluster.
2942
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
2943
+ :param pulumi.Input[_builtins.str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference.
2944
+ :param pulumi.Input[_builtins.str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown.
2945
+ :param pulumi.Input[_builtins.str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
2946
+ :param pulumi.Input[_builtins.str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
2947
+ :param pulumi.Input[_builtins.str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
2948
+ :param pulumi.Input[_builtins.int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. The value is in seconds.
2949
+ :param pulumi.Input[_builtins.int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted.
2950
+ :param pulumi.Input[_builtins.int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
2951
+ :param pulumi.Input[_builtins.int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
2952
+ :param pulumi.Input[_builtins.str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring.
2953
+ :param pulumi.Input[_builtins.int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
2954
+ :param pulumi.Input[_builtins.str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest.
2955
+ :param pulumi.Input[_builtins.int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority.
2956
+ :param pulumi.Input[_builtins.int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
3176
2957
  :param pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']] host_image: Details about the host image which should be applied to the cluster.
3177
- :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
3178
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
3179
- :param pulumi.Input[str] name: The name of the cluster.
3180
- :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
3181
- :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
3182
- :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
3183
- this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
3184
- :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
3185
- :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
3186
- cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
3187
- :param pulumi.Input[str] resource_pool_id: The managed object ID of the primary
2958
+ :param pulumi.Input[_builtins.bool] host_managed: Must be set if cluster enrollment is managed from host resource.
2959
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
2960
+ :param pulumi.Input[_builtins.str] name: The name of the cluster.
2961
+ :param pulumi.Input[_builtins.str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
2962
+ :param pulumi.Input[_builtins.bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
2963
+ :param pulumi.Input[_builtins.str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
2964
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
2965
+ :param pulumi.Input[_builtins.str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
2966
+ :param pulumi.Input[_builtins.str] resource_pool_id: The managed object ID of the primary
3188
2967
  resource pool for this cluster. This can be passed directly to the
3189
2968
  `resource_pool_id`
3190
2969
  attribute of the
3191
2970
  `VirtualMachine` resource.
3192
- :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
3193
- :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
3194
- :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
2971
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] tags: The IDs of any tags to attach to this resource.
2972
+ :param pulumi.Input[_builtins.bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
2973
+ :param pulumi.Input[_builtins.bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
3195
2974
  :param pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
3196
- :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
3197
- :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
3198
- :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
3199
- :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
2975
+ :param pulumi.Input[_builtins.bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
2976
+ :param pulumi.Input[_builtins.int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
2977
+ :param pulumi.Input[_builtins.bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
2978
+ :param pulumi.Input[_builtins.bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
3200
2979
  :param pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]] vsan_fault_domains: The configuration for vSAN fault domains.
3201
- :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
3202
- :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
3203
- :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
2980
+ :param pulumi.Input[_builtins.bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
2981
+ :param pulumi.Input[_builtins.bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
2982
+ :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
3204
2983
  :param pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']] vsan_stretched_cluster: The configuration for stretched cluster.
3205
- :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
3206
- :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
2984
+ :param pulumi.Input[_builtins.bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
2985
+ :param pulumi.Input[_builtins.bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
3207
2986
  """
3208
2987
  opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
3209
2988
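Since every state parameter documented above flows through ComputeCluster.get, a short usage sketch may help; it assumes a configured vSphere provider, and the managed object ID "domain-c81" and the export names are hypothetical.

    import pulumi
    import pulumi_vsphere as vsphere

    # Adopt an existing cluster by its managed object ID (hypothetical value)
    # and surface a few of the documented attributes as stack outputs.
    existing = vsphere.ComputeCluster.get("existing-cluster", id="domain-c81")

    pulumi.export("resourcePoolId", existing.resource_pool_id)
    pulumi.export("haEnabled", existing.ha_enabled)
    pulumi.export("drsAutomationLevel", existing.drs_automation_level)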
 
@@ -3281,9 +3060,9 @@ class ComputeCluster(pulumi.CustomResource):
3281
3060
  __props__.__dict__["vsan_verbose_mode_enabled"] = vsan_verbose_mode_enabled
3282
3061
  return ComputeCluster(resource_name, opts=opts, __props__=__props__)
3283
3062
 
3284
- @property
3063
+ @_builtins.property
3285
3064
  @pulumi.getter(name="customAttributes")
3286
- def custom_attributes(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
3065
+ def custom_attributes(self) -> pulumi.Output[Optional[Mapping[str, _builtins.str]]]:
3287
3066
  """
3288
3067
  A map of custom attribute ids to attribute
3289
3068
  value strings to set for the datastore cluster.
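The recurring change in this file is the switch from bare builtins (@property, str, bool, int) to the aliased _builtins module in decorators and annotations. Presumably this protects generated modules against name collisions when a schema emits a symbol that shadows a builtin; a minimal illustration of the pattern, independent of this SDK:

    import builtins as _builtins

    class Example:
        # Even if the surrounding generated module defines its own symbol
        # named "property" or "str", the aliased module still resolves to
        # the real builtins.
        @_builtins.property
        def name(self) -> _builtins.str:
            return "cluster-01"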
@@ -3293,103 +3072,98 @@ class ComputeCluster(pulumi.CustomResource):
3293
3072
  """
3294
3073
  return pulumi.get(self, "custom_attributes")
3295
3074
 
3296
- @property
3075
+ @_builtins.property
3297
3076
  @pulumi.getter(name="datacenterId")
3298
- def datacenter_id(self) -> pulumi.Output[str]:
3077
+ def datacenter_id(self) -> pulumi.Output[_builtins.str]:
3299
3078
  """
3300
3079
  The managed object ID of
3301
3080
  the datacenter to create the cluster in. Forces a new resource if changed.
3302
3081
  """
3303
3082
  return pulumi.get(self, "datacenter_id")
3304
3083
 
3305
- @property
3084
+ @_builtins.property
3306
3085
  @pulumi.getter(name="dpmAutomationLevel")
3307
- def dpm_automation_level(self) -> pulumi.Output[Optional[str]]:
3086
+ def dpm_automation_level(self) -> pulumi.Output[Optional[_builtins.str]]:
3308
3087
  """
3309
3088
  The automation level for host power operations in this cluster. Can be one of manual or automated.
3310
3089
  """
3311
3090
  return pulumi.get(self, "dpm_automation_level")
3312
3091
 
3313
- @property
3092
+ @_builtins.property
3314
3093
  @pulumi.getter(name="dpmEnabled")
3315
- def dpm_enabled(self) -> pulumi.Output[Optional[bool]]:
3094
+ def dpm_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3316
3095
  """
3317
- Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
3318
- machines in the cluster. Requires that DRS be enabled.
3096
+ Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual machines in the cluster. Requires that DRS be enabled.
3319
3097
  """
3320
3098
  return pulumi.get(self, "dpm_enabled")
3321
3099
 
3322
- @property
3100
+ @_builtins.property
3323
3101
  @pulumi.getter(name="dpmThreshold")
3324
- def dpm_threshold(self) -> pulumi.Output[Optional[int]]:
3102
+ def dpm_threshold(self) -> pulumi.Output[Optional[_builtins.int]]:
3325
3103
  """
3326
- A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
3327
- affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
3328
- setting.
3104
+ A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting.
3329
3105
  """
3330
3106
  return pulumi.get(self, "dpm_threshold")
3331
3107
 
3332
- @property
3108
+ @_builtins.property
3333
3109
  @pulumi.getter(name="drsAdvancedOptions")
3334
- def drs_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
3110
+ def drs_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, _builtins.str]]]:
3335
3111
  """
3336
3112
  Advanced configuration options for DRS and DPM.
3337
3113
  """
3338
3114
  return pulumi.get(self, "drs_advanced_options")
3339
3115
 
3340
- @property
3116
+ @_builtins.property
3341
3117
  @pulumi.getter(name="drsAutomationLevel")
3342
- def drs_automation_level(self) -> pulumi.Output[Optional[str]]:
3118
+ def drs_automation_level(self) -> pulumi.Output[Optional[_builtins.str]]:
3343
3119
  """
3344
- The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
3345
- fullyAutomated.
3120
+ The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated.
3346
3121
  """
3347
3122
  return pulumi.get(self, "drs_automation_level")
3348
3123
 
3349
- @property
3124
+ @_builtins.property
3350
3125
  @pulumi.getter(name="drsEnablePredictiveDrs")
3351
- def drs_enable_predictive_drs(self) -> pulumi.Output[Optional[bool]]:
3126
+ def drs_enable_predictive_drs(self) -> pulumi.Output[Optional[_builtins.bool]]:
3352
3127
  """
3353
3128
  When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
3354
3129
  """
3355
3130
  return pulumi.get(self, "drs_enable_predictive_drs")
3356
3131
 
3357
- @property
3132
+ @_builtins.property
3358
3133
  @pulumi.getter(name="drsEnableVmOverrides")
3359
- def drs_enable_vm_overrides(self) -> pulumi.Output[Optional[bool]]:
3134
+ def drs_enable_vm_overrides(self) -> pulumi.Output[Optional[_builtins.bool]]:
3360
3135
  """
3361
3136
  When true, allows individual VM overrides within this cluster to be set.
3362
3137
  """
3363
3138
  return pulumi.get(self, "drs_enable_vm_overrides")
3364
3139
 
3365
- @property
3140
+ @_builtins.property
3366
3141
  @pulumi.getter(name="drsEnabled")
3367
- def drs_enabled(self) -> pulumi.Output[Optional[bool]]:
3142
+ def drs_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3368
3143
  """
3369
3144
  Enable DRS for this cluster.
3370
3145
  """
3371
3146
  return pulumi.get(self, "drs_enabled")
3372
3147
 
3373
- @property
3148
+ @_builtins.property
3374
3149
  @pulumi.getter(name="drsMigrationThreshold")
3375
- def drs_migration_threshold(self) -> pulumi.Output[Optional[int]]:
3150
+ def drs_migration_threshold(self) -> pulumi.Output[Optional[_builtins.int]]:
3376
3151
  """
3377
- A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
3378
- more imbalance while a higher setting will tolerate less.
3152
+ A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less.
3379
3153
  """
3380
3154
  return pulumi.get(self, "drs_migration_threshold")
3381
3155
 
3382
- @property
3156
+ @_builtins.property
3383
3157
  @pulumi.getter(name="drsScaleDescendantsShares")
3384
- def drs_scale_descendants_shares(self) -> pulumi.Output[Optional[str]]:
3158
+ def drs_scale_descendants_shares(self) -> pulumi.Output[Optional[_builtins.str]]:
3385
3159
  """
3386
3160
  Enable scalable shares for all descendants of this cluster.
3387
3161
  """
3388
3162
  return pulumi.get(self, "drs_scale_descendants_shares")
3389
3163
 
3390
- @property
3164
+ @_builtins.property
3391
3165
  @pulumi.getter
3392
- def folder(self) -> pulumi.Output[Optional[str]]:
3166
+ def folder(self) -> pulumi.Output[Optional[_builtins.str]]:
3393
3167
  """
3394
3168
  The relative path to a folder to put this cluster in.
3395
3169
  This is a path relative to the datacenter you are deploying the cluster to.
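The getters above cover the DRS and DPM surface of the resource. As a hedged sketch of how those arguments fit together when declaring a cluster (the datacenter and host names are hypothetical, and a configured vSphere provider is assumed):

    import pulumi_vsphere as vsphere

    dc = vsphere.get_datacenter(name="dc-01")  # hypothetical datacenter name
    hosts = [
        vsphere.get_host(name=n, datacenter_id=dc.id)
        for n in ["esxi-01.example.com", "esxi-02.example.com"]  # hypothetical hosts
    ]

    cluster = vsphere.ComputeCluster("drs-cluster",
        datacenter_id=dc.id,
        host_system_ids=[h.id for h in hosts],
        drs_enabled=True,
        drs_automation_level="fullyAutomated",
        drs_migration_threshold=3,        # 1 tolerates more imbalance, 5 tolerates less
        drs_enable_predictive_drs=False,
        dpm_enabled=True,                 # requires DRS, per the docstring above
        dpm_automation_level="automated",
        dpm_threshold=3)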
@@ -3400,293 +3174,263 @@ class ComputeCluster(pulumi.CustomResource):
3400
3174
  """
3401
3175
  return pulumi.get(self, "folder")
3402
3176
 
3403
- @property
3177
+ @_builtins.property
3404
3178
  @pulumi.getter(name="forceEvacuateOnDestroy")
3405
- def force_evacuate_on_destroy(self) -> pulumi.Output[Optional[bool]]:
3179
+ def force_evacuate_on_destroy(self) -> pulumi.Output[Optional[_builtins.bool]]:
3406
3180
  """
3407
- Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
3408
- for testing and is not recommended in normal use.
3181
+ Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists for testing and is not recommended in normal use.
3409
3182
  """
3410
3183
  return pulumi.get(self, "force_evacuate_on_destroy")
3411
3184
 
3412
- @property
3185
+ @_builtins.property
3413
3186
  @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
3414
- def ha_admission_control_failover_host_system_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
3187
+ def ha_admission_control_failover_host_system_ids(self) -> pulumi.Output[Optional[Sequence[_builtins.str]]]:
3415
3188
  """
3416
- When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
3417
- failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
3418
- will ignore the host when making recommendations.
3189
+ When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
3419
3190
  """
3420
3191
  return pulumi.get(self, "ha_admission_control_failover_host_system_ids")
3421
3192
 
3422
- @property
3193
+ @_builtins.property
3423
3194
  @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
3424
- def ha_admission_control_host_failure_tolerance(self) -> pulumi.Output[Optional[int]]:
3195
+ def ha_admission_control_host_failure_tolerance(self) -> pulumi.Output[Optional[_builtins.int]]:
3425
3196
  """
3426
- The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
3427
- machine operations. The maximum is one less than the number of hosts in the cluster.
3197
+ The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster.
3428
3198
  """
3429
3199
  return pulumi.get(self, "ha_admission_control_host_failure_tolerance")
3430
3200
 
3431
- @property
3201
+ @_builtins.property
3432
3202
  @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
3433
- def ha_admission_control_performance_tolerance(self) -> pulumi.Output[Optional[int]]:
3203
+ def ha_admission_control_performance_tolerance(self) -> pulumi.Output[Optional[_builtins.int]]:
3434
3204
  """
3435
- The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
3436
- warnings only, whereas a value of 100 disables the setting.
3205
+ The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting.
3437
3206
  """
3438
3207
  return pulumi.get(self, "ha_admission_control_performance_tolerance")
3439
3208
 
3440
- @property
3209
+ @_builtins.property
3441
3210
  @pulumi.getter(name="haAdmissionControlPolicy")
3442
- def ha_admission_control_policy(self) -> pulumi.Output[Optional[str]]:
3211
+ def ha_admission_control_policy(self) -> pulumi.Output[Optional[_builtins.str]]:
3443
3212
  """
3444
- The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
3445
- permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
3446
- slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
3447
- issues.
3213
+ The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service issues.
3448
3214
  """
3449
3215
  return pulumi.get(self, "ha_admission_control_policy")
3450
3216
 
3451
- @property
3217
+ @_builtins.property
3452
3218
  @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
3453
- def ha_admission_control_resource_percentage_auto_compute(self) -> pulumi.Output[Optional[bool]]:
3219
+ def ha_admission_control_resource_percentage_auto_compute(self) -> pulumi.Output[Optional[_builtins.bool]]:
3454
3220
  """
3455
- When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
3456
- subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
3457
- from the total amount of resources in the cluster. Disable to supply user-defined values.
3221
+ When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values.
3458
3222
  """
3459
3223
  return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")
3460
3224
 
3461
- @property
3225
+ @_builtins.property
3462
3226
  @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
3463
- def ha_admission_control_resource_percentage_cpu(self) -> pulumi.Output[Optional[int]]:
3227
+ def ha_admission_control_resource_percentage_cpu(self) -> pulumi.Output[Optional[_builtins.int]]:
3464
3228
  """
3465
- When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
3466
- the cluster to reserve for failover.
3229
+ When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in the cluster to reserve for failover.
3467
3230
  """
3468
3231
  return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")
3469
3232
 
3470
- @property
3233
+ @_builtins.property
3471
3234
  @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
3472
- def ha_admission_control_resource_percentage_memory(self) -> pulumi.Output[Optional[int]]:
3235
+ def ha_admission_control_resource_percentage_memory(self) -> pulumi.Output[Optional[_builtins.int]]:
3473
3236
  """
3474
- When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
3475
- the cluster to reserve for failover.
3237
+ When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in the cluster to reserve for failover.
3476
3238
  """
3477
3239
  return pulumi.get(self, "ha_admission_control_resource_percentage_memory")
3478
3240
 
3479
- @property
3241
+ @_builtins.property
3480
3242
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
3481
- def ha_admission_control_slot_policy_explicit_cpu(self) -> pulumi.Output[Optional[int]]:
3243
+ def ha_admission_control_slot_policy_explicit_cpu(self) -> pulumi.Output[Optional[_builtins.int]]:
3482
3244
  """
3483
3245
  When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
3484
3246
  """
3485
3247
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")
3486
3248
 
3487
- @property
3249
+ @_builtins.property
3488
3250
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
3489
- def ha_admission_control_slot_policy_explicit_memory(self) -> pulumi.Output[Optional[int]]:
3251
+ def ha_admission_control_slot_policy_explicit_memory(self) -> pulumi.Output[Optional[_builtins.int]]:
3490
3252
  """
3491
3253
  When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
3492
3254
  """
3493
3255
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")
3494
3256
 
3495
- @property
3257
+ @_builtins.property
3496
3258
  @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
3497
- def ha_admission_control_slot_policy_use_explicit_size(self) -> pulumi.Output[Optional[bool]]:
3259
+ def ha_admission_control_slot_policy_use_explicit_size(self) -> pulumi.Output[Optional[_builtins.bool]]:
3498
3260
  """
3499
- When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
3500
- to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
3501
- currently in the cluster.
3261
+ When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines currently in the cluster.
3502
3262
  """
3503
3263
  return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")
3504
3264
 
3505
- @property
3265
+ @_builtins.property
3506
3266
  @pulumi.getter(name="haAdvancedOptions")
3507
- def ha_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
3267
+ def ha_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, _builtins.str]]]:
3508
3268
  """
3509
3269
  Advanced configuration options for vSphere HA.
3510
3270
  """
3511
3271
  return pulumi.get(self, "ha_advanced_options")
3512
3272
 
3513
- @property
3273
+ @_builtins.property
3514
3274
  @pulumi.getter(name="haDatastoreApdRecoveryAction")
3515
- def ha_datastore_apd_recovery_action(self) -> pulumi.Output[Optional[str]]:
3275
+ def ha_datastore_apd_recovery_action(self) -> pulumi.Output[Optional[_builtins.str]]:
3516
3276
  """
3517
- When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
3518
- affected datastore clears in the middle of an APD event. Can be one of none or reset.
3277
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset.
3519
3278
  """
3520
3279
  return pulumi.get(self, "ha_datastore_apd_recovery_action")
3521
3280
 
3522
- @property
3281
+ @_builtins.property
3523
3282
  @pulumi.getter(name="haDatastoreApdResponse")
3524
- def ha_datastore_apd_response(self) -> pulumi.Output[Optional[str]]:
3283
+ def ha_datastore_apd_response(self) -> pulumi.Output[Optional[_builtins.str]]:
3525
3284
  """
3526
- When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
3527
- detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
3528
- restartAggressive.
3285
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
3529
3286
  """
3530
3287
  return pulumi.get(self, "ha_datastore_apd_response")
3531
3288
 
3532
- @property
3289
+ @_builtins.property
3533
3290
  @pulumi.getter(name="haDatastoreApdResponseDelay")
3534
- def ha_datastore_apd_response_delay(self) -> pulumi.Output[Optional[int]]:
3291
+ def ha_datastore_apd_response_delay(self) -> pulumi.Output[Optional[_builtins.int]]:
3535
3292
  """
3536
- When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
3537
- the response action defined in ha_datastore_apd_response.
3293
+ When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the response action defined in ha_datastore_apd_response.
3538
3294
  """
3539
3295
  return pulumi.get(self, "ha_datastore_apd_response_delay")
3540
3296
 
3541
- @property
3297
+ @_builtins.property
3542
3298
  @pulumi.getter(name="haDatastorePdlResponse")
3543
- def ha_datastore_pdl_response(self) -> pulumi.Output[Optional[str]]:
3299
+ def ha_datastore_pdl_response(self) -> pulumi.Output[Optional[_builtins.str]]:
3544
3300
  """
3545
- When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
3546
- detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
3301
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
3547
3302
  """
3548
3303
  return pulumi.get(self, "ha_datastore_pdl_response")
3549
3304
 
3550
- @property
3305
+ @_builtins.property
3551
3306
  @pulumi.getter(name="haEnabled")
3552
- def ha_enabled(self) -> pulumi.Output[Optional[bool]]:
3307
+ def ha_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3553
3308
  """
3554
3309
  Enable vSphere HA for this cluster.
3555
3310
  """
3556
3311
  return pulumi.get(self, "ha_enabled")
3557
3312
 
3558
- @property
3313
+ @_builtins.property
3559
3314
  @pulumi.getter(name="haHeartbeatDatastoreIds")
3560
- def ha_heartbeat_datastore_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
3315
+ def ha_heartbeat_datastore_ids(self) -> pulumi.Output[Optional[Sequence[_builtins.str]]]:
3561
3316
  """
3562
- The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
3563
- ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
3317
+ The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
3564
3318
  """
3565
3319
  return pulumi.get(self, "ha_heartbeat_datastore_ids")
3566
3320
 
3567
- @property
3321
+ @_builtins.property
3568
3322
  @pulumi.getter(name="haHeartbeatDatastorePolicy")
3569
- def ha_heartbeat_datastore_policy(self) -> pulumi.Output[Optional[str]]:
3323
+ def ha_heartbeat_datastore_policy(self) -> pulumi.Output[Optional[_builtins.str]]:
3570
3324
  """
3571
- The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
3572
- allFeasibleDsWithUserPreference.
3325
+ The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference.
3573
3326
  """
3574
3327
  return pulumi.get(self, "ha_heartbeat_datastore_policy")
3575
3328
 
3576
- @property
3329
+ @_builtins.property
3577
3330
  @pulumi.getter(name="haHostIsolationResponse")
3578
- def ha_host_isolation_response(self) -> pulumi.Output[Optional[str]]:
3331
+ def ha_host_isolation_response(self) -> pulumi.Output[Optional[_builtins.str]]:
3579
3332
  """
3580
- The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
3581
- Can be one of none, powerOff, or shutdown.
3333
+ The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown.
3582
3334
  """
3583
3335
  return pulumi.get(self, "ha_host_isolation_response")
3584
3336
 
3585
- @property
3337
+ @_builtins.property
3586
3338
  @pulumi.getter(name="haHostMonitoring")
3587
- def ha_host_monitoring(self) -> pulumi.Output[Optional[str]]:
3339
+ def ha_host_monitoring(self) -> pulumi.Output[Optional[_builtins.str]]:
3588
3340
  """
3589
3341
  Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
3590
3342
  """
3591
3343
  return pulumi.get(self, "ha_host_monitoring")
3592
3344
 
3593
- @property
3345
+ @_builtins.property
3594
3346
  @pulumi.getter(name="haVmComponentProtection")
3595
- def ha_vm_component_protection(self) -> pulumi.Output[Optional[str]]:
3347
+ def ha_vm_component_protection(self) -> pulumi.Output[Optional[_builtins.str]]:
3596
3348
  """
3597
- Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
3598
- failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
3349
+ Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
3599
3350
  """
3600
3351
  return pulumi.get(self, "ha_vm_component_protection")
3601
3352
 
3602
- @property
3353
+ @_builtins.property
3603
3354
  @pulumi.getter(name="haVmDependencyRestartCondition")
3604
- def ha_vm_dependency_restart_condition(self) -> pulumi.Output[Optional[str]]:
3355
+ def ha_vm_dependency_restart_condition(self) -> pulumi.Output[Optional[_builtins.str]]:
3605
3356
  """
3606
- The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
3607
- on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
3357
+ The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
3608
3358
  """
3609
3359
  return pulumi.get(self, "ha_vm_dependency_restart_condition")
3610
3360
 
3611
- @property
3361
+ @_builtins.property
3612
3362
  @pulumi.getter(name="haVmFailureInterval")
3613
- def ha_vm_failure_interval(self) -> pulumi.Output[Optional[int]]:
3363
+ def ha_vm_failure_interval(self) -> pulumi.Output[Optional[_builtins.int]]:
3614
3364
  """
3615
- If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
3616
- failed. The value is in seconds.
3365
+ If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. The value is in seconds.
3617
3366
  """
3618
3367
  return pulumi.get(self, "ha_vm_failure_interval")
3619
3368
 
3620
- @property
3369
+ @_builtins.property
3621
3370
  @pulumi.getter(name="haVmMaximumFailureWindow")
3622
- def ha_vm_maximum_failure_window(self) -> pulumi.Output[Optional[int]]:
3371
+ def ha_vm_maximum_failure_window(self) -> pulumi.Output[Optional[_builtins.int]]:
3623
3372
  """
3624
- The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
3625
- attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
3626
- time is allotted.
3373
+ The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted.
3627
3374
  """
3628
3375
  return pulumi.get(self, "ha_vm_maximum_failure_window")
3629
3376
 
3630
- @property
3377
+ @_builtins.property
3631
3378
  @pulumi.getter(name="haVmMaximumResets")
3632
- def ha_vm_maximum_resets(self) -> pulumi.Output[Optional[int]]:
3379
+ def ha_vm_maximum_resets(self) -> pulumi.Output[Optional[_builtins.int]]:
3633
3380
  """
3634
3381
  The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
3635
3382
  """
3636
3383
  return pulumi.get(self, "ha_vm_maximum_resets")
3637
3384
 
3638
- @property
3385
+ @_builtins.property
3639
3386
  @pulumi.getter(name="haVmMinimumUptime")
3640
- def ha_vm_minimum_uptime(self) -> pulumi.Output[Optional[int]]:
3387
+ def ha_vm_minimum_uptime(self) -> pulumi.Output[Optional[_builtins.int]]:
3641
3388
  """
3642
3389
  The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
3643
3390
  """
3644
3391
  return pulumi.get(self, "ha_vm_minimum_uptime")
3645
3392
 
3646
- @property
3393
+ @_builtins.property
3647
3394
  @pulumi.getter(name="haVmMonitoring")
3648
- def ha_vm_monitoring(self) -> pulumi.Output[Optional[str]]:
3395
+ def ha_vm_monitoring(self) -> pulumi.Output[Optional[_builtins.str]]:
3649
3396
  """
3650
- The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
3651
- vmMonitoringOnly, or vmAndAppMonitoring.
3397
+ The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring.
3652
3398
  """
3653
3399
  return pulumi.get(self, "ha_vm_monitoring")
3654
3400
 
3655
- @property
3401
+ @_builtins.property
3656
3402
  @pulumi.getter(name="haVmRestartAdditionalDelay")
3657
- def ha_vm_restart_additional_delay(self) -> pulumi.Output[Optional[int]]:
3403
+ def ha_vm_restart_additional_delay(self) -> pulumi.Output[Optional[_builtins.int]]:
3658
3404
  """
3659
3405
  Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
3660
3406
  """
3661
3407
  return pulumi.get(self, "ha_vm_restart_additional_delay")
3662
3408
 
3663
- @property
3409
+ @_builtins.property
3664
3410
  @pulumi.getter(name="haVmRestartPriority")
3665
- def ha_vm_restart_priority(self) -> pulumi.Output[Optional[str]]:
3411
+ def ha_vm_restart_priority(self) -> pulumi.Output[Optional[_builtins.str]]:
3666
3412
  """
3667
- The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
3668
- high, or highest.
3413
+ The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest.
3669
3414
  """
3670
3415
  return pulumi.get(self, "ha_vm_restart_priority")
3671
3416
 
3672
- @property
3417
+ @_builtins.property
3673
3418
  @pulumi.getter(name="haVmRestartTimeout")
3674
- def ha_vm_restart_timeout(self) -> pulumi.Output[Optional[int]]:
3419
+ def ha_vm_restart_timeout(self) -> pulumi.Output[Optional[_builtins.int]]:
3675
3420
  """
3676
- The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
3677
- proceeding with the next priority.
3421
+ The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority.
3678
3422
  """
3679
3423
  return pulumi.get(self, "ha_vm_restart_timeout")
3680
3424
 
3681
- @property
3425
+ @_builtins.property
3682
3426
  @pulumi.getter(name="hostClusterExitTimeout")
3683
- def host_cluster_exit_timeout(self) -> pulumi.Output[Optional[int]]:
3427
+ def host_cluster_exit_timeout(self) -> pulumi.Output[Optional[_builtins.int]]:
3684
3428
  """
3685
3429
  The timeout for each host maintenance mode operation when removing hosts from a cluster.
3686
3430
  """
3687
3431
  return pulumi.get(self, "host_cluster_exit_timeout")
3688
3432
 
3689
- @property
3433
+ @_builtins.property
3690
3434
  @pulumi.getter(name="hostImage")
3691
3435
  def host_image(self) -> pulumi.Output[Optional['outputs.ComputeClusterHostImage']]:
3692
3436
  """
@@ -3694,75 +3438,73 @@ class ComputeCluster(pulumi.CustomResource):
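Before the hunk below moves on to host membership and Proactive HA, the vSphere HA options whose docstrings were reflowed above can be tied together in one place. A hedged sketch using only the documented argument names; the datacenter and host lookups mirror the earlier example and the percentages are illustrative.

    import pulumi_vsphere as vsphere

    dc = vsphere.get_datacenter(name="dc-01")                          # hypothetical
    host = vsphere.get_host(name="esxi-01.example.com", datacenter_id=dc.id)

    cluster = vsphere.ComputeCluster("ha-cluster",
        datacenter_id=dc.id,
        host_system_ids=[host.id],
        ha_enabled=True,
        # Reserve a fixed share of cluster resources for failover instead of
        # letting vSphere derive it from the host-failure tolerance.
        ha_admission_control_policy="resourcePercentage",
        ha_admission_control_resource_percentage_auto_compute=False,
        ha_admission_control_resource_percentage_cpu=50,
        ha_admission_control_resource_percentage_memory=50,
        # Heartbeat-based VM monitoring with the reset limits described above.
        ha_vm_monitoring="vmMonitoringOnly",
        ha_vm_failure_interval=30,
        ha_vm_minimum_uptime=120,
        ha_vm_maximum_resets=3,
        ha_vm_maximum_failure_window=-1)   # -1 = no reset window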
3694
3438
  """
3695
3439
  return pulumi.get(self, "host_image")
3696
3440
 
3697
- @property
3441
+ @_builtins.property
3698
3442
  @pulumi.getter(name="hostManaged")
3699
- def host_managed(self) -> pulumi.Output[Optional[bool]]:
3443
+ def host_managed(self) -> pulumi.Output[Optional[_builtins.bool]]:
3700
3444
  """
3701
3445
  Must be set if cluster enrollment is managed from host resource.
3702
3446
  """
3703
3447
  return pulumi.get(self, "host_managed")
3704
3448
 
3705
- @property
3449
+ @_builtins.property
3706
3450
  @pulumi.getter(name="hostSystemIds")
3707
- def host_system_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
3451
+ def host_system_ids(self) -> pulumi.Output[Optional[Sequence[_builtins.str]]]:
3708
3452
  """
3709
3453
  The managed object IDs of the hosts to put in the cluster.
3710
3454
  """
3711
3455
  return pulumi.get(self, "host_system_ids")
3712
3456
 
3713
- @property
3457
+ @_builtins.property
3714
3458
  @pulumi.getter
3715
- def name(self) -> pulumi.Output[str]:
3459
+ def name(self) -> pulumi.Output[_builtins.str]:
3716
3460
  """
3717
3461
  The name of the cluster.
3718
3462
  """
3719
3463
  return pulumi.get(self, "name")
3720
3464
 
3721
- @property
3465
+ @_builtins.property
3722
3466
  @pulumi.getter(name="proactiveHaAutomationLevel")
3723
- def proactive_ha_automation_level(self) -> pulumi.Output[Optional[str]]:
3467
+ def proactive_ha_automation_level(self) -> pulumi.Output[Optional[_builtins.str]]:
3724
3468
  """
3725
3469
  The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
3726
3470
  """
3727
3471
  return pulumi.get(self, "proactive_ha_automation_level")
3728
3472
 
3729
- @property
3473
+ @_builtins.property
3730
3474
  @pulumi.getter(name="proactiveHaEnabled")
3731
- def proactive_ha_enabled(self) -> pulumi.Output[Optional[bool]]:
3475
+ def proactive_ha_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3732
3476
  """
3733
3477
  Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
3734
3478
  """
3735
3479
  return pulumi.get(self, "proactive_ha_enabled")
3736
3480
 
3737
- @property
3481
+ @_builtins.property
3738
3482
  @pulumi.getter(name="proactiveHaModerateRemediation")
3739
- def proactive_ha_moderate_remediation(self) -> pulumi.Output[Optional[str]]:
3483
+ def proactive_ha_moderate_remediation(self) -> pulumi.Output[Optional[_builtins.str]]:
3740
3484
  """
3741
- The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
3742
- this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
3485
+ The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
3743
3486
  """
3744
3487
  return pulumi.get(self, "proactive_ha_moderate_remediation")
3745
3488
 
3746
- @property
3489
+ @_builtins.property
3747
3490
  @pulumi.getter(name="proactiveHaProviderIds")
3748
- def proactive_ha_provider_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
3491
+ def proactive_ha_provider_ids(self) -> pulumi.Output[Optional[Sequence[_builtins.str]]]:
3749
3492
  """
3750
3493
  The list of IDs for health update providers configured for this cluster.
3751
3494
  """
3752
3495
  return pulumi.get(self, "proactive_ha_provider_ids")
3753
3496
 
3754
- @property
3497
+ @_builtins.property
3755
3498
  @pulumi.getter(name="proactiveHaSevereRemediation")
3756
- def proactive_ha_severe_remediation(self) -> pulumi.Output[Optional[str]]:
3499
+ def proactive_ha_severe_remediation(self) -> pulumi.Output[Optional[_builtins.str]]:
3757
3500
  """
3758
- The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
3759
- cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
3501
+ The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
3760
3502
  """
3761
3503
  return pulumi.get(self, "proactive_ha_severe_remediation")
3762
3504
 
3763
- @property
3505
+ @_builtins.property
3764
3506
  @pulumi.getter(name="resourcePoolId")
3765
- def resource_pool_id(self) -> pulumi.Output[str]:
3507
+ def resource_pool_id(self) -> pulumi.Output[_builtins.str]:
3766
3508
  """
3767
3509
  The managed object ID of the primary
3768
3510
  resource pool for this cluster. This can be passed directly to the
@@ -3772,31 +3514,31 @@ class ComputeCluster(pulumi.CustomResource):
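The resource_pool_id output documented above is the usual hand-off point to other resources, and it pairs naturally with the proactive_ha_* settings. A hedged sketch follows; the health-update provider ID is hypothetical, and DRS is enabled because the docstring above notes that Proactive HA uses DRS to perform remediation.

    import pulumi
    import pulumi_vsphere as vsphere

    dc = vsphere.get_datacenter(name="dc-01")                          # hypothetical
    host = vsphere.get_host(name="esxi-01.example.com", datacenter_id=dc.id)

    cluster = vsphere.ComputeCluster("proactive-ha-cluster",
        datacenter_id=dc.id,
        host_system_ids=[host.id],
        drs_enabled=True,
        ha_enabled=True,
        proactive_ha_enabled=True,
        proactive_ha_automation_level="Automated",
        proactive_ha_moderate_remediation="QuarantineMode",
        proactive_ha_severe_remediation="MaintenanceMode",
        proactive_ha_provider_ids=["com.example.healthprovider"])      # hypothetical provider

    # The primary resource pool can be passed to a VirtualMachine's
    # resource_pool_id, or simply exported for other stacks to consume.
    pulumi.export("clusterResourcePoolId", cluster.resource_pool_id)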
3772
3514
  """
3773
3515
  return pulumi.get(self, "resource_pool_id")
3774
3516
 
3775
- @property
3517
+ @_builtins.property
3776
3518
  @pulumi.getter
3777
- def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
3519
+ def tags(self) -> pulumi.Output[Optional[Sequence[_builtins.str]]]:
3778
3520
  """
3779
3521
  The IDs of any tags to attach to this resource.
3780
3522
  """
3781
3523
  return pulumi.get(self, "tags")
3782
3524
 
3783
- @property
3525
+ @_builtins.property
3784
3526
  @pulumi.getter(name="vsanCompressionEnabled")
3785
- def vsan_compression_enabled(self) -> pulumi.Output[Optional[bool]]:
3527
+ def vsan_compression_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3786
3528
  """
3787
3529
  Whether the vSAN compression service is enabled for the cluster.
3788
3530
  """
3789
3531
  return pulumi.get(self, "vsan_compression_enabled")
3790
3532
 
3791
- @property
3533
+ @_builtins.property
3792
3534
  @pulumi.getter(name="vsanDedupEnabled")
3793
- def vsan_dedup_enabled(self) -> pulumi.Output[Optional[bool]]:
3535
+ def vsan_dedup_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3794
3536
  """
3795
3537
  Whether the vSAN deduplication service is enabled for the cluster.
3796
3538
  """
3797
3539
  return pulumi.get(self, "vsan_dedup_enabled")
3798
3540
 
3799
- @property
3541
+ @_builtins.property
3800
3542
  @pulumi.getter(name="vsanDiskGroups")
3801
3543
  def vsan_disk_groups(self) -> pulumi.Output[Sequence['outputs.ComputeClusterVsanDiskGroup']]:
3802
3544
  """
@@ -3804,39 +3546,39 @@ class ComputeCluster(pulumi.CustomResource):
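The vsan_* toggles documented around here are plain booleans on the same resource. A hedged sketch of enabling the space-efficiency services; deduplication is generally paired with compression in vSAN, so both flags are set, and all names are hypothetical.

    import pulumi_vsphere as vsphere

    dc = vsphere.get_datacenter(name="dc-01")                          # hypothetical
    host = vsphere.get_host(name="esxi-01.example.com", datacenter_id=dc.id)

    cluster = vsphere.ComputeCluster("vsan-cluster",
        datacenter_id=dc.id,
        host_system_ids=[host.id],
        vsan_enabled=True,
        vsan_dedup_enabled=True,           # paired with compression below
        vsan_compression_enabled=True)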
3804
3546
  """
3805
3547
  return pulumi.get(self, "vsan_disk_groups")
3806
3548
 
3807
- @property
3549
+ @_builtins.property
3808
3550
  @pulumi.getter(name="vsanDitEncryptionEnabled")
3809
- def vsan_dit_encryption_enabled(self) -> pulumi.Output[Optional[bool]]:
3551
+ def vsan_dit_encryption_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3810
3552
  """
3811
3553
  Whether the vSAN data-in-transit encryption is enabled for the cluster.
3812
3554
  """
3813
3555
  return pulumi.get(self, "vsan_dit_encryption_enabled")
3814
3556
 
3815
- @property
3557
+ @_builtins.property
3816
3558
  @pulumi.getter(name="vsanDitRekeyInterval")
3817
- def vsan_dit_rekey_interval(self) -> pulumi.Output[int]:
3559
+ def vsan_dit_rekey_interval(self) -> pulumi.Output[_builtins.int]:
3818
3560
  """
3819
3561
  When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
3820
3562
  """
3821
3563
  return pulumi.get(self, "vsan_dit_rekey_interval")
3822
3564
 
3823
- @property
3565
+ @_builtins.property
3824
3566
  @pulumi.getter(name="vsanEnabled")
3825
- def vsan_enabled(self) -> pulumi.Output[Optional[bool]]:
3567
+ def vsan_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3826
3568
  """
3827
3569
  Whether the vSAN service is enabled for the cluster.
3828
3570
  """
3829
3571
  return pulumi.get(self, "vsan_enabled")
3830
3572
 
3831
- @property
3573
+ @_builtins.property
3832
3574
  @pulumi.getter(name="vsanEsaEnabled")
3833
- def vsan_esa_enabled(self) -> pulumi.Output[Optional[bool]]:
3575
+ def vsan_esa_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3834
3576
  """
3835
3577
  Whether the vSAN ESA service is enabled for the cluster.
3836
3578
  """
3837
3579
  return pulumi.get(self, "vsan_esa_enabled")
3838
3580
 
3839
- @property
3581
+ @_builtins.property
3840
3582
  @pulumi.getter(name="vsanFaultDomains")
3841
3583
  def vsan_fault_domains(self) -> pulumi.Output[Optional[Sequence['outputs.ComputeClusterVsanFaultDomain']]]:
3842
3584
  """
@@ -3844,31 +3586,31 @@ class ComputeCluster(pulumi.CustomResource):
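For the encryption flags above, the rekey interval only matters once data-in-transit encryption is switched on. A minimal hedged sketch; the 24-hour interval is chosen purely for illustration.

    import pulumi_vsphere as vsphere

    dc = vsphere.get_datacenter(name="dc-01")                          # hypothetical
    host = vsphere.get_host(name="esxi-01.example.com", datacenter_id=dc.id)

    cluster = vsphere.ComputeCluster("vsan-dit-cluster",
        datacenter_id=dc.id,
        host_system_ids=[host.id],
        vsan_enabled=True,
        vsan_dit_encryption_enabled=True,
        vsan_dit_rekey_interval=1440)      # minutes, i.e. rekey roughly once a day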
3844
3586
  """
3845
3587
  return pulumi.get(self, "vsan_fault_domains")
3846
3588
 
3847
- @property
3589
+ @_builtins.property
3848
3590
  @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
3849
- def vsan_network_diagnostic_mode_enabled(self) -> pulumi.Output[Optional[bool]]:
3591
+ def vsan_network_diagnostic_mode_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3850
3592
  """
3851
3593
  Whether the vSAN network diagnostic mode is enabled for the cluster.
3852
3594
  """
3853
3595
  return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")
3854
3596
 
3855
- @property
3597
+ @_builtins.property
3856
3598
  @pulumi.getter(name="vsanPerformanceEnabled")
3857
- def vsan_performance_enabled(self) -> pulumi.Output[Optional[bool]]:
3599
+ def vsan_performance_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3858
3600
  """
3859
3601
  Whether the vSAN performance service is enabled for the cluster.
3860
3602
  """
3861
3603
  return pulumi.get(self, "vsan_performance_enabled")
3862
3604
 
3863
- @property
3605
+ @_builtins.property
3864
3606
  @pulumi.getter(name="vsanRemoteDatastoreIds")
3865
- def vsan_remote_datastore_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
3607
+ def vsan_remote_datastore_ids(self) -> pulumi.Output[Optional[Sequence[_builtins.str]]]:
3866
3608
  """
3867
3609
  The managed object IDs of the vSAN datastore to be mounted on the cluster.
3868
3610
  """
3869
3611
  return pulumi.get(self, "vsan_remote_datastore_ids")
3870
3612
 
3871
- @property
3613
+ @_builtins.property
3872
3614
  @pulumi.getter(name="vsanStretchedCluster")
3873
3615
  def vsan_stretched_cluster(self) -> pulumi.Output[Optional['outputs.ComputeClusterVsanStretchedCluster']]:
3874
3616
  """
@@ -3876,17 +3618,17 @@ class ComputeCluster(pulumi.CustomResource):
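vsan_remote_datastore_ids above takes datastore managed object IDs, which is how a cluster mounts a vSAN datastore served by another cluster. A hedged sketch in which the remote datastore is looked up by name and a couple of the remaining service toggles are enabled; every name is hypothetical.

    import pulumi_vsphere as vsphere

    dc = vsphere.get_datacenter(name="dc-01")                          # hypothetical
    host = vsphere.get_host(name="esxi-01.example.com", datacenter_id=dc.id)
    remote_ds = vsphere.get_datastore(name="vsan-remote", datacenter_id=dc.id)  # hypothetical datastore

    cluster = vsphere.ComputeCluster("vsan-client-cluster",
        datacenter_id=dc.id,
        host_system_ids=[host.id],
        vsan_enabled=True,
        vsan_performance_enabled=True,
        vsan_unmap_enabled=True,
        vsan_remote_datastore_ids=[remote_ds.id])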
3876
3618
  """
3877
3619
  return pulumi.get(self, "vsan_stretched_cluster")
3878
3620
 
3879
- @property
3621
+ @_builtins.property
3880
3622
  @pulumi.getter(name="vsanUnmapEnabled")
3881
- def vsan_unmap_enabled(self) -> pulumi.Output[Optional[bool]]:
3623
+ def vsan_unmap_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3882
3624
  """
3883
3625
  Whether the vSAN unmap service is enabled for the cluster.
3884
3626
  """
3885
3627
  return pulumi.get(self, "vsan_unmap_enabled")
3886
3628
 
3887
- @property
3629
+ @_builtins.property
3888
3630
  @pulumi.getter(name="vsanVerboseModeEnabled")
3889
- def vsan_verbose_mode_enabled(self) -> pulumi.Output[Optional[bool]]:
3631
+ def vsan_verbose_mode_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
3890
3632
  """
3891
3633
  Whether the vSAN verbose mode is enabled for the cluster.
3892
3634
  """