pulumi-vsphere 4.10.0a1710245029__py3-none-any.whl → 4.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pulumi-vsphere might be problematic.
- pulumi_vsphere/__init__.py +28 -0
- pulumi_vsphere/_inputs.py +566 -236
- pulumi_vsphere/_utilities.py +35 -0
- pulumi_vsphere/compute_cluster.py +747 -1477
- pulumi_vsphere/compute_cluster_vm_affinity_rule.py +28 -20
- pulumi_vsphere/datacenter.py +33 -40
- pulumi_vsphere/datastore_cluster.py +154 -364
- pulumi_vsphere/distributed_port_group.py +126 -182
- pulumi_vsphere/distributed_virtual_switch.py +301 -819
- pulumi_vsphere/entity_permissions.py +56 -35
- pulumi_vsphere/file.py +16 -24
- pulumi_vsphere/folder.py +7 -28
- pulumi_vsphere/get_compute_cluster.py +0 -4
- pulumi_vsphere/get_compute_cluster_host_group.py +20 -20
- pulumi_vsphere/get_content_library.py +10 -10
- pulumi_vsphere/get_content_library_item.py +12 -8
- pulumi_vsphere/get_custom_attribute.py +0 -4
- pulumi_vsphere/get_datacenter.py +0 -4
- pulumi_vsphere/get_datastore.py +9 -13
- pulumi_vsphere/get_datastore_cluster.py +0 -4
- pulumi_vsphere/get_datastore_stats.py +38 -44
- pulumi_vsphere/get_distributed_virtual_switch.py +2 -4
- pulumi_vsphere/get_dynamic.py +18 -20
- pulumi_vsphere/get_folder.py +10 -6
- pulumi_vsphere/get_guest_os_customization.py +8 -47
- pulumi_vsphere/get_host.py +0 -4
- pulumi_vsphere/get_host_base_images.py +97 -0
- pulumi_vsphere/get_host_pci_device.py +8 -14
- pulumi_vsphere/get_host_thumbprint.py +12 -16
- pulumi_vsphere/get_host_vgpu_profile.py +4 -10
- pulumi_vsphere/get_license.py +2 -5
- pulumi_vsphere/get_network.py +14 -18
- pulumi_vsphere/get_policy.py +0 -4
- pulumi_vsphere/get_resource_pool.py +14 -18
- pulumi_vsphere/get_role.py +4 -8
- pulumi_vsphere/get_tag.py +0 -4
- pulumi_vsphere/get_tag_category.py +0 -4
- pulumi_vsphere/get_vapp_container.py +0 -4
- pulumi_vsphere/get_virtual_machine.py +58 -41
- pulumi_vsphere/get_vmfs_disks.py +0 -4
- pulumi_vsphere/guest_os_customization.py +50 -0
- pulumi_vsphere/ha_vm_override.py +189 -378
- pulumi_vsphere/host.py +0 -20
- pulumi_vsphere/host_port_group.py +12 -24
- pulumi_vsphere/host_virtual_switch.py +140 -287
- pulumi_vsphere/license.py +0 -32
- pulumi_vsphere/nas_datastore.py +7 -7
- pulumi_vsphere/offline_software_depot.py +180 -0
- pulumi_vsphere/outputs.py +591 -270
- pulumi_vsphere/provider.py +2 -6
- pulumi_vsphere/pulumi-plugin.json +2 -1
- pulumi_vsphere/resource_pool.py +50 -24
- pulumi_vsphere/supervisor.py +962 -0
- pulumi_vsphere/virtual_disk.py +14 -20
- pulumi_vsphere/virtual_machine.py +580 -809
- pulumi_vsphere/virtual_machine_class.py +442 -0
- pulumi_vsphere/virtual_machine_snapshot.py +8 -12
- pulumi_vsphere/vm_storage_policy.py +74 -86
- pulumi_vsphere/vnic.py +61 -77
- {pulumi_vsphere-4.10.0a1710245029.dist-info → pulumi_vsphere-4.10.2.dist-info}/METADATA +1 -1
- pulumi_vsphere-4.10.2.dist-info/RECORD +86 -0
- {pulumi_vsphere-4.10.0a1710245029.dist-info → pulumi_vsphere-4.10.2.dist-info}/WHEEL +1 -1
- pulumi_vsphere-4.10.0a1710245029.dist-info/RECORD +0 -82
- {pulumi_vsphere-4.10.0a1710245029.dist-info → pulumi_vsphere-4.10.2.dist-info}/top_level.txt +0 -0
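The most visible change in compute_cluster.py is a new host_image input on ComputeCluster, backed by the new ComputeClusterHostImageArgs type in _inputs.py (see the hunks below). The following is a minimal, hypothetical sketch of how that argument might be wired up; the placeholder IDs are assumptions, and the host_image block is left commented out because the fields of ComputeClusterHostImageArgs are not shown in this diff.

import pulumi_vsphere as vsphere

# Placeholder IDs for illustration only; a real program would obtain these from
# data sources such as vsphere.get_datacenter() and vsphere.get_host().
datacenter_id = "datacenter-123"
host_ids = ["host-1001", "host-1002"]

cluster = vsphere.ComputeCluster(
    "compute-cluster-test",
    datacenter_id=datacenter_id,
    host_system_ids=host_ids,
    drs_enabled=True,
    ha_enabled=True,
    # New in this release: an optional host_image block (ComputeClusterHostImageArgs)
    # describing the host image to apply to the cluster. Its schema is not part of
    # this hunk, so it is not guessed at here.
    # host_image=vsphere.ComputeClusterHostImageArgs(...),
)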
pulumi_vsphere/compute_cluster.py

@@ -61,6 +61,7 @@ class ComputeClusterArgs:
ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
+ host_image: Optional[pulumi.Input['ComputeClusterHostImageArgs']] = None,
host_managed: Optional[pulumi.Input[bool]] = None,
host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,

@@ -93,225 +94,115 @@ class ComputeClusterArgs:

> **NOTE:** Custom attributes are unsupported on direct ESXi connections
and require vCenter Server.
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[bool] drs_enable_predictive_drs: When `true`, enables DRS to use data
- from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
- recommendations. <sup>\\*</sup>
-
- [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
- :param pulumi.Input[bool] drs_enable_vm_overrides: Allow individual DRS overrides to be
- set for virtual machines in the cluster. Default: `true`.
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster. Default: `false`.
- :param pulumi.Input[int] drs_migration_threshold: A value between `1` and `5` indicating
- the threshold of imbalance tolerated between hosts. A lower setting will
- tolerate more imbalance while a higher setting will tolerate less. Default:
- `3`.
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all
- resource pools in the cluster. Can be one of `disabled` or
- `scaleCpuAndMemoryShares`. Default: `disabled`.
+ :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
+ :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+ machines in the cluster. Requires that DRS be enabled.
+ :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+ affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
+ setting.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
+ :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+ fullyAutomated.
+ :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
+ :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
+ :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
+ :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+ more imbalance while a higher setting will tolerate less.
+ :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
:param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
This is a path relative to the datacenter you are deploying the cluster to.
Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
The provider will place a cluster named `compute-cluster-test` in a
host folder located at `/dc1/host/foo/bar`, with the final inventory path
being `/dc1/host/foo/bar/datastore-cluster-test`.
- :param pulumi.Input[bool] force_evacuate_on_destroy:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[str]
-
-
-
-
-
-
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
-
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- :param pulumi.Input[str]
- virtual machines when the cluster has detected a permanent device loss to a
- relevant datastore. Can be one of `disabled`, `warning`, or
- `restartAggressive`. Default: `disabled`.
- <sup>\\*</sup>
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster. Default:
- `false`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for
- preferred datastores to use for HA heartbeating. This setting is only useful
- when `ha_heartbeat_datastore_policy` is set
- to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA
- heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
- `allFeasibleDsWithUserPreference`. Default:
- `allFeasibleDsWithUserPreference`.
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual
- machines when a host has detected that it has been isolated from the rest of
- the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
- `none`.
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether
- vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
- or `disabled`. Default: `enabled`.
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component
- protection for virtual machines in this cluster. Can be one of `enabled` or
- `disabled`. Default: `enabled`.
- <sup>\\*</sup>
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to
- determine whether or not virtual machines in a certain restart priority class
- are online, allowing HA to move on to restarting virtual machines on the next
- priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
- `appHbStatusGreen`. The default is `none`, which means that a virtual machine
- is considered ready immediately after a host is found to start it on.
- <sup>\\*</sup>
- :param pulumi.Input[int] ha_vm_failure_interval: The time interval, in seconds, a heartbeat
- from a virtual machine is not received within this configured interval,
- the virtual machine is marked as failed. Default: `30` seconds.
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The time, in seconds, for the reset window in
- which `ha_vm_maximum_resets` can operate. When this
- window expires, no more resets are attempted regardless of the setting
- configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
- unlimited reset time is allotted. Default: `-1` (no window).
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will
- perform to a virtual machine when responding to a failure event. Default: `3`
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after
- powering on a virtual machine before monitoring for heartbeats. Default:
- `120` seconds (2 minutes).
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use
- when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
- `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay, in seconds,
- after ready condition is met. A VM is considered ready at this point.
- Default: `0` seconds (no delay). <sup>\\*</sup>
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority
- for affected virtual machines when vSphere detects a host failure. Can be one
- of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds,
- that vSphere HA will wait for virtual machines in one priority to be ready
- before proceeding with the next priority. Default: `600` seconds (10 minutes).
- <sup>\\*</sup>
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout, in seconds, for each host maintenance
- mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
- :param pulumi.Input[bool] host_managed: Can be set to `true` if compute cluster
- membership will be managed through the `host` resource rather than the
- `compute_cluster` resource. Conflicts with: `host_system_ids`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of
- the hosts to put in the cluster. Conflicts with: `host_managed`.
+ :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
+ for testing and is not recommended in normal use.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
+ failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
+ will ignore the host when making recommendations.
+ :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
+ machine operations. The maximum is one less than the number of hosts in the cluster.
+ :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
+ warnings only, whereas a value of 100 disables the setting.
+ :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+ permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
+ slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
+ issues.
+ :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+ subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
+ from the total amount of resources in the cluster. Disable to supply user-defined values.
+ :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+ the cluster to reserve for failover.
+ :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+ the cluster to reserve for failover.
+ :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
+ :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
+ :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+ to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
+ currently in the cluster.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
+ :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
+ affected datastore clears in the middle of an APD event. Can be one of none or reset.
+ :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
+ restartAggressive.
+ :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
+ the response action defined in ha_datastore_apd_response.
+ :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
+ :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+ ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
+ :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+ allFeasibleDsWithUserPreference.
+ :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+ Can be one of none, powerOff, or shutdown.
+ :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
+ :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
+ failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
+ :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
+ on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
+ :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
+ failed. The value is in seconds.
+ :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
+ attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
+ time is allotted.
+ :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
+ :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
+ :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
+ vmMonitoringOnly, or vmAndAppMonitoring.
+ :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
+ :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
+ high, or highest.
+ :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
+ proceeding with the next priority.
+ :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
+ :param pulumi.Input['ComputeClusterHostImageArgs'] host_image: Details about the host image which should be applied to the cluster.
+ :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
:param pulumi.Input[str] name: The name of the cluster.
- :param pulumi.Input[str] proactive_ha_automation_level:
-
-
-
- :param pulumi.Input[
-
-
- for moderately degraded hosts. Can be one of `MaintenanceMode` or
- `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
- `proactive_ha_severe_remediation` is set
- to `QuarantineMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
- :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update
- providers configured for this cluster.
- <sup>\\*</sup>
- :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for
- severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
- Note that this cannot be set to `QuarantineMode` when
- `proactive_ha_moderate_remediation` is
- set to `MaintenanceMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
+ :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
+ :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
+ :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+ this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
+ :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+ cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
- :param pulumi.Input[bool] vsan_compression_enabled:
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[bool] vsan_enabled: Enables vSAN on the cluster.
- :param pulumi.Input[bool] vsan_esa_enabled: Enables vSAN ESA on the cluster.
- :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: Configurations of vSAN fault domains.
- :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Enables network
- diagnostic mode for vSAN performance service on the cluster.
- :param pulumi.Input[bool] vsan_performance_enabled: Enables vSAN performance service on
- the cluster. Default: `true`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The remote vSAN datastore IDs to be
- mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
- `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
- data-in-transit encryption feature at the same time.
- :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: Configurations of vSAN stretched cluster.
- :param pulumi.Input[bool] vsan_unmap_enabled: Enables vSAN unmap on the cluster.
- You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
- :param pulumi.Input[bool] vsan_verbose_mode_enabled: Enables verbose mode for vSAN
- performance service on the cluster.
+ :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
+ :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
+ :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
+ :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: The configuration for vSAN fault domains.
+ :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
+ :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: The configuration for stretched cluster.
+ :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
"""
pulumi.set(__self__, "datacenter_id", datacenter_id)
if custom_attributes is not None:

@@ -402,6 +293,8 @@ class ComputeClusterArgs:
pulumi.set(__self__, "ha_vm_restart_timeout", ha_vm_restart_timeout)
if host_cluster_exit_timeout is not None:
pulumi.set(__self__, "host_cluster_exit_timeout", host_cluster_exit_timeout)
+ if host_image is not None:
+ pulumi.set(__self__, "host_image", host_image)
if host_managed is not None:
pulumi.set(__self__, "host_managed", host_managed)
if host_system_ids is not None:

@@ -482,9 +375,7 @@ class ComputeClusterArgs:
|
|
|
482
375
|
@pulumi.getter(name="dpmAutomationLevel")
|
|
483
376
|
def dpm_automation_level(self) -> Optional[pulumi.Input[str]]:
|
|
484
377
|
"""
|
|
485
|
-
The automation level for host power
|
|
486
|
-
operations in this cluster. Can be one of `manual` or `automated`. Default:
|
|
487
|
-
`manual`.
|
|
378
|
+
The automation level for host power operations in this cluster. Can be one of manual or automated.
|
|
488
379
|
"""
|
|
489
380
|
return pulumi.get(self, "dpm_automation_level")
|
|
490
381
|
|
|
@@ -496,9 +387,8 @@ class ComputeClusterArgs:
|
|
|
496
387
|
@pulumi.getter(name="dpmEnabled")
|
|
497
388
|
def dpm_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
498
389
|
"""
|
|
499
|
-
Enable DPM support for DRS
|
|
500
|
-
|
|
501
|
-
Default: `false`.
|
|
390
|
+
Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
|
|
391
|
+
machines in the cluster. Requires that DRS be enabled.
|
|
502
392
|
"""
|
|
503
393
|
return pulumi.get(self, "dpm_enabled")
|
|
504
394
|
|
|
@@ -510,10 +400,9 @@ class ComputeClusterArgs:
|
|
|
510
400
|
@pulumi.getter(name="dpmThreshold")
|
|
511
401
|
def dpm_threshold(self) -> Optional[pulumi.Input[int]]:
|
|
512
402
|
"""
|
|
513
|
-
A value between
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
tolerate more of a surplus/deficit than a higher setting. Default: `3`.
|
|
403
|
+
A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
|
|
404
|
+
affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
|
|
405
|
+
setting.
|
|
517
406
|
"""
|
|
518
407
|
return pulumi.get(self, "dpm_threshold")
|
|
519
408
|
|
|
@@ -525,8 +414,7 @@ class ComputeClusterArgs:
|
|
|
525
414
|
@pulumi.getter(name="drsAdvancedOptions")
|
|
526
415
|
def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
|
|
527
416
|
"""
|
|
528
|
-
|
|
529
|
-
options for DRS and DPM.
|
|
417
|
+
Advanced configuration options for DRS and DPM.
|
|
530
418
|
"""
|
|
531
419
|
return pulumi.get(self, "drs_advanced_options")
|
|
532
420
|
|
|
@@ -538,9 +426,8 @@ class ComputeClusterArgs:
|
|
|
538
426
|
@pulumi.getter(name="drsAutomationLevel")
|
|
539
427
|
def drs_automation_level(self) -> Optional[pulumi.Input[str]]:
|
|
540
428
|
"""
|
|
541
|
-
The default automation level for all
|
|
542
|
-
|
|
543
|
-
`partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
|
|
429
|
+
The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
|
|
430
|
+
fullyAutomated.
|
|
544
431
|
"""
|
|
545
432
|
return pulumi.get(self, "drs_automation_level")
|
|
546
433
|
|
|
@@ -552,11 +439,7 @@ class ComputeClusterArgs:
|
|
|
552
439
|
@pulumi.getter(name="drsEnablePredictiveDrs")
|
|
553
440
|
def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[bool]]:
|
|
554
441
|
"""
|
|
555
|
-
When
|
|
556
|
-
from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
|
|
557
|
-
recommendations. <sup>\\*</sup>
|
|
558
|
-
|
|
559
|
-
[ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
|
|
442
|
+
When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
|
|
560
443
|
"""
|
|
561
444
|
return pulumi.get(self, "drs_enable_predictive_drs")
|
|
562
445
|
|
|
@@ -568,8 +451,7 @@ class ComputeClusterArgs:
|
|
|
568
451
|
@pulumi.getter(name="drsEnableVmOverrides")
|
|
569
452
|
def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[bool]]:
|
|
570
453
|
"""
|
|
571
|
-
|
|
572
|
-
set for virtual machines in the cluster. Default: `true`.
|
|
454
|
+
When true, allows individual VM overrides within this cluster to be set.
|
|
573
455
|
"""
|
|
574
456
|
return pulumi.get(self, "drs_enable_vm_overrides")
|
|
575
457
|
|
|
@@ -581,7 +463,7 @@ class ComputeClusterArgs:
|
|
|
581
463
|
@pulumi.getter(name="drsEnabled")
|
|
582
464
|
def drs_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
583
465
|
"""
|
|
584
|
-
Enable DRS for this cluster.
|
|
466
|
+
Enable DRS for this cluster.
|
|
585
467
|
"""
|
|
586
468
|
return pulumi.get(self, "drs_enabled")
|
|
587
469
|
|
|
@@ -593,10 +475,8 @@ class ComputeClusterArgs:
|
|
|
593
475
|
@pulumi.getter(name="drsMigrationThreshold")
|
|
594
476
|
def drs_migration_threshold(self) -> Optional[pulumi.Input[int]]:
|
|
595
477
|
"""
|
|
596
|
-
A value between
|
|
597
|
-
|
|
598
|
-
tolerate more imbalance while a higher setting will tolerate less. Default:
|
|
599
|
-
`3`.
|
|
478
|
+
A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
|
|
479
|
+
more imbalance while a higher setting will tolerate less.
|
|
600
480
|
"""
|
|
601
481
|
return pulumi.get(self, "drs_migration_threshold")
|
|
602
482
|
|
|
@@ -608,9 +488,7 @@ class ComputeClusterArgs:
|
|
|
608
488
|
@pulumi.getter(name="drsScaleDescendantsShares")
|
|
609
489
|
def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[str]]:
|
|
610
490
|
"""
|
|
611
|
-
Enable scalable shares for all
|
|
612
|
-
resource pools in the cluster. Can be one of `disabled` or
|
|
613
|
-
`scaleCpuAndMemoryShares`. Default: `disabled`.
|
|
491
|
+
Enable scalable shares for all descendants of this cluster.
|
|
614
492
|
"""
|
|
615
493
|
return pulumi.get(self, "drs_scale_descendants_shares")
|
|
616
494
|
|
|
@@ -639,18 +517,8 @@ class ComputeClusterArgs:
|
|
|
639
517
|
@pulumi.getter(name="forceEvacuateOnDestroy")
|
|
640
518
|
def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[bool]]:
|
|
641
519
|
"""
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
as if they were removed by taking their entry out of `host_system_ids` (see
|
|
645
|
-
below. This is an advanced
|
|
646
|
-
option and should only be used for testing. Default: `false`.
|
|
647
|
-
|
|
648
|
-
> **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
|
|
649
|
-
there are many pitfalls to its use when working with complex cluster
|
|
650
|
-
configurations. Depending on the virtual machines currently on the cluster, and
|
|
651
|
-
your DRS and HA settings, the full host evacuation may fail. Instead,
|
|
652
|
-
incrementally remove hosts from your configuration by adjusting the contents of
|
|
653
|
-
the `host_system_ids` attribute.
|
|
520
|
+
Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
|
|
521
|
+
for testing and is not recommended in normal use.
|
|
654
522
|
"""
|
|
655
523
|
return pulumi.get(self, "force_evacuate_on_destroy")
|
|
656
524
|
|
|
@@ -662,11 +530,9 @@ class ComputeClusterArgs:
|
|
|
662
530
|
@pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
|
|
663
531
|
def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
|
|
664
532
|
"""
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
block access to the host, and DRS will ignore the host when making
|
|
669
|
-
recommendations.
|
|
533
|
+
When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
|
|
534
|
+
failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
|
|
535
|
+
will ignore the host when making recommendations.
|
|
670
536
|
"""
|
|
671
537
|
return pulumi.get(self, "ha_admission_control_failover_host_system_ids")
|
|
672
538
|
|
|
@@ -678,11 +544,8 @@ class ComputeClusterArgs:
|
|
|
678
544
|
@pulumi.getter(name="haAdmissionControlHostFailureTolerance")
|
|
679
545
|
def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[int]]:
|
|
680
546
|
"""
|
|
681
|
-
The maximum number
|
|
682
|
-
|
|
683
|
-
whether to permit virtual machine operations. The maximum is one less than
|
|
684
|
-
the number of hosts in the cluster. Default: `1`.
|
|
685
|
-
<sup>\\*</sup>
|
|
547
|
+
The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
|
|
548
|
+
machine operations. The maximum is one less than the number of hosts in the cluster.
|
|
686
549
|
"""
|
|
687
550
|
return pulumi.get(self, "ha_admission_control_host_failure_tolerance")
|
|
688
551
|
|
|
@@ -694,10 +557,8 @@ class ComputeClusterArgs:
|
|
|
694
557
|
@pulumi.getter(name="haAdmissionControlPerformanceTolerance")
|
|
695
558
|
def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[int]]:
|
|
696
559
|
"""
|
|
697
|
-
The percentage of
|
|
698
|
-
|
|
699
|
-
a failover. A value of 0 produces warnings only, whereas a value of 100
|
|
700
|
-
disables the setting. Default: `100` (disabled).
|
|
560
|
+
The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
|
|
561
|
+
warnings only, whereas a value of 100 disables the setting.
|
|
701
562
|
"""
|
|
702
563
|
return pulumi.get(self, "ha_admission_control_performance_tolerance")
|
|
703
564
|
|
|
@@ -709,9 +570,10 @@ class ComputeClusterArgs:
|
|
|
709
570
|
@pulumi.getter(name="haAdmissionControlPolicy")
|
|
710
571
|
def ha_admission_control_policy(self) -> Optional[pulumi.Input[str]]:
|
|
711
572
|
"""
|
|
712
|
-
The type of admission control
|
|
713
|
-
|
|
714
|
-
|
|
573
|
+
The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
|
|
574
|
+
permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
|
|
575
|
+
slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
|
|
576
|
+
issues.
|
|
715
577
|
"""
|
|
716
578
|
return pulumi.get(self, "ha_admission_control_policy")
|
|
717
579
|
|
|
@@ -723,12 +585,9 @@ class ComputeClusterArgs:
|
|
|
723
585
|
@pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
|
|
724
586
|
def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[bool]]:
|
|
725
587
|
"""
|
|
726
|
-
|
|
727
|
-
average number of host resources represented by the
|
|
728
|
-
|
|
729
|
-
setting from the total amount of resources in the cluster. Disable to supply
|
|
730
|
-
user-defined values. Default: `true`.
|
|
731
|
-
<sup>\\*</sup>
|
|
588
|
+
When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
|
|
589
|
+
subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
|
|
590
|
+
from the total amount of resources in the cluster. Disable to supply user-defined values.
|
|
732
591
|
"""
|
|
733
592
|
return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")
|
|
734
593
|
|
|
@@ -740,9 +599,8 @@ class ComputeClusterArgs:
|
|
|
740
599
|
@pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
|
|
741
600
|
def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[int]]:
|
|
742
601
|
"""
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
failover. Default: `100`.
|
|
602
|
+
When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
|
|
603
|
+
the cluster to reserve for failover.
|
|
746
604
|
"""
|
|
747
605
|
return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")
|
|
748
606
|
|
|
@@ -754,9 +612,8 @@ class ComputeClusterArgs:
|
|
|
754
612
|
@pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
|
|
755
613
|
def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[int]]:
|
|
756
614
|
"""
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
failover. Default: `100`.
|
|
615
|
+
When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
|
|
616
|
+
the cluster to reserve for failover.
|
|
760
617
|
"""
|
|
761
618
|
return pulumi.get(self, "ha_admission_control_resource_percentage_memory")
|
|
762
619
|
|
|
@@ -768,8 +625,7 @@ class ComputeClusterArgs:
|
|
|
768
625
|
@pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
|
|
769
626
|
def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[int]]:
|
|
770
627
|
"""
|
|
771
|
-
|
|
772
|
-
user-defined CPU slot size, in MHz. Default: `32`.
|
|
628
|
+
When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
|
|
773
629
|
"""
|
|
774
630
|
return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")
|
|
775
631
|
|
|
@@ -781,8 +637,7 @@ class ComputeClusterArgs:
|
|
|
781
637
|
@pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
|
|
782
638
|
def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[int]]:
|
|
783
639
|
"""
|
|
784
|
-
|
|
785
|
-
user-defined memory slot size, in MB. Default: `100`.
|
|
640
|
+
When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
|
|
786
641
|
"""
|
|
787
642
|
return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")
|
|
788
643
|
|
|
@@ -794,10 +649,9 @@ class ComputeClusterArgs:
|
|
|
794
649
|
@pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
|
|
795
650
|
def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[bool]]:
|
|
796
651
|
"""
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
average based on all powered-on virtual machines currently in the cluster.
|
|
652
|
+
When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
|
|
653
|
+
to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
|
|
654
|
+
currently in the cluster.
|
|
801
655
|
"""
|
|
802
656
|
return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")
|
|
803
657
|
|
|
@@ -809,8 +663,7 @@ class ComputeClusterArgs:
|
|
|
809
663
|
@pulumi.getter(name="haAdvancedOptions")
|
|
810
664
|
def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
|
|
811
665
|
"""
|
|
812
|
-
|
|
813
|
-
options for vSphere HA.
|
|
666
|
+
Advanced configuration options for vSphere HA.
|
|
814
667
|
"""
|
|
815
668
|
return pulumi.get(self, "ha_advanced_options")
|
|
816
669
|
|
|
@@ -822,10 +675,8 @@ class ComputeClusterArgs:
|
|
|
822
675
|
@pulumi.getter(name="haDatastoreApdRecoveryAction")
|
|
823
676
|
def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[str]]:
|
|
824
677
|
"""
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
|
|
828
|
-
<sup>\\*</sup>
|
|
678
|
+
When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
|
|
679
|
+
affected datastore clears in the middle of an APD event. Can be one of none or reset.
|
|
829
680
|
"""
|
|
830
681
|
return pulumi.get(self, "ha_datastore_apd_recovery_action")
|
|
831
682
|
|
|
@@ -837,11 +688,9 @@ class ComputeClusterArgs:
|
|
|
837
688
|
@pulumi.getter(name="haDatastoreApdResponse")
|
|
838
689
|
def ha_datastore_apd_response(self) -> Optional[pulumi.Input[str]]:
|
|
839
690
|
"""
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
`restartConservative`, or `restartAggressive`. Default: `disabled`.
|
|
844
|
-
<sup>\\*</sup>
|
|
691
|
+
When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
|
|
692
|
+
detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
|
|
693
|
+
restartAggressive.
|
|
845
694
|
"""
|
|
846
695
|
return pulumi.get(self, "ha_datastore_apd_response")
|
|
847
696
|
|
|
@@ -853,10 +702,8 @@ class ComputeClusterArgs:
|
|
|
853
702
|
@pulumi.getter(name="haDatastoreApdResponseDelay")
|
|
854
703
|
def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[int]]:
|
|
855
704
|
"""
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
`ha_datastore_apd_response`. Default: `180`
|
|
859
|
-
seconds (3 minutes). <sup>\\*</sup>
|
|
705
|
+
When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
|
|
706
|
+
the response action defined in ha_datastore_apd_response.
|
|
860
707
|
"""
|
|
861
708
|
return pulumi.get(self, "ha_datastore_apd_response_delay")
|
|
862
709
|
|
|
@@ -868,11 +715,8 @@ class ComputeClusterArgs:
|
|
|
868
715
|
@pulumi.getter(name="haDatastorePdlResponse")
|
|
869
716
|
def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[str]]:
|
|
870
717
|
"""
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
relevant datastore. Can be one of `disabled`, `warning`, or
|
|
874
|
-
`restartAggressive`. Default: `disabled`.
|
|
875
|
-
<sup>\\*</sup>
|
|
718
|
+
When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
|
|
719
|
+
detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
|
|
876
720
|
"""
|
|
877
721
|
return pulumi.get(self, "ha_datastore_pdl_response")
|
|
878
722
|
|
|
@@ -884,8 +728,7 @@ class ComputeClusterArgs:
|
|
|
884
728
|
@pulumi.getter(name="haEnabled")
|
|
885
729
|
def ha_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
886
730
|
"""
|
|
887
|
-
Enable vSphere HA for this cluster.
|
|
888
|
-
`false`.
|
|
731
|
+
Enable vSphere HA for this cluster.
|
|
889
732
|
"""
|
|
890
733
|
return pulumi.get(self, "ha_enabled")
|
|
891
734
|
|
|
@@ -897,10 +740,8 @@ class ComputeClusterArgs:
|
|
|
897
740
|
@pulumi.getter(name="haHeartbeatDatastoreIds")
|
|
898
741
|
def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
|
|
899
742
|
"""
|
|
900
|
-
The list of managed object IDs for
|
|
901
|
-
|
|
902
|
-
when `ha_heartbeat_datastore_policy` is set
|
|
903
|
-
to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
|
|
743
|
+
The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
|
|
744
|
+
ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
|
|
904
745
|
"""
|
|
905
746
|
return pulumi.get(self, "ha_heartbeat_datastore_ids")
|
|
906
747
|
|
|
@@ -912,10 +753,8 @@ class ComputeClusterArgs:
|
|
|
912
753
|
@pulumi.getter(name="haHeartbeatDatastorePolicy")
|
|
913
754
|
def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[str]]:
|
|
914
755
|
"""
|
|
915
|
-
The selection policy for HA
|
|
916
|
-
|
|
917
|
-
`allFeasibleDsWithUserPreference`. Default:
|
|
918
|
-
`allFeasibleDsWithUserPreference`.
|
|
756
|
+
The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
|
|
757
|
+
allFeasibleDsWithUserPreference.
|
|
919
758
|
"""
|
|
920
759
|
return pulumi.get(self, "ha_heartbeat_datastore_policy")
|
|
921
760
|
|
|
@@ -927,10 +766,8 @@ class ComputeClusterArgs:
|
|
|
927
766
|
@pulumi.getter(name="haHostIsolationResponse")
|
|
928
767
|
def ha_host_isolation_response(self) -> Optional[pulumi.Input[str]]:
|
|
929
768
|
"""
|
|
930
|
-
The action to take on virtual
|
|
931
|
-
|
|
932
|
-
the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
|
|
933
|
-
`none`.
|
|
769
|
+
The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
|
|
770
|
+
Can be one of none, powerOff, or shutdown.
|
|
934
771
|
"""
|
|
935
772
|
return pulumi.get(self, "ha_host_isolation_response")
|
|
936
773
|
|
|
@@ -942,9 +779,7 @@ class ComputeClusterArgs:
|
|
|
942
779
|
@pulumi.getter(name="haHostMonitoring")
|
|
943
780
|
def ha_host_monitoring(self) -> Optional[pulumi.Input[str]]:
|
|
944
781
|
"""
|
|
945
|
-
Global setting that controls whether
|
|
946
|
-
vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
|
|
947
|
-
or `disabled`. Default: `enabled`.
|
|
782
|
+
Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
|
|
948
783
|
"""
|
|
949
784
|
return pulumi.get(self, "ha_host_monitoring")
|
|
950
785
|
|
|
@@ -956,10 +791,8 @@ class ComputeClusterArgs:
|
|
|
956
791
|
@pulumi.getter(name="haVmComponentProtection")
|
|
957
792
|
def ha_vm_component_protection(self) -> Optional[pulumi.Input[str]]:
|
|
958
793
|
"""
|
|
959
|
-
Controls vSphere VM component
|
|
960
|
-
|
|
961
|
-
`disabled`. Default: `enabled`.
|
|
962
|
-
<sup>\\*</sup>
|
|
794
|
+
Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
|
|
795
|
+
failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
|
|
963
796
|
"""
|
|
964
797
|
return pulumi.get(self, "ha_vm_component_protection")
|
|
965
798
|
|
|
@@ -971,13 +804,8 @@ class ComputeClusterArgs:
|
|
|
971
804
|
@pulumi.getter(name="haVmDependencyRestartCondition")
|
|
972
805
|
def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[str]]:
|
|
973
806
|
"""
|
|
974
|
-
The condition used to
|
|
975
|
-
|
|
976
|
-
are online, allowing HA to move on to restarting virtual machines on the next
|
|
977
|
-
priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
|
|
978
|
-
`appHbStatusGreen`. The default is `none`, which means that a virtual machine
|
|
979
|
-
is considered ready immediately after a host is found to start it on.
|
|
980
|
-
<sup>\\*</sup>
|
|
807
|
+
The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
|
|
808
|
+
on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
|
|
981
809
|
"""
|
|
982
810
|
return pulumi.get(self, "ha_vm_dependency_restart_condition")
|
|
983
811
|
|
|
@@ -989,9 +817,8 @@ class ComputeClusterArgs:
|
|
|
989
817
|
@pulumi.getter(name="haVmFailureInterval")
|
|
990
818
|
def ha_vm_failure_interval(self) -> Optional[pulumi.Input[int]]:
|
|
991
819
|
"""
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
the virtual machine is marked as failed. Default: `30` seconds.
|
|
820
|
+
If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
|
|
821
|
+
failed. The value is in seconds.
|
|
995
822
|
"""
|
|
996
823
|
return pulumi.get(self, "ha_vm_failure_interval")
|
|
997
824
|
|
|
@@ -1003,11 +830,9 @@ class ComputeClusterArgs:
|
|
|
1003
830
|
@pulumi.getter(name="haVmMaximumFailureWindow")
|
|
1004
831
|
def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[int]]:
|
|
1005
832
|
"""
|
|
1006
|
-
The
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
|
|
1010
|
-
unlimited reset time is allotted. Default: `-1` (no window).
|
|
833
|
+
The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
|
|
834
|
+
attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
|
|
835
|
+
time is allotted.
|
|
1011
836
|
"""
|
|
1012
837
|
return pulumi.get(self, "ha_vm_maximum_failure_window")
|
|
1013
838
|
|
|
@@ -1019,8 +844,7 @@ class ComputeClusterArgs:
|
|
|
1019
844
|
@pulumi.getter(name="haVmMaximumResets")
|
|
1020
845
|
def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[int]]:
|
|
1021
846
|
"""
|
|
1022
|
-
The maximum number of resets that HA will
|
|
1023
|
-
perform to a virtual machine when responding to a failure event. Default: `3`
|
|
847
|
+
The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
|
|
1024
848
|
"""
|
|
1025
849
|
return pulumi.get(self, "ha_vm_maximum_resets")
|
|
1026
850
|
|
|
@@ -1032,9 +856,7 @@ class ComputeClusterArgs:
|
|
|
1032
856
|
@pulumi.getter(name="haVmMinimumUptime")
|
|
1033
857
|
def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[int]]:
|
|
1034
858
|
"""
|
|
1035
|
-
The time, in seconds, that HA waits after
|
|
1036
|
-
powering on a virtual machine before monitoring for heartbeats. Default:
|
|
1037
|
-
`120` seconds (2 minutes).
|
|
859
|
+
The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
|
|
1038
860
|
"""
|
|
1039
861
|
return pulumi.get(self, "ha_vm_minimum_uptime")
|
|
1040
862
|
|
|
@@ -1046,9 +868,8 @@ class ComputeClusterArgs:
|
|
|
1046
868
|
@pulumi.getter(name="haVmMonitoring")
|
|
1047
869
|
def ha_vm_monitoring(self) -> Optional[pulumi.Input[str]]:
|
|
1048
870
|
"""
|
|
1049
|
-
The type of virtual machine monitoring to use
|
|
1050
|
-
|
|
1051
|
-
`vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
|
|
871
|
+
The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
|
|
872
|
+
vmMonitoringOnly, or vmAndAppMonitoring.
|
|
1052
873
|
"""
|
|
1053
874
|
return pulumi.get(self, "ha_vm_monitoring")
|
|
1054
875
|
|
|
@@ -1060,9 +881,7 @@ class ComputeClusterArgs:
|
|
|
1060
881
|
@pulumi.getter(name="haVmRestartAdditionalDelay")
|
|
1061
882
|
def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[int]]:
|
|
1062
883
|
"""
|
|
1063
|
-
Additional delay
|
|
1064
|
-
after ready condition is met. A VM is considered ready at this point.
|
|
1065
|
-
Default: `0` seconds (no delay). <sup>\\*</sup>
|
|
884
|
+
Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
|
|
1066
885
|
"""
|
|
1067
886
|
return pulumi.get(self, "ha_vm_restart_additional_delay")
|
|
1068
887
|
|
|
@@ -1074,9 +893,8 @@ class ComputeClusterArgs:
|
|
|
1074
893
|
@pulumi.getter(name="haVmRestartPriority")
|
|
1075
894
|
def ha_vm_restart_priority(self) -> Optional[pulumi.Input[str]]:
|
|
1076
895
|
"""
|
|
1077
|
-
The default restart priority
|
|
1078
|
-
|
|
1079
|
-
of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
|
|
896
|
+
The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
|
|
897
|
+
high, or highest.
|
|
1080
898
|
"""
|
|
1081
899
|
return pulumi.get(self, "ha_vm_restart_priority")
|
|
1082
900
|
|
|
@@ -1088,10 +906,8 @@ class ComputeClusterArgs:
|
|
|
1088
906
|
@pulumi.getter(name="haVmRestartTimeout")
|
|
1089
907
|
def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[int]]:
|
|
1090
908
|
"""
|
|
1091
|
-
The maximum time, in seconds,
|
|
1092
|
-
|
|
1093
|
-
before proceeding with the next priority. Default: `600` seconds (10 minutes).
|
|
1094
|
-
<sup>\\*</sup>
|
|
909
|
+
The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
|
|
910
|
+
proceeding with the next priority.
|
|
1095
911
|
"""
|
|
1096
912
|
return pulumi.get(self, "ha_vm_restart_timeout")
|
|
1097
913
|
|
|
@@ -1103,8 +919,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="hostClusterExitTimeout")
  def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[int]]:
  """
- The timeout
- mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
+ The timeout for each host maintenance mode operation when removing hosts from a cluster.
  """
  return pulumi.get(self, "host_cluster_exit_timeout")

@@ -1112,13 +927,23 @@ class ComputeClusterArgs:
  def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[int]]):
  pulumi.set(self, "host_cluster_exit_timeout", value)

+ @property
+ @pulumi.getter(name="hostImage")
+ def host_image(self) -> Optional[pulumi.Input['ComputeClusterHostImageArgs']]:
+ """
+ Details about the host image which should be applied to the cluster.
+ """
+ return pulumi.get(self, "host_image")
+
+ @host_image.setter
+ def host_image(self, value: Optional[pulumi.Input['ComputeClusterHostImageArgs']]):
+ pulumi.set(self, "host_image", value)
+
  @property
  @pulumi.getter(name="hostManaged")
  def host_managed(self) -> Optional[pulumi.Input[bool]]:
  """
-
- membership will be managed through the `host` resource rather than the
- `compute_cluster` resource. Conflicts with: `host_system_ids`.
+ Must be set if cluster enrollment is managed from host resource.
  """
  return pulumi.get(self, "host_managed")
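Host membership can be declared either on the cluster (via `host_system_ids`) or from the `Host` resource side (via `host_managed`); the two approaches conflict. A minimal sketch of the cluster-side approach, with placeholder datacenter and host names:

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")
# Hypothetical ESXi host names; substitute hosts that exist in the datacenter.
hosts = [
    vsphere.get_host(name=h, datacenter_id=dc.id)
    for h in ["esxi-01.example.com", "esxi-02.example.com"]
]

cluster = vsphere.ComputeCluster(
    "managed-cluster",
    name="cluster-01",
    datacenter_id=dc.id,
    # Either list hosts here, or set host_managed=True and enroll hosts
    # through the Host resource instead (the two conflict).
    host_system_ids=[h.id for h in hosts],
    host_cluster_exit_timeout=3600)
    # The new host_image argument (ComputeClusterHostImageArgs) could also be
    # supplied here; its fields are not shown in this file.
```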
@@ -1130,8 +955,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="hostSystemIds")
  def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
  """
- The managed object IDs of
- the hosts to put in the cluster. Conflicts with: `host_managed`.
+ The managed object IDs of the hosts to put in the cluster.
  """
  return pulumi.get(self, "host_system_ids")

@@ -1155,10 +979,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="proactiveHaAutomationLevel")
  def proactive_ha_automation_level(self) -> Optional[pulumi.Input[str]]:
  """
-
- quarantine, maintenance mode, or virtual machine migration recommendations
- made by proactive HA are to be handled. Can be one of `Automated` or
- `Manual`. Default: `Manual`. <sup>\\*</sup>
+ The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
  """
  return pulumi.get(self, "proactive_ha_automation_level")
@@ -1170,8 +991,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="proactiveHaEnabled")
  def proactive_ha_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
- Enables
- <sup>\\*</sup>
+ Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
  """
  return pulumi.get(self, "proactive_ha_enabled")

@@ -1183,12 +1003,8 @@ class ComputeClusterArgs:
  @pulumi.getter(name="proactiveHaModerateRemediation")
  def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[str]]:
  """
- The configured remediation
-
- `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
- `proactive_ha_severe_remediation` is set
- to `QuarantineMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
+ The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+ this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
  """
  return pulumi.get(self, "proactive_ha_moderate_remediation")

@@ -1200,9 +1016,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="proactiveHaProviderIds")
  def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
  """
- The list of IDs for health update
- providers configured for this cluster.
- <sup>\\*</sup>
+ The list of IDs for health update providers configured for this cluster.
  """
  return pulumi.get(self, "proactive_ha_provider_ids")

@@ -1214,12 +1028,8 @@ class ComputeClusterArgs:
  @pulumi.getter(name="proactiveHaSevereRemediation")
  def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[str]]:
  """
- The configured remediation for
-
- Note that this cannot be set to `QuarantineMode` when
- `proactive_ha_moderate_remediation` is
- set to `MaintenanceMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
+ The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+ cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
  """
  return pulumi.get(self, "proactive_ha_severe_remediation")
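The proactive HA arguments above work together: remediation modes are constrained against each other, and provider IDs identify the registered health update providers. A minimal sketch, with a placeholder provider ID and datacenter name:

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")

cluster = vsphere.ComputeCluster(
    "proactive-ha-cluster",
    name="cluster-proactive-ha",
    datacenter_id=dc.id,
    proactive_ha_enabled=True,
    # Automated or Manual.
    proactive_ha_automation_level="Automated",
    # Both set to QuarantineMode to satisfy the cross-field constraint
    # described in the docstrings above.
    proactive_ha_moderate_remediation="QuarantineMode",
    proactive_ha_severe_remediation="QuarantineMode",
    # Placeholder ID of a registered health update provider.
    proactive_ha_provider_ids=["example-provider-id"])
```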
@@ -1243,8 +1053,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanCompressionEnabled")
  def vsan_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- cluster.
+ Whether the vSAN compression service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_compression_enabled")

@@ -1256,9 +1065,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanDedupEnabled")
  def vsan_dedup_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
- compression must also be enabled.
+ Whether the vSAN deduplication service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_dedup_enabled")

@@ -1270,8 +1077,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanDiskGroups")
  def vsan_disk_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]:
  """
-
- group in the cluster.
+ A list of disk UUIDs to add to the vSAN cluster.
  """
  return pulumi.get(self, "vsan_disk_groups")

@@ -1283,10 +1089,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanDitEncryptionEnabled")
  def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
- vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
- at the same time.
+ Whether the vSAN data-in-transit encryption is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_dit_encryption_enabled")

@@ -1298,9 +1101,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanDitRekeyInterval")
  def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[int]]:
  """
-
- minutes for data-in-transit encryption. The valid rekey interval is 30 to
- 10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
+ When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
  """
  return pulumi.get(self, "vsan_dit_rekey_interval")

@@ -1312,7 +1113,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanEnabled")
  def vsan_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
+ Whether the vSAN service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_enabled")

@@ -1324,7 +1125,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanEsaEnabled")
  def vsan_esa_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
+ Whether the vSAN ESA service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_esa_enabled")

@@ -1336,7 +1137,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanFaultDomains")
  def vsan_fault_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]:
  """
-
+ The configuration for vSAN fault domains.
  """
  return pulumi.get(self, "vsan_fault_domains")

@@ -1348,8 +1149,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
  def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- diagnostic mode for vSAN performance service on the cluster.
+ Whether the vSAN network diagnostic mode is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")

@@ -1361,8 +1161,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanPerformanceEnabled")
  def vsan_performance_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- the cluster. Default: `true`.
+ Whether the vSAN performance service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_performance_enabled")

@@ -1374,10 +1173,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanRemoteDatastoreIds")
  def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
  """
- The
- mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
- `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
- data-in-transit encryption feature at the same time.
+ The managed object IDs of the vSAN datastore to be mounted on the cluster.
  """
  return pulumi.get(self, "vsan_remote_datastore_ids")

@@ -1389,7 +1185,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanStretchedCluster")
  def vsan_stretched_cluster(self) -> Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]:
  """
-
+ The configuration for stretched cluster.
  """
  return pulumi.get(self, "vsan_stretched_cluster")

@@ -1401,8 +1197,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanUnmapEnabled")
  def vsan_unmap_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
+ Whether the vSAN unmap service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_unmap_enabled")

@@ -1414,8 +1209,7 @@ class ComputeClusterArgs:
  @pulumi.getter(name="vsanVerboseModeEnabled")
  def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- performance service on the cluster.
+ Whether the vSAN verbose mode is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_verbose_mode_enabled")
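The vSAN flags above are interdependent (the earlier docstrings note that deduplication requires compression, and that data-in-transit encryption conflicts with remote datastores / HCI Mesh). A minimal sketch, assuming a placeholder datacenter and suitable vSAN-capable hosts already in the cluster:

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")

cluster = vsphere.ComputeCluster(
    "vsan-cluster",
    name="cluster-vsan",
    datacenter_id=dc.id,
    vsan_enabled=True,
    # Deduplication requires compression to be enabled as well.
    vsan_compression_enabled=True,
    vsan_dedup_enabled=True,
    vsan_performance_enabled=True,
    # Data-in-transit encryption conflicts with vsan_remote_datastore_ids
    # (HCI Mesh), so only one of the two is configured here.
    vsan_dit_encryption_enabled=True,
    vsan_dit_rekey_interval=1440)
```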
@@ -1472,6 +1266,7 @@ class _ComputeClusterState:
  ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
  ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
  host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
+ host_image: Optional[pulumi.Input['ComputeClusterHostImageArgs']] = None,
  host_managed: Optional[pulumi.Input[bool]] = None,
  host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
  name: Optional[pulumi.Input[str]] = None,
@@ -1505,230 +1300,120 @@ class _ComputeClusterState:
  and require vCenter Server.
  :param pulumi.Input[str] datacenter_id: The managed object ID of
  the datacenter to create the cluster in. Forces a new resource if changed.
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[bool] drs_enable_predictive_drs: When `true`, enables DRS to use data
- from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
- recommendations. <sup>\\*</sup>
-
- [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
- :param pulumi.Input[bool] drs_enable_vm_overrides: Allow individual DRS overrides to be
- set for virtual machines in the cluster. Default: `true`.
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster. Default: `false`.
- :param pulumi.Input[int] drs_migration_threshold: A value between `1` and `5` indicating
- the threshold of imbalance tolerated between hosts. A lower setting will
- tolerate more imbalance while a higher setting will tolerate less. Default:
- `3`.
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all
- resource pools in the cluster. Can be one of `disabled` or
- `scaleCpuAndMemoryShares`. Default: `disabled`.
+ :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
+ :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+ machines in the cluster. Requires that DRS be enabled.
+ :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+ affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
+ setting.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
+ :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+ fullyAutomated.
+ :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
+ :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
+ :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
+ :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+ more imbalance while a higher setting will tolerate less.
+ :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
  :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
  This is a path relative to the datacenter you are deploying the cluster to.
  Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
  The provider will place a cluster named `compute-cluster-test` in a
  host folder located at `/dc1/host/foo/bar`, with the final inventory path
  being `/dc1/host/foo/bar/datastore-cluster-test`.
- :param pulumi.Input[bool] force_evacuate_on_destroy:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[str]
-
-
-
-
-
-
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
-
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- :param pulumi.Input[str]
- virtual machines when the cluster has detected a permanent device loss to a
- relevant datastore. Can be one of `disabled`, `warning`, or
- `restartAggressive`. Default: `disabled`.
- <sup>\\*</sup>
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster. Default:
- `false`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for
- preferred datastores to use for HA heartbeating. This setting is only useful
- when `ha_heartbeat_datastore_policy` is set
- to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA
- heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
- `allFeasibleDsWithUserPreference`. Default:
- `allFeasibleDsWithUserPreference`.
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual
- machines when a host has detected that it has been isolated from the rest of
- the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
- `none`.
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether
- vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
- or `disabled`. Default: `enabled`.
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component
- protection for virtual machines in this cluster. Can be one of `enabled` or
- `disabled`. Default: `enabled`.
- <sup>\\*</sup>
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to
- determine whether or not virtual machines in a certain restart priority class
- are online, allowing HA to move on to restarting virtual machines on the next
- priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
- `appHbStatusGreen`. The default is `none`, which means that a virtual machine
- is considered ready immediately after a host is found to start it on.
- <sup>\\*</sup>
- :param pulumi.Input[int] ha_vm_failure_interval: The time interval, in seconds, a heartbeat
- from a virtual machine is not received within this configured interval,
- the virtual machine is marked as failed. Default: `30` seconds.
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The time, in seconds, for the reset window in
- which `ha_vm_maximum_resets` can operate. When this
- window expires, no more resets are attempted regardless of the setting
- configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
- unlimited reset time is allotted. Default: `-1` (no window).
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will
- perform to a virtual machine when responding to a failure event. Default: `3`
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after
- powering on a virtual machine before monitoring for heartbeats. Default:
- `120` seconds (2 minutes).
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use
- when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
- `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay, in seconds,
- after ready condition is met. A VM is considered ready at this point.
- Default: `0` seconds (no delay). <sup>\\*</sup>
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority
- for affected virtual machines when vSphere detects a host failure. Can be one
- of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds,
- that vSphere HA will wait for virtual machines in one priority to be ready
- before proceeding with the next priority. Default: `600` seconds (10 minutes).
- <sup>\\*</sup>
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout, in seconds, for each host maintenance
- mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
- :param pulumi.Input[bool] host_managed: Can be set to `true` if compute cluster
- membership will be managed through the `host` resource rather than the
- `compute_cluster` resource. Conflicts with: `host_system_ids`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of
- the hosts to put in the cluster. Conflicts with: `host_managed`.
+ :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
+ for testing and is not recommended in normal use.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
+ failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
+ will ignore the host when making recommendations.
+ :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
+ machine operations. The maximum is one less than the number of hosts in the cluster.
+ :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
+ warnings only, whereas a value of 100 disables the setting.
+ :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+ permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
+ slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
+ issues.
+ :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+ subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
+ from the total amount of resources in the cluster. Disable to supply user-defined values.
+ :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+ the cluster to reserve for failover.
+ :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+ the cluster to reserve for failover.
+ :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
+ :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
+ :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+ to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
+ currently in the cluster.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
+ :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
+ affected datastore clears in the middle of an APD event. Can be one of none or reset.
+ :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
+ restartAggressive.
+ :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
+ the response action defined in ha_datastore_apd_response.
+ :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
+ :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+ ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
+ :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+ allFeasibleDsWithUserPreference.
+ :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+ Can be one of none, powerOff, or shutdown.
+ :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
+ :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
+ failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
+ :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
+ on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
+ :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
+ failed. The value is in seconds.
+ :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
+ attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
+ time is allotted.
+ :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
+ :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
+ :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
+ vmMonitoringOnly, or vmAndAppMonitoring.
+ :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
+ :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
+ high, or highest.
+ :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
+ proceeding with the next priority.
+ :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
+ :param pulumi.Input['ComputeClusterHostImageArgs'] host_image: Details about the host image which should be applied to the cluster.
+ :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
  :param pulumi.Input[str] name: The name of the cluster.
- :param pulumi.Input[str] proactive_ha_automation_level:
-
-
-
- :param pulumi.Input[
-
-
- for moderately degraded hosts. Can be one of `MaintenanceMode` or
- `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
- `proactive_ha_severe_remediation` is set
- to `QuarantineMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
- :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update
- providers configured for this cluster.
- <sup>\\*</sup>
- :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for
- severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
- Note that this cannot be set to `QuarantineMode` when
- `proactive_ha_moderate_remediation` is
- set to `MaintenanceMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
+ :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
+ :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
+ :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+ this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
+ :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+ cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
  :param pulumi.Input[str] resource_pool_id: The managed object ID of the primary
  resource pool for this cluster. This can be passed directly to the
  `resource_pool_id`
  attribute of the
  `VirtualMachine` resource.
  :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
- :param pulumi.Input[bool] vsan_compression_enabled:
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[bool] vsan_enabled: Enables vSAN on the cluster.
- :param pulumi.Input[bool] vsan_esa_enabled: Enables vSAN ESA on the cluster.
- :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: Configurations of vSAN fault domains.
- :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Enables network
- diagnostic mode for vSAN performance service on the cluster.
- :param pulumi.Input[bool] vsan_performance_enabled: Enables vSAN performance service on
- the cluster. Default: `true`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The remote vSAN datastore IDs to be
- mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
- `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
- data-in-transit encryption feature at the same time.
- :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: Configurations of vSAN stretched cluster.
- :param pulumi.Input[bool] vsan_unmap_enabled: Enables vSAN unmap on the cluster.
- You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
- :param pulumi.Input[bool] vsan_verbose_mode_enabled: Enables verbose mode for vSAN
- performance service on the cluster.
+ :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
+ :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
+ :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
+ :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: The configuration for vSAN fault domains.
+ :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
+ :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: The configuration for stretched cluster.
+ :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
  """
  if custom_attributes is not None:
  pulumi.set(__self__, "custom_attributes", custom_attributes)
@@ -1820,6 +1505,8 @@ class _ComputeClusterState:
  pulumi.set(__self__, "ha_vm_restart_timeout", ha_vm_restart_timeout)
  if host_cluster_exit_timeout is not None:
  pulumi.set(__self__, "host_cluster_exit_timeout", host_cluster_exit_timeout)
+ if host_image is not None:
+ pulumi.set(__self__, "host_image", host_image)
  if host_managed is not None:
  pulumi.set(__self__, "host_managed", host_managed)
  if host_system_ids is not None:
@@ -1902,9 +1589,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="dpmAutomationLevel")
  def dpm_automation_level(self) -> Optional[pulumi.Input[str]]:
  """
- The automation level for host power
- operations in this cluster. Can be one of `manual` or `automated`. Default:
- `manual`.
+ The automation level for host power operations in this cluster. Can be one of manual or automated.
  """
  return pulumi.get(self, "dpm_automation_level")

@@ -1916,9 +1601,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="dpmEnabled")
  def dpm_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
- Enable DPM support for DRS
-
- Default: `false`.
+ Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+ machines in the cluster. Requires that DRS be enabled.
  """
  return pulumi.get(self, "dpm_enabled")

@@ -1930,10 +1614,9 @@ class _ComputeClusterState:
  @pulumi.getter(name="dpmThreshold")
  def dpm_threshold(self) -> Optional[pulumi.Input[int]]:
  """
- A value between
-
-
- tolerate more of a surplus/deficit than a higher setting. Default: `3`.
+ A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+ affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
+ setting.
  """
  return pulumi.get(self, "dpm_threshold")

@@ -1945,8 +1628,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="drsAdvancedOptions")
  def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
  """
-
- options for DRS and DPM.
+ Advanced configuration options for DRS and DPM.
  """
  return pulumi.get(self, "drs_advanced_options")

@@ -1958,9 +1640,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="drsAutomationLevel")
  def drs_automation_level(self) -> Optional[pulumi.Input[str]]:
  """
- The default automation level for all
-
- `partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
+ The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+ fullyAutomated.
  """
  return pulumi.get(self, "drs_automation_level")

@@ -1972,11 +1653,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="drsEnablePredictiveDrs")
  def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[bool]]:
  """
- When
- from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
- recommendations. <sup>\\*</sup>
-
- [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
+ When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
  """
  return pulumi.get(self, "drs_enable_predictive_drs")

@@ -1988,8 +1665,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="drsEnableVmOverrides")
  def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[bool]]:
  """
-
- set for virtual machines in the cluster. Default: `true`.
+ When true, allows individual VM overrides within this cluster to be set.
  """
  return pulumi.get(self, "drs_enable_vm_overrides")

@@ -2001,7 +1677,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="drsEnabled")
  def drs_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
- Enable DRS for this cluster.
+ Enable DRS for this cluster.
  """
  return pulumi.get(self, "drs_enabled")

@@ -2013,10 +1689,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="drsMigrationThreshold")
  def drs_migration_threshold(self) -> Optional[pulumi.Input[int]]:
  """
- A value between
-
- tolerate more imbalance while a higher setting will tolerate less. Default:
- `3`.
+ A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+ more imbalance while a higher setting will tolerate less.
  """
  return pulumi.get(self, "drs_migration_threshold")
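The DRS and DPM arguments above are typically set together, since DPM requires DRS. A minimal sketch using the documented value ranges, with a placeholder datacenter name:

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")

cluster = vsphere.ComputeCluster(
    "drs-cluster",
    name="cluster-drs",
    datacenter_id=dc.id,
    drs_enabled=True,
    # manual, partiallyAutomated, or fullyAutomated.
    drs_automation_level="fullyAutomated",
    # 1 tolerates more imbalance, 5 tolerates less.
    drs_migration_threshold=3,
    drs_enable_vm_overrides=True,
    # DPM requires DRS to be enabled.
    dpm_enabled=True,
    dpm_automation_level="automated",
    dpm_threshold=3)
```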
@@ -2028,9 +1702,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="drsScaleDescendantsShares")
  def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[str]]:
  """
- Enable scalable shares for all
- resource pools in the cluster. Can be one of `disabled` or
- `scaleCpuAndMemoryShares`. Default: `disabled`.
+ Enable scalable shares for all descendants of this cluster.
  """
  return pulumi.get(self, "drs_scale_descendants_shares")

@@ -2059,18 +1731,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="forceEvacuateOnDestroy")
  def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[bool]]:
  """
-
-
- as if they were removed by taking their entry out of `host_system_ids` (see
- below. This is an advanced
- option and should only be used for testing. Default: `false`.
-
- > **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
- there are many pitfalls to its use when working with complex cluster
- configurations. Depending on the virtual machines currently on the cluster, and
- your DRS and HA settings, the full host evacuation may fail. Instead,
- incrementally remove hosts from your configuration by adjusting the contents of
- the `host_system_ids` attribute.
+ Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
+ for testing and is not recommended in normal use.
  """
  return pulumi.get(self, "force_evacuate_on_destroy")
@@ -2082,11 +1744,9 @@ class _ComputeClusterState:
  @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
  def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
  """
-
-
-
- block access to the host, and DRS will ignore the host when making
- recommendations.
+ When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
+ failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
+ will ignore the host when making recommendations.
  """
  return pulumi.get(self, "ha_admission_control_failover_host_system_ids")

@@ -2098,11 +1758,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
  def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[int]]:
  """
- The maximum number
-
- whether to permit virtual machine operations. The maximum is one less than
- the number of hosts in the cluster. Default: `1`.
- <sup>\\*</sup>
+ The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
+ machine operations. The maximum is one less than the number of hosts in the cluster.
  """
  return pulumi.get(self, "ha_admission_control_host_failure_tolerance")

@@ -2114,10 +1771,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
  def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[int]]:
  """
- The percentage of
-
- a failover. A value of 0 produces warnings only, whereas a value of 100
- disables the setting. Default: `100` (disabled).
+ The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
+ warnings only, whereas a value of 100 disables the setting.
  """
  return pulumi.get(self, "ha_admission_control_performance_tolerance")

@@ -2129,9 +1784,10 @@ class _ComputeClusterState:
  @pulumi.getter(name="haAdmissionControlPolicy")
  def ha_admission_control_policy(self) -> Optional[pulumi.Input[str]]:
  """
- The type of admission control
-
-
+ The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+ permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
+ slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
+ issues.
  """
  return pulumi.get(self, "ha_admission_control_policy")

@@ -2143,12 +1799,9 @@ class _ComputeClusterState:
  @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
  def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[bool]]:
  """
-
- average number of host resources represented by the
-
- setting from the total amount of resources in the cluster. Disable to supply
- user-defined values. Default: `true`.
- <sup>\\*</sup>
+ When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+ subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
+ from the total amount of resources in the cluster. Disable to supply user-defined values.
  """
  return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")

@@ -2160,9 +1813,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
  def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[int]]:
  """
-
-
- failover. Default: `100`.
+ When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+ the cluster to reserve for failover.
  """
  return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")

@@ -2174,9 +1826,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
  def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[int]]:
  """
-
-
- failover. Default: `100`.
+ When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+ the cluster to reserve for failover.
  """
  return pulumi.get(self, "ha_admission_control_resource_percentage_memory")

@@ -2188,8 +1839,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
  def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[int]]:
  """
-
- user-defined CPU slot size, in MHz. Default: `32`.
+ When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
  """
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")

@@ -2201,8 +1851,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
  def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[int]]:
  """
-
- user-defined memory slot size, in MB. Default: `100`.
+ When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
  """
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")

@@ -2214,10 +1863,9 @@ class _ComputeClusterState:
  @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
  def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[bool]]:
  """
-
-
-
- average based on all powered-on virtual machines currently in the cluster.
+ When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+ to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
+ currently in the cluster.
  """
  return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")

@@ -2229,8 +1877,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="haAdvancedOptions")
  def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
  """
-
- options for vSphere HA.
+ Advanced configuration options for vSphere HA.
  """
  return pulumi.get(self, "ha_advanced_options")
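The admission control arguments above are only meaningful for the policy they belong to (resourcePercentage, slotPolicy, or failoverHosts). A minimal sketch of the resourcePercentage policy with user-defined percentages; the datacenter name and chosen percentages are illustrative:

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")

cluster = vsphere.ComputeCluster(
    "admission-cluster",
    name="cluster-admission",
    datacenter_id=dc.id,
    ha_enabled=True,
    # resourcePercentage, slotPolicy, failoverHosts, or disabled.
    ha_admission_control_policy="resourcePercentage",
    # Supply explicit percentages instead of auto-computing them.
    ha_admission_control_resource_percentage_auto_compute=False,
    ha_admission_control_resource_percentage_cpu=50,
    ha_admission_control_resource_percentage_memory=50,
    ha_admission_control_host_failure_tolerance=1)
```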
@@ -2242,10 +1889,8 @@ class _ComputeClusterState:
|
|
|
2242
1889
|
@pulumi.getter(name="haDatastoreApdRecoveryAction")
|
|
2243
1890
|
def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[str]]:
|
|
2244
1891
|
"""
|
|
2245
|
-
|
|
2246
|
-
|
|
2247
|
-
middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
|
|
2248
|
-
<sup>\\*</sup>
|
|
1892
|
+
When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
|
|
1893
|
+
affected datastore clears in the middle of an APD event. Can be one of none or reset.
|
|
2249
1894
|
"""
|
|
2250
1895
|
return pulumi.get(self, "ha_datastore_apd_recovery_action")
|
|
2251
1896
|
|
|
@@ -2257,11 +1902,9 @@ class _ComputeClusterState:
|
|
|
2257
1902
|
@pulumi.getter(name="haDatastoreApdResponse")
|
|
2258
1903
|
def ha_datastore_apd_response(self) -> Optional[pulumi.Input[str]]:
|
|
2259
1904
|
"""
|
|
2260
|
-
|
|
2261
|
-
|
|
2262
|
-
|
|
2263
|
-
`restartConservative`, or `restartAggressive`. Default: `disabled`.
|
|
2264
|
-
<sup>\\*</sup>
|
|
1905
|
+
When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
|
|
1906
|
+
detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
|
|
1907
|
+
restartAggressive.
|
|
2265
1908
|
"""
|
|
2266
1909
|
return pulumi.get(self, "ha_datastore_apd_response")
|
|
2267
1910
|
|
|
@@ -2273,10 +1916,8 @@ class _ComputeClusterState:
|
|
|
2273
1916
|
@pulumi.getter(name="haDatastoreApdResponseDelay")
|
|
2274
1917
|
def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[int]]:
|
|
2275
1918
|
"""
|
|
2276
|
-
|
|
2277
|
-
|
|
2278
|
-
`ha_datastore_apd_response`. Default: `180`
|
|
2279
|
-
seconds (3 minutes). <sup>\\*</sup>
|
|
1919
|
+
When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
|
|
1920
|
+
the response action defined in ha_datastore_apd_response.
|
|
2280
1921
|
"""
|
|
2281
1922
|
return pulumi.get(self, "ha_datastore_apd_response_delay")
|
|
2282
1923
|
|
|
@@ -2288,11 +1929,8 @@ class _ComputeClusterState:
|
|
|
2288
1929
|
@pulumi.getter(name="haDatastorePdlResponse")
|
|
2289
1930
|
def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[str]]:
|
|
2290
1931
|
"""
|
|
2291
|
-
|
|
2292
|
-
|
|
2293
|
-
relevant datastore. Can be one of `disabled`, `warning`, or
|
|
2294
|
-
`restartAggressive`. Default: `disabled`.
|
|
2295
|
-
<sup>\\*</sup>
|
|
1932
|
+
When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
|
|
1933
|
+
detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
|
|
2296
1934
|
"""
|
|
2297
1935
|
return pulumi.get(self, "ha_datastore_pdl_response")
|
|
2298
1936
|
|
|
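The APD and PDL settings above only take effect when VM Component Protection is enabled. A short sketch of how the related arguments fit together; names and values are placeholders, not taken from this diff.

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # placeholder name

cluster = vsphere.ComputeCluster(
    "cluster-vmcp",
    datacenter_id=datacenter.id,
    ha_enabled=True,
    # VM Component Protection must be enabled for the APD/PDL responses below to apply.
    ha_vm_component_protection="enabled",
    ha_datastore_apd_response="restartConservative",
    ha_datastore_apd_response_delay=180,
    ha_datastore_apd_recovery_action="reset",
    ha_datastore_pdl_response="restartAggressive",
)
```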
@@ -2304,8 +1942,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="haEnabled")
     def ha_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-        Enable vSphere HA for this cluster.
-        `false`.
+        Enable vSphere HA for this cluster.
         """
         return pulumi.get(self, "ha_enabled")

@@ -2317,10 +1954,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haHeartbeatDatastoreIds")
     def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
         """
-        The list of managed object IDs for
-
-        when `ha_heartbeat_datastore_policy` is set
-        to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
+        The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+        ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
         """
         return pulumi.get(self, "ha_heartbeat_datastore_ids")

@@ -2332,10 +1967,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haHeartbeatDatastorePolicy")
     def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[str]]:
         """
-        The selection policy for HA
-
-        `allFeasibleDsWithUserPreference`. Default:
-        `allFeasibleDsWithUserPreference`.
+        The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+        allFeasibleDsWithUserPreference.
         """
         return pulumi.get(self, "ha_heartbeat_datastore_policy")

@@ -2347,10 +1980,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haHostIsolationResponse")
     def ha_host_isolation_response(self) -> Optional[pulumi.Input[str]]:
         """
-        The action to take on virtual
-
-        the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
-        `none`.
+        The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+        Can be one of none, powerOff, or shutdown.
         """
         return pulumi.get(self, "ha_host_isolation_response")

@@ -2362,9 +1993,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="haHostMonitoring")
     def ha_host_monitoring(self) -> Optional[pulumi.Input[str]]:
         """
-        Global setting that controls whether
-        vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
-        or `disabled`. Default: `enabled`.
+        Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
         """
         return pulumi.get(self, "ha_host_monitoring")

@@ -2376,10 +2005,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haVmComponentProtection")
     def ha_vm_component_protection(self) -> Optional[pulumi.Input[str]]:
         """
-        Controls vSphere VM component
-
-        `disabled`. Default: `enabled`.
-        <sup>\\*</sup>
+        Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
+        failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
         """
         return pulumi.get(self, "ha_vm_component_protection")

@@ -2391,13 +2018,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haVmDependencyRestartCondition")
     def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[str]]:
         """
-        The condition used to
-
-        are online, allowing HA to move on to restarting virtual machines on the next
-        priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
-        `appHbStatusGreen`. The default is `none`, which means that a virtual machine
-        is considered ready immediately after a host is found to start it on.
-        <sup>\\*</sup>
+        The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
+        on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
         """
         return pulumi.get(self, "ha_vm_dependency_restart_condition")

@@ -2409,9 +2031,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haVmFailureInterval")
     def ha_vm_failure_interval(self) -> Optional[pulumi.Input[int]]:
         """
-
-
-        the virtual machine is marked as failed. Default: `30` seconds.
+        If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
+        failed. The value is in seconds.
         """
         return pulumi.get(self, "ha_vm_failure_interval")

@@ -2423,11 +2044,9 @@ class _ComputeClusterState:
     @pulumi.getter(name="haVmMaximumFailureWindow")
     def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[int]]:
         """
-        The
-
-
-        configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
-        unlimited reset time is allotted. Default: `-1` (no window).
+        The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
+        attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
+        time is allotted.
         """
         return pulumi.get(self, "ha_vm_maximum_failure_window")

@@ -2439,8 +2058,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="haVmMaximumResets")
     def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[int]]:
         """
-        The maximum number of resets that HA will
-        perform to a virtual machine when responding to a failure event. Default: `3`
+        The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
         """
         return pulumi.get(self, "ha_vm_maximum_resets")

@@ -2452,9 +2070,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="haVmMinimumUptime")
     def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[int]]:
         """
-        The time, in seconds, that HA waits after
-        powering on a virtual machine before monitoring for heartbeats. Default:
-        `120` seconds (2 minutes).
+        The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
         """
         return pulumi.get(self, "ha_vm_minimum_uptime")

@@ -2466,9 +2082,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haVmMonitoring")
     def ha_vm_monitoring(self) -> Optional[pulumi.Input[str]]:
         """
-        The type of virtual machine monitoring to use
-
-        `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
+        The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
+        vmMonitoringOnly, or vmAndAppMonitoring.
        """
         return pulumi.get(self, "ha_vm_monitoring")

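The VM-monitoring knobs above (failure interval, minimum uptime, maximum resets, reset window) work together; a hedged sketch with placeholder values showing how they are commonly combined.

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # placeholder name

cluster = vsphere.ComputeCluster(
    "cluster-vm-monitoring",
    datacenter_id=datacenter.id,
    ha_enabled=True,
    # Heartbeat-based VM monitoring using the intervals documented above.
    ha_vm_monitoring="vmMonitoringOnly",
    ha_vm_failure_interval=30,        # seconds without a heartbeat before a VM is marked failed
    ha_vm_minimum_uptime=120,         # seconds to wait after power-on before monitoring heartbeats
    ha_vm_maximum_resets=3,
    ha_vm_maximum_failure_window=-1,  # -1 means no reset window (unlimited)
)
```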
@@ -2480,9 +2095,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="haVmRestartAdditionalDelay")
     def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[int]]:
         """
-        Additional delay
-        after ready condition is met. A VM is considered ready at this point.
-        Default: `0` seconds (no delay). <sup>\\*</sup>
+        Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
         """
         return pulumi.get(self, "ha_vm_restart_additional_delay")

@@ -2494,9 +2107,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haVmRestartPriority")
     def ha_vm_restart_priority(self) -> Optional[pulumi.Input[str]]:
         """
-        The default restart priority
-
-        of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
+        The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
+        high, or highest.
         """
         return pulumi.get(self, "ha_vm_restart_priority")

@@ -2508,10 +2120,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haVmRestartTimeout")
     def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[int]]:
         """
-        The maximum time, in seconds,
-
-        before proceeding with the next priority. Default: `600` seconds (10 minutes).
-        <sup>\\*</sup>
+        The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
+        proceeding with the next priority.
         """
         return pulumi.get(self, "ha_vm_restart_timeout")

@@ -2523,8 +2133,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="hostClusterExitTimeout")
     def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[int]]:
         """
-        The timeout
-        mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
+        The timeout for each host maintenance mode operation when removing hosts from a cluster.
         """
         return pulumi.get(self, "host_cluster_exit_timeout")

@@ -2532,13 +2141,23 @@ class _ComputeClusterState:
     def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[int]]):
         pulumi.set(self, "host_cluster_exit_timeout", value)

+    @property
+    @pulumi.getter(name="hostImage")
+    def host_image(self) -> Optional[pulumi.Input['ComputeClusterHostImageArgs']]:
+        """
+        Details about the host image which should be applied to the cluster.
+        """
+        return pulumi.get(self, "host_image")
+
+    @host_image.setter
+    def host_image(self, value: Optional[pulumi.Input['ComputeClusterHostImageArgs']]):
+        pulumi.set(self, "host_image", value)
+
     @property
     @pulumi.getter(name="hostManaged")
     def host_managed(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        membership will be managed through the `host` resource rather than the
-        `compute_cluster` resource. Conflicts with: `host_system_ids`.
+        Must be set if cluster enrollment is managed from host resource.
         """
         return pulumi.get(self, "host_managed")

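The `host_image` property above is new in this release (note the new `get_host_base_images` data source and `offline_software_depot` resource in the file list). A hedged sketch of how it might be wired up; the `ComputeClusterHostImageArgs` field name `esx_version` and the build string are assumptions for illustration, not confirmed by this diff.

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # placeholder name

cluster = vsphere.ComputeCluster(
    "cluster-vlcm",
    datacenter_id=datacenter.id,
    # Declaratively manage the ESXi image applied to hosts in the cluster.
    # `esx_version` is an assumed field of ComputeClusterHostImageArgs.
    host_image=vsphere.ComputeClusterHostImageArgs(
        esx_version="8.0.2-0.0.22380479",  # placeholder build string
    ),
)
```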
@@ -2550,8 +2169,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="hostSystemIds")
     def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
         """
-        The managed object IDs of
-        the hosts to put in the cluster. Conflicts with: `host_managed`.
+        The managed object IDs of the hosts to put in the cluster.
         """
         return pulumi.get(self, "host_system_ids")

@@ -2575,10 +2193,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="proactiveHaAutomationLevel")
     def proactive_ha_automation_level(self) -> Optional[pulumi.Input[str]]:
         """
-
-        quarantine, maintenance mode, or virtual machine migration recommendations
-        made by proactive HA are to be handled. Can be one of `Automated` or
-        `Manual`. Default: `Manual`. <sup>\\*</sup>
+        The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
         """
         return pulumi.get(self, "proactive_ha_automation_level")

@@ -2590,8 +2205,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="proactiveHaEnabled")
     def proactive_ha_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-        Enables
-        <sup>\\*</sup>
+        Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
         """
         return pulumi.get(self, "proactive_ha_enabled")

@@ -2603,12 +2217,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="proactiveHaModerateRemediation")
     def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[str]]:
         """
-        The configured remediation
-
-        `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
-        `proactive_ha_severe_remediation` is set
-        to `QuarantineMode`. Default: `QuarantineMode`.
-        <sup>\\*</sup>
+        The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+        this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
         """
         return pulumi.get(self, "proactive_ha_moderate_remediation")

@@ -2620,9 +2230,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="proactiveHaProviderIds")
     def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
         """
-        The list of IDs for health update
-        providers configured for this cluster.
-        <sup>\\*</sup>
+        The list of IDs for health update providers configured for this cluster.
         """
         return pulumi.get(self, "proactive_ha_provider_ids")

@@ -2634,12 +2242,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="proactiveHaSevereRemediation")
     def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[str]]:
         """
-        The configured remediation for
-
-        Note that this cannot be set to `QuarantineMode` when
-        `proactive_ha_moderate_remediation` is
-        set to `MaintenanceMode`. Default: `QuarantineMode`.
-        <sup>\\*</sup>
+        The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+        cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
         """
         return pulumi.get(self, "proactive_ha_severe_remediation")

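Since the moderate and severe remediation settings constrain each other, a short sketch of a consistent proactive HA configuration; the provider ID is a placeholder and DRS is enabled because remediation is carried out by DRS.

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")   # placeholder name
provider_id = "example-health-update-provider-id"   # placeholder provider ID

cluster = vsphere.ComputeCluster(
    "cluster-proactive-ha",
    datacenter_id=datacenter.id,
    drs_enabled=True,  # proactive HA recommendations are handled through DRS
    proactive_ha_enabled=True,
    proactive_ha_automation_level="Automated",
    # Keep both remediation levels at QuarantineMode so they do not conflict.
    proactive_ha_moderate_remediation="QuarantineMode",
    proactive_ha_severe_remediation="QuarantineMode",
    proactive_ha_provider_ids=[provider_id],
)
```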
@@ -2679,8 +2283,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanCompressionEnabled")
     def vsan_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        cluster.
+        Whether the vSAN compression service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_compression_enabled")

@@ -2692,9 +2295,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanDedupEnabled")
     def vsan_dedup_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
-        compression must also be enabled.
+        Whether the vSAN deduplication service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_dedup_enabled")

@@ -2706,8 +2307,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanDiskGroups")
     def vsan_disk_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]:
         """
-
-        group in the cluster.
+        A list of disk UUIDs to add to the vSAN cluster.
         """
         return pulumi.get(self, "vsan_disk_groups")

@@ -2719,10 +2319,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanDitEncryptionEnabled")
     def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
-        vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
-        at the same time.
+        Whether the vSAN data-in-transit encryption is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_dit_encryption_enabled")

@@ -2734,9 +2331,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanDitRekeyInterval")
     def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[int]]:
         """
-
-        minutes for data-in-transit encryption. The valid rekey interval is 30 to
-        10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
+        When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
         """
         return pulumi.get(self, "vsan_dit_rekey_interval")

@@ -2748,7 +2343,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanEnabled")
     def vsan_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
+        Whether the vSAN service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_enabled")

@@ -2760,7 +2355,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanEsaEnabled")
     def vsan_esa_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
+        Whether the vSAN ESA service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_esa_enabled")

@@ -2772,7 +2367,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanFaultDomains")
     def vsan_fault_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]:
         """
-
+        The configuration for vSAN fault domains.
         """
         return pulumi.get(self, "vsan_fault_domains")

@@ -2784,8 +2379,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
     def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        diagnostic mode for vSAN performance service on the cluster.
+        Whether the vSAN network diagnostic mode is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")

@@ -2797,8 +2391,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanPerformanceEnabled")
     def vsan_performance_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        the cluster. Default: `true`.
+        Whether the vSAN performance service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_performance_enabled")

@@ -2810,10 +2403,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanRemoteDatastoreIds")
     def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
         """
-        The
-        mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
-        `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
-        data-in-transit encryption feature at the same time.
+        The managed object IDs of the vSAN datastore to be mounted on the cluster.
         """
         return pulumi.get(self, "vsan_remote_datastore_ids")

@@ -2825,7 +2415,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanStretchedCluster")
     def vsan_stretched_cluster(self) -> Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]:
         """
-
+        The configuration for stretched cluster.
         """
         return pulumi.get(self, "vsan_stretched_cluster")

@@ -2837,8 +2427,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanUnmapEnabled")
     def vsan_unmap_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
+        Whether the vSAN unmap service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_unmap_enabled")

@@ -2850,8 +2439,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="vsanVerboseModeEnabled")
     def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        performance service on the cluster.
+        Whether the vSAN verbose mode is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_verbose_mode_enabled")

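The old docstrings removed above note that vSAN deduplication requires compression; a minimal sketch of a consistent vSAN service configuration with placeholder names.

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # placeholder name

cluster = vsphere.ComputeCluster(
    "cluster-vsan",
    datacenter_id=datacenter.id,
    vsan_enabled=True,
    # Deduplication cannot be enabled without compression.
    vsan_compression_enabled=True,
    vsan_dedup_enabled=True,
    vsan_performance_enabled=True,
)
```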
@@ -2910,6 +2498,7 @@ class ComputeCluster(pulumi.CustomResource):
                  ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
                  ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
                  host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
+                 host_image: Optional[pulumi.Input[pulumi.InputType['ComputeClusterHostImageArgs']]] = None,
                  host_managed: Optional[pulumi.Input[bool]] = None,
                  host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                  name: Optional[pulumi.Input[str]] = None,
@@ -2945,225 +2534,115 @@ class ComputeCluster(pulumi.CustomResource):
         and require vCenter Server.
         :param pulumi.Input[str] datacenter_id: The managed object ID of
         the datacenter to create the cluster in. Forces a new resource if changed.
-        :param pulumi.Input[str] dpm_automation_level: The automation level for host power
-
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[
-
-
-
-        :param pulumi.Input[
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[bool] drs_enable_predictive_drs: When `true`, enables DRS to use data
-        from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
-        recommendations. <sup>\\*</sup>
-
-        [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
-        :param pulumi.Input[bool] drs_enable_vm_overrides: Allow individual DRS overrides to be
-        set for virtual machines in the cluster. Default: `true`.
-        :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster. Default: `false`.
-        :param pulumi.Input[int] drs_migration_threshold: A value between `1` and `5` indicating
-        the threshold of imbalance tolerated between hosts. A lower setting will
-        tolerate more imbalance while a higher setting will tolerate less. Default:
-        `3`.
-        :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all
-        resource pools in the cluster. Can be one of `disabled` or
-        `scaleCpuAndMemoryShares`. Default: `disabled`.
+        :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
+        :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+        machines in the cluster. Requires that DRS be enabled.
+        :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+        affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
+        setting.
+        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
+        :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+        fullyAutomated.
+        :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
+        :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
+        :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
+        :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+        more imbalance while a higher setting will tolerate less.
+        :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
         :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
         This is a path relative to the datacenter you are deploying the cluster to.
         Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
         The provider will place a cluster named `compute-cluster-test` in a
         host folder located at `/dc1/host/foo/bar`, with the final inventory path
         being `/dc1/host/foo/bar/datastore-cluster-test`.
-        :param pulumi.Input[bool] force_evacuate_on_destroy:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        :param pulumi.Input[
-
-
-
-        :param pulumi.Input[str]
-
-
-
-
-
-
-
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[
-
-
-
-
-        :param pulumi.Input[
-
-        :param pulumi.Input[
-
-
-
-        :param pulumi.Input[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        :param pulumi.Input[str]
-        virtual machines when the cluster has detected a permanent device loss to a
-        relevant datastore. Can be one of `disabled`, `warning`, or
-        `restartAggressive`. Default: `disabled`.
-        <sup>\\*</sup>
-        :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster. Default:
-        `false`.
-        :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for
-        preferred datastores to use for HA heartbeating. This setting is only useful
-        when `ha_heartbeat_datastore_policy` is set
-        to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
-        :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA
-        heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
-        `allFeasibleDsWithUserPreference`. Default:
-        `allFeasibleDsWithUserPreference`.
-        :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual
-        machines when a host has detected that it has been isolated from the rest of
-        the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
-        `none`.
-        :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether
-        vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
-        or `disabled`. Default: `enabled`.
-        :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component
-        protection for virtual machines in this cluster. Can be one of `enabled` or
-        `disabled`. Default: `enabled`.
-        <sup>\\*</sup>
-        :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to
-        determine whether or not virtual machines in a certain restart priority class
-        are online, allowing HA to move on to restarting virtual machines on the next
-        priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
-        `appHbStatusGreen`. The default is `none`, which means that a virtual machine
-        is considered ready immediately after a host is found to start it on.
-        <sup>\\*</sup>
-        :param pulumi.Input[int] ha_vm_failure_interval: The time interval, in seconds, a heartbeat
-        from a virtual machine is not received within this configured interval,
-        the virtual machine is marked as failed. Default: `30` seconds.
-        :param pulumi.Input[int] ha_vm_maximum_failure_window: The time, in seconds, for the reset window in
-        which `ha_vm_maximum_resets` can operate. When this
-        window expires, no more resets are attempted regardless of the setting
-        configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
-        unlimited reset time is allotted. Default: `-1` (no window).
-        :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will
-        perform to a virtual machine when responding to a failure event. Default: `3`
-        :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after
-        powering on a virtual machine before monitoring for heartbeats. Default:
-        `120` seconds (2 minutes).
-        :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use
-        when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
-        `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
-        :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay, in seconds,
-        after ready condition is met. A VM is considered ready at this point.
-        Default: `0` seconds (no delay). <sup>\\*</sup>
-        :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority
-        for affected virtual machines when vSphere detects a host failure. Can be one
-        of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
-        :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds,
-        that vSphere HA will wait for virtual machines in one priority to be ready
-        before proceeding with the next priority. Default: `600` seconds (10 minutes).
-        <sup>\\*</sup>
-        :param pulumi.Input[int] host_cluster_exit_timeout: The timeout, in seconds, for each host maintenance
-        mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
-        :param pulumi.Input[bool] host_managed: Can be set to `true` if compute cluster
-        membership will be managed through the `host` resource rather than the
-        `compute_cluster` resource. Conflicts with: `host_system_ids`.
-        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of
-        the hosts to put in the cluster. Conflicts with: `host_managed`.
+        :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
+        for testing and is not recommended in normal use.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
+        failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
+        will ignore the host when making recommendations.
+        :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
+        machine operations. The maximum is one less than the number of hosts in the cluster.
+        :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
+        warnings only, whereas a value of 100 disables the setting.
+        :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+        permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
+        slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
+        issues.
+        :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+        subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
+        from the total amount of resources in the cluster. Disable to supply user-defined values.
+        :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+        the cluster to reserve for failover.
+        :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+        the cluster to reserve for failover.
+        :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
+        :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
+        :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+        to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
+        currently in the cluster.
+        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
+        :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
+        affected datastore clears in the middle of an APD event. Can be one of none or reset.
+        :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+        detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
+        restartAggressive.
+        :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
+        the response action defined in ha_datastore_apd_response.
+        :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+        detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
+        :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+        ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
+        :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+        allFeasibleDsWithUserPreference.
+        :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+        Can be one of none, powerOff, or shutdown.
+        :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
+        :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
+        failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
+        :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
+        on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
+        :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
+        failed. The value is in seconds.
+        :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
+        attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
+        time is allotted.
+        :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
+        :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
+        :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
+        vmMonitoringOnly, or vmAndAppMonitoring.
+        :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
+        :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
+        high, or highest.
+        :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
+        proceeding with the next priority.
+        :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
+        :param pulumi.Input[pulumi.InputType['ComputeClusterHostImageArgs']] host_image: Details about the host image which should be applied to the cluster.
+        :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
         :param pulumi.Input[str] name: The name of the cluster.
-        :param pulumi.Input[str] proactive_ha_automation_level:
-
-
-
-        :param pulumi.Input[
-
-
-        for moderately degraded hosts. Can be one of `MaintenanceMode` or
-        `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
-        `proactive_ha_severe_remediation` is set
-        to `QuarantineMode`. Default: `QuarantineMode`.
-        <sup>\\*</sup>
-        :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update
-        providers configured for this cluster.
-        <sup>\\*</sup>
-        :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for
-        severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
-        Note that this cannot be set to `QuarantineMode` when
-        `proactive_ha_moderate_remediation` is
-        set to `MaintenanceMode`. Default: `QuarantineMode`.
-        <sup>\\*</sup>
+        :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
+        :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
+        :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+        this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
+        :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+        cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
         :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
-        :param pulumi.Input[bool] vsan_compression_enabled:
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[
-
-        :param pulumi.Input[
-
-
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[bool] vsan_enabled: Enables vSAN on the cluster.
-        :param pulumi.Input[bool] vsan_esa_enabled: Enables vSAN ESA on the cluster.
-        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanFaultDomainArgs']]]] vsan_fault_domains: Configurations of vSAN fault domains.
-        :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Enables network
-        diagnostic mode for vSAN performance service on the cluster.
-        :param pulumi.Input[bool] vsan_performance_enabled: Enables vSAN performance service on
-        the cluster. Default: `true`.
-        :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The remote vSAN datastore IDs to be
-        mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
-        `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
-        data-in-transit encryption feature at the same time.
-        :param pulumi.Input[pulumi.InputType['ComputeClusterVsanStretchedClusterArgs']] vsan_stretched_cluster: Configurations of vSAN stretched cluster.
-        :param pulumi.Input[bool] vsan_unmap_enabled: Enables vSAN unmap on the cluster.
-        You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
-        :param pulumi.Input[bool] vsan_verbose_mode_enabled: Enables verbose mode for vSAN
-        performance service on the cluster.
+        :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
+        :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
+        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanDiskGroupArgs']]]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
+        :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
+        :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
+        :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
+        :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
+        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanFaultDomainArgs']]]] vsan_fault_domains: The configuration for vSAN fault domains.
+        :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
+        :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
+        :param pulumi.Input[pulumi.InputType['ComputeClusterVsanStretchedClusterArgs']] vsan_stretched_cluster: The configuration for stretched cluster.
+        :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
+        :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
         """
         ...
     @overload
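To tie the constructor parameters documented above together, a minimal sketch of creating a cluster with DRS and HA admission control; the datacenter and host names are placeholders, not values from this package.

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # placeholder names throughout
hosts = [
    vsphere.get_host(name=name, datacenter_id=datacenter.id)
    for name in ["esxi-01.example.com", "esxi-02.example.com", "esxi-03.example.com"]
]

cluster = vsphere.ComputeCluster(
    "compute-cluster-test",
    datacenter_id=datacenter.id,
    host_system_ids=[host.id for host in hosts],
    drs_enabled=True,
    drs_automation_level="fullyAutomated",
    ha_enabled=True,
    # Reserve resources for one host failure using the resource-percentage policy.
    ha_admission_control_policy="resourcePercentage",
    ha_admission_control_host_failure_tolerance=1,
)
```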
@@ -3233,6 +2712,7 @@ class ComputeCluster(pulumi.CustomResource):
                  ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
                  ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
                  host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
+                 host_image: Optional[pulumi.Input[pulumi.InputType['ComputeClusterHostImageArgs']]] = None,
                  host_managed: Optional[pulumi.Input[bool]] = None,
                  host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                  name: Optional[pulumi.Input[str]] = None,
@@ -3312,6 +2792,7 @@ class ComputeCluster(pulumi.CustomResource):
             __props__.__dict__["ha_vm_restart_priority"] = ha_vm_restart_priority
             __props__.__dict__["ha_vm_restart_timeout"] = ha_vm_restart_timeout
             __props__.__dict__["host_cluster_exit_timeout"] = host_cluster_exit_timeout
+            __props__.__dict__["host_image"] = host_image
             __props__.__dict__["host_managed"] = host_managed
             __props__.__dict__["host_system_ids"] = host_system_ids
             __props__.__dict__["name"] = name
@@ -3391,6 +2872,7 @@ class ComputeCluster(pulumi.CustomResource):
            ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
            ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
            host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
+           host_image: Optional[pulumi.Input[pulumi.InputType['ComputeClusterHostImageArgs']]] = None,
            host_managed: Optional[pulumi.Input[bool]] = None,
            host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            name: Optional[pulumi.Input[str]] = None,
@@ -3429,230 +2911,120 @@ class ComputeCluster(pulumi.CustomResource):
         and require vCenter Server.
         :param pulumi.Input[str] datacenter_id: The managed object ID of
         the datacenter to create the cluster in. Forces a new resource if changed.
-        :param pulumi.Input[str] dpm_automation_level: The automation level for host power
-
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[
-
-
-
-        :param pulumi.Input[
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[bool] drs_enable_predictive_drs: When `true`, enables DRS to use data
-        from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
-        recommendations. <sup>\\*</sup>
-
-        [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
-        :param pulumi.Input[bool] drs_enable_vm_overrides: Allow individual DRS overrides to be
-        set for virtual machines in the cluster. Default: `true`.
-        :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster. Default: `false`.
-        :param pulumi.Input[int] drs_migration_threshold: A value between `1` and `5` indicating
-        the threshold of imbalance tolerated between hosts. A lower setting will
-        tolerate more imbalance while a higher setting will tolerate less. Default:
-        `3`.
-        :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all
-        resource pools in the cluster. Can be one of `disabled` or
-        `scaleCpuAndMemoryShares`. Default: `disabled`.
+        :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
+        :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+        machines in the cluster. Requires that DRS be enabled.
+        :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+        affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
+        setting.
+        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
+        :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+        fullyAutomated.
+        :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
+        :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
+        :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
+        :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+        more imbalance while a higher setting will tolerate less.
+        :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
         :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
         This is a path relative to the datacenter you are deploying the cluster to.
         Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
         The provider will place a cluster named `compute-cluster-test` in a
         host folder located at `/dc1/host/foo/bar`, with the final inventory path
        being `/dc1/host/foo/bar/datastore-cluster-test`.
-        :param pulumi.Input[bool] force_evacuate_on_destroy:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        :param pulumi.Input[
-
-
-
-        :param pulumi.Input[str]
-
-
-
-
-
-
-
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[
-
-
-
-
-        :param pulumi.Input[
-
-        :param pulumi.Input[
-
-
-
-        :param pulumi.Input[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        :param pulumi.Input[str]
-        virtual machines when the cluster has detected a permanent device loss to a
-        relevant datastore. Can be one of `disabled`, `warning`, or
-        `restartAggressive`. Default: `disabled`.
-        <sup>\\*</sup>
-        :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster. Default:
-        `false`.
-        :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for
-        preferred datastores to use for HA heartbeating. This setting is only useful
-        when `ha_heartbeat_datastore_policy` is set
-        to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
-        :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA
-        heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
-        `allFeasibleDsWithUserPreference`. Default:
-        `allFeasibleDsWithUserPreference`.
-        :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual
-        machines when a host has detected that it has been isolated from the rest of
-        the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
-        `none`.
-        :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether
-        vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
-        or `disabled`. Default: `enabled`.
-        :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component
-        protection for virtual machines in this cluster. Can be one of `enabled` or
-        `disabled`. Default: `enabled`.
-        <sup>\\*</sup>
-        :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to
-        determine whether or not virtual machines in a certain restart priority class
-        are online, allowing HA to move on to restarting virtual machines on the next
-        priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
-        `appHbStatusGreen`. The default is `none`, which means that a virtual machine
-        is considered ready immediately after a host is found to start it on.
-        <sup>\\*</sup>
-        :param pulumi.Input[int] ha_vm_failure_interval: The time interval, in seconds, a heartbeat
-        from a virtual machine is not received within this configured interval,
-        the virtual machine is marked as failed. Default: `30` seconds.
-        :param pulumi.Input[int] ha_vm_maximum_failure_window: The time, in seconds, for the reset window in
-        which `ha_vm_maximum_resets` can operate. When this
-        window expires, no more resets are attempted regardless of the setting
-        configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
-        unlimited reset time is allotted. Default: `-1` (no window).
-        :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will
-        perform to a virtual machine when responding to a failure event. Default: `3`
-        :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after
-        powering on a virtual machine before monitoring for heartbeats. Default:
-        `120` seconds (2 minutes).
-        :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use
-        when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
-        `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
-        :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay, in seconds,
-        after ready condition is met. A VM is considered ready at this point.
-        Default: `0` seconds (no delay). <sup>\\*</sup>
-        :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority
|
|
3585
|
-
for affected virtual machines when vSphere detects a host failure. Can be one
|
|
3586
|
-
of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
|
|
3587
|
-
:param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds,
|
|
3588
|
-
that vSphere HA will wait for virtual machines in one priority to be ready
|
|
3589
|
-
before proceeding with the next priority. Default: `600` seconds (10 minutes).
|
|
3590
|
-
<sup>\\*</sup>
|
|
3591
|
-
:param pulumi.Input[int] host_cluster_exit_timeout: The timeout, in seconds, for each host maintenance
|
|
3592
|
-
mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
|
|
3593
|
-
:param pulumi.Input[bool] host_managed: Can be set to `true` if compute cluster
|
|
3594
|
-
membership will be managed through the `host` resource rather than the
|
|
3595
|
-
`compute_cluster` resource. Conflicts with: `host_system_ids`.
|
|
3596
|
-
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of
|
|
3597
|
-
the hosts to put in the cluster. Conflicts with: `host_managed`.
|
|
2935
|
+
:param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
|
|
2936
|
+
for testing and is not recommended in normal use.
|
|
2937
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
|
|
2938
|
+
failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
|
|
2939
|
+
will ignore the host when making recommendations.
|
|
2940
|
+
:param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
|
|
2941
|
+
machine operations. The maximum is one less than the number of hosts in the cluster.
|
|
2942
|
+
:param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
|
|
2943
|
+
warnings only, whereas a value of 100 disables the setting.
|
|
2944
|
+
:param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
|
|
2945
|
+
permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
|
|
2946
|
+
slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
|
|
2947
|
+
issues.
|
|
2948
|
+
:param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
|
|
2949
|
+
subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
|
|
2950
|
+
from the total amount of resources in the cluster. Disable to supply user-defined values.
|
|
2951
|
+
:param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
|
|
2952
|
+
the cluster to reserve for failover.
|
|
2953
|
+
:param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
|
|
2954
|
+
the cluster to reserve for failover.
|
|
2955
|
+
:param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
|
|
2956
|
+
:param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
|
|
2957
|
+
:param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
|
|
2958
|
+
to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
|
|
2959
|
+
currently in the cluster.
|
|
2960
|
+
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
|
|
2961
|
+
:param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
|
|
2962
|
+
affected datastore clears in the middle of an APD event. Can be one of none or reset.
|
|
2963
|
+
:param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
|
|
2964
|
+
detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
|
|
2965
|
+
restartAggressive.
|
|
2966
|
+
:param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
|
|
2967
|
+
the response action defined in ha_datastore_apd_response.
|
|
2968
|
+
:param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
|
|
2969
|
+
detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
|
|
2970
|
+
:param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
|
|
2971
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
|
|
2972
|
+
ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
|
|
2973
|
+
:param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
|
|
2974
|
+
allFeasibleDsWithUserPreference.
|
|
2975
|
+
:param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
|
|
2976
|
+
Can be one of none, powerOff, or shutdown.
|
|
2977
|
+
:param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
|
|
2978
|
+
:param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
|
|
2979
|
+
failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
|
|
2980
|
+
:param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
|
|
2981
|
+
on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
|
|
2982
|
+
:param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
|
|
2983
|
+
failed. The value is in seconds.
|
|
2984
|
+
:param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
|
|
2985
|
+
attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
|
|
2986
|
+
time is allotted.
|
|
2987
|
+
:param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
|
|
2988
|
+
:param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
|
|
2989
|
+
:param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
|
|
2990
|
+
vmMonitoringOnly, or vmAndAppMonitoring.
|
|
2991
|
+
:param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
|
|
2992
|
+
:param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
|
|
2993
|
+
high, or highest.
|
|
2994
|
+
:param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
|
|
2995
|
+
proceeding with the next priority.
|
|
2996
|
+
:param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
|
|
2997
|
+
:param pulumi.Input[pulumi.InputType['ComputeClusterHostImageArgs']] host_image: Details about the host image which should be applied to the cluster.
|
|
2998
|
+
:param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
|
|
2999
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
|
|
3598
3000
|
:param pulumi.Input[str] name: The name of the cluster.
|
|
3599
|
-
:param pulumi.Input[str] proactive_ha_automation_level:
|
|
3600
|
-
|
|
3601
|
-
|
|
3602
|
-
|
|
3603
|
-
:param pulumi.Input[
|
|
3604
|
-
|
|
3605
|
-
|
|
3606
|
-
for moderately degraded hosts. Can be one of `MaintenanceMode` or
|
|
3607
|
-
`QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
|
|
3608
|
-
`proactive_ha_severe_remediation` is set
|
|
3609
|
-
to `QuarantineMode`. Default: `QuarantineMode`.
|
|
3610
|
-
<sup>\\*</sup>
|
|
3611
|
-
:param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update
|
|
3612
|
-
providers configured for this cluster.
|
|
3613
|
-
<sup>\\*</sup>
|
|
3614
|
-
:param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for
|
|
3615
|
-
severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
|
|
3616
|
-
Note that this cannot be set to `QuarantineMode` when
|
|
3617
|
-
`proactive_ha_moderate_remediation` is
|
|
3618
|
-
set to `MaintenanceMode`. Default: `QuarantineMode`.
|
|
3619
|
-
<sup>\\*</sup>
|
|
3001
|
+
:param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
|
|
3002
|
+
:param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
|
|
3003
|
+
:param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
|
|
3004
|
+
this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
|
|
3005
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
|
|
3006
|
+
:param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
|
|
3007
|
+
cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
|
|
3620
3008
|
:param pulumi.Input[str] resource_pool_id: The managed object ID of the primary
|
|
3621
3009
|
resource pool for this cluster. This can be passed directly to the
|
|
3622
3010
|
`resource_pool_id`
|
|
3623
3011
|
attribute of the
|
|
3624
3012
|
`VirtualMachine` resource.
|
|
3625
3013
|
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
|
|
3626
|
-
:param pulumi.Input[bool] vsan_compression_enabled:
|
|
3627
|
-
|
|
3628
|
-
:param pulumi.Input[
|
|
3629
|
-
|
|
3630
|
-
|
|
3631
|
-
:param pulumi.Input[
|
|
3632
|
-
|
|
3633
|
-
:param pulumi.Input[
|
|
3634
|
-
|
|
3635
|
-
|
|
3636
|
-
|
|
3637
|
-
:param pulumi.Input[
|
|
3638
|
-
|
|
3639
|
-
|
|
3640
|
-
:param pulumi.Input[bool] vsan_enabled: Enables vSAN on the cluster.
|
|
3641
|
-
:param pulumi.Input[bool] vsan_esa_enabled: Enables vSAN ESA on the cluster.
|
|
3642
|
-
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanFaultDomainArgs']]]] vsan_fault_domains: Configurations of vSAN fault domains.
|
|
3643
|
-
:param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Enables network
|
|
3644
|
-
diagnostic mode for vSAN performance service on the cluster.
|
|
3645
|
-
:param pulumi.Input[bool] vsan_performance_enabled: Enables vSAN performance service on
|
|
3646
|
-
the cluster. Default: `true`.
|
|
3647
|
-
:param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The remote vSAN datastore IDs to be
|
|
3648
|
-
mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
|
|
3649
|
-
`vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
|
|
3650
|
-
data-in-transit encryption feature at the same time.
|
|
3651
|
-
:param pulumi.Input[pulumi.InputType['ComputeClusterVsanStretchedClusterArgs']] vsan_stretched_cluster: Configurations of vSAN stretched cluster.
|
|
3652
|
-
:param pulumi.Input[bool] vsan_unmap_enabled: Enables vSAN unmap on the cluster.
|
|
3653
|
-
You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
|
|
3654
|
-
:param pulumi.Input[bool] vsan_verbose_mode_enabled: Enables verbose mode for vSAN
|
|
3655
|
-
performance service on the cluster.
|
|
3014
|
+
:param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
|
|
3015
|
+
:param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
|
|
3016
|
+
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanDiskGroupArgs']]]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
|
|
3017
|
+
:param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
|
|
3018
|
+
:param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
|
|
3019
|
+
:param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
|
|
3020
|
+
:param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
|
|
3021
|
+
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanFaultDomainArgs']]]] vsan_fault_domains: The configuration for vSAN fault domains.
|
|
3022
|
+
:param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
|
|
3023
|
+
:param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
|
|
3024
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
|
|
3025
|
+
:param pulumi.Input[pulumi.InputType['ComputeClusterVsanStretchedClusterArgs']] vsan_stretched_cluster: The configuration for stretched cluster.
|
|
3026
|
+
:param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
|
|
3027
|
+
:param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
|
|
3656
3028
|
"""
|
|
3657
3029
|
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
|
|
3658
3030
|
|
|
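The parameters documented in the hunk above correspond one-to-one with keyword arguments of the ComputeCluster resource in this package. A minimal, illustrative sketch only; the datacenter ID, host IDs, and resource name below are placeholders, not values taken from this diff:

    import pulumi_vsphere as vsphere

    # Hypothetical IDs; in a real program these would come from data-source
    # lookups such as vsphere.get_datacenter() and vsphere.get_host().
    datacenter_id = "datacenter-21"
    host_ids = ["host-1234", "host-5678"]

    cluster = vsphere.ComputeCluster(
        "compute-cluster-test",
        name="compute-cluster-test",
        datacenter_id=datacenter_id,            # assumed required parent datacenter ID (not part of this hunk)
        host_system_ids=host_ids,               # hosts to place in the cluster
        folder="foo/bar",                       # relative host-folder path, as described above
        drs_enabled=True,
        drs_automation_level="fullyAutomated",  # manual | partiallyAutomated | fullyAutomated
        ha_enabled=True,
    )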
@@ -3703,6 +3075,7 @@ class ComputeCluster(pulumi.CustomResource):
__props__.__dict__["ha_vm_restart_priority"] = ha_vm_restart_priority
__props__.__dict__["ha_vm_restart_timeout"] = ha_vm_restart_timeout
__props__.__dict__["host_cluster_exit_timeout"] = host_cluster_exit_timeout
+ __props__.__dict__["host_image"] = host_image
__props__.__dict__["host_managed"] = host_managed
__props__.__dict__["host_system_ids"] = host_system_ids
__props__.__dict__["name"] = name
@@ -3754,9 +3127,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="dpmAutomationLevel")
def dpm_automation_level(self) -> pulumi.Output[Optional[str]]:
"""
- The automation level for host power
- operations in this cluster. Can be one of `manual` or `automated`. Default:
- `manual`.
+ The automation level for host power operations in this cluster. Can be one of manual or automated.
"""
return pulumi.get(self, "dpm_automation_level")

@@ -3764,9 +3135,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="dpmEnabled")
def dpm_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
- Enable DPM support for DRS
-
- Default: `false`.
+ Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+ machines in the cluster. Requires that DRS be enabled.
"""
return pulumi.get(self, "dpm_enabled")

@@ -3774,10 +3144,9 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="dpmThreshold")
def dpm_threshold(self) -> pulumi.Output[Optional[int]]:
"""
- A value between
-
-
- tolerate more of a surplus/deficit than a higher setting. Default: `3`.
+ A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+ affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
+ setting.
"""
return pulumi.get(self, "dpm_threshold")

@@ -3785,8 +3154,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="drsAdvancedOptions")
def drs_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
-
- options for DRS and DPM.
+ Advanced configuration options for DRS and DPM.
"""
return pulumi.get(self, "drs_advanced_options")

@@ -3794,9 +3162,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="drsAutomationLevel")
def drs_automation_level(self) -> pulumi.Output[Optional[str]]:
"""
- The default automation level for all
-
- `partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
+ The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+ fullyAutomated.
"""
return pulumi.get(self, "drs_automation_level")

@@ -3804,11 +3171,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="drsEnablePredictiveDrs")
def drs_enable_predictive_drs(self) -> pulumi.Output[Optional[bool]]:
"""
- When
- from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
- recommendations. <sup>\\*</sup>
-
- [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
+ When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
"""
return pulumi.get(self, "drs_enable_predictive_drs")

@@ -3816,8 +3179,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="drsEnableVmOverrides")
def drs_enable_vm_overrides(self) -> pulumi.Output[Optional[bool]]:
"""
-
- set for virtual machines in the cluster. Default: `true`.
+ When true, allows individual VM overrides within this cluster to be set.
"""
return pulumi.get(self, "drs_enable_vm_overrides")

@@ -3825,7 +3187,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="drsEnabled")
def drs_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
- Enable DRS for this cluster.
+ Enable DRS for this cluster.
"""
return pulumi.get(self, "drs_enabled")

@@ -3833,10 +3195,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="drsMigrationThreshold")
def drs_migration_threshold(self) -> pulumi.Output[Optional[int]]:
"""
- A value between
-
- tolerate more imbalance while a higher setting will tolerate less. Default:
- `3`.
+ A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+ more imbalance while a higher setting will tolerate less.
"""
return pulumi.get(self, "drs_migration_threshold")

@@ -3844,9 +3204,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="drsScaleDescendantsShares")
def drs_scale_descendants_shares(self) -> pulumi.Output[Optional[str]]:
"""
- Enable scalable shares for all
- resource pools in the cluster. Can be one of `disabled` or
- `scaleCpuAndMemoryShares`. Default: `disabled`.
+ Enable scalable shares for all descendants of this cluster.
"""
return pulumi.get(self, "drs_scale_descendants_shares")

@@ -3867,18 +3225,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="forceEvacuateOnDestroy")
def force_evacuate_on_destroy(self) -> pulumi.Output[Optional[bool]]:
"""
-
-
- as if they were removed by taking their entry out of `host_system_ids` (see
- below. This is an advanced
- option and should only be used for testing. Default: `false`.
-
- > **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
- there are many pitfalls to its use when working with complex cluster
- configurations. Depending on the virtual machines currently on the cluster, and
- your DRS and HA settings, the full host evacuation may fail. Instead,
- incrementally remove hosts from your configuration by adjusting the contents of
- the `host_system_ids` attribute.
+ Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
+ for testing and is not recommended in normal use.
"""
return pulumi.get(self, "force_evacuate_on_destroy")

@@ -3886,11 +3234,9 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
def ha_admission_control_failover_host_system_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
-
-
-
- block access to the host, and DRS will ignore the host when making
- recommendations.
+ When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
+ failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
+ will ignore the host when making recommendations.
"""
return pulumi.get(self, "ha_admission_control_failover_host_system_ids")

@@ -3898,11 +3244,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haAdmissionControlHostFailureTolerance")
def ha_admission_control_host_failure_tolerance(self) -> pulumi.Output[Optional[int]]:
"""
- The maximum number
-
- whether to permit virtual machine operations. The maximum is one less than
- the number of hosts in the cluster. Default: `1`.
- <sup>\\*</sup>
+ The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
+ machine operations. The maximum is one less than the number of hosts in the cluster.
"""
return pulumi.get(self, "ha_admission_control_host_failure_tolerance")

@@ -3910,10 +3253,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haAdmissionControlPerformanceTolerance")
def ha_admission_control_performance_tolerance(self) -> pulumi.Output[Optional[int]]:
"""
- The percentage of
-
- a failover. A value of 0 produces warnings only, whereas a value of 100
- disables the setting. Default: `100` (disabled).
+ The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
+ warnings only, whereas a value of 100 disables the setting.
"""
return pulumi.get(self, "ha_admission_control_performance_tolerance")

@@ -3921,9 +3262,10 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haAdmissionControlPolicy")
def ha_admission_control_policy(self) -> pulumi.Output[Optional[str]]:
"""
- The type of admission control
-
-
+ The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+ permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
+ slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
+ issues.
"""
return pulumi.get(self, "ha_admission_control_policy")

@@ -3931,12 +3273,9 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
def ha_admission_control_resource_percentage_auto_compute(self) -> pulumi.Output[Optional[bool]]:
"""
-
- average number of host resources represented by the
-
- setting from the total amount of resources in the cluster. Disable to supply
- user-defined values. Default: `true`.
- <sup>\\*</sup>
+ When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+ subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
+ from the total amount of resources in the cluster. Disable to supply user-defined values.
"""
return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")

@@ -3944,9 +3283,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
def ha_admission_control_resource_percentage_cpu(self) -> pulumi.Output[Optional[int]]:
"""
-
-
- failover. Default: `100`.
+ When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+ the cluster to reserve for failover.
"""
return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")

@@ -3954,9 +3292,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
def ha_admission_control_resource_percentage_memory(self) -> pulumi.Output[Optional[int]]:
"""
-
-
- failover. Default: `100`.
+ When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+ the cluster to reserve for failover.
"""
return pulumi.get(self, "ha_admission_control_resource_percentage_memory")

@@ -3964,8 +3301,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
def ha_admission_control_slot_policy_explicit_cpu(self) -> pulumi.Output[Optional[int]]:
"""
-
- user-defined CPU slot size, in MHz. Default: `32`.
+ When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
"""
return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")

@@ -3973,8 +3309,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
def ha_admission_control_slot_policy_explicit_memory(self) -> pulumi.Output[Optional[int]]:
"""
-
- user-defined memory slot size, in MB. Default: `100`.
+ When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
"""
return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")

@@ -3982,10 +3317,9 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
def ha_admission_control_slot_policy_use_explicit_size(self) -> pulumi.Output[Optional[bool]]:
"""
-
-
-
- average based on all powered-on virtual machines currently in the cluster.
+ When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+ to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
+ currently in the cluster.
"""
return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")

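To make the slot-policy admission control properties above concrete, a hedged sketch (all values are illustrative; the datacenter and host IDs are placeholders):

    import pulumi_vsphere as vsphere

    cluster = vsphere.ComputeCluster(
        "ha-slot-policy-cluster",
        datacenter_id="datacenter-21",                            # placeholder
        host_system_ids=["host-1234", "host-5678"],
        ha_enabled=True,
        ha_admission_control_policy="slotPolicy",                 # resourcePercentage | slotPolicy | failoverHosts | disabled
        ha_admission_control_slot_policy_use_explicit_size=True,  # supply explicit slot sizes instead of the computed average
        ha_admission_control_slot_policy_explicit_cpu=512,        # MHz
        ha_admission_control_slot_policy_explicit_memory=1024,    # MB
    )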
@@ -3993,8 +3327,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haAdvancedOptions")
def ha_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
-
- options for vSphere HA.
+ Advanced configuration options for vSphere HA.
"""
return pulumi.get(self, "ha_advanced_options")

@@ -4002,10 +3335,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haDatastoreApdRecoveryAction")
def ha_datastore_apd_recovery_action(self) -> pulumi.Output[Optional[str]]:
"""
-
-
- middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
- <sup>\\*</sup>
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
+ affected datastore clears in the middle of an APD event. Can be one of none or reset.
"""
return pulumi.get(self, "ha_datastore_apd_recovery_action")

@@ -4013,11 +3344,9 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haDatastoreApdResponse")
def ha_datastore_apd_response(self) -> pulumi.Output[Optional[str]]:
"""
-
-
-
- `restartConservative`, or `restartAggressive`. Default: `disabled`.
- <sup>\\*</sup>
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
+ restartAggressive.
"""
return pulumi.get(self, "ha_datastore_apd_response")

@@ -4025,10 +3354,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haDatastoreApdResponseDelay")
def ha_datastore_apd_response_delay(self) -> pulumi.Output[Optional[int]]:
"""
-
-
- `ha_datastore_apd_response`. Default: `180`
- seconds (3 minutes). <sup>\\*</sup>
+ When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
+ the response action defined in ha_datastore_apd_response.
"""
return pulumi.get(self, "ha_datastore_apd_response_delay")

@@ -4036,11 +3363,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haDatastorePdlResponse")
def ha_datastore_pdl_response(self) -> pulumi.Output[Optional[str]]:
"""
-
-
- relevant datastore. Can be one of `disabled`, `warning`, or
- `restartAggressive`. Default: `disabled`.
- <sup>\\*</sup>
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
"""
return pulumi.get(self, "ha_datastore_pdl_response")

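The VM component protection properties above combine as in the following sketch (values are chosen only to illustrate the documented options; the IDs are placeholders):

    import pulumi_vsphere as vsphere

    cluster = vsphere.ComputeCluster(
        "vmcp-cluster",
        datacenter_id="datacenter-21",                    # placeholder
        host_system_ids=["host-1234", "host-5678"],
        ha_enabled=True,
        ha_vm_component_protection="enabled",             # enabled | disabled
        ha_datastore_apd_response="restartConservative",  # disabled | warning | restartConservative | restartAggressive
        ha_datastore_apd_response_delay=180,              # seconds to wait after the APD timeout before responding
        ha_datastore_apd_recovery_action="reset",         # none | reset
        ha_datastore_pdl_response="restartAggressive",    # disabled | warning | restartAggressive
    )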
@@ -4048,8 +3372,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haEnabled")
def ha_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
- Enable vSphere HA for this cluster.
- `false`.
+ Enable vSphere HA for this cluster.
"""
return pulumi.get(self, "ha_enabled")

@@ -4057,10 +3380,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haHeartbeatDatastoreIds")
def ha_heartbeat_datastore_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
- The list of managed object IDs for
-
- when `ha_heartbeat_datastore_policy` is set
- to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
+ The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+ ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
"""
return pulumi.get(self, "ha_heartbeat_datastore_ids")

@@ -4068,10 +3389,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haHeartbeatDatastorePolicy")
def ha_heartbeat_datastore_policy(self) -> pulumi.Output[Optional[str]]:
"""
- The selection policy for HA
-
- `allFeasibleDsWithUserPreference`. Default:
- `allFeasibleDsWithUserPreference`.
+ The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+ allFeasibleDsWithUserPreference.
"""
return pulumi.get(self, "ha_heartbeat_datastore_policy")

@@ -4079,10 +3398,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haHostIsolationResponse")
def ha_host_isolation_response(self) -> pulumi.Output[Optional[str]]:
"""
- The action to take on virtual
-
- the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
- `none`.
+ The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+ Can be one of none, powerOff, or shutdown.
"""
return pulumi.get(self, "ha_host_isolation_response")

@@ -4090,9 +3407,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haHostMonitoring")
def ha_host_monitoring(self) -> pulumi.Output[Optional[str]]:
"""
- Global setting that controls whether
- vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
- or `disabled`. Default: `enabled`.
+ Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
"""
return pulumi.get(self, "ha_host_monitoring")

@@ -4100,10 +3415,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haVmComponentProtection")
def ha_vm_component_protection(self) -> pulumi.Output[Optional[str]]:
"""
- Controls vSphere VM component
-
- `disabled`. Default: `enabled`.
- <sup>\\*</sup>
+ Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
+ failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
"""
return pulumi.get(self, "ha_vm_component_protection")

@@ -4111,13 +3424,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haVmDependencyRestartCondition")
def ha_vm_dependency_restart_condition(self) -> pulumi.Output[Optional[str]]:
"""
- The condition used to
-
- are online, allowing HA to move on to restarting virtual machines on the next
- priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
- `appHbStatusGreen`. The default is `none`, which means that a virtual machine
- is considered ready immediately after a host is found to start it on.
- <sup>\\*</sup>
+ The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
+ on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
"""
return pulumi.get(self, "ha_vm_dependency_restart_condition")

@@ -4125,9 +3433,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haVmFailureInterval")
def ha_vm_failure_interval(self) -> pulumi.Output[Optional[int]]:
"""
-
-
- the virtual machine is marked as failed. Default: `30` seconds.
+ If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
+ failed. The value is in seconds.
"""
return pulumi.get(self, "ha_vm_failure_interval")

@@ -4135,11 +3442,9 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haVmMaximumFailureWindow")
def ha_vm_maximum_failure_window(self) -> pulumi.Output[Optional[int]]:
"""
- The
-
-
- configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
- unlimited reset time is allotted. Default: `-1` (no window).
+ The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
+ attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
+ time is allotted.
"""
return pulumi.get(self, "ha_vm_maximum_failure_window")

@@ -4147,8 +3452,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haVmMaximumResets")
def ha_vm_maximum_resets(self) -> pulumi.Output[Optional[int]]:
"""
- The maximum number of resets that HA will
- perform to a virtual machine when responding to a failure event. Default: `3`
+ The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
"""
return pulumi.get(self, "ha_vm_maximum_resets")

@@ -4156,9 +3460,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haVmMinimumUptime")
def ha_vm_minimum_uptime(self) -> pulumi.Output[Optional[int]]:
"""
- The time, in seconds, that HA waits after
- powering on a virtual machine before monitoring for heartbeats. Default:
- `120` seconds (2 minutes).
+ The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
"""
return pulumi.get(self, "ha_vm_minimum_uptime")

@@ -4166,9 +3468,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haVmMonitoring")
def ha_vm_monitoring(self) -> pulumi.Output[Optional[str]]:
"""
- The type of virtual machine monitoring to use
-
- `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
+ The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
+ vmMonitoringOnly, or vmAndAppMonitoring.
"""
return pulumi.get(self, "ha_vm_monitoring")

@@ -4176,9 +3477,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haVmRestartAdditionalDelay")
def ha_vm_restart_additional_delay(self) -> pulumi.Output[Optional[int]]:
"""
- Additional delay
- after ready condition is met. A VM is considered ready at this point.
- Default: `0` seconds (no delay). <sup>\\*</sup>
+ Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
"""
return pulumi.get(self, "ha_vm_restart_additional_delay")

@@ -4186,9 +3485,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haVmRestartPriority")
def ha_vm_restart_priority(self) -> pulumi.Output[Optional[str]]:
"""
- The default restart priority
-
- of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
+ The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
+ high, or highest.
"""
return pulumi.get(self, "ha_vm_restart_priority")

@@ -4196,10 +3494,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="haVmRestartTimeout")
def ha_vm_restart_timeout(self) -> pulumi.Output[Optional[int]]:
"""
- The maximum time, in seconds,
-
- before proceeding with the next priority. Default: `600` seconds (10 minutes).
- <sup>\\*</sup>
+ The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
+ proceeding with the next priority.
"""
return pulumi.get(self, "ha_vm_restart_timeout")

@@ -4207,18 +3503,23 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="hostClusterExitTimeout")
def host_cluster_exit_timeout(self) -> pulumi.Output[Optional[int]]:
"""
- The timeout
- mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
+ The timeout for each host maintenance mode operation when removing hosts from a cluster.
"""
return pulumi.get(self, "host_cluster_exit_timeout")

+ @property
+ @pulumi.getter(name="hostImage")
+ def host_image(self) -> pulumi.Output[Optional['outputs.ComputeClusterHostImage']]:
+ """
+ Details about the host image which should be applied to the cluster.
+ """
+ return pulumi.get(self, "host_image")
+
@property
@pulumi.getter(name="hostManaged")
def host_managed(self) -> pulumi.Output[Optional[bool]]:
"""
-
- membership will be managed through the `host` resource rather than the
- `compute_cluster` resource. Conflicts with: `host_system_ids`.
+ Must be set if cluster enrollment is managed from host resource.
"""
return pulumi.get(self, "host_managed")

@@ -4226,8 +3527,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="hostSystemIds")
def host_system_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
- The managed object IDs of
- the hosts to put in the cluster. Conflicts with: `host_managed`.
+ The managed object IDs of the hosts to put in the cluster.
"""
return pulumi.get(self, "host_system_ids")

@@ -4243,10 +3543,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="proactiveHaAutomationLevel")
def proactive_ha_automation_level(self) -> pulumi.Output[Optional[str]]:
"""
-
- quarantine, maintenance mode, or virtual machine migration recommendations
- made by proactive HA are to be handled. Can be one of `Automated` or
- `Manual`. Default: `Manual`. <sup>\\*</sup>
+ The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
"""
return pulumi.get(self, "proactive_ha_automation_level")

@@ -4254,8 +3551,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="proactiveHaEnabled")
def proactive_ha_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
- Enables
- <sup>\\*</sup>
+ Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
"""
return pulumi.get(self, "proactive_ha_enabled")

@@ -4263,12 +3559,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="proactiveHaModerateRemediation")
def proactive_ha_moderate_remediation(self) -> pulumi.Output[Optional[str]]:
"""
- The configured remediation
-
- `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
- `proactive_ha_severe_remediation` is set
- to `QuarantineMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
+ The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+ this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
"""
return pulumi.get(self, "proactive_ha_moderate_remediation")

@@ -4276,9 +3568,7 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="proactiveHaProviderIds")
def proactive_ha_provider_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
- The list of IDs for health update
- providers configured for this cluster.
- <sup>\\*</sup>
+ The list of IDs for health update providers configured for this cluster.
"""
return pulumi.get(self, "proactive_ha_provider_ids")

@@ -4286,12 +3576,8 @@ class ComputeCluster(pulumi.CustomResource):
@pulumi.getter(name="proactiveHaSevereRemediation")
def proactive_ha_severe_remediation(self) -> pulumi.Output[Optional[str]]:
"""
- The configured remediation for
-
- Note that this cannot be set to `QuarantineMode` when
- `proactive_ha_moderate_remediation` is
- set to `MaintenanceMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
+ The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+ cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
"""
return pulumi.get(self, "proactive_ha_severe_remediation")

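As a sketch of how the proactive HA properties above fit together (the provider ID, datacenter ID, and host IDs are placeholders; the remediation values are the documented options, chosen so the two settings do not conflict):

    import pulumi_vsphere as vsphere

    cluster = vsphere.ComputeCluster(
        "proactive-ha-cluster",
        datacenter_id="datacenter-21",                       # placeholder
        host_system_ids=["host-1234", "host-5678"],
        drs_enabled=True,                                    # remediation is carried out through DRS
        ha_enabled=True,
        proactive_ha_enabled=True,
        proactive_ha_automation_level="Automated",           # Automated | Manual
        proactive_ha_moderate_remediation="QuarantineMode",  # MaintenanceMode | QuarantineMode
        proactive_ha_severe_remediation="MaintenanceMode",
        proactive_ha_provider_ids=["health-provider-id"],    # placeholder health update provider ID
    )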
@@ -4319,8 +3605,7 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
4319
3605
|
@pulumi.getter(name="vsanCompressionEnabled")
|
|
4320
3606
|
def vsan_compression_enabled(self) -> pulumi.Output[Optional[bool]]:
|
|
4321
3607
|
"""
|
|
4322
|
-
|
|
4323
|
-
cluster.
|
|
3608
|
+
Whether the vSAN compression service is enabled for the cluster.
|
|
4324
3609
|
"""
|
|
4325
3610
|
return pulumi.get(self, "vsan_compression_enabled")
|
|
4326
3611
|
|
|
@@ -4328,9 +3613,7 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
4328
3613
|
@pulumi.getter(name="vsanDedupEnabled")
|
|
4329
3614
|
def vsan_dedup_enabled(self) -> pulumi.Output[Optional[bool]]:
|
|
4330
3615
|
"""
|
|
4331
|
-
|
|
4332
|
-
Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
|
|
4333
|
-
compression must also be enabled.
|
|
3616
|
+
Whether the vSAN deduplication service is enabled for the cluster.
|
|
4334
3617
|
"""
|
|
4335
3618
|
return pulumi.get(self, "vsan_dedup_enabled")
|
|
4336
3619
|
|
|
@@ -4338,8 +3621,7 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
4338
3621
|
@pulumi.getter(name="vsanDiskGroups")
|
|
4339
3622
|
def vsan_disk_groups(self) -> pulumi.Output[Sequence['outputs.ComputeClusterVsanDiskGroup']]:
|
|
4340
3623
|
"""
|
|
4341
|
-
|
|
4342
|
-
group in the cluster.
|
|
3624
|
+
A list of disk UUIDs to add to the vSAN cluster.
|
|
4343
3625
|
"""
|
|
4344
3626
|
return pulumi.get(self, "vsan_disk_groups")
|
|
4345
3627
|
|
|
@@ -4347,10 +3629,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanDitEncryptionEnabled")
     def vsan_dit_encryption_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-        Enables vSAN data-in-transit
-        encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
-        vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
-        at the same time.
+        Whether the vSAN data-in-transit encryption is enabled for the cluster.
        """
         return pulumi.get(self, "vsan_dit_encryption_enabled")

@@ -4358,9 +3637,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanDitRekeyInterval")
     def vsan_dit_rekey_interval(self) -> pulumi.Output[int]:
         """
-        Indicates the rekey interval in
-        minutes for data-in-transit encryption. The valid rekey interval is 30 to
-        10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
+        When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
         """
         return pulumi.get(self, "vsan_dit_rekey_interval")

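The old text also recorded the valid rekey range (30 to 10800 minutes, default 1440) and the conflict with `vsan_remote_datastore_ids`. A sketch that enables data-in-transit encryption with an explicit rekey interval and leaves HCI Mesh unset; the `datacenter_id` argument and value are placeholders.

```python
import pulumi_vsphere as vsphere

dit_cluster = vsphere.ComputeCluster(
    "vsan-dit-cluster",
    datacenter_id="datacenter-123",   # placeholder / assumed argument
    vsan_enabled=True,
    vsan_dit_encryption_enabled=True,
    vsan_dit_rekey_interval=1440,     # minutes; 30-10800 per the removed docstring
    # vsan_remote_datastore_ids is intentionally omitted: it conflicts with
    # data-in-transit encryption per the removed docstring.
)
```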
@@ -4368,7 +3645,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanEnabled")
     def vsan_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-
+        Whether the vSAN service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_enabled")

@@ -4376,7 +3653,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanEsaEnabled")
     def vsan_esa_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-
+        Whether the vSAN ESA service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_esa_enabled")

@@ -4384,7 +3661,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanFaultDomains")
     def vsan_fault_domains(self) -> pulumi.Output[Optional[Sequence['outputs.ComputeClusterVsanFaultDomain']]]:
         """
-
+        The configuration for vSAN fault domains.
         """
         return pulumi.get(self, "vsan_fault_domains")

@@ -4392,8 +3669,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
     def vsan_network_diagnostic_mode_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-        Enables network
-        diagnostic mode for vSAN performance service on the cluster.
+        Whether the vSAN network diagnostic mode is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")

@@ -4401,8 +3677,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanPerformanceEnabled")
     def vsan_performance_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-        Enables vSAN performance service on
-        the cluster. Default: `true`.
+        Whether the vSAN performance service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_performance_enabled")

@@ -4410,10 +3685,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanRemoteDatastoreIds")
     def vsan_remote_datastore_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
         """
-        The remote vSAN datastore IDs to be
-        mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
-        `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
-        data-in-transit encryption feature at the same time.
+        The managed object IDs of the vSAN datastore to be mounted on the cluster.
         """
         return pulumi.get(self, "vsan_remote_datastore_ids")

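Conversely, mounting remote vSAN datastores (HCI Mesh) rules out the data-in-transit encryption settings above. A sketch with placeholder datastore and datacenter IDs; both values and the `datacenter_id` argument name are assumptions.

```python
import pulumi_vsphere as vsphere

mesh_cluster = vsphere.ComputeCluster(
    "vsan-mesh-cluster",
    datacenter_id="datacenter-123",   # placeholder / assumed argument
    vsan_enabled=True,
    # Placeholder managed object IDs of the remote vSAN datastores.
    vsan_remote_datastore_ids=["datastore-111", "datastore-222"],
    # vsan_dit_encryption_enabled / vsan_dit_rekey_interval must stay unset here.
)
```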
@@ -4421,7 +3693,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanStretchedCluster")
     def vsan_stretched_cluster(self) -> pulumi.Output[Optional['outputs.ComputeClusterVsanStretchedCluster']]:
         """
-
+        The configuration for stretched cluster.
         """
         return pulumi.get(self, "vsan_stretched_cluster")

@@ -4429,8 +3701,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanUnmapEnabled")
     def vsan_unmap_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-        Enables vSAN unmap on the cluster.
-        You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
+        Whether the vSAN unmap service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_unmap_enabled")

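The removed sentence makes explicit that unmap must be switched on by hand when vSAN ESA is enabled; the two flags travel together in practice. A minimal sketch, with `datacenter_id` and its value assumed for illustration.

```python
import pulumi_vsphere as vsphere

esa_cluster = vsphere.ComputeCluster(
    "vsan-esa-cluster",
    datacenter_id="datacenter-123",   # placeholder / assumed argument
    vsan_enabled=True,
    vsan_esa_enabled=True,
    vsan_unmap_enabled=True,          # not implied by ESA; set explicitly
)
```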
@@ -4438,8 +3709,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanVerboseModeEnabled")
     def vsan_verbose_mode_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-        Enables verbose mode for vSAN
-        performance service on the cluster.
+        Whether the vSAN verbose mode is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_verbose_mode_enabled")

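Verbose mode and network diagnostic mode both qualify the vSAN performance service, which the old docstring noted defaults to enabled. A sketch that sets all three flags explicitly; the `datacenter_id` argument and value are placeholders.

```python
import pulumi_vsphere as vsphere

perf_cluster = vsphere.ComputeCluster(
    "vsan-perf-cluster",
    datacenter_id="datacenter-123",            # placeholder / assumed argument
    vsan_enabled=True,
    vsan_performance_enabled=True,             # defaulted to true per the removed docstring
    vsan_verbose_mode_enabled=True,
    vsan_network_diagnostic_mode_enabled=True,
)
```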