pulumi-vsphere 4.11.0a1713561492-py3-none-any.whl → 4.11.0a1713905355-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pulumi-vsphere might be problematic.
- pulumi_vsphere/_inputs.py +96 -232
- pulumi_vsphere/compute_cluster.py +700 -1477
- pulumi_vsphere/compute_cluster_vm_affinity_rule.py +0 -8
- pulumi_vsphere/datacenter.py +0 -8
- pulumi_vsphere/datastore_cluster.py +154 -350
- pulumi_vsphere/distributed_port_group.py +70 -175
- pulumi_vsphere/distributed_virtual_switch.py +308 -798
- pulumi_vsphere/file.py +0 -8
- pulumi_vsphere/get_compute_cluster.py +0 -4
- pulumi_vsphere/get_compute_cluster_host_group.py +0 -4
- pulumi_vsphere/get_content_library.py +0 -4
- pulumi_vsphere/get_custom_attribute.py +0 -4
- pulumi_vsphere/get_datacenter.py +0 -4
- pulumi_vsphere/get_datastore.py +0 -4
- pulumi_vsphere/get_datastore_cluster.py +0 -4
- pulumi_vsphere/get_datastore_stats.py +0 -8
- pulumi_vsphere/get_distributed_virtual_switch.py +0 -4
- pulumi_vsphere/get_dynamic.py +0 -4
- pulumi_vsphere/get_folder.py +0 -4
- pulumi_vsphere/get_guest_os_customization.py +0 -4
- pulumi_vsphere/get_host.py +0 -4
- pulumi_vsphere/get_host_pci_device.py +4 -12
- pulumi_vsphere/get_host_thumbprint.py +0 -4
- pulumi_vsphere/get_host_vgpu_profile.py +0 -8
- pulumi_vsphere/get_license.py +0 -4
- pulumi_vsphere/get_network.py +0 -4
- pulumi_vsphere/get_policy.py +0 -4
- pulumi_vsphere/get_resource_pool.py +0 -8
- pulumi_vsphere/get_role.py +0 -4
- pulumi_vsphere/get_tag.py +0 -4
- pulumi_vsphere/get_tag_category.py +0 -4
- pulumi_vsphere/get_vapp_container.py +0 -4
- pulumi_vsphere/get_virtual_machine.py +0 -8
- pulumi_vsphere/get_vmfs_disks.py +0 -4
- pulumi_vsphere/guest_os_customization.py +0 -4
- pulumi_vsphere/ha_vm_override.py +189 -378
- pulumi_vsphere/host.py +0 -8
- pulumi_vsphere/host_port_group.py +0 -8
- pulumi_vsphere/host_virtual_switch.py +140 -287
- pulumi_vsphere/outputs.py +96 -232
- pulumi_vsphere/resource_pool.py +0 -12
- pulumi_vsphere/virtual_machine.py +599 -739
- pulumi_vsphere/virtual_machine_snapshot.py +0 -4
- pulumi_vsphere/vm_storage_policy.py +0 -12
- pulumi_vsphere/vnic.py +0 -8
- {pulumi_vsphere-4.11.0a1713561492.dist-info → pulumi_vsphere-4.11.0a1713905355.dist-info}/METADATA +1 -1
- pulumi_vsphere-4.11.0a1713905355.dist-info/RECORD +82 -0
- pulumi_vsphere-4.11.0a1713561492.dist-info/RECORD +0 -82
- {pulumi_vsphere-4.11.0a1713561492.dist-info → pulumi_vsphere-4.11.0a1713905355.dist-info}/WHEEL +0 -0
- {pulumi_vsphere-4.11.0a1713561492.dist-info → pulumi_vsphere-4.11.0a1713905355.dist-info}/top_level.txt +0 -0
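
Every hunk shown below comes from `pulumi_vsphere/compute_cluster.py`, which defines the arguments of the `ComputeCluster` resource; the change replaces the long-form docstrings (Markdown formatting, default values, footnotes) with flatter plain-text descriptions. For orientation only, a minimal sketch of how a few of the documented arguments are passed — the datacenter and host IDs are placeholders, not values taken from either release:

```python
import pulumi_vsphere as vsphere

# Hypothetical IDs; in a real program these would come from vsphere.get_datacenter()
# and host lookups rather than literals.
cluster = vsphere.ComputeCluster(
    "compute-cluster-test",
    datacenter_id="datacenter-123",
    host_system_ids=["host-1234", "host-5678"],
    drs_enabled=True,
    drs_automation_level="fullyAutomated",   # manual | partiallyAutomated | fullyAutomated
    ha_enabled=True,
    ha_admission_control_policy="resourcePercentage",
)
```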
@@ -93,225 +93,114 @@ class ComputeClusterArgs:
 
 > **NOTE:** Custom attributes are unsupported on direct ESXi connections
 and require vCenter Server.
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[bool] drs_enable_predictive_drs: When `true`, enables DRS to use data
- from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
- recommendations. <sup>\\*</sup>
-
- [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
- :param pulumi.Input[bool] drs_enable_vm_overrides: Allow individual DRS overrides to be
- set for virtual machines in the cluster. Default: `true`.
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster. Default: `false`.
- :param pulumi.Input[int] drs_migration_threshold: A value between `1` and `5` indicating
- the threshold of imbalance tolerated between hosts. A lower setting will
- tolerate more imbalance while a higher setting will tolerate less. Default:
- `3`.
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all
- resource pools in the cluster. Can be one of `disabled` or
- `scaleCpuAndMemoryShares`. Default: `disabled`.
+ :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
+ :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+ machines in the cluster. Requires that DRS be enabled.
+ :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+ affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
+ setting.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
+ :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+ fullyAutomated.
+ :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
+ :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
+ :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
+ :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+ more imbalance while a higher setting will tolerate less.
+ :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
 :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
 This is a path relative to the datacenter you are deploying the cluster to.
 Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
 The provider will place a cluster named `compute-cluster-test` in a
 host folder located at `/dc1/host/foo/bar`, with the final inventory path
 being `/dc1/host/foo/bar/datastore-cluster-test`.
- :param pulumi.Input[bool] force_evacuate_on_destroy:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[str]
-
-
-
-
-
-
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
-
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- :param pulumi.Input[str] ha_datastore_pdl_response: Controls the action to take on
- virtual machines when the cluster has detected a permanent device loss to a
- relevant datastore. Can be one of `disabled`, `warning`, or
- `restartAggressive`. Default: `disabled`.
- <sup>\\*</sup>
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster. Default:
- `false`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for
- preferred datastores to use for HA heartbeating. This setting is only useful
- when `ha_heartbeat_datastore_policy` is set
- to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA
- heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
- `allFeasibleDsWithUserPreference`. Default:
- `allFeasibleDsWithUserPreference`.
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual
- machines when a host has detected that it has been isolated from the rest of
- the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
- `none`.
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether
- vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
- or `disabled`. Default: `enabled`.
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component
- protection for virtual machines in this cluster. Can be one of `enabled` or
- `disabled`. Default: `enabled`.
- <sup>\\*</sup>
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to
- determine whether or not virtual machines in a certain restart priority class
- are online, allowing HA to move on to restarting virtual machines on the next
- priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
- `appHbStatusGreen`. The default is `none`, which means that a virtual machine
- is considered ready immediately after a host is found to start it on.
- <sup>\\*</sup>
- :param pulumi.Input[int] ha_vm_failure_interval: The time interval, in seconds, a heartbeat
- from a virtual machine is not received within this configured interval,
- the virtual machine is marked as failed. Default: `30` seconds.
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The time, in seconds, for the reset window in
- which `ha_vm_maximum_resets` can operate. When this
- window expires, no more resets are attempted regardless of the setting
- configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
- unlimited reset time is allotted. Default: `-1` (no window).
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will
- perform to a virtual machine when responding to a failure event. Default: `3`
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after
- powering on a virtual machine before monitoring for heartbeats. Default:
- `120` seconds (2 minutes).
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use
- when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
- `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay, in seconds,
- after ready condition is met. A VM is considered ready at this point.
- Default: `0` seconds (no delay). <sup>\\*</sup>
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority
- for affected virtual machines when vSphere detects a host failure. Can be one
- of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds,
- that vSphere HA will wait for virtual machines in one priority to be ready
- before proceeding with the next priority. Default: `600` seconds (10 minutes).
- <sup>\\*</sup>
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout, in seconds, for each host maintenance
- mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
- :param pulumi.Input[bool] host_managed: Can be set to `true` if compute cluster
- membership will be managed through the `host` resource rather than the
- `compute_cluster` resource. Conflicts with: `host_system_ids`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of
- the hosts to put in the cluster. Conflicts with: `host_managed`.
+ :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
+ for testing and is not recommended in normal use.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
+ failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
+ will ignore the host when making recommendations.
+ :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
+ machine operations. The maximum is one less than the number of hosts in the cluster.
+ :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
+ warnings only, whereas a value of 100 disables the setting.
+ :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+ permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
+ slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
+ issues.
+ :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+ subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
+ from the total amount of resources in the cluster. Disable to supply user-defined values.
+ :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+ the cluster to reserve for failover.
+ :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+ the cluster to reserve for failover.
+ :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
+ :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
+ :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+ to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
+ currently in the cluster.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
+ :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
+ affected datastore clears in the middle of an APD event. Can be one of none or reset.
+ :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
+ restartAggressive.
+ :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
+ the response action defined in ha_datastore_apd_response.
+ :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
+ :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+ ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
+ :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+ allFeasibleDsWithUserPreference.
+ :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+ Can be one of none, powerOff, or shutdown.
+ :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
+ :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
+ failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
+ :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
+ on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
+ :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
+ failed. The value is in seconds.
+ :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
+ attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
+ time is allotted.
+ :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
+ :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
+ :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
+ vmMonitoringOnly, or vmAndAppMonitoring.
+ :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
+ :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
+ high, or highest.
+ :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
+ proceeding with the next priority.
+ :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
+ :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
 :param pulumi.Input[str] name: The name of the cluster.
- :param pulumi.Input[str] proactive_ha_automation_level:
-
-
-
- :param pulumi.Input[
-
-
- for moderately degraded hosts. Can be one of `MaintenanceMode` or
- `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
- `proactive_ha_severe_remediation` is set
- to `QuarantineMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
- :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update
- providers configured for this cluster.
- <sup>\\*</sup>
- :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for
- severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
- Note that this cannot be set to `QuarantineMode` when
- `proactive_ha_moderate_remediation` is
- set to `MaintenanceMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
+ :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
+ :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
+ :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+ this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
+ :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+ cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
 :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
- :param pulumi.Input[bool] vsan_compression_enabled:
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[bool] vsan_enabled: Enables vSAN on the cluster.
- :param pulumi.Input[bool] vsan_esa_enabled: Enables vSAN ESA on the cluster.
- :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: Configurations of vSAN fault domains.
- :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Enables network
- diagnostic mode for vSAN performance service on the cluster.
- :param pulumi.Input[bool] vsan_performance_enabled: Enables vSAN performance service on
- the cluster. Default: `true`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The remote vSAN datastore IDs to be
- mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
- `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
- data-in-transit encryption feature at the same time.
- :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: Configurations of vSAN stretched cluster.
- :param pulumi.Input[bool] vsan_unmap_enabled: Enables vSAN unmap on the cluster.
- You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
- :param pulumi.Input[bool] vsan_verbose_mode_enabled: Enables verbose mode for vSAN
- performance service on the cluster.
+ :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
+ :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
+ :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
+ :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: The configuration for vSAN fault domains.
+ :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
+ :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: The configuration for stretched cluster.
+ :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
 """
 pulumi.set(__self__, "datacenter_id", datacenter_id)
 if custom_attributes is not None:
@@ -482,9 +371,7 @@ class ComputeClusterArgs:
|
|
|
482
371
|
@pulumi.getter(name="dpmAutomationLevel")
|
|
483
372
|
def dpm_automation_level(self) -> Optional[pulumi.Input[str]]:
|
|
484
373
|
"""
|
|
485
|
-
The automation level for host power
|
|
486
|
-
operations in this cluster. Can be one of `manual` or `automated`. Default:
|
|
487
|
-
`manual`.
|
|
374
|
+
The automation level for host power operations in this cluster. Can be one of manual or automated.
|
|
488
375
|
"""
|
|
489
376
|
return pulumi.get(self, "dpm_automation_level")
|
|
490
377
|
|
|
@@ -496,9 +383,8 @@ class ComputeClusterArgs:
|
|
|
496
383
|
@pulumi.getter(name="dpmEnabled")
|
|
497
384
|
def dpm_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
498
385
|
"""
|
|
499
|
-
Enable DPM support for DRS
|
|
500
|
-
|
|
501
|
-
Default: `false`.
|
|
386
|
+
Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
|
|
387
|
+
machines in the cluster. Requires that DRS be enabled.
|
|
502
388
|
"""
|
|
503
389
|
return pulumi.get(self, "dpm_enabled")
|
|
504
390
|
|
|
@@ -510,10 +396,9 @@ class ComputeClusterArgs:
|
|
|
510
396
|
@pulumi.getter(name="dpmThreshold")
|
|
511
397
|
def dpm_threshold(self) -> Optional[pulumi.Input[int]]:
|
|
512
398
|
"""
|
|
513
|
-
A value between
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
tolerate more of a surplus/deficit than a higher setting. Default: `3`.
|
|
399
|
+
A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
|
|
400
|
+
affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
|
|
401
|
+
setting.
|
|
517
402
|
"""
|
|
518
403
|
return pulumi.get(self, "dpm_threshold")
|
|
519
404
|
|
|
@@ -525,8 +410,7 @@ class ComputeClusterArgs:
|
|
|
525
410
|
@pulumi.getter(name="drsAdvancedOptions")
|
|
526
411
|
def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
|
|
527
412
|
"""
|
|
528
|
-
|
|
529
|
-
options for DRS and DPM.
|
|
413
|
+
Advanced configuration options for DRS and DPM.
|
|
530
414
|
"""
|
|
531
415
|
return pulumi.get(self, "drs_advanced_options")
|
|
532
416
|
|
|
@@ -538,9 +422,8 @@ class ComputeClusterArgs:
|
|
|
538
422
|
@pulumi.getter(name="drsAutomationLevel")
|
|
539
423
|
def drs_automation_level(self) -> Optional[pulumi.Input[str]]:
|
|
540
424
|
"""
|
|
541
|
-
The default automation level for all
|
|
542
|
-
|
|
543
|
-
`partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
|
|
425
|
+
The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
|
|
426
|
+
fullyAutomated.
|
|
544
427
|
"""
|
|
545
428
|
return pulumi.get(self, "drs_automation_level")
|
|
546
429
|
|
|
@@ -552,11 +435,7 @@ class ComputeClusterArgs:
|
|
|
552
435
|
@pulumi.getter(name="drsEnablePredictiveDrs")
|
|
553
436
|
def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[bool]]:
|
|
554
437
|
"""
|
|
555
|
-
When
|
|
556
|
-
from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
|
|
557
|
-
recommendations. <sup>\\*</sup>
|
|
558
|
-
|
|
559
|
-
[ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
|
|
438
|
+
When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
|
|
560
439
|
"""
|
|
561
440
|
return pulumi.get(self, "drs_enable_predictive_drs")
|
|
562
441
|
|
|
@@ -568,8 +447,7 @@ class ComputeClusterArgs:
|
|
|
568
447
|
@pulumi.getter(name="drsEnableVmOverrides")
|
|
569
448
|
def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[bool]]:
|
|
570
449
|
"""
|
|
571
|
-
|
|
572
|
-
set for virtual machines in the cluster. Default: `true`.
|
|
450
|
+
When true, allows individual VM overrides within this cluster to be set.
|
|
573
451
|
"""
|
|
574
452
|
return pulumi.get(self, "drs_enable_vm_overrides")
|
|
575
453
|
|
|
@@ -581,7 +459,7 @@ class ComputeClusterArgs:
|
|
|
581
459
|
@pulumi.getter(name="drsEnabled")
|
|
582
460
|
def drs_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
583
461
|
"""
|
|
584
|
-
Enable DRS for this cluster.
|
|
462
|
+
Enable DRS for this cluster.
|
|
585
463
|
"""
|
|
586
464
|
return pulumi.get(self, "drs_enabled")
|
|
587
465
|
|
|
@@ -593,10 +471,8 @@ class ComputeClusterArgs:
|
|
|
593
471
|
@pulumi.getter(name="drsMigrationThreshold")
|
|
594
472
|
def drs_migration_threshold(self) -> Optional[pulumi.Input[int]]:
|
|
595
473
|
"""
|
|
596
|
-
A value between
|
|
597
|
-
|
|
598
|
-
tolerate more imbalance while a higher setting will tolerate less. Default:
|
|
599
|
-
`3`.
|
|
474
|
+
A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
|
|
475
|
+
more imbalance while a higher setting will tolerate less.
|
|
600
476
|
"""
|
|
601
477
|
return pulumi.get(self, "drs_migration_threshold")
|
|
602
478
|
|
|
@@ -608,9 +484,7 @@ class ComputeClusterArgs:
|
|
|
608
484
|
@pulumi.getter(name="drsScaleDescendantsShares")
|
|
609
485
|
def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[str]]:
|
|
610
486
|
"""
|
|
611
|
-
Enable scalable shares for all
|
|
612
|
-
resource pools in the cluster. Can be one of `disabled` or
|
|
613
|
-
`scaleCpuAndMemoryShares`. Default: `disabled`.
|
|
487
|
+
Enable scalable shares for all descendants of this cluster.
|
|
614
488
|
"""
|
|
615
489
|
return pulumi.get(self, "drs_scale_descendants_shares")
|
|
616
490
|
|
|
@@ -639,18 +513,8 @@ class ComputeClusterArgs:
|
|
|
639
513
|
@pulumi.getter(name="forceEvacuateOnDestroy")
|
|
640
514
|
def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[bool]]:
|
|
641
515
|
"""
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
as if they were removed by taking their entry out of `host_system_ids` (see
|
|
645
|
-
below. This is an advanced
|
|
646
|
-
option and should only be used for testing. Default: `false`.
|
|
647
|
-
|
|
648
|
-
> **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
|
|
649
|
-
there are many pitfalls to its use when working with complex cluster
|
|
650
|
-
configurations. Depending on the virtual machines currently on the cluster, and
|
|
651
|
-
your DRS and HA settings, the full host evacuation may fail. Instead,
|
|
652
|
-
incrementally remove hosts from your configuration by adjusting the contents of
|
|
653
|
-
the `host_system_ids` attribute.
|
|
516
|
+
Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
|
|
517
|
+
for testing and is not recommended in normal use.
|
|
654
518
|
"""
|
|
655
519
|
return pulumi.get(self, "force_evacuate_on_destroy")
|
|
656
520
|
|
|
@@ -662,11 +526,9 @@ class ComputeClusterArgs:
|
|
|
662
526
|
@pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
|
|
663
527
|
def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
|
|
664
528
|
"""
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
block access to the host, and DRS will ignore the host when making
|
|
669
|
-
recommendations.
|
|
529
|
+
When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
|
|
530
|
+
failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
|
|
531
|
+
will ignore the host when making recommendations.
|
|
670
532
|
"""
|
|
671
533
|
return pulumi.get(self, "ha_admission_control_failover_host_system_ids")
|
|
672
534
|
|
|
@@ -678,11 +540,8 @@ class ComputeClusterArgs:
|
|
|
678
540
|
@pulumi.getter(name="haAdmissionControlHostFailureTolerance")
|
|
679
541
|
def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[int]]:
|
|
680
542
|
"""
|
|
681
|
-
The maximum number
|
|
682
|
-
|
|
683
|
-
whether to permit virtual machine operations. The maximum is one less than
|
|
684
|
-
the number of hosts in the cluster. Default: `1`.
|
|
685
|
-
<sup>\\*</sup>
|
|
543
|
+
The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
|
|
544
|
+
machine operations. The maximum is one less than the number of hosts in the cluster.
|
|
686
545
|
"""
|
|
687
546
|
return pulumi.get(self, "ha_admission_control_host_failure_tolerance")
|
|
688
547
|
|
|
@@ -694,10 +553,8 @@ class ComputeClusterArgs:
|
|
|
694
553
|
@pulumi.getter(name="haAdmissionControlPerformanceTolerance")
|
|
695
554
|
def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[int]]:
|
|
696
555
|
"""
|
|
697
|
-
The percentage of
|
|
698
|
-
|
|
699
|
-
a failover. A value of 0 produces warnings only, whereas a value of 100
|
|
700
|
-
disables the setting. Default: `100` (disabled).
|
|
556
|
+
The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
|
|
557
|
+
warnings only, whereas a value of 100 disables the setting.
|
|
701
558
|
"""
|
|
702
559
|
return pulumi.get(self, "ha_admission_control_performance_tolerance")
|
|
703
560
|
|
|
@@ -709,9 +566,10 @@ class ComputeClusterArgs:
|
|
|
709
566
|
@pulumi.getter(name="haAdmissionControlPolicy")
|
|
710
567
|
def ha_admission_control_policy(self) -> Optional[pulumi.Input[str]]:
|
|
711
568
|
"""
|
|
712
|
-
The type of admission control
|
|
713
|
-
|
|
714
|
-
|
|
569
|
+
The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
|
|
570
|
+
permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
|
|
571
|
+
slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
|
|
572
|
+
issues.
|
|
715
573
|
"""
|
|
716
574
|
return pulumi.get(self, "ha_admission_control_policy")
|
|
717
575
|
|
|
@@ -723,12 +581,9 @@ class ComputeClusterArgs:
|
|
|
723
581
|
@pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
|
|
724
582
|
def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[bool]]:
|
|
725
583
|
"""
|
|
726
|
-
|
|
727
|
-
average number of host resources represented by the
|
|
728
|
-
|
|
729
|
-
setting from the total amount of resources in the cluster. Disable to supply
|
|
730
|
-
user-defined values. Default: `true`.
|
|
731
|
-
<sup>\\*</sup>
|
|
584
|
+
When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
|
|
585
|
+
subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
|
|
586
|
+
from the total amount of resources in the cluster. Disable to supply user-defined values.
|
|
732
587
|
"""
|
|
733
588
|
return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")
|
|
734
589
|
|
|
@@ -740,9 +595,8 @@ class ComputeClusterArgs:
|
|
|
740
595
|
@pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
|
|
741
596
|
def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[int]]:
|
|
742
597
|
"""
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
failover. Default: `100`.
|
|
598
|
+
When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
|
|
599
|
+
the cluster to reserve for failover.
|
|
746
600
|
"""
|
|
747
601
|
return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")
|
|
748
602
|
|
|
@@ -754,9 +608,8 @@ class ComputeClusterArgs:
|
|
|
754
608
|
@pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
|
|
755
609
|
def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[int]]:
|
|
756
610
|
"""
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
failover. Default: `100`.
|
|
611
|
+
When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
|
|
612
|
+
the cluster to reserve for failover.
|
|
760
613
|
"""
|
|
761
614
|
return pulumi.get(self, "ha_admission_control_resource_percentage_memory")
|
|
762
615
|
|
|
@@ -768,8 +621,7 @@ class ComputeClusterArgs:
|
|
|
768
621
|
@pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
|
|
769
622
|
def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[int]]:
|
|
770
623
|
"""
|
|
771
|
-
|
|
772
|
-
user-defined CPU slot size, in MHz. Default: `32`.
|
|
624
|
+
When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
|
|
773
625
|
"""
|
|
774
626
|
return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")
|
|
775
627
|
|
|
@@ -781,8 +633,7 @@ class ComputeClusterArgs:
|
|
|
781
633
|
@pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
|
|
782
634
|
def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[int]]:
|
|
783
635
|
"""
|
|
784
|
-
|
|
785
|
-
user-defined memory slot size, in MB. Default: `100`.
|
|
636
|
+
When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
|
|
786
637
|
"""
|
|
787
638
|
return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")
|
|
788
639
|
|
|
@@ -794,10 +645,9 @@ class ComputeClusterArgs:
|
|
|
794
645
|
@pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
|
|
795
646
|
def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[bool]]:
|
|
796
647
|
"""
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
average based on all powered-on virtual machines currently in the cluster.
|
|
648
|
+
When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
|
|
649
|
+
to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
|
|
650
|
+
currently in the cluster.
|
|
801
651
|
"""
|
|
802
652
|
return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")
|
|
803
653
|
|
|
@@ -809,8 +659,7 @@ class ComputeClusterArgs:
|
|
|
809
659
|
@pulumi.getter(name="haAdvancedOptions")
|
|
810
660
|
def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
|
|
811
661
|
"""
|
|
812
|
-
|
|
813
|
-
options for vSphere HA.
|
|
662
|
+
Advanced configuration options for vSphere HA.
|
|
814
663
|
"""
|
|
815
664
|
return pulumi.get(self, "ha_advanced_options")
|
|
816
665
|
|
|
@@ -822,10 +671,8 @@ class ComputeClusterArgs:
|
|
|
822
671
|
@pulumi.getter(name="haDatastoreApdRecoveryAction")
|
|
823
672
|
def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[str]]:
|
|
824
673
|
"""
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
|
|
828
|
-
<sup>\\*</sup>
|
|
674
|
+
When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
|
|
675
|
+
affected datastore clears in the middle of an APD event. Can be one of none or reset.
|
|
829
676
|
"""
|
|
830
677
|
return pulumi.get(self, "ha_datastore_apd_recovery_action")
|
|
831
678
|
|
|
@@ -837,11 +684,9 @@ class ComputeClusterArgs:
|
|
|
837
684
|
@pulumi.getter(name="haDatastoreApdResponse")
|
|
838
685
|
def ha_datastore_apd_response(self) -> Optional[pulumi.Input[str]]:
|
|
839
686
|
"""
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
`restartConservative`, or `restartAggressive`. Default: `disabled`.
|
|
844
|
-
<sup>\\*</sup>
|
|
687
|
+
When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
|
|
688
|
+
detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
|
|
689
|
+
restartAggressive.
|
|
845
690
|
"""
|
|
846
691
|
return pulumi.get(self, "ha_datastore_apd_response")
|
|
847
692
|
|
|
@@ -853,10 +698,8 @@ class ComputeClusterArgs:
|
|
|
853
698
|
@pulumi.getter(name="haDatastoreApdResponseDelay")
|
|
854
699
|
def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[int]]:
|
|
855
700
|
"""
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
`ha_datastore_apd_response`. Default: `180`
|
|
859
|
-
seconds (3 minutes). <sup>\\*</sup>
|
|
701
|
+
When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
|
|
702
|
+
the response action defined in ha_datastore_apd_response.
|
|
860
703
|
"""
|
|
861
704
|
return pulumi.get(self, "ha_datastore_apd_response_delay")
|
|
862
705
|
|
|
@@ -868,11 +711,8 @@ class ComputeClusterArgs:
|
|
|
868
711
|
@pulumi.getter(name="haDatastorePdlResponse")
|
|
869
712
|
def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[str]]:
|
|
870
713
|
"""
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
relevant datastore. Can be one of `disabled`, `warning`, or
|
|
874
|
-
`restartAggressive`. Default: `disabled`.
|
|
875
|
-
<sup>\\*</sup>
|
|
714
|
+
When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
|
|
715
|
+
detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
|
|
876
716
|
"""
|
|
877
717
|
return pulumi.get(self, "ha_datastore_pdl_response")
|
|
878
718
|
|
|
@@ -884,8 +724,7 @@ class ComputeClusterArgs:
|
|
|
884
724
|
@pulumi.getter(name="haEnabled")
|
|
885
725
|
def ha_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
886
726
|
"""
|
|
887
|
-
Enable vSphere HA for this cluster.
|
|
888
|
-
`false`.
|
|
727
|
+
Enable vSphere HA for this cluster.
|
|
889
728
|
"""
|
|
890
729
|
return pulumi.get(self, "ha_enabled")
|
|
891
730
|
|
|
@@ -897,10 +736,8 @@ class ComputeClusterArgs:
|
|
|
897
736
|
@pulumi.getter(name="haHeartbeatDatastoreIds")
|
|
898
737
|
def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
|
|
899
738
|
"""
|
|
900
|
-
The list of managed object IDs for
|
|
901
|
-
|
|
902
|
-
when `ha_heartbeat_datastore_policy` is set
|
|
903
|
-
to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
|
|
739
|
+
The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
|
|
740
|
+
ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
|
|
904
741
|
"""
|
|
905
742
|
return pulumi.get(self, "ha_heartbeat_datastore_ids")
|
|
906
743
|
|
|
@@ -912,10 +749,8 @@ class ComputeClusterArgs:
|
|
|
912
749
|
@pulumi.getter(name="haHeartbeatDatastorePolicy")
|
|
913
750
|
def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[str]]:
|
|
914
751
|
"""
|
|
915
|
-
The selection policy for HA
|
|
916
|
-
|
|
917
|
-
`allFeasibleDsWithUserPreference`. Default:
|
|
918
|
-
`allFeasibleDsWithUserPreference`.
|
|
752
|
+
The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
|
|
753
|
+
allFeasibleDsWithUserPreference.
|
|
919
754
|
"""
|
|
920
755
|
return pulumi.get(self, "ha_heartbeat_datastore_policy")
|
|
921
756
|
|
|
@@ -927,10 +762,8 @@ class ComputeClusterArgs:
|
|
|
927
762
|
@pulumi.getter(name="haHostIsolationResponse")
|
|
928
763
|
def ha_host_isolation_response(self) -> Optional[pulumi.Input[str]]:
|
|
929
764
|
"""
|
|
930
|
-
The action to take on virtual
|
|
931
|
-
|
|
932
|
-
the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
|
|
933
|
-
`none`.
|
|
765
|
+
The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
|
|
766
|
+
Can be one of none, powerOff, or shutdown.
|
|
934
767
|
"""
|
|
935
768
|
return pulumi.get(self, "ha_host_isolation_response")
|
|
936
769
|
|
|
@@ -942,9 +775,7 @@ class ComputeClusterArgs:
|
|
|
942
775
|
@pulumi.getter(name="haHostMonitoring")
|
|
943
776
|
def ha_host_monitoring(self) -> Optional[pulumi.Input[str]]:
|
|
944
777
|
"""
|
|
945
|
-
Global setting that controls whether
|
|
946
|
-
vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
|
|
947
|
-
or `disabled`. Default: `enabled`.
|
|
778
|
+
Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
|
|
948
779
|
"""
|
|
949
780
|
return pulumi.get(self, "ha_host_monitoring")
|
|
950
781
|
|
|
@@ -956,10 +787,8 @@ class ComputeClusterArgs:
|
|
|
956
787
|
@pulumi.getter(name="haVmComponentProtection")
|
|
957
788
|
def ha_vm_component_protection(self) -> Optional[pulumi.Input[str]]:
|
|
958
789
|
"""
|
|
959
|
-
Controls vSphere VM component
|
|
960
|
-
|
|
961
|
-
`disabled`. Default: `enabled`.
|
|
962
|
-
<sup>\\*</sup>
|
|
790
|
+
Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
|
|
791
|
+
failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
|
|
963
792
|
"""
|
|
964
793
|
return pulumi.get(self, "ha_vm_component_protection")
|
|
965
794
|
|
|
@@ -971,13 +800,8 @@ class ComputeClusterArgs:
|
|
|
971
800
|
@pulumi.getter(name="haVmDependencyRestartCondition")
|
|
972
801
|
def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[str]]:
|
|
973
802
|
"""
|
|
974
|
-
The condition used to
|
|
975
|
-
|
|
976
|
-
are online, allowing HA to move on to restarting virtual machines on the next
|
|
977
|
-
priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
|
|
978
|
-
`appHbStatusGreen`. The default is `none`, which means that a virtual machine
|
|
979
|
-
is considered ready immediately after a host is found to start it on.
|
|
980
|
-
<sup>\\*</sup>
|
|
803
|
+
The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
|
|
804
|
+
on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
|
|
981
805
|
"""
|
|
982
806
|
return pulumi.get(self, "ha_vm_dependency_restart_condition")
|
|
983
807
|
|
|
@@ -989,9 +813,8 @@ class ComputeClusterArgs:
|
|
|
989
813
|
@pulumi.getter(name="haVmFailureInterval")
|
|
990
814
|
def ha_vm_failure_interval(self) -> Optional[pulumi.Input[int]]:
|
|
991
815
|
"""
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
the virtual machine is marked as failed. Default: `30` seconds.
|
|
816
|
+
If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
|
|
817
|
+
failed. The value is in seconds.
|
|
995
818
|
"""
|
|
996
819
|
return pulumi.get(self, "ha_vm_failure_interval")
|
|
997
820
|
|
|
@@ -1003,11 +826,9 @@ class ComputeClusterArgs:
|
|
|
1003
826
|
@pulumi.getter(name="haVmMaximumFailureWindow")
|
|
1004
827
|
def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[int]]:
|
|
1005
828
|
"""
|
|
1006
|
-
The
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
|
|
1010
|
-
unlimited reset time is allotted. Default: `-1` (no window).
|
|
829
|
+
The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
|
|
830
|
+
attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
|
|
831
|
+
time is allotted.
|
|
1011
832
|
"""
|
|
1012
833
|
return pulumi.get(self, "ha_vm_maximum_failure_window")
|
|
1013
834
|
|
|
@@ -1019,8 +840,7 @@ class ComputeClusterArgs:
|
|
|
1019
840
|
@pulumi.getter(name="haVmMaximumResets")
|
|
1020
841
|
def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[int]]:
|
|
1021
842
|
"""
|
|
1022
|
-
The maximum number of resets that HA will
|
|
1023
|
-
perform to a virtual machine when responding to a failure event. Default: `3`
|
|
843
|
+
The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
|
|
1024
844
|
"""
|
|
1025
845
|
return pulumi.get(self, "ha_vm_maximum_resets")
|
|
1026
846
|
|
|
@@ -1032,9 +852,7 @@ class ComputeClusterArgs:
|
|
|
1032
852
|
@pulumi.getter(name="haVmMinimumUptime")
|
|
1033
853
|
def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[int]]:
|
|
1034
854
|
"""
|
|
1035
|
-
The time, in seconds, that HA waits after
|
|
1036
|
-
powering on a virtual machine before monitoring for heartbeats. Default:
|
|
1037
|
-
`120` seconds (2 minutes).
|
|
855
|
+
The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
|
|
1038
856
|
"""
|
|
1039
857
|
return pulumi.get(self, "ha_vm_minimum_uptime")
|
|
1040
858
|
|
|
@@ -1046,9 +864,8 @@ class ComputeClusterArgs:
|
|
|
1046
864
|
@pulumi.getter(name="haVmMonitoring")
|
|
1047
865
|
def ha_vm_monitoring(self) -> Optional[pulumi.Input[str]]:
|
|
1048
866
|
"""
|
|
1049
|
-
The type of virtual machine monitoring to use
|
|
1050
|
-
|
|
1051
|
-
`vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
|
|
867
|
+
The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
|
|
868
|
+
vmMonitoringOnly, or vmAndAppMonitoring.
|
|
1052
869
|
"""
|
|
1053
870
|
return pulumi.get(self, "ha_vm_monitoring")
|
|
1054
871
|
|
|
@@ -1060,9 +877,7 @@ class ComputeClusterArgs:
|
|
|
1060
877
|
@pulumi.getter(name="haVmRestartAdditionalDelay")
|
|
1061
878
|
def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[int]]:
|
|
1062
879
|
"""
|
|
1063
|
-
Additional delay
|
|
1064
|
-
after ready condition is met. A VM is considered ready at this point.
|
|
1065
|
-
Default: `0` seconds (no delay). <sup>\\*</sup>
|
|
880
|
+
Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
|
|
1066
881
|
"""
|
|
1067
882
|
return pulumi.get(self, "ha_vm_restart_additional_delay")
|
|
1068
883
|
|
|
@@ -1074,9 +889,8 @@ class ComputeClusterArgs:
|
|
|
1074
889
|
@pulumi.getter(name="haVmRestartPriority")
|
|
1075
890
|
def ha_vm_restart_priority(self) -> Optional[pulumi.Input[str]]:
|
|
1076
891
|
"""
|
|
1077
|
-
The default restart priority
|
|
1078
|
-
|
|
1079
|
-
of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
|
|
892
|
+
The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
|
|
893
|
+
high, or highest.
|
|
1080
894
|
"""
|
|
1081
895
|
return pulumi.get(self, "ha_vm_restart_priority")
|
|
1082
896
|
|
|
@@ -1088,10 +902,8 @@ class ComputeClusterArgs:
|
|
|
1088
902
|
@pulumi.getter(name="haVmRestartTimeout")
|
|
1089
903
|
def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[int]]:
|
|
1090
904
|
"""
|
|
1091
|
-
The maximum time, in seconds,
|
|
1092
|
-
|
|
1093
|
-
before proceeding with the next priority. Default: `600` seconds (10 minutes).
|
|
1094
|
-
<sup>\\*</sup>
|
|
905
|
+
The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
|
|
906
|
+
proceeding with the next priority.
|
|
1095
907
|
"""
|
|
1096
908
|
return pulumi.get(self, "ha_vm_restart_timeout")
|
|
1097
909
|
|
|
@@ -1103,8 +915,7 @@ class ComputeClusterArgs:
|
|
|
1103
915
|
@pulumi.getter(name="hostClusterExitTimeout")
|
|
1104
916
|
def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[int]]:
|
|
1105
917
|
"""
|
|
1106
|
-
The timeout
|
|
1107
|
-
mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
|
|
918
|
+
The timeout for each host maintenance mode operation when removing hosts from a cluster.
|
|
1108
919
|
"""
|
|
1109
920
|
return pulumi.get(self, "host_cluster_exit_timeout")
|
|
1110
921
|
|
|
@@ -1116,9 +927,7 @@ class ComputeClusterArgs:
|
|
|
1116
927
|
@pulumi.getter(name="hostManaged")
|
|
1117
928
|
def host_managed(self) -> Optional[pulumi.Input[bool]]:
|
|
1118
929
|
"""
|
|
1119
|
-
|
|
1120
|
-
membership will be managed through the `host` resource rather than the
|
|
1121
|
-
`compute_cluster` resource. Conflicts with: `host_system_ids`.
|
|
930
|
+
Must be set if cluster enrollment is managed from host resource.
|
|
1122
931
|
"""
|
|
1123
932
|
return pulumi.get(self, "host_managed")
|
|
1124
933
|
|
|
@@ -1130,8 +939,7 @@ class ComputeClusterArgs:
|
|
|
1130
939
|
@pulumi.getter(name="hostSystemIds")
|
|
1131
940
|
def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
|
|
1132
941
|
"""
|
|
1133
|
-
The managed object IDs of
|
|
1134
|
-
the hosts to put in the cluster. Conflicts with: `host_managed`.
|
|
942
|
+
The managed object IDs of the hosts to put in the cluster.
|
|
1135
943
|
"""
|
|
1136
944
|
return pulumi.get(self, "host_system_ids")
|
|
1137
945
|
|
|
@@ -1155,10 +963,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="proactiveHaAutomationLevel")
     def proactive_ha_automation_level(self) -> Optional[pulumi.Input[str]]:
         """
-
-        quarantine, maintenance mode, or virtual machine migration recommendations
-        made by proactive HA are to be handled. Can be one of `Automated` or
-        `Manual`. Default: `Manual`. <sup>\\*</sup>
+        The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
         """
         return pulumi.get(self, "proactive_ha_automation_level")

@@ -1170,8 +975,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="proactiveHaEnabled")
     def proactive_ha_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-        Enables
-        <sup>\\*</sup>
+        Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
         """
         return pulumi.get(self, "proactive_ha_enabled")

@@ -1183,12 +987,8 @@ class ComputeClusterArgs:
     @pulumi.getter(name="proactiveHaModerateRemediation")
     def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[str]]:
         """
-        The configured remediation
-
-        `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
-        `proactive_ha_severe_remediation` is set
-        to `QuarantineMode`. Default: `QuarantineMode`.
-        <sup>\\*</sup>
+        The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+        this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
         """
         return pulumi.get(self, "proactive_ha_moderate_remediation")

@@ -1200,9 +1000,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="proactiveHaProviderIds")
     def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
         """
-        The list of IDs for health update
-        providers configured for this cluster.
-        <sup>\\*</sup>
+        The list of IDs for health update providers configured for this cluster.
         """
         return pulumi.get(self, "proactive_ha_provider_ids")

@@ -1214,12 +1012,8 @@ class ComputeClusterArgs:
    @pulumi.getter(name="proactiveHaSevereRemediation")
     def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[str]]:
         """
-        The configured remediation for
-
-        Note that this cannot be set to `QuarantineMode` when
-        `proactive_ha_moderate_remediation` is
-        set to `MaintenanceMode`. Default: `QuarantineMode`.
-        <sup>\\*</sup>
+        The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+        cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
         """
         return pulumi.get(self, "proactive_ha_severe_remediation")

@@ -1243,8 +1037,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanCompressionEnabled")
     def vsan_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        cluster.
+        Whether the vSAN compression service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_compression_enabled")

@@ -1256,9 +1049,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanDedupEnabled")
     def vsan_dedup_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
-        compression must also be enabled.
+        Whether the vSAN deduplication service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_dedup_enabled")

@@ -1270,8 +1061,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanDiskGroups")
     def vsan_disk_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]:
         """
-
-        group in the cluster.
+        A list of disk UUIDs to add to the vSAN cluster.
         """
         return pulumi.get(self, "vsan_disk_groups")

@@ -1283,10 +1073,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanDitEncryptionEnabled")
     def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
-        vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
-        at the same time.
+        Whether the vSAN data-in-transit encryption is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_dit_encryption_enabled")

@@ -1298,9 +1085,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanDitRekeyInterval")
     def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[int]]:
         """
-
-        minutes for data-in-transit encryption. The valid rekey interval is 30 to
-        10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
+        When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
         """
         return pulumi.get(self, "vsan_dit_rekey_interval")

@@ -1312,7 +1097,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanEnabled")
     def vsan_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
+        Whether the vSAN service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_enabled")

@@ -1324,7 +1109,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanEsaEnabled")
     def vsan_esa_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
+        Whether the vSAN ESA service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_esa_enabled")

@@ -1336,7 +1121,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanFaultDomains")
     def vsan_fault_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]:
         """
-
+        The configuration for vSAN fault domains.
         """
         return pulumi.get(self, "vsan_fault_domains")

@@ -1348,8 +1133,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
     def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        diagnostic mode for vSAN performance service on the cluster.
+        Whether the vSAN network diagnostic mode is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")

@@ -1361,8 +1145,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanPerformanceEnabled")
     def vsan_performance_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        the cluster. Default: `true`.
+        Whether the vSAN performance service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_performance_enabled")

@@ -1374,10 +1157,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanRemoteDatastoreIds")
     def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
         """
-        The
-        mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
-        `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
-        data-in-transit encryption feature at the same time.
+        The managed object IDs of the vSAN datastore to be mounted on the cluster.
         """
         return pulumi.get(self, "vsan_remote_datastore_ids")

@@ -1389,7 +1169,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanStretchedCluster")
     def vsan_stretched_cluster(self) -> Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]:
         """
-
+        The configuration for stretched cluster.
         """
         return pulumi.get(self, "vsan_stretched_cluster")

@@ -1401,8 +1181,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanUnmapEnabled")
     def vsan_unmap_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
+        Whether the vSAN unmap service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_unmap_enabled")

@@ -1414,8 +1193,7 @@ class ComputeClusterArgs:
     @pulumi.getter(name="vsanVerboseModeEnabled")
     def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        performance service on the cluster.
+        Whether the vSAN verbose mode is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_verbose_mode_enabled")

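The vSAN properties above are plain feature toggles in this release. As a minimal sketch only (not an example shipped with the package, and omitting disk-group and network prerequisites a real vSAN cluster needs), enabling the services on a cluster might look like this; `dc-01` and `esxi-01.example.com` are placeholder names.

import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # placeholder name
host = vsphere.get_host(name="esxi-01.example.com", datacenter_id=datacenter.id)

vsan_cluster = vsphere.ComputeCluster(
    "vsan-cluster",
    datacenter_id=datacenter.id,
    host_system_ids=[host.id],
    vsan_enabled=True,
    # Per the earlier docstring text, deduplication requires compression
    # to be enabled as well; both are booleans on this resource.
    vsan_compression_enabled=True,
    vsan_dedup_enabled=True,
    vsan_performance_enabled=True,
)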
@@ -1505,230 +1283,119 @@ class _ComputeClusterState:
         and require vCenter Server.
         :param pulumi.Input[str] datacenter_id: The managed object ID of
         the datacenter to create the cluster in. Forces a new resource if changed.
-        :param pulumi.Input[str] dpm_automation_level: The automation level for host power
-
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[
-
-
-
-        :param pulumi.Input[
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[bool] drs_enable_predictive_drs: When `true`, enables DRS to use data
-        from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
-        recommendations. <sup>\\*</sup>
-
-        [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
-        :param pulumi.Input[bool] drs_enable_vm_overrides: Allow individual DRS overrides to be
-        set for virtual machines in the cluster. Default: `true`.
-        :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster. Default: `false`.
-        :param pulumi.Input[int] drs_migration_threshold: A value between `1` and `5` indicating
-        the threshold of imbalance tolerated between hosts. A lower setting will
-        tolerate more imbalance while a higher setting will tolerate less. Default:
-        `3`.
-        :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all
-        resource pools in the cluster. Can be one of `disabled` or
-        `scaleCpuAndMemoryShares`. Default: `disabled`.
+        :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
+        :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+        machines in the cluster. Requires that DRS be enabled.
+        :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+        affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
+        setting.
+        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
+        :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+        fullyAutomated.
+        :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
+        :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
+        :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
+        :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+        more imbalance while a higher setting will tolerate less.
+        :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
         :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
         This is a path relative to the datacenter you are deploying the cluster to.
         Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
         The provider will place a cluster named `compute-cluster-test` in a
         host folder located at `/dc1/host/foo/bar`, with the final inventory path
         being `/dc1/host/foo/bar/datastore-cluster-test`.
-        :param pulumi.Input[bool] force_evacuate_on_destroy:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        :param pulumi.Input[
-
-
-
-        :param pulumi.Input[str]
-
-
-
-
-
-
-
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[
-
-
-
-
-        :param pulumi.Input[
-
-        :param pulumi.Input[
-
-
-
-        :param pulumi.Input[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        :param pulumi.Input[str] ha_datastore_pdl_response: Controls the action to take on
-        virtual machines when the cluster has detected a permanent device loss to a
-        relevant datastore. Can be one of `disabled`, `warning`, or
-        `restartAggressive`. Default: `disabled`.
-        <sup>\\*</sup>
-        :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster. Default:
-        `false`.
-        :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for
-        preferred datastores to use for HA heartbeating. This setting is only useful
-        when `ha_heartbeat_datastore_policy` is set
-        to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
-        :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA
-        heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
-        `allFeasibleDsWithUserPreference`. Default:
-        `allFeasibleDsWithUserPreference`.
-        :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual
-        machines when a host has detected that it has been isolated from the rest of
-        the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
-        `none`.
-        :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether
-        vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
-        or `disabled`. Default: `enabled`.
-        :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component
-        protection for virtual machines in this cluster. Can be one of `enabled` or
-        `disabled`. Default: `enabled`.
-        <sup>\\*</sup>
-        :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to
-        determine whether or not virtual machines in a certain restart priority class
-        are online, allowing HA to move on to restarting virtual machines on the next
-        priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
-        `appHbStatusGreen`. The default is `none`, which means that a virtual machine
-        is considered ready immediately after a host is found to start it on.
-        <sup>\\*</sup>
-        :param pulumi.Input[int] ha_vm_failure_interval: The time interval, in seconds, a heartbeat
-        from a virtual machine is not received within this configured interval,
-        the virtual machine is marked as failed. Default: `30` seconds.
-        :param pulumi.Input[int] ha_vm_maximum_failure_window: The time, in seconds, for the reset window in
-        which `ha_vm_maximum_resets` can operate. When this
-        window expires, no more resets are attempted regardless of the setting
-        configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
-        unlimited reset time is allotted. Default: `-1` (no window).
-        :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will
-        perform to a virtual machine when responding to a failure event. Default: `3`
-        :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after
-        powering on a virtual machine before monitoring for heartbeats. Default:
-        `120` seconds (2 minutes).
-        :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use
-        when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
-        `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
-        :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay, in seconds,
-        after ready condition is met. A VM is considered ready at this point.
-        Default: `0` seconds (no delay). <sup>\\*</sup>
-        :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority
-        for affected virtual machines when vSphere detects a host failure. Can be one
-        of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
-        :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds,
-        that vSphere HA will wait for virtual machines in one priority to be ready
-        before proceeding with the next priority. Default: `600` seconds (10 minutes).
-        <sup>\\*</sup>
-        :param pulumi.Input[int] host_cluster_exit_timeout: The timeout, in seconds, for each host maintenance
-        mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
-        :param pulumi.Input[bool] host_managed: Can be set to `true` if compute cluster
-        membership will be managed through the `host` resource rather than the
-        `compute_cluster` resource. Conflicts with: `host_system_ids`.
-        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of
-        the hosts to put in the cluster. Conflicts with: `host_managed`.
+        :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
+        for testing and is not recommended in normal use.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
+        failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
+        will ignore the host when making recommendations.
+        :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
+        machine operations. The maximum is one less than the number of hosts in the cluster.
+        :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
+        warnings only, whereas a value of 100 disables the setting.
+        :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+        permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
+        slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
+        issues.
+        :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+        subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
+        from the total amount of resources in the cluster. Disable to supply user-defined values.
+        :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+        the cluster to reserve for failover.
+        :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+        the cluster to reserve for failover.
+        :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
+        :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
+        :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+        to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
+        currently in the cluster.
+        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
+        :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
+        affected datastore clears in the middle of an APD event. Can be one of none or reset.
+        :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+        detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
+        restartAggressive.
+        :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
+        the response action defined in ha_datastore_apd_response.
+        :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+        detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
+        :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+        ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
+        :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+        allFeasibleDsWithUserPreference.
+        :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+        Can be one of none, powerOff, or shutdown.
+        :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
+        :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
+        failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
+        :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
+        on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
+        :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
+        failed. The value is in seconds.
+        :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
+        attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
+        time is allotted.
+        :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
+        :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
+        :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
+        vmMonitoringOnly, or vmAndAppMonitoring.
+        :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
+        :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
+        high, or highest.
+        :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
+        proceeding with the next priority.
+        :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
+        :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
         :param pulumi.Input[str] name: The name of the cluster.
-        :param pulumi.Input[str] proactive_ha_automation_level:
-
-
-
-        :param pulumi.Input[
-
-
-        for moderately degraded hosts. Can be one of `MaintenanceMode` or
-        `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
-        `proactive_ha_severe_remediation` is set
-        to `QuarantineMode`. Default: `QuarantineMode`.
-        <sup>\\*</sup>
-        :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update
-        providers configured for this cluster.
-        <sup>\\*</sup>
-        :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for
-        severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
-        Note that this cannot be set to `QuarantineMode` when
-        `proactive_ha_moderate_remediation` is
-        set to `MaintenanceMode`. Default: `QuarantineMode`.
-        <sup>\\*</sup>
+        :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
+        :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
+        :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+        this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
+        :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+        cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
         :param pulumi.Input[str] resource_pool_id: The managed object ID of the primary
         resource pool for this cluster. This can be passed directly to the
         `resource_pool_id`
         attribute of the
         `VirtualMachine` resource.
         :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
-        :param pulumi.Input[bool] vsan_compression_enabled:
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[
-
-        :param pulumi.Input[
-
-
-
-        :param pulumi.Input[
-
-
-        :param pulumi.Input[bool] vsan_enabled: Enables vSAN on the cluster.
-        :param pulumi.Input[bool] vsan_esa_enabled: Enables vSAN ESA on the cluster.
-        :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: Configurations of vSAN fault domains.
-        :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Enables network
-        diagnostic mode for vSAN performance service on the cluster.
-        :param pulumi.Input[bool] vsan_performance_enabled: Enables vSAN performance service on
-        the cluster. Default: `true`.
-        :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The remote vSAN datastore IDs to be
-        mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
-        `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
-        data-in-transit encryption feature at the same time.
-        :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: Configurations of vSAN stretched cluster.
-        :param pulumi.Input[bool] vsan_unmap_enabled: Enables vSAN unmap on the cluster.
-        You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
-        :param pulumi.Input[bool] vsan_verbose_mode_enabled: Enables verbose mode for vSAN
-        performance service on the cluster.
+        :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
+        :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
+        :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
+        :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
+        :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
+        :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
+        :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
+        :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: The configuration for vSAN fault domains.
+        :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
+        :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
+        :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: The configuration for stretched cluster.
+        :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
+        :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
         """
         if custom_attributes is not None:
             pulumi.set(__self__, "custom_attributes", custom_attributes)
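Even after the reflow above, the `:param` descriptions still map one-to-one onto constructor arguments of the resource. A minimal sketch combining a few of the HA-related arguments follows; it is not an example from this package, and the inventory names are placeholders.

import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # placeholder
hosts = [
    vsphere.get_host(name=n, datacenter_id=datacenter.id)
    for n in ["esxi-01.example.com", "esxi-02.example.com"]
]

ha_cluster = vsphere.ComputeCluster(
    "ha-cluster",
    datacenter_id=datacenter.id,
    host_system_ids=[h.id for h in hosts],
    ha_enabled=True,
    # Values below are picked from the documented option sets above.
    ha_vm_restart_priority="medium",
    ha_vm_restart_timeout=600,
    ha_host_isolation_response="powerOff",
    ha_vm_component_protection="enabled",
    ha_datastore_pdl_response="restartAggressive",
)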
@@ -1902,9 +1569,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="dpmAutomationLevel")
     def dpm_automation_level(self) -> Optional[pulumi.Input[str]]:
         """
-        The automation level for host power
-        operations in this cluster. Can be one of `manual` or `automated`. Default:
-        `manual`.
+        The automation level for host power operations in this cluster. Can be one of manual or automated.
         """
         return pulumi.get(self, "dpm_automation_level")

@@ -1916,9 +1581,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="dpmEnabled")
     def dpm_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-        Enable DPM support for DRS
-
-        Default: `false`.
+        Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+        machines in the cluster. Requires that DRS be enabled.
         """
         return pulumi.get(self, "dpm_enabled")

@@ -1930,10 +1594,9 @@ class _ComputeClusterState:
     @pulumi.getter(name="dpmThreshold")
     def dpm_threshold(self) -> Optional[pulumi.Input[int]]:
         """
-        A value between
-
-
-        tolerate more of a surplus/deficit than a higher setting. Default: `3`.
+        A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+        affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
+        setting.
         """
         return pulumi.get(self, "dpm_threshold")

@@ -1945,8 +1608,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="drsAdvancedOptions")
     def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
         """
-
-        options for DRS and DPM.
+        Advanced configuration options for DRS and DPM.
         """
        return pulumi.get(self, "drs_advanced_options")

@@ -1958,9 +1620,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="drsAutomationLevel")
     def drs_automation_level(self) -> Optional[pulumi.Input[str]]:
         """
-        The default automation level for all
-
-        `partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
+        The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+        fullyAutomated.
         """
         return pulumi.get(self, "drs_automation_level")

@@ -1972,11 +1633,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="drsEnablePredictiveDrs")
     def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[bool]]:
         """
-        When
-        from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
-        recommendations. <sup>\\*</sup>
-
-        [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
+        When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
         """
         return pulumi.get(self, "drs_enable_predictive_drs")

@@ -1988,8 +1645,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="drsEnableVmOverrides")
     def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        set for virtual machines in the cluster. Default: `true`.
+        When true, allows individual VM overrides within this cluster to be set.
         """
         return pulumi.get(self, "drs_enable_vm_overrides")

@@ -2001,7 +1657,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="drsEnabled")
     def drs_enabled(self) -> Optional[pulumi.Input[bool]]:
         """
-        Enable DRS for this cluster.
+        Enable DRS for this cluster.
         """
         return pulumi.get(self, "drs_enabled")

@@ -2013,10 +1669,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="drsMigrationThreshold")
     def drs_migration_threshold(self) -> Optional[pulumi.Input[int]]:
         """
-        A value between
-
-        tolerate more imbalance while a higher setting will tolerate less. Default:
-        `3`.
+        A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+        more imbalance while a higher setting will tolerate less.
         """
         return pulumi.get(self, "drs_migration_threshold")

@@ -2028,9 +1682,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="drsScaleDescendantsShares")
     def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[str]]:
         """
-        Enable scalable shares for all
-        resource pools in the cluster. Can be one of `disabled` or
-        `scaleCpuAndMemoryShares`. Default: `disabled`.
+        Enable scalable shares for all descendants of this cluster.
         """
         return pulumi.get(self, "drs_scale_descendants_shares")

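Putting the DPM and DRS getters above together, a minimal sketch of a DRS-plus-DPM configuration could look like the following. This is illustrative only; `dc-01` and `esxi-01.example.com` are placeholder names, and the advanced-option key is just an example, so consult the VMware DRS documentation for keys valid in your environment.

import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # placeholder
host = vsphere.get_host(name="esxi-01.example.com", datacenter_id=datacenter.id)

drs_cluster = vsphere.ComputeCluster(
    "drs-cluster",
    datacenter_id=datacenter.id,
    host_system_ids=[host.id],
    drs_enabled=True,
    drs_automation_level="fullyAutomated",
    drs_migration_threshold=3,
    # DPM requires DRS to be enabled, per the dpm_enabled description above.
    dpm_enabled=True,
    dpm_automation_level="automated",
    dpm_threshold=3,
    # Example advanced option key (assumed, not taken from this package).
    drs_advanced_options={"TryBalanceVmsPerHost": "1"},
)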
@@ -2059,18 +1711,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="forceEvacuateOnDestroy")
     def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[bool]]:
         """
-
-
-        as if they were removed by taking their entry out of `host_system_ids` (see
-        below. This is an advanced
-        option and should only be used for testing. Default: `false`.
-
-        > **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
-        there are many pitfalls to its use when working with complex cluster
-        configurations. Depending on the virtual machines currently on the cluster, and
-        your DRS and HA settings, the full host evacuation may fail. Instead,
-        incrementally remove hosts from your configuration by adjusting the contents of
-        the `host_system_ids` attribute.
+        Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
+        for testing and is not recommended in normal use.
         """
         return pulumi.get(self, "force_evacuate_on_destroy")

@@ -2082,11 +1724,9 @@ class _ComputeClusterState:
     @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
     def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
         """
-
-
-
-        block access to the host, and DRS will ignore the host when making
-        recommendations.
+        When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
+        failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
+        will ignore the host when making recommendations.
         """
         return pulumi.get(self, "ha_admission_control_failover_host_system_ids")

@@ -2098,11 +1738,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
     def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[int]]:
         """
-        The maximum number
-
-        whether to permit virtual machine operations. The maximum is one less than
-        the number of hosts in the cluster. Default: `1`.
-        <sup>\\*</sup>
+        The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
+        machine operations. The maximum is one less than the number of hosts in the cluster.
         """
         return pulumi.get(self, "ha_admission_control_host_failure_tolerance")

@@ -2114,10 +1751,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
     def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[int]]:
         """
-        The percentage of
-
-        a failover. A value of 0 produces warnings only, whereas a value of 100
-        disables the setting. Default: `100` (disabled).
+        The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
+        warnings only, whereas a value of 100 disables the setting.
         """
         return pulumi.get(self, "ha_admission_control_performance_tolerance")

@@ -2129,9 +1764,10 @@ class _ComputeClusterState:
     @pulumi.getter(name="haAdmissionControlPolicy")
     def ha_admission_control_policy(self) -> Optional[pulumi.Input[str]]:
         """
-        The type of admission control
-
-
+        The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+        permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
+        slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
+        issues.
         """
         return pulumi.get(self, "ha_admission_control_policy")

@@ -2143,12 +1779,9 @@ class _ComputeClusterState:
     @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
     def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[bool]]:
         """
-
-        average number of host resources represented by the
-
-        setting from the total amount of resources in the cluster. Disable to supply
-        user-defined values. Default: `true`.
-        <sup>\\*</sup>
+        When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+        subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
+        from the total amount of resources in the cluster. Disable to supply user-defined values.
         """
         return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")

@@ -2160,9 +1793,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
     def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[int]]:
         """
-
-
-        failover. Default: `100`.
+        When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+        the cluster to reserve for failover.
         """
         return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")

@@ -2174,9 +1806,8 @@ class _ComputeClusterState:
     @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
     def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[int]]:
         """
-
-
-        failover. Default: `100`.
+        When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+        the cluster to reserve for failover.
         """
         return pulumi.get(self, "ha_admission_control_resource_percentage_memory")

@@ -2188,8 +1819,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
     def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[int]]:
         """
-
-        user-defined CPU slot size, in MHz. Default: `32`.
+        When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
         """
         return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")

@@ -2201,8 +1831,7 @@ class _ComputeClusterState:
     @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
     def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[int]]:
         """
-
-        user-defined memory slot size, in MB. Default: `100`.
+        When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
         """
         return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")

@@ -2214,10 +1843,9 @@ class _ComputeClusterState:
     @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
     def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[bool]]:
         """
-
-
-
-        average based on all powered-on virtual machines currently in the cluster.
+        When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+        to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
+        currently in the cluster.
         """
         return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")

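The admission-control getters above split their behavior by `ha_admission_control_policy`. A minimal sketch of the slotPolicy variant with explicit slot sizes follows; it is an assumption-laden illustration (placeholder inventory names, arbitrary slot sizes), not an example shipped with this package.

import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # placeholder
host = vsphere.get_host(name="esxi-01.example.com", datacenter_id=datacenter.id)

admission_cluster = vsphere.ComputeCluster(
    "admission-cluster",
    datacenter_id=datacenter.id,
    host_system_ids=[host.id],
    ha_enabled=True,
    ha_admission_control_policy="slotPolicy",
    # Explicit slot sizes are only honored when use_explicit_size is True.
    ha_admission_control_slot_policy_use_explicit_size=True,
    ha_admission_control_slot_policy_explicit_cpu=512,      # MHz, example value
    ha_admission_control_slot_policy_explicit_memory=1024,  # MB, example value
)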
@@ -2229,8 +1857,7 @@ class _ComputeClusterState:
|
|
|
2229
1857
|
@pulumi.getter(name="haAdvancedOptions")
|
|
2230
1858
|
def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
|
|
2231
1859
|
"""
|
|
2232
|
-
|
|
2233
|
-
options for vSphere HA.
|
|
1860
|
+
Advanced configuration options for vSphere HA.
|
|
2234
1861
|
"""
|
|
2235
1862
|
return pulumi.get(self, "ha_advanced_options")
|
|
2236
1863
|
|
|
@@ -2242,10 +1869,8 @@ class _ComputeClusterState:
|
|
|
2242
1869
|
@pulumi.getter(name="haDatastoreApdRecoveryAction")
|
|
2243
1870
|
def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[str]]:
|
|
2244
1871
|
"""
|
|
2245
|
-
|
|
2246
|
-
|
|
2247
|
-
middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
|
|
2248
|
-
<sup>\\*</sup>
|
|
1872
|
+
When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
|
|
1873
|
+
affected datastore clears in the middle of an APD event. Can be one of none or reset.
|
|
2249
1874
|
"""
|
|
2250
1875
|
return pulumi.get(self, "ha_datastore_apd_recovery_action")
|
|
2251
1876
|
|
|
@@ -2257,11 +1882,9 @@ class _ComputeClusterState:
|
|
|
2257
1882
|
@pulumi.getter(name="haDatastoreApdResponse")
|
|
2258
1883
|
def ha_datastore_apd_response(self) -> Optional[pulumi.Input[str]]:
|
|
2259
1884
|
"""
|
|
2260
|
-
|
|
2261
|
-
|
|
2262
|
-
|
|
2263
|
-
`restartConservative`, or `restartAggressive`. Default: `disabled`.
|
|
2264
|
-
<sup>\\*</sup>
|
|
1885
|
+
When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
|
|
1886
|
+
detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
|
|
1887
|
+
restartAggressive.
|
|
2265
1888
|
"""
|
|
2266
1889
|
return pulumi.get(self, "ha_datastore_apd_response")
|
|
2267
1890
|
|
|
@@ -2273,10 +1896,8 @@ class _ComputeClusterState:
|
|
|
2273
1896
|
@pulumi.getter(name="haDatastoreApdResponseDelay")
|
|
2274
1897
|
def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[int]]:
|
|
2275
1898
|
"""
|
|
2276
|
-
|
|
2277
|
-
|
|
2278
|
-
`ha_datastore_apd_response`. Default: `180`
|
|
2279
|
-
seconds (3 minutes). <sup>\\*</sup>
|
|
1899
|
+
When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
|
|
1900
|
+
the response action defined in ha_datastore_apd_response.
|
|
2280
1901
|
"""
|
|
2281
1902
|
return pulumi.get(self, "ha_datastore_apd_response_delay")
|
|
2282
1903
|
|
|
@@ -2288,11 +1909,8 @@ class _ComputeClusterState:
|
|
|
2288
1909
|
@pulumi.getter(name="haDatastorePdlResponse")
|
|
2289
1910
|
def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[str]]:
|
|
2290
1911
|
"""
|
|
2291
|
-
|
|
2292
|
-
|
|
2293
|
-
relevant datastore. Can be one of `disabled`, `warning`, or
|
|
2294
|
-
`restartAggressive`. Default: `disabled`.
|
|
2295
|
-
<sup>\\*</sup>
|
|
1912
|
+
When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
|
|
1913
|
+
detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
|
|
2296
1914
|
"""
|
|
2297
1915
|
return pulumi.get(self, "ha_datastore_pdl_response")
|
|
2298
1916
|
|
|
@@ -2304,8 +1922,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="haEnabled")
  def ha_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
- Enable vSphere HA for this cluster.
- `false`.
+ Enable vSphere HA for this cluster.
  """
  return pulumi.get(self, "ha_enabled")

@@ -2317,10 +1934,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haHeartbeatDatastoreIds")
  def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
  """
- The list of managed object IDs for
-
- when `ha_heartbeat_datastore_policy` is set
- to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
+ The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+ ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
  """
  return pulumi.get(self, "ha_heartbeat_datastore_ids")

@@ -2332,10 +1947,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haHeartbeatDatastorePolicy")
  def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[str]]:
  """
- The selection policy for HA
-
- `allFeasibleDsWithUserPreference`. Default:
- `allFeasibleDsWithUserPreference`.
+ The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+ allFeasibleDsWithUserPreference.
  """
  return pulumi.get(self, "ha_heartbeat_datastore_policy")

@@ -2347,10 +1960,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haHostIsolationResponse")
  def ha_host_isolation_response(self) -> Optional[pulumi.Input[str]]:
  """
- The action to take on virtual
-
- the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
- `none`.
+ The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+ Can be one of none, powerOff, or shutdown.
  """
  return pulumi.get(self, "ha_host_isolation_response")

@@ -2362,9 +1973,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="haHostMonitoring")
  def ha_host_monitoring(self) -> Optional[pulumi.Input[str]]:
  """
- Global setting that controls whether
- vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
- or `disabled`. Default: `enabled`.
+ Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
  """
  return pulumi.get(self, "ha_host_monitoring")

@@ -2376,10 +1985,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haVmComponentProtection")
  def ha_vm_component_protection(self) -> Optional[pulumi.Input[str]]:
  """
- Controls vSphere VM component
-
- `disabled`. Default: `enabled`.
- <sup>\\*</sup>
+ Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
+ failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
  """
  return pulumi.get(self, "ha_vm_component_protection")

@@ -2391,13 +1998,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haVmDependencyRestartCondition")
  def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[str]]:
  """
- The condition used to
-
- are online, allowing HA to move on to restarting virtual machines on the next
- priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
- `appHbStatusGreen`. The default is `none`, which means that a virtual machine
- is considered ready immediately after a host is found to start it on.
- <sup>\\*</sup>
+ The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
+ on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
  """
  return pulumi.get(self, "ha_vm_dependency_restart_condition")

@@ -2409,9 +2011,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haVmFailureInterval")
  def ha_vm_failure_interval(self) -> Optional[pulumi.Input[int]]:
  """
-
-
- the virtual machine is marked as failed. Default: `30` seconds.
+ If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
+ failed. The value is in seconds.
  """
  return pulumi.get(self, "ha_vm_failure_interval")

@@ -2423,11 +2024,9 @@ class _ComputeClusterState:
  @pulumi.getter(name="haVmMaximumFailureWindow")
  def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[int]]:
  """
- The
-
-
- configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
- unlimited reset time is allotted. Default: `-1` (no window).
+ The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
+ attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
+ time is allotted.
  """
  return pulumi.get(self, "ha_vm_maximum_failure_window")

@@ -2439,8 +2038,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="haVmMaximumResets")
  def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[int]]:
  """
- The maximum number of resets that HA will
- perform to a virtual machine when responding to a failure event. Default: `3`
+ The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
  """
  return pulumi.get(self, "ha_vm_maximum_resets")

@@ -2452,9 +2050,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="haVmMinimumUptime")
  def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[int]]:
  """
- The time, in seconds, that HA waits after
- powering on a virtual machine before monitoring for heartbeats. Default:
- `120` seconds (2 minutes).
+ The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
  """
  return pulumi.get(self, "ha_vm_minimum_uptime")

@@ -2466,9 +2062,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haVmMonitoring")
  def ha_vm_monitoring(self) -> Optional[pulumi.Input[str]]:
  """
- The type of virtual machine monitoring to use
-
- `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
+ The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
+ vmMonitoringOnly, or vmAndAppMonitoring.
  """
  return pulumi.get(self, "ha_vm_monitoring")

@@ -2480,9 +2075,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="haVmRestartAdditionalDelay")
  def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[int]]:
  """
- Additional delay
- after ready condition is met. A VM is considered ready at this point.
- Default: `0` seconds (no delay). <sup>\\*</sup>
+ Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
  """
  return pulumi.get(self, "ha_vm_restart_additional_delay")

@@ -2494,9 +2087,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haVmRestartPriority")
  def ha_vm_restart_priority(self) -> Optional[pulumi.Input[str]]:
  """
- The default restart priority
-
- of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
+ The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
+ high, or highest.
  """
  return pulumi.get(self, "ha_vm_restart_priority")

@@ -2508,10 +2100,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="haVmRestartTimeout")
  def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[int]]:
  """
- The maximum time, in seconds,
-
- before proceeding with the next priority. Default: `600` seconds (10 minutes).
- <sup>\\*</sup>
+ The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
+ proceeding with the next priority.
  """
  return pulumi.get(self, "ha_vm_restart_timeout")

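The ha_vm_* properties above correspond one-to-one to `ComputeCluster` arguments. A sketch of a cluster with guest heartbeat monitoring, reusing the illustrative `dc-01` datacenter (values are examples, not defaults asserted by this package):

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # example datacenter name

cluster = vsphere.ComputeCluster(
    "ha-cluster",
    name="ha-cluster",
    datacenter_id=dc.id,
    ha_enabled=True,
    ha_vm_monitoring="vmMonitoringOnly",     # heartbeat-based VM monitoring
    ha_vm_failure_interval=30,               # seconds without a heartbeat before a VM is marked failed
    ha_vm_minimum_uptime=120,                # seconds to wait after power-on before monitoring begins
    ha_vm_maximum_resets=3,                  # resets HA may attempt per failure event
    ha_vm_maximum_failure_window=-1,         # -1 = unlimited reset window
    ha_vm_restart_priority="medium",
    ha_vm_restart_timeout=600,               # seconds HA waits per restart priority class
)
```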
@@ -2523,8 +2113,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="hostClusterExitTimeout")
  def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[int]]:
  """
- The timeout
- mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
+ The timeout for each host maintenance mode operation when removing hosts from a cluster.
  """
  return pulumi.get(self, "host_cluster_exit_timeout")

@@ -2536,9 +2125,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="hostManaged")
  def host_managed(self) -> Optional[pulumi.Input[bool]]:
  """
-
- membership will be managed through the `host` resource rather than the
- `compute_cluster` resource. Conflicts with: `host_system_ids`.
+ Must be set if cluster enrollment is managed from host resource.
  """
  return pulumi.get(self, "host_managed")

@@ -2550,8 +2137,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="hostSystemIds")
  def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
  """
- The managed object IDs of
- the hosts to put in the cluster. Conflicts with: `host_managed`.
+ The managed object IDs of the hosts to put in the cluster.
  """
  return pulumi.get(self, "host_system_ids")

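host_managed and host_system_ids are the two ways to control cluster membership. A sketch of enrolling hosts directly on the cluster resource, assuming hypothetical ESXi host names and that the provider's get_host data source resolves managed object IDs by name and datacenter:

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # example datacenter name

# Hypothetical host names; each lookup yields the host's managed object ID.
host_ids = [
    vsphere.get_host(name=name, datacenter_id=dc.id).id
    for name in ["esxi-01.example.com", "esxi-02.example.com"]
]

cluster = vsphere.ComputeCluster(
    "member-cluster",
    name="member-cluster",
    datacenter_id=dc.id,
    # Manage membership here; leave host_managed unset when hosts are listed directly.
    host_system_ids=host_ids,
)
```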
@@ -2575,10 +2161,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="proactiveHaAutomationLevel")
  def proactive_ha_automation_level(self) -> Optional[pulumi.Input[str]]:
  """
-
- quarantine, maintenance mode, or virtual machine migration recommendations
- made by proactive HA are to be handled. Can be one of `Automated` or
- `Manual`. Default: `Manual`. <sup>\\*</sup>
+ The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
  """
  return pulumi.get(self, "proactive_ha_automation_level")

@@ -2590,8 +2173,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="proactiveHaEnabled")
  def proactive_ha_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
- Enables
- <sup>\\*</sup>
+ Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
  """
  return pulumi.get(self, "proactive_ha_enabled")

@@ -2603,12 +2185,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="proactiveHaModerateRemediation")
  def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[str]]:
  """
- The configured remediation
-
- `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
- `proactive_ha_severe_remediation` is set
- to `QuarantineMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
+ The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+ this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
  """
  return pulumi.get(self, "proactive_ha_moderate_remediation")

@@ -2620,9 +2198,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="proactiveHaProviderIds")
  def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
  """
- The list of IDs for health update
- providers configured for this cluster.
- <sup>\\*</sup>
+ The list of IDs for health update providers configured for this cluster.
  """
  return pulumi.get(self, "proactive_ha_provider_ids")

@@ -2634,12 +2210,8 @@ class _ComputeClusterState:
  @pulumi.getter(name="proactiveHaSevereRemediation")
  def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[str]]:
  """
- The configured remediation for
-
- Note that this cannot be set to `QuarantineMode` when
- `proactive_ha_moderate_remediation` is
- set to `MaintenanceMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
+ The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+ cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
  """
  return pulumi.get(self, "proactive_ha_severe_remediation")

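Proactive HA combines DRS with at least one registered health update provider. A sketch with a placeholder provider ID (not a real value); both remediation levels use QuarantineMode to avoid the conflict noted in the docstrings above:

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # example datacenter name

cluster = vsphere.ComputeCluster(
    "proactive-ha-cluster",
    name="proactive-ha-cluster",
    datacenter_id=dc.id,
    drs_enabled=True,
    proactive_ha_enabled=True,
    proactive_ha_automation_level="Automated",
    proactive_ha_moderate_remediation="QuarantineMode",
    proactive_ha_severe_remediation="QuarantineMode",
    proactive_ha_provider_ids=["example-health-provider-id"],  # placeholder provider ID
)
```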
@@ -2679,8 +2251,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanCompressionEnabled")
  def vsan_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- cluster.
+ Whether the vSAN compression service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_compression_enabled")

@@ -2692,9 +2263,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanDedupEnabled")
  def vsan_dedup_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
- compression must also be enabled.
+ Whether the vSAN deduplication service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_dedup_enabled")

@@ -2706,8 +2275,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanDiskGroups")
  def vsan_disk_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]:
  """
-
- group in the cluster.
+ A list of disk UUIDs to add to the vSAN cluster.
  """
  return pulumi.get(self, "vsan_disk_groups")

@@ -2719,10 +2287,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanDitEncryptionEnabled")
  def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
- vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
- at the same time.
+ Whether the vSAN data-in-transit encryption is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_dit_encryption_enabled")

@@ -2734,9 +2299,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanDitRekeyInterval")
  def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[int]]:
  """
-
- minutes for data-in-transit encryption. The valid rekey interval is 30 to
- 10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
+ When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
  """
  return pulumi.get(self, "vsan_dit_rekey_interval")

@@ -2748,7 +2311,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanEnabled")
  def vsan_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
+ Whether the vSAN service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_enabled")

@@ -2760,7 +2323,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanEsaEnabled")
  def vsan_esa_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
+ Whether the vSAN ESA service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_esa_enabled")

@@ -2772,7 +2335,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanFaultDomains")
  def vsan_fault_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]:
  """
-
+ The configuration for vSAN fault domains.
  """
  return pulumi.get(self, "vsan_fault_domains")

@@ -2784,8 +2347,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
  def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- diagnostic mode for vSAN performance service on the cluster.
+ Whether the vSAN network diagnostic mode is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")

@@ -2797,8 +2359,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanPerformanceEnabled")
  def vsan_performance_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- the cluster. Default: `true`.
+ Whether the vSAN performance service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_performance_enabled")

@@ -2810,10 +2371,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanRemoteDatastoreIds")
  def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
  """
- The
- mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
- `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
- data-in-transit encryption feature at the same time.
+ The managed object IDs of the vSAN datastore to be mounted on the cluster.
  """
  return pulumi.get(self, "vsan_remote_datastore_ids")

@@ -2825,7 +2383,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanStretchedCluster")
  def vsan_stretched_cluster(self) -> Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]:
  """
-
+ The configuration for stretched cluster.
  """
  return pulumi.get(self, "vsan_stretched_cluster")

@@ -2837,8 +2395,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanUnmapEnabled")
  def vsan_unmap_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
+ Whether the vSAN unmap service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_unmap_enabled")

@@ -2850,8 +2407,7 @@ class _ComputeClusterState:
  @pulumi.getter(name="vsanVerboseModeEnabled")
  def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
  """
-
- performance service on the cluster.
+ Whether the vSAN verbose mode is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_verbose_mode_enabled")

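The vsan_* flags above enable individual vSAN services on the cluster. A sketch that turns the storage services on together (the replaced docstring notes that deduplication requires compression); the datacenter name is an example:

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # example datacenter name

cluster = vsphere.ComputeCluster(
    "vsan-cluster",
    name="vsan-cluster",
    datacenter_id=dc.id,
    vsan_enabled=True,
    vsan_performance_enabled=True,
    vsan_compression_enabled=True,   # compression must accompany deduplication
    vsan_dedup_enabled=True,
    vsan_unmap_enabled=True,
)
```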
@@ -2945,225 +2501,114 @@ class ComputeCluster(pulumi.CustomResource):
  and require vCenter Server.
  :param pulumi.Input[str] datacenter_id: The managed object ID of
  the datacenter to create the cluster in. Forces a new resource if changed.
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[bool] drs_enable_predictive_drs: When `true`, enables DRS to use data
- from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
- recommendations. <sup>\\*</sup>
-
- [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
- :param pulumi.Input[bool] drs_enable_vm_overrides: Allow individual DRS overrides to be
- set for virtual machines in the cluster. Default: `true`.
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster. Default: `false`.
- :param pulumi.Input[int] drs_migration_threshold: A value between `1` and `5` indicating
- the threshold of imbalance tolerated between hosts. A lower setting will
- tolerate more imbalance while a higher setting will tolerate less. Default:
- `3`.
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all
- resource pools in the cluster. Can be one of `disabled` or
- `scaleCpuAndMemoryShares`. Default: `disabled`.
+ :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
+ :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+ machines in the cluster. Requires that DRS be enabled.
+ :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+ affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
+ setting.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
+ :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+ fullyAutomated.
+ :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
+ :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
+ :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
+ :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+ more imbalance while a higher setting will tolerate less.
+ :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
  :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
  This is a path relative to the datacenter you are deploying the cluster to.
  Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
  The provider will place a cluster named `compute-cluster-test` in a
  host folder located at `/dc1/host/foo/bar`, with the final inventory path
  being `/dc1/host/foo/bar/datastore-cluster-test`.
- :param pulumi.Input[bool] force_evacuate_on_destroy:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[str]
-
-
-
-
-
-
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
-
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- :param pulumi.Input[str] ha_datastore_pdl_response: Controls the action to take on
- virtual machines when the cluster has detected a permanent device loss to a
- relevant datastore. Can be one of `disabled`, `warning`, or
- `restartAggressive`. Default: `disabled`.
- <sup>\\*</sup>
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster. Default:
- `false`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for
- preferred datastores to use for HA heartbeating. This setting is only useful
- when `ha_heartbeat_datastore_policy` is set
- to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA
- heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
- `allFeasibleDsWithUserPreference`. Default:
- `allFeasibleDsWithUserPreference`.
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual
- machines when a host has detected that it has been isolated from the rest of
- the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
- `none`.
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether
- vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
- or `disabled`. Default: `enabled`.
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component
- protection for virtual machines in this cluster. Can be one of `enabled` or
- `disabled`. Default: `enabled`.
- <sup>\\*</sup>
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to
- determine whether or not virtual machines in a certain restart priority class
- are online, allowing HA to move on to restarting virtual machines on the next
- priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
- `appHbStatusGreen`. The default is `none`, which means that a virtual machine
- is considered ready immediately after a host is found to start it on.
- <sup>\\*</sup>
- :param pulumi.Input[int] ha_vm_failure_interval: The time interval, in seconds, a heartbeat
- from a virtual machine is not received within this configured interval,
- the virtual machine is marked as failed. Default: `30` seconds.
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The time, in seconds, for the reset window in
- which `ha_vm_maximum_resets` can operate. When this
- window expires, no more resets are attempted regardless of the setting
- configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
- unlimited reset time is allotted. Default: `-1` (no window).
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will
- perform to a virtual machine when responding to a failure event. Default: `3`
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after
- powering on a virtual machine before monitoring for heartbeats. Default:
- `120` seconds (2 minutes).
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use
- when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
- `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay, in seconds,
- after ready condition is met. A VM is considered ready at this point.
- Default: `0` seconds (no delay). <sup>\\*</sup>
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority
- for affected virtual machines when vSphere detects a host failure. Can be one
- of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds,
- that vSphere HA will wait for virtual machines in one priority to be ready
- before proceeding with the next priority. Default: `600` seconds (10 minutes).
- <sup>\\*</sup>
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout, in seconds, for each host maintenance
- mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
- :param pulumi.Input[bool] host_managed: Can be set to `true` if compute cluster
- membership will be managed through the `host` resource rather than the
- `compute_cluster` resource. Conflicts with: `host_system_ids`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of
- the hosts to put in the cluster. Conflicts with: `host_managed`.
+ :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
+ for testing and is not recommended in normal use.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
+ failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
+ will ignore the host when making recommendations.
+ :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
+ machine operations. The maximum is one less than the number of hosts in the cluster.
+ :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
+ warnings only, whereas a value of 100 disables the setting.
+ :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+ permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
+ slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
+ issues.
+ :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+ subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
+ from the total amount of resources in the cluster. Disable to supply user-defined values.
+ :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+ the cluster to reserve for failover.
+ :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+ the cluster to reserve for failover.
+ :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
+ :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
+ :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+ to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
+ currently in the cluster.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
+ :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
+ affected datastore clears in the middle of an APD event. Can be one of none or reset.
+ :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
+ restartAggressive.
+ :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
+ the response action defined in ha_datastore_apd_response.
+ :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
+ :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+ ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
+ :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+ allFeasibleDsWithUserPreference.
+ :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+ Can be one of none, powerOff, or shutdown.
+ :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
+ :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
+ failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
+ :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
+ on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
+ :param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
+ failed. The value is in seconds.
+ :param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
+ attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
+ time is allotted.
+ :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
+ :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
+ :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
+ vmMonitoringOnly, or vmAndAppMonitoring.
+ :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
+ :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
+ high, or highest.
+ :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
+ proceeding with the next priority.
+ :param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
+ :param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
  :param pulumi.Input[str] name: The name of the cluster.
- :param pulumi.Input[str] proactive_ha_automation_level:
-
-
-
- :param pulumi.Input[
-
-
- for moderately degraded hosts. Can be one of `MaintenanceMode` or
- `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
- `proactive_ha_severe_remediation` is set
- to `QuarantineMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
- :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update
- providers configured for this cluster.
- <sup>\\*</sup>
- :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for
- severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
- Note that this cannot be set to `QuarantineMode` when
- `proactive_ha_moderate_remediation` is
- set to `MaintenanceMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
+ :param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
+ :param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
+ :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+ this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
+ :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+ cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
  :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
- :param pulumi.Input[bool] vsan_compression_enabled:
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[bool] vsan_enabled: Enables vSAN on the cluster.
- :param pulumi.Input[bool] vsan_esa_enabled: Enables vSAN ESA on the cluster.
- :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanFaultDomainArgs']]]] vsan_fault_domains: Configurations of vSAN fault domains.
- :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Enables network
- diagnostic mode for vSAN performance service on the cluster.
- :param pulumi.Input[bool] vsan_performance_enabled: Enables vSAN performance service on
- the cluster. Default: `true`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The remote vSAN datastore IDs to be
- mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
- `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
- data-in-transit encryption feature at the same time.
- :param pulumi.Input[pulumi.InputType['ComputeClusterVsanStretchedClusterArgs']] vsan_stretched_cluster: Configurations of vSAN stretched cluster.
- :param pulumi.Input[bool] vsan_unmap_enabled: Enables vSAN unmap on the cluster.
- You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
- :param pulumi.Input[bool] vsan_verbose_mode_enabled: Enables verbose mode for vSAN
- performance service on the cluster.
+ :param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanDiskGroupArgs']]]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
+ :param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
+ :param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
+ :param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanFaultDomainArgs']]]] vsan_fault_domains: The configuration for vSAN fault domains.
+ :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
+ :param pulumi.Input[pulumi.InputType['ComputeClusterVsanStretchedClusterArgs']] vsan_stretched_cluster: The configuration for stretched cluster.
+ :param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
+ :param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
  """
  ...
  @overload

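The constructor parameters above cover DRS, DPM, and HA admission control. A sketch combining them with percentage-based admission control (values and the datacenter name are illustrative):

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # example datacenter name

cluster = vsphere.ComputeCluster(
    "drs-cluster",
    name="drs-cluster",
    datacenter_id=dc.id,
    # DRS with DPM host power management (DPM requires DRS).
    drs_enabled=True,
    drs_automation_level="fullyAutomated",
    drs_migration_threshold=3,
    dpm_enabled=True,
    dpm_automation_level="automated",
    dpm_threshold=3,
    # HA with user-defined resource percentages reserved for failover.
    ha_enabled=True,
    ha_admission_control_policy="resourcePercentage",
    ha_admission_control_resource_percentage_auto_compute=False,
    ha_admission_control_resource_percentage_cpu=25,
    ha_admission_control_resource_percentage_memory=25,
)
```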
@@ -3429,230 +2874,119 @@ class ComputeCluster(pulumi.CustomResource):
  and require vCenter Server.
  :param pulumi.Input[str] datacenter_id: The managed object ID of
  the datacenter to create the cluster in. Forces a new resource if changed.
- :param pulumi.Input[str] dpm_automation_level: The automation level for host power
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[bool] drs_enable_predictive_drs: When `true`, enables DRS to use data
- from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
- recommendations. <sup>\\*</sup>
-
- [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
- :param pulumi.Input[bool] drs_enable_vm_overrides: Allow individual DRS overrides to be
- set for virtual machines in the cluster. Default: `true`.
- :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster. Default: `false`.
- :param pulumi.Input[int] drs_migration_threshold: A value between `1` and `5` indicating
- the threshold of imbalance tolerated between hosts. A lower setting will
- tolerate more imbalance while a higher setting will tolerate less. Default:
- `3`.
- :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all
- resource pools in the cluster. Can be one of `disabled` or
- `scaleCpuAndMemoryShares`. Default: `disabled`.
+ :param pulumi.Input[str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
+ :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+ machines in the cluster. Requires that DRS be enabled.
+ :param pulumi.Input[int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+ affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
+ setting.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
+ :param pulumi.Input[str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+ fullyAutomated.
+ :param pulumi.Input[bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
+ :param pulumi.Input[bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
+ :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster.
+ :param pulumi.Input[int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+ more imbalance while a higher setting will tolerate less.
+ :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
  :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
  This is a path relative to the datacenter you are deploying the cluster to.
  Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
  The provider will place a cluster named `compute-cluster-test` in a
  host folder located at `/dc1/host/foo/bar`, with the final inventory path
  being `/dc1/host/foo/bar/datastore-cluster-test`.
- :param pulumi.Input[bool] force_evacuate_on_destroy:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[str]
-
-
-
-
-
-
-
-
- :param pulumi.Input[
-
-
- :param pulumi.Input[
-
-
-
-
- :param pulumi.Input[
-
- :param pulumi.Input[
-
-
-
- :param pulumi.Input[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- :param pulumi.Input[str] ha_datastore_pdl_response: Controls the action to take on
- virtual machines when the cluster has detected a permanent device loss to a
- relevant datastore. Can be one of `disabled`, `warning`, or
- `restartAggressive`. Default: `disabled`.
- <sup>\\*</sup>
- :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster. Default:
- `false`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for
- preferred datastores to use for HA heartbeating. This setting is only useful
- when `ha_heartbeat_datastore_policy` is set
- to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
- :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA
- heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
- `allFeasibleDsWithUserPreference`. Default:
- `allFeasibleDsWithUserPreference`.
- :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual
- machines when a host has detected that it has been isolated from the rest of
- the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
- `none`.
- :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether
- vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
- or `disabled`. Default: `enabled`.
- :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component
- protection for virtual machines in this cluster. Can be one of `enabled` or
- `disabled`. Default: `enabled`.
- <sup>\\*</sup>
- :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to
- determine whether or not virtual machines in a certain restart priority class
- are online, allowing HA to move on to restarting virtual machines on the next
- priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
- `appHbStatusGreen`. The default is `none`, which means that a virtual machine
- is considered ready immediately after a host is found to start it on.
- <sup>\\*</sup>
- :param pulumi.Input[int] ha_vm_failure_interval: The time interval, in seconds, a heartbeat
- from a virtual machine is not received within this configured interval,
- the virtual machine is marked as failed. Default: `30` seconds.
- :param pulumi.Input[int] ha_vm_maximum_failure_window: The time, in seconds, for the reset window in
- which `ha_vm_maximum_resets` can operate. When this
- window expires, no more resets are attempted regardless of the setting
- configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
- unlimited reset time is allotted. Default: `-1` (no window).
- :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will
- perform to a virtual machine when responding to a failure event. Default: `3`
- :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after
- powering on a virtual machine before monitoring for heartbeats. Default:
- `120` seconds (2 minutes).
- :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use
- when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
- `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
- :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay, in seconds,
- after ready condition is met. A VM is considered ready at this point.
- Default: `0` seconds (no delay). <sup>\\*</sup>
- :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority
- for affected virtual machines when vSphere detects a host failure. Can be one
- of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
- :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds,
- that vSphere HA will wait for virtual machines in one priority to be ready
- before proceeding with the next priority. Default: `600` seconds (10 minutes).
- <sup>\\*</sup>
- :param pulumi.Input[int] host_cluster_exit_timeout: The timeout, in seconds, for each host maintenance
- mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
- :param pulumi.Input[bool] host_managed: Can be set to `true` if compute cluster
- membership will be managed through the `host` resource rather than the
- `compute_cluster` resource. Conflicts with: `host_system_ids`.
- :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of
- the hosts to put in the cluster. Conflicts with: `host_managed`.
+ :param pulumi.Input[bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
+ for testing and is not recommended in normal use.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
+ failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
+ will ignore the host when making recommendations.
+ :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
+ machine operations. The maximum is one less than the number of hosts in the cluster.
+ :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
+ warnings only, whereas a value of 100 disables the setting.
+ :param pulumi.Input[str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+ permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
+ slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
+ issues.
+ :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+ subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
+ from the total amount of resources in the cluster. Disable to supply user-defined values.
+ :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+ the cluster to reserve for failover.
+ :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+ the cluster to reserve for failover.
+ :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
+ :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
+ :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+ to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
+ currently in the cluster.
+ :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
+ :param pulumi.Input[str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
+ affected datastore clears in the middle of an APD event. Can be one of none or reset.
+ :param pulumi.Input[str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
+ restartAggressive.
+ :param pulumi.Input[int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
+ the response action defined in ha_datastore_apd_response.
+ :param pulumi.Input[str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
+ :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster.
+ :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+ ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
+ :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+ allFeasibleDsWithUserPreference.
+ :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+ Can be one of none, powerOff, or shutdown.
+ :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
+ :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
|
|
2942
|
+
failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
|
|
2943
|
+
:param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
|
|
2944
|
+
on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
|
|
2945
|
+
:param pulumi.Input[int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
|
|
2946
|
+
failed. The value is in seconds.
|
|
2947
|
+
:param pulumi.Input[int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
|
|
2948
|
+
attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
|
|
2949
|
+
time is allotted.
|
|
2950
|
+
:param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
|
|
2951
|
+
:param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
|
|
2952
|
+
:param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
|
|
2953
|
+
vmMonitoringOnly, or vmAndAppMonitoring.
|
|
2954
|
+
:param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
|
|
2955
|
+
:param pulumi.Input[str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
|
|
2956
|
+
high, or highest.
|
|
2957
|
+
:param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
|
|
2958
|
+
proceeding with the next priority.
|
|
2959
|
+
:param pulumi.Input[int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
|
|
2960
|
+
:param pulumi.Input[bool] host_managed: Must be set if cluster enrollment is managed from host resource.
|
|
2961
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
|
|
3598
2962
|
:param pulumi.Input[str] name: The name of the cluster.
|
|
3599
|
-
:param pulumi.Input[str] proactive_ha_automation_level:
|
|
3600
|
-
|
|
3601
|
-
|
|
3602
|
-
|
|
3603
|
-
:param pulumi.Input[
|
|
3604
|
-
|
|
3605
|
-
|
|
3606
|
-
for moderately degraded hosts. Can be one of `MaintenanceMode` or
|
|
3607
|
-
`QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
|
|
3608
|
-
`proactive_ha_severe_remediation` is set
|
|
3609
|
-
to `QuarantineMode`. Default: `QuarantineMode`.
|
|
3610
|
-
<sup>\\*</sup>
|
|
3611
|
-
:param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update
|
|
3612
|
-
providers configured for this cluster.
|
|
3613
|
-
<sup>\\*</sup>
|
|
3614
|
-
:param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for
|
|
3615
|
-
severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
|
|
3616
|
-
Note that this cannot be set to `QuarantineMode` when
|
|
3617
|
-
`proactive_ha_moderate_remediation` is
|
|
3618
|
-
set to `MaintenanceMode`. Default: `QuarantineMode`.
|
|
3619
|
-
<sup>\\*</sup>
|
|
2963
|
+
:param pulumi.Input[str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
|
|
2964
|
+
:param pulumi.Input[bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
|
|
2965
|
+
:param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
|
|
2966
|
+
this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
|
|
2967
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
|
|
2968
|
+
:param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
|
|
2969
|
+
cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
|
|
3620
2970
|
:param pulumi.Input[str] resource_pool_id: The managed object ID of the primary
|
|
3621
2971
|
resource pool for this cluster. This can be passed directly to the
|
|
3622
2972
|
`resource_pool_id`
|
|
3623
2973
|
attribute of the
|
|
3624
2974
|
`VirtualMachine` resource.
|
|
3625
2975
|
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
|
|
3626
|
-
:param pulumi.Input[bool] vsan_compression_enabled:
|
|
3627
|
-
|
|
3628
|
-
:param pulumi.Input[
|
|
3629
|
-
|
|
3630
|
-
|
|
3631
|
-
:param pulumi.Input[
|
|
3632
|
-
|
|
3633
|
-
:param pulumi.Input[
|
|
3634
|
-
|
|
3635
|
-
|
|
3636
|
-
|
|
3637
|
-
:param pulumi.Input[
|
|
3638
|
-
|
|
3639
|
-
|
|
3640
|
-
:param pulumi.Input[bool] vsan_enabled: Enables vSAN on the cluster.
|
|
3641
|
-
:param pulumi.Input[bool] vsan_esa_enabled: Enables vSAN ESA on the cluster.
|
|
3642
|
-
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanFaultDomainArgs']]]] vsan_fault_domains: Configurations of vSAN fault domains.
|
|
3643
|
-
:param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Enables network
|
|
3644
|
-
diagnostic mode for vSAN performance service on the cluster.
|
|
3645
|
-
:param pulumi.Input[bool] vsan_performance_enabled: Enables vSAN performance service on
|
|
3646
|
-
the cluster. Default: `true`.
|
|
3647
|
-
:param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The remote vSAN datastore IDs to be
|
|
3648
|
-
mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
|
|
3649
|
-
`vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
|
|
3650
|
-
data-in-transit encryption feature at the same time.
|
|
3651
|
-
:param pulumi.Input[pulumi.InputType['ComputeClusterVsanStretchedClusterArgs']] vsan_stretched_cluster: Configurations of vSAN stretched cluster.
|
|
3652
|
-
:param pulumi.Input[bool] vsan_unmap_enabled: Enables vSAN unmap on the cluster.
|
|
3653
|
-
You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
|
|
3654
|
-
:param pulumi.Input[bool] vsan_verbose_mode_enabled: Enables verbose mode for vSAN
|
|
3655
|
-
performance service on the cluster.
|
|
2976
|
+
:param pulumi.Input[bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
|
|
2977
|
+
:param pulumi.Input[bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
|
|
2978
|
+
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanDiskGroupArgs']]]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
|
|
2979
|
+
:param pulumi.Input[bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
|
|
2980
|
+
:param pulumi.Input[int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
|
|
2981
|
+
:param pulumi.Input[bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
|
|
2982
|
+
:param pulumi.Input[bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
|
|
2983
|
+
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanFaultDomainArgs']]]] vsan_fault_domains: The configuration for vSAN fault domains.
|
|
2984
|
+
:param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
|
|
2985
|
+
:param pulumi.Input[bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
|
|
2986
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
|
|
2987
|
+
:param pulumi.Input[pulumi.InputType['ComputeClusterVsanStretchedClusterArgs']] vsan_stretched_cluster: The configuration for stretched cluster.
|
|
2988
|
+
:param pulumi.Input[bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
|
|
2989
|
+
:param pulumi.Input[bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
|
|
3656
2990
|
"""
|
|
3657
2991
|
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
|
|
3658
2992
|
|
|
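The flattened `:param` descriptions above map one-to-one onto keyword arguments of the `ComputeCluster` resource. As a quick orientation (a sketch, not code taken from the package), a minimal Pulumi program using a few of them might look as follows; the `datacenter_id` argument and the `get_datacenter` lookup are assumptions drawn from the rest of the provider rather than from this excerpt, and the host IDs are placeholders.

```python
import pulumi
import pulumi_vsphere as vsphere

# Assumed lookup from the wider SDK (get_datacenter.py); the datacenter name is a placeholder.
datacenter = vsphere.get_datacenter(name="dc-01")

cluster = vsphere.ComputeCluster(
    "example-cluster",
    name="cluster-01",
    datacenter_id=datacenter.id,                # assumed required argument, not shown in this diff
    host_system_ids=["host-10", "host-11"],     # placeholder managed object IDs
    drs_enabled=True,
    drs_automation_level="fullyAutomated",
    ha_enabled=True,
    ha_admission_control_policy="resourcePercentage",
    ha_admission_control_host_failure_tolerance=1,
)

# The cluster's root resource pool can be fed straight into a VirtualMachine,
# as the resource_pool_id docstring above notes.
pulumi.export("resource_pool_id", cluster.resource_pool_id)
```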
@@ -3754,9 +3088,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="dpmAutomationLevel")
  def dpm_automation_level(self) -> pulumi.Output[Optional[str]]:
  """
- The automation level for host power
- operations in this cluster. Can be one of `manual` or `automated`. Default:
- `manual`.
+ The automation level for host power operations in this cluster. Can be one of manual or automated.
  """
  return pulumi.get(self, "dpm_automation_level")

@@ -3764,9 +3096,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="dpmEnabled")
  def dpm_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
- Enable DPM support for DRS
-
- Default: `false`.
+ Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
+ machines in the cluster. Requires that DRS be enabled.
  """
  return pulumi.get(self, "dpm_enabled")

@@ -3774,10 +3105,9 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="dpmThreshold")
  def dpm_threshold(self) -> pulumi.Output[Optional[int]]:
  """
- A value between
-
-
- tolerate more of a surplus/deficit than a higher setting. Default: `3`.
+ A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
+ affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
+ setting.
  """
  return pulumi.get(self, "dpm_threshold")

@@ -3785,8 +3115,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="drsAdvancedOptions")
  def drs_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
  """
-
- options for DRS and DPM.
+ Advanced configuration options for DRS and DPM.
  """
  return pulumi.get(self, "drs_advanced_options")

@@ -3794,9 +3123,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="drsAutomationLevel")
  def drs_automation_level(self) -> pulumi.Output[Optional[str]]:
  """
- The default automation level for all
-
- `partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
+ The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
+ fullyAutomated.
  """
  return pulumi.get(self, "drs_automation_level")

@@ -3804,11 +3132,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="drsEnablePredictiveDrs")
  def drs_enable_predictive_drs(self) -> pulumi.Output[Optional[bool]]:
  """
- When
- from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
- recommendations. <sup>\\*</sup>
-
- [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
+ When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
  """
  return pulumi.get(self, "drs_enable_predictive_drs")

@@ -3816,8 +3140,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="drsEnableVmOverrides")
  def drs_enable_vm_overrides(self) -> pulumi.Output[Optional[bool]]:
  """
-
- set for virtual machines in the cluster. Default: `true`.
+ When true, allows individual VM overrides within this cluster to be set.
  """
  return pulumi.get(self, "drs_enable_vm_overrides")

@@ -3825,7 +3148,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="drsEnabled")
  def drs_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
- Enable DRS for this cluster.
+ Enable DRS for this cluster.
  """
  return pulumi.get(self, "drs_enabled")

@@ -3833,10 +3156,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="drsMigrationThreshold")
  def drs_migration_threshold(self) -> pulumi.Output[Optional[int]]:
  """
- A value between
-
- tolerate more imbalance while a higher setting will tolerate less. Default:
- `3`.
+ A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
+ more imbalance while a higher setting will tolerate less.
  """
  return pulumi.get(self, "drs_migration_threshold")

@@ -3844,9 +3165,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="drsScaleDescendantsShares")
  def drs_scale_descendants_shares(self) -> pulumi.Output[Optional[str]]:
  """
- Enable scalable shares for all
- resource pools in the cluster. Can be one of `disabled` or
- `scaleCpuAndMemoryShares`. Default: `disabled`.
+ Enable scalable shares for all descendants of this cluster.
  """
  return pulumi.get(self, "drs_scale_descendants_shares")
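The getters above expose the DRS and DPM settings as `pulumi.Output` values. A small sketch (not from the package) of reading them back from an existing cluster adopted with `ComputeCluster.get`; the managed object ID is a placeholder and the `.get` adoption pattern is assumed from standard Pulumi SDK conventions.

```python
import pulumi
import pulumi_vsphere as vsphere

# Adopt an existing cluster by its managed object ID (placeholder value).
cluster = vsphere.ComputeCluster.get("existing-cluster", id="domain-c81")

# Each property above becomes an Output that can be exported or passed to other resources.
pulumi.export("drsAutomationLevel", cluster.drs_automation_level)
pulumi.export("drsMigrationThreshold", cluster.drs_migration_threshold)
pulumi.export("dpmEnabled", cluster.dpm_enabled)
pulumi.export("dpmThreshold", cluster.dpm_threshold)
```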
@@ -3867,18 +3186,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="forceEvacuateOnDestroy")
  def force_evacuate_on_destroy(self) -> pulumi.Output[Optional[bool]]:
  """
-
-
- as if they were removed by taking their entry out of `host_system_ids` (see
- below. This is an advanced
- option and should only be used for testing. Default: `false`.
-
- > **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
- there are many pitfalls to its use when working with complex cluster
- configurations. Depending on the virtual machines currently on the cluster, and
- your DRS and HA settings, the full host evacuation may fail. Instead,
- incrementally remove hosts from your configuration by adjusting the contents of
- the `host_system_ids` attribute.
+ Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
+ for testing and is not recommended in normal use.
  """
  return pulumi.get(self, "force_evacuate_on_destroy")

@@ -3886,11 +3195,9 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
  def ha_admission_control_failover_host_system_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
  """
-
-
-
- block access to the host, and DRS will ignore the host when making
- recommendations.
+ When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
+ failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
+ will ignore the host when making recommendations.
  """
  return pulumi.get(self, "ha_admission_control_failover_host_system_ids")

@@ -3898,11 +3205,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
  def ha_admission_control_host_failure_tolerance(self) -> pulumi.Output[Optional[int]]:
  """
- The maximum number
-
- whether to permit virtual machine operations. The maximum is one less than
- the number of hosts in the cluster. Default: `1`.
- <sup>\\*</sup>
+ The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
+ machine operations. The maximum is one less than the number of hosts in the cluster.
  """
  return pulumi.get(self, "ha_admission_control_host_failure_tolerance")

@@ -3910,10 +3214,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
  def ha_admission_control_performance_tolerance(self) -> pulumi.Output[Optional[int]]:
  """
- The percentage of
-
- a failover. A value of 0 produces warnings only, whereas a value of 100
- disables the setting. Default: `100` (disabled).
+ The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
+ warnings only, whereas a value of 100 disables the setting.
  """
  return pulumi.get(self, "ha_admission_control_performance_tolerance")

@@ -3921,9 +3223,10 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haAdmissionControlPolicy")
  def ha_admission_control_policy(self) -> pulumi.Output[Optional[str]]:
  """
- The type of admission control
-
-
+ The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
+ permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
+ slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
+ issues.
  """
  return pulumi.get(self, "ha_admission_control_policy")

@@ -3931,12 +3234,9 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
  def ha_admission_control_resource_percentage_auto_compute(self) -> pulumi.Output[Optional[bool]]:
  """
-
- average number of host resources represented by the
-
- setting from the total amount of resources in the cluster. Disable to supply
- user-defined values. Default: `true`.
- <sup>\\*</sup>
+ When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
+ subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
+ from the total amount of resources in the cluster. Disable to supply user-defined values.
  """
  return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")

@@ -3944,9 +3244,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
  def ha_admission_control_resource_percentage_cpu(self) -> pulumi.Output[Optional[int]]:
  """
-
-
- failover. Default: `100`.
+ When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
+ the cluster to reserve for failover.
  """
  return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")

@@ -3954,9 +3253,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
  def ha_admission_control_resource_percentage_memory(self) -> pulumi.Output[Optional[int]]:
  """
-
-
- failover. Default: `100`.
+ When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
+ the cluster to reserve for failover.
  """
  return pulumi.get(self, "ha_admission_control_resource_percentage_memory")

@@ -3964,8 +3262,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
  def ha_admission_control_slot_policy_explicit_cpu(self) -> pulumi.Output[Optional[int]]:
  """
-
- user-defined CPU slot size, in MHz. Default: `32`.
+ When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
  """
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")

@@ -3973,8 +3270,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
  def ha_admission_control_slot_policy_explicit_memory(self) -> pulumi.Output[Optional[int]]:
  """
-
- user-defined memory slot size, in MB. Default: `100`.
+ When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
  """
  return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")

@@ -3982,10 +3278,9 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
  def ha_admission_control_slot_policy_use_explicit_size(self) -> pulumi.Output[Optional[bool]]:
  """
-
-
-
- average based on all powered-on virtual machines currently in the cluster.
+ When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
+ to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
+ currently in the cluster.
  """
  return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")

@@ -3993,8 +3288,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haAdvancedOptions")
  def ha_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
  """
-
- options for vSphere HA.
+ Advanced configuration options for vSphere HA.
  """
  return pulumi.get(self, "ha_advanced_options")
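The admission-control getters above cover four policies (resourcePercentage, slotPolicy, failoverHosts, disabled) plus the knobs that only apply to a given policy. A sketch of the slotPolicy variant with explicit slot sizes follows; the `datacenter_id` argument is assumed from the wider provider schema and all managed object IDs are placeholders.

```python
import pulumi_vsphere as vsphere

slot_policy_cluster = vsphere.ComputeCluster(
    "slot-policy-cluster",
    name="cluster-slots",
    datacenter_id="datacenter-3",                          # assumed required argument; placeholder MOID
    host_system_ids=["host-20", "host-21", "host-22"],     # placeholder MOIDs
    ha_enabled=True,
    ha_admission_control_policy="slotPolicy",
    # Only read when the policy is slotPolicy, per the docstrings above.
    ha_admission_control_slot_policy_use_explicit_size=True,
    ha_admission_control_slot_policy_explicit_cpu=500,      # MHz
    ha_admission_control_slot_policy_explicit_memory=1024,  # MB
)
```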
@@ -4002,10 +3296,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haDatastoreApdRecoveryAction")
  def ha_datastore_apd_recovery_action(self) -> pulumi.Output[Optional[str]]:
  """
-
-
- middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
- <sup>\\*</sup>
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
+ affected datastore clears in the middle of an APD event. Can be one of none or reset.
  """
  return pulumi.get(self, "ha_datastore_apd_recovery_action")

@@ -4013,11 +3305,9 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haDatastoreApdResponse")
  def ha_datastore_apd_response(self) -> pulumi.Output[Optional[str]]:
  """
-
-
-
- `restartConservative`, or `restartAggressive`. Default: `disabled`.
- <sup>\\*</sup>
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
+ restartAggressive.
  """
  return pulumi.get(self, "ha_datastore_apd_response")

@@ -4025,10 +3315,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haDatastoreApdResponseDelay")
  def ha_datastore_apd_response_delay(self) -> pulumi.Output[Optional[int]]:
  """
-
-
- `ha_datastore_apd_response`. Default: `180`
- seconds (3 minutes). <sup>\\*</sup>
+ When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
+ the response action defined in ha_datastore_apd_response.
  """
  return pulumi.get(self, "ha_datastore_apd_response_delay")

@@ -4036,11 +3324,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haDatastorePdlResponse")
  def ha_datastore_pdl_response(self) -> pulumi.Output[Optional[str]]:
  """
-
-
- relevant datastore. Can be one of `disabled`, `warning`, or
- `restartAggressive`. Default: `disabled`.
- <sup>\\*</sup>
+ When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
+ detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
  """
  return pulumi.get(self, "ha_datastore_pdl_response")

@@ -4048,8 +3333,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haEnabled")
  def ha_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
- Enable vSphere HA for this cluster.
- `false`.
+ Enable vSphere HA for this cluster.
  """
  return pulumi.get(self, "ha_enabled")

@@ -4057,10 +3341,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haHeartbeatDatastoreIds")
  def ha_heartbeat_datastore_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
  """
- The list of managed object IDs for
-
- when `ha_heartbeat_datastore_policy` is set
- to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
+ The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
+ ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
  """
  return pulumi.get(self, "ha_heartbeat_datastore_ids")

@@ -4068,10 +3350,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haHeartbeatDatastorePolicy")
  def ha_heartbeat_datastore_policy(self) -> pulumi.Output[Optional[str]]:
  """
- The selection policy for HA
-
- `allFeasibleDsWithUserPreference`. Default:
- `allFeasibleDsWithUserPreference`.
+ The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
+ allFeasibleDsWithUserPreference.
  """
  return pulumi.get(self, "ha_heartbeat_datastore_policy")

@@ -4079,10 +3359,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haHostIsolationResponse")
  def ha_host_isolation_response(self) -> pulumi.Output[Optional[str]]:
  """
- The action to take on virtual
-
- the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
- `none`.
+ The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
+ Can be one of none, powerOff, or shutdown.
  """
  return pulumi.get(self, "ha_host_isolation_response")

@@ -4090,9 +3368,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haHostMonitoring")
  def ha_host_monitoring(self) -> pulumi.Output[Optional[str]]:
  """
- Global setting that controls whether
- vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
- or `disabled`. Default: `enabled`.
+ Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
  """
  return pulumi.get(self, "ha_host_monitoring")

@@ -4100,10 +3376,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haVmComponentProtection")
  def ha_vm_component_protection(self) -> pulumi.Output[Optional[str]]:
  """
- Controls vSphere VM component
-
- `disabled`. Default: `enabled`.
- <sup>\\*</sup>
+ Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
+ failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
  """
  return pulumi.get(self, "ha_vm_component_protection")

@@ -4111,13 +3385,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haVmDependencyRestartCondition")
  def ha_vm_dependency_restart_condition(self) -> pulumi.Output[Optional[str]]:
  """
- The condition used to
-
- are online, allowing HA to move on to restarting virtual machines on the next
- priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
- `appHbStatusGreen`. The default is `none`, which means that a virtual machine
- is considered ready immediately after a host is found to start it on.
- <sup>\\*</sup>
+ The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
+ on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
  """
  return pulumi.get(self, "ha_vm_dependency_restart_condition")

@@ -4125,9 +3394,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haVmFailureInterval")
  def ha_vm_failure_interval(self) -> pulumi.Output[Optional[int]]:
  """
-
-
- the virtual machine is marked as failed. Default: `30` seconds.
+ If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
+ failed. The value is in seconds.
  """
  return pulumi.get(self, "ha_vm_failure_interval")

@@ -4135,11 +3403,9 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haVmMaximumFailureWindow")
  def ha_vm_maximum_failure_window(self) -> pulumi.Output[Optional[int]]:
  """
- The
-
-
- configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
- unlimited reset time is allotted. Default: `-1` (no window).
+ The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
+ attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
+ time is allotted.
  """
  return pulumi.get(self, "ha_vm_maximum_failure_window")

@@ -4147,8 +3413,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haVmMaximumResets")
  def ha_vm_maximum_resets(self) -> pulumi.Output[Optional[int]]:
  """
- The maximum number of resets that HA will
- perform to a virtual machine when responding to a failure event. Default: `3`
+ The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
  """
  return pulumi.get(self, "ha_vm_maximum_resets")

@@ -4156,9 +3421,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haVmMinimumUptime")
  def ha_vm_minimum_uptime(self) -> pulumi.Output[Optional[int]]:
  """
- The time, in seconds, that HA waits after
- powering on a virtual machine before monitoring for heartbeats. Default:
- `120` seconds (2 minutes).
+ The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
  """
  return pulumi.get(self, "ha_vm_minimum_uptime")

@@ -4166,9 +3429,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haVmMonitoring")
  def ha_vm_monitoring(self) -> pulumi.Output[Optional[str]]:
  """
- The type of virtual machine monitoring to use
-
- `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
+ The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
+ vmMonitoringOnly, or vmAndAppMonitoring.
  """
  return pulumi.get(self, "ha_vm_monitoring")

@@ -4176,9 +3438,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haVmRestartAdditionalDelay")
  def ha_vm_restart_additional_delay(self) -> pulumi.Output[Optional[int]]:
  """
- Additional delay
- after ready condition is met. A VM is considered ready at this point.
- Default: `0` seconds (no delay). <sup>\\*</sup>
+ Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
  """
  return pulumi.get(self, "ha_vm_restart_additional_delay")

@@ -4186,9 +3446,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haVmRestartPriority")
  def ha_vm_restart_priority(self) -> pulumi.Output[Optional[str]]:
  """
- The default restart priority
-
- of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
+ The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
+ high, or highest.
  """
  return pulumi.get(self, "ha_vm_restart_priority")

@@ -4196,10 +3455,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="haVmRestartTimeout")
  def ha_vm_restart_timeout(self) -> pulumi.Output[Optional[int]]:
  """
- The maximum time, in seconds,
-
- before proceeding with the next priority. Default: `600` seconds (10 minutes).
- <sup>\\*</sup>
+ The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
+ proceeding with the next priority.
  """
  return pulumi.get(self, "ha_vm_restart_timeout")

@@ -4207,8 +3464,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="hostClusterExitTimeout")
  def host_cluster_exit_timeout(self) -> pulumi.Output[Optional[int]]:
  """
- The timeout
- mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
+ The timeout for each host maintenance mode operation when removing hosts from a cluster.
  """
  return pulumi.get(self, "host_cluster_exit_timeout")

@@ -4216,9 +3472,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="hostManaged")
  def host_managed(self) -> pulumi.Output[Optional[bool]]:
  """
-
- membership will be managed through the `host` resource rather than the
- `compute_cluster` resource. Conflicts with: `host_system_ids`.
+ Must be set if cluster enrollment is managed from host resource.
  """
  return pulumi.get(self, "host_managed")

@@ -4226,8 +3480,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="hostSystemIds")
  def host_system_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
  """
- The managed object IDs of
- the hosts to put in the cluster. Conflicts with: `host_managed`.
+ The managed object IDs of the hosts to put in the cluster.
  """
  return pulumi.get(self, "host_system_ids")
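The component-protection and VM-monitoring getters above enumerate fixed sets of allowed string values. A small helper, not part of the SDK, that checks a settings dict against those documented sets before it is splatted into a resource:

```python
# Allowed values copied from the docstrings in the diff above.
_ALLOWED = {
    "ha_vm_component_protection": {"enabled", "disabled"},
    "ha_datastore_apd_recovery_action": {"none", "reset"},
    "ha_datastore_apd_response": {"disabled", "warning", "restartConservative", "restartAggressive"},
    "ha_datastore_pdl_response": {"disabled", "warning", "restartAggressive"},
    "ha_host_isolation_response": {"none", "powerOff", "shutdown"},
    "ha_vm_monitoring": {"vmMonitoringDisabled", "vmMonitoringOnly", "vmAndAppMonitoring"},
}

def check_vcp_settings(settings: dict) -> dict:
    """Raise if a string value falls outside the range documented by the provider docstrings."""
    for key, value in settings.items():
        allowed = _ALLOWED.get(key)
        if allowed is not None and value not in allowed:
            raise ValueError(f"{key}={value!r} not in {sorted(allowed)}")
    return settings

# Integer settings such as the APD response delay are passed through unchecked.
vcp = check_vcp_settings({
    "ha_vm_component_protection": "enabled",
    "ha_datastore_apd_response": "restartConservative",
    "ha_datastore_apd_response_delay": 180,
    "ha_datastore_pdl_response": "restartAggressive",
})
```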
@@ -4243,10 +3496,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="proactiveHaAutomationLevel")
  def proactive_ha_automation_level(self) -> pulumi.Output[Optional[str]]:
  """
-
- quarantine, maintenance mode, or virtual machine migration recommendations
- made by proactive HA are to be handled. Can be one of `Automated` or
- `Manual`. Default: `Manual`. <sup>\\*</sup>
+ The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
  """
  return pulumi.get(self, "proactive_ha_automation_level")

@@ -4254,8 +3504,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="proactiveHaEnabled")
  def proactive_ha_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
- Enables
- <sup>\\*</sup>
+ Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
  """
  return pulumi.get(self, "proactive_ha_enabled")

@@ -4263,12 +3512,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="proactiveHaModerateRemediation")
  def proactive_ha_moderate_remediation(self) -> pulumi.Output[Optional[str]]:
  """
- The configured remediation
-
- `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
- `proactive_ha_severe_remediation` is set
- to `QuarantineMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
+ The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
+ this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
  """
  return pulumi.get(self, "proactive_ha_moderate_remediation")

@@ -4276,9 +3521,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="proactiveHaProviderIds")
  def proactive_ha_provider_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
  """
- The list of IDs for health update
- providers configured for this cluster.
- <sup>\\*</sup>
+ The list of IDs for health update providers configured for this cluster.
  """
  return pulumi.get(self, "proactive_ha_provider_ids")

@@ -4286,12 +3529,8 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="proactiveHaSevereRemediation")
  def proactive_ha_severe_remediation(self) -> pulumi.Output[Optional[str]]:
  """
- The configured remediation for
-
- Note that this cannot be set to `QuarantineMode` when
- `proactive_ha_moderate_remediation` is
- set to `MaintenanceMode`. Default: `QuarantineMode`.
- <sup>\\*</sup>
+ The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
+ cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
  """
  return pulumi.get(self, "proactive_ha_severe_remediation")
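The two proactive HA remediation getters above document a cross-field constraint: the moderate remediation may not be stricter than the severe one. A plain-Python sketch of that rule (not part of the SDK):

```python
def validate_proactive_ha(moderate: str, severe: str) -> None:
    """Reject the one combination the docstrings above disallow."""
    if moderate == "MaintenanceMode" and severe == "QuarantineMode":
        raise ValueError(
            "proactive_ha_moderate_remediation cannot be MaintenanceMode while "
            "proactive_ha_severe_remediation is QuarantineMode"
        )

validate_proactive_ha("QuarantineMode", "QuarantineMode")   # ok
validate_proactive_ha("QuarantineMode", "MaintenanceMode")  # ok
# validate_proactive_ha("MaintenanceMode", "QuarantineMode")  # would raise ValueError
```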
@@ -4319,8 +3558,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanCompressionEnabled")
  def vsan_compression_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
-
- cluster.
+ Whether the vSAN compression service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_compression_enabled")

@@ -4328,9 +3566,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanDedupEnabled")
  def vsan_dedup_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
-
- Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
- compression must also be enabled.
+ Whether the vSAN deduplication service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_dedup_enabled")

@@ -4338,8 +3574,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanDiskGroups")
  def vsan_disk_groups(self) -> pulumi.Output[Sequence['outputs.ComputeClusterVsanDiskGroup']]:
  """
-
- group in the cluster.
+ A list of disk UUIDs to add to the vSAN cluster.
  """
  return pulumi.get(self, "vsan_disk_groups")

@@ -4347,10 +3582,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanDitEncryptionEnabled")
  def vsan_dit_encryption_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
-
- encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
- vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
- at the same time.
+ Whether the vSAN data-in-transit encryption is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_dit_encryption_enabled")

@@ -4358,9 +3590,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanDitRekeyInterval")
  def vsan_dit_rekey_interval(self) -> pulumi.Output[int]:
  """
-
- minutes for data-in-transit encryption. The valid rekey interval is 30 to
- 10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
+ When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
  """
  return pulumi.get(self, "vsan_dit_rekey_interval")

@@ -4368,7 +3598,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanEnabled")
  def vsan_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
-
+ Whether the vSAN service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_enabled")

@@ -4376,7 +3606,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanEsaEnabled")
  def vsan_esa_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
-
+ Whether the vSAN ESA service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_esa_enabled")

@@ -4384,7 +3614,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanFaultDomains")
  def vsan_fault_domains(self) -> pulumi.Output[Optional[Sequence['outputs.ComputeClusterVsanFaultDomain']]]:
  """
-
+ The configuration for vSAN fault domains.
  """
  return pulumi.get(self, "vsan_fault_domains")

@@ -4392,8 +3622,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
  def vsan_network_diagnostic_mode_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
-
- diagnostic mode for vSAN performance service on the cluster.
+ Whether the vSAN network diagnostic mode is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")

@@ -4401,8 +3630,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanPerformanceEnabled")
  def vsan_performance_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
-
- the cluster. Default: `true`.
+ Whether the vSAN performance service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_performance_enabled")

@@ -4410,10 +3638,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanRemoteDatastoreIds")
  def vsan_remote_datastore_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
  """
- The
- mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
- `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
- data-in-transit encryption feature at the same time.
+ The managed object IDs of the vSAN datastore to be mounted on the cluster.
  """
  return pulumi.get(self, "vsan_remote_datastore_ids")

@@ -4421,7 +3646,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanStretchedCluster")
  def vsan_stretched_cluster(self) -> pulumi.Output[Optional['outputs.ComputeClusterVsanStretchedCluster']]:
  """
-
+ The configuration for stretched cluster.
  """
  return pulumi.get(self, "vsan_stretched_cluster")

@@ -4429,8 +3654,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanUnmapEnabled")
  def vsan_unmap_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
-
- You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
+ Whether the vSAN unmap service is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_unmap_enabled")

@@ -4438,8 +3662,7 @@ class ComputeCluster(pulumi.CustomResource):
  @pulumi.getter(name="vsanVerboseModeEnabled")
  def vsan_verbose_mode_enabled(self) -> pulumi.Output[Optional[bool]]:
  """
-
- performance service on the cluster.
+ Whether the vSAN verbose mode is enabled for the cluster.
  """
  return pulumi.get(self, "vsan_verbose_mode_enabled")
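The old docstrings removed above still carry two constraints that the flattened one-liners drop: vSAN deduplication cannot be enabled on its own (compression must be on as well), and vSAN unmap must be enabled explicitly when vSAN ESA is used. A sketch that respects the first and notes the second; `datacenter_id` is assumed from the wider provider schema and all IDs are placeholders.

```python
import pulumi_vsphere as vsphere

vsan_cluster = vsphere.ComputeCluster(
    "vsan-cluster",
    name="cluster-vsan",
    datacenter_id="datacenter-3",                           # assumed required argument; placeholder MOID
    host_system_ids=["host-30", "host-31", "host-32"],      # placeholder MOIDs
    vsan_enabled=True,
    # Per the removed docstring, deduplication cannot be enabled independently:
    # compression must be enabled alongside it.
    vsan_dedup_enabled=True,
    vsan_compression_enabled=True,
    vsan_performance_enabled=True,
    # When switching to vSAN ESA instead, the removed docstring notes that unmap must be
    # enabled explicitly (vsan_esa_enabled=True together with vsan_unmap_enabled=True).
)
```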