pulumi-vsphere 4.11.0a1__py3-none-any.whl → 4.11.0a1711033215__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pulumi-vsphere might be problematic.
- pulumi_vsphere/__init__.py +0 -28
- pulumi_vsphere/_inputs.py +230 -554
- pulumi_vsphere/compute_cluster.py +1477 -747
- pulumi_vsphere/compute_cluster_vm_affinity_rule.py +16 -28
- pulumi_vsphere/datacenter.py +12 -26
- pulumi_vsphere/datastore_cluster.py +350 -154
- pulumi_vsphere/distributed_port_group.py +175 -70
- pulumi_vsphere/distributed_virtual_switch.py +805 -308
- pulumi_vsphere/file.py +24 -16
- pulumi_vsphere/folder.py +7 -7
- pulumi_vsphere/get_compute_cluster.py +4 -0
- pulumi_vsphere/get_compute_cluster_host_group.py +10 -8
- pulumi_vsphere/get_content_library.py +4 -0
- pulumi_vsphere/get_custom_attribute.py +4 -0
- pulumi_vsphere/get_datacenter.py +4 -0
- pulumi_vsphere/get_datastore.py +4 -0
- pulumi_vsphere/get_datastore_cluster.py +4 -0
- pulumi_vsphere/get_datastore_stats.py +12 -4
- pulumi_vsphere/get_distributed_virtual_switch.py +4 -2
- pulumi_vsphere/get_dynamic.py +8 -4
- pulumi_vsphere/get_folder.py +6 -10
- pulumi_vsphere/get_guest_os_customization.py +4 -0
- pulumi_vsphere/get_host.py +4 -0
- pulumi_vsphere/get_host_pci_device.py +12 -4
- pulumi_vsphere/get_host_thumbprint.py +4 -0
- pulumi_vsphere/get_host_vgpu_profile.py +8 -0
- pulumi_vsphere/get_license.py +4 -0
- pulumi_vsphere/get_network.py +4 -0
- pulumi_vsphere/get_policy.py +4 -0
- pulumi_vsphere/get_resource_pool.py +10 -2
- pulumi_vsphere/get_role.py +4 -0
- pulumi_vsphere/get_tag.py +4 -0
- pulumi_vsphere/get_tag_category.py +4 -0
- pulumi_vsphere/get_vapp_container.py +4 -0
- pulumi_vsphere/get_virtual_machine.py +8 -0
- pulumi_vsphere/get_vmfs_disks.py +4 -0
- pulumi_vsphere/guest_os_customization.py +0 -50
- pulumi_vsphere/ha_vm_override.py +378 -189
- pulumi_vsphere/host.py +20 -0
- pulumi_vsphere/host_port_group.py +24 -12
- pulumi_vsphere/host_virtual_switch.py +287 -140
- pulumi_vsphere/license.py +32 -0
- pulumi_vsphere/outputs.py +230 -543
- pulumi_vsphere/pulumi-plugin.json +1 -2
- pulumi_vsphere/resource_pool.py +22 -48
- pulumi_vsphere/virtual_machine.py +807 -578
- pulumi_vsphere/virtual_machine_snapshot.py +10 -6
- pulumi_vsphere/vm_storage_policy.py +84 -72
- pulumi_vsphere/vnic.py +20 -8
- {pulumi_vsphere-4.11.0a1.dist-info → pulumi_vsphere-4.11.0a1711033215.dist-info}/METADATA +1 -1
- pulumi_vsphere-4.11.0a1711033215.dist-info/RECORD +82 -0
- pulumi_vsphere/get_host_base_images.py +0 -97
- pulumi_vsphere/offline_software_depot.py +0 -180
- pulumi_vsphere/supervisor.py +0 -858
- pulumi_vsphere/virtual_machine_class.py +0 -440
- pulumi_vsphere-4.11.0a1.dist-info/RECORD +0 -86
- {pulumi_vsphere-4.11.0a1.dist-info → pulumi_vsphere-4.11.0a1711033215.dist-info}/WHEEL +0 -0
- {pulumi_vsphere-4.11.0a1.dist-info → pulumi_vsphere-4.11.0a1711033215.dist-info}/top_level.txt +0 -0
@@ -61,7 +61,6 @@ class ComputeClusterArgs:
 ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
 ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
 host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
-host_image: Optional[pulumi.Input['ComputeClusterHostImageArgs']] = None,
 host_managed: Optional[pulumi.Input[bool]] = None,
 host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
 name: Optional[pulumi.Input[str]] = None,
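The hunk above removes `host_image` from the `ComputeClusterArgs` constructor signature (alongside the dropped `get_host_base_images`, `offline_software_depot`, `supervisor`, and `virtual_machine_class` modules listed in the summary). For orientation only, a minimal sketch of a cluster declaration against this build follows; the datacenter and host names are hypothetical placeholders, not values taken from this diff.

```python
import pulumi_vsphere as vsphere

# Hypothetical inventory names; replace with your own.
datacenter = vsphere.get_datacenter(name="dc-01")
host = vsphere.get_host(name="esxi-01.example.com", datacenter_id=datacenter.id)

# host_image is no longer an accepted argument in this version.
cluster = vsphere.ComputeCluster("compute-cluster-test",
    datacenter_id=datacenter.id,
    host_system_ids=[host.id])
```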
@@ -94,115 +93,225 @@ class ComputeClusterArgs:
 
 > **NOTE:** Custom attributes are unsupported on direct ESXi connections
 and require vCenter Server.
-:param pulumi.Input[str] dpm_automation_level: The automation level for host power
-
-
-:param pulumi.Input[
-
-
-:param pulumi.Input[
-
-
-
-:param pulumi.Input[
-
-:param pulumi.Input[
-
-
+:param pulumi.Input[str] dpm_automation_level: The automation level for host power
+operations in this cluster. Can be one of `manual` or `automated`. Default:
+`manual`.
+:param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS in this cluster.
+Requires `drs_enabled` to be `true` in order to be effective.
+Default: `false`.
+:param pulumi.Input[int] dpm_threshold: A value between `1` and `5` indicating the
+threshold of load within the cluster that influences host power operations.
+This affects both power on and power off operations - a lower setting will
+tolerate more of a surplus/deficit than a higher setting. Default: `3`.
+:param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: A key/value map that specifies advanced
+options for DRS and DPM.
+:param pulumi.Input[str] drs_automation_level: The default automation level for all
+virtual machines in this cluster. Can be one of `manual`,
+`partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
+:param pulumi.Input[bool] drs_enable_predictive_drs: When `true`, enables DRS to use data
+from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
+recommendations. <sup>\\*</sup>
+
+[ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
+:param pulumi.Input[bool] drs_enable_vm_overrides: Allow individual DRS overrides to be
+set for virtual machines in the cluster. Default: `true`.
+:param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster. Default: `false`.
+:param pulumi.Input[int] drs_migration_threshold: A value between `1` and `5` indicating
+the threshold of imbalance tolerated between hosts. A lower setting will
+tolerate more imbalance while a higher setting will tolerate less. Default:
+`3`.
+:param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all
+resource pools in the cluster. Can be one of `disabled` or
+`scaleCpuAndMemoryShares`. Default: `disabled`.
 :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
 This is a path relative to the datacenter you are deploying the cluster to.
 Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
 The provider will place a cluster named `compute-cluster-test` in a
 host folder located at `/dc1/host/foo/bar`, with the final inventory path
 being `/dc1/host/foo/bar/datastore-cluster-test`.
-:param pulumi.Input[bool] force_evacuate_on_destroy:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-:param pulumi.Input[
-
-
-
-:param pulumi.Input[str]
-
-
-
-
-
-the
-
-
-:param pulumi.Input[
-
-
-:param pulumi.Input[
-
-
-
-
-:param pulumi.Input[
-
-:param pulumi.Input[
-
-
-
-:param pulumi.Input[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-:param pulumi.Input[
+:param pulumi.Input[bool] force_evacuate_on_destroy: When destroying the resource, setting this to
+`true` will auto-remove any hosts that are currently a member of the cluster,
+as if they were removed by taking their entry out of `host_system_ids` (see
+below. This is an advanced
+option and should only be used for testing. Default: `false`.
+
+> **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
+there are many pitfalls to its use when working with complex cluster
+configurations. Depending on the virtual machines currently on the cluster, and
+your DRS and HA settings, the full host evacuation may fail. Instead,
+incrementally remove hosts from your configuration by adjusting the contents of
+the `host_system_ids` attribute.
+:param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: Defines the
+managed object IDs of hosts to use as dedicated failover
+hosts. These hosts are kept as available as possible - admission control will
+block access to the host, and DRS will ignore the host when making
+recommendations.
+:param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number
+of failed hosts that admission control tolerates when making decisions on
+whether to permit virtual machine operations. The maximum is one less than
+the number of hosts in the cluster. Default: `1`.
+<sup>\\*</sup>
+:param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of
+resource reduction that a cluster of virtual machines can tolerate in case of
+a failover. A value of 0 produces warnings only, whereas a value of 100
+disables the setting. Default: `100` (disabled).
+:param pulumi.Input[str] ha_admission_control_policy: The type of admission control
+policy to use with vSphere HA. Can be one of `resourcePercentage`,
+`slotPolicy`, `failoverHosts`, or `disabled`. Default: `resourcePercentage`.
+:param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: Automatically determine available resource percentages by subtracting the
+average number of host resources represented by the
+`ha_admission_control_host_failure_tolerance`
+setting from the total amount of resources in the cluster. Disable to supply
+user-defined values. Default: `true`.
+<sup>\\*</sup>
+:param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: Controls the
+user-defined percentage of CPU resources in the cluster to reserve for
+failover. Default: `100`.
+:param pulumi.Input[int] ha_admission_control_resource_percentage_memory: Controls the
+user-defined percentage of memory resources in the cluster to reserve for
+failover. Default: `100`.
+:param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: Controls the
+user-defined CPU slot size, in MHz. Default: `32`.
+:param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: Controls the
+user-defined memory slot size, in MB. Default: `100`.
+:param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: Controls
+whether or not you wish to supply explicit values to CPU and memory slot
+sizes. The default is `false`, which tells vSphere to gather a automatic
+average based on all powered-on virtual machines currently in the cluster.
+:param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: A key/value map that specifies advanced
+options for vSphere HA.
+:param pulumi.Input[str] ha_datastore_apd_recovery_action: Controls the action to take
+on virtual machines if an APD status on an affected datastore clears in the
+middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
+<sup>\\*</sup>
+:param pulumi.Input[str] ha_datastore_apd_response: Controls the action to take on
+virtual machines when the cluster has detected loss to all paths to a
+relevant datastore. Can be one of `disabled`, `warning`,
+`restartConservative`, or `restartAggressive`. Default: `disabled`.
+<sup>\\*</sup>
+:param pulumi.Input[int] ha_datastore_apd_response_delay: The time, in seconds,
+to wait after an APD timeout event to run the response action defined in
+`ha_datastore_apd_response`. Default: `180`
+seconds (3 minutes). <sup>\\*</sup>
+:param pulumi.Input[str] ha_datastore_pdl_response: Controls the action to take on
+virtual machines when the cluster has detected a permanent device loss to a
+relevant datastore. Can be one of `disabled`, `warning`, or
+`restartAggressive`. Default: `disabled`.
+<sup>\\*</sup>
+:param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster. Default:
+`false`.
+:param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for
+preferred datastores to use for HA heartbeating. This setting is only useful
+when `ha_heartbeat_datastore_policy` is set
+to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
+:param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA
+heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
+`allFeasibleDsWithUserPreference`. Default:
+`allFeasibleDsWithUserPreference`.
+:param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual
+machines when a host has detected that it has been isolated from the rest of
+the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
+`none`.
+:param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether
+vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
+or `disabled`. Default: `enabled`.
+:param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component
+protection for virtual machines in this cluster. Can be one of `enabled` or
+`disabled`. Default: `enabled`.
+<sup>\\*</sup>
+:param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to
+determine whether or not virtual machines in a certain restart priority class
+are online, allowing HA to move on to restarting virtual machines on the next
+priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
+`appHbStatusGreen`. The default is `none`, which means that a virtual machine
+is considered ready immediately after a host is found to start it on.
+<sup>\\*</sup>
+:param pulumi.Input[int] ha_vm_failure_interval: The time interval, in seconds, a heartbeat
+from a virtual machine is not received within this configured interval,
+the virtual machine is marked as failed. Default: `30` seconds.
+:param pulumi.Input[int] ha_vm_maximum_failure_window: The time, in seconds, for the reset window in
+which `ha_vm_maximum_resets` can operate. When this
+window expires, no more resets are attempted regardless of the setting
+configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
+unlimited reset time is allotted. Default: `-1` (no window).
+:param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will
+perform to a virtual machine when responding to a failure event. Default: `3`
+:param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after
+powering on a virtual machine before monitoring for heartbeats. Default:
+`120` seconds (2 minutes).
+:param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use
+when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
+`vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
+:param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay, in seconds,
+after ready condition is met. A VM is considered ready at this point.
+Default: `0` seconds (no delay). <sup>\\*</sup>
+:param pulumi.Input[str] ha_vm_restart_priority: The default restart priority
+for affected virtual machines when vSphere detects a host failure. Can be one
+of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
+:param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds,
+that vSphere HA will wait for virtual machines in one priority to be ready
+before proceeding with the next priority. Default: `600` seconds (10 minutes).
+<sup>\\*</sup>
+:param pulumi.Input[int] host_cluster_exit_timeout: The timeout, in seconds, for each host maintenance
+mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
+:param pulumi.Input[bool] host_managed: Can be set to `true` if compute cluster
+membership will be managed through the `host` resource rather than the
+`compute_cluster` resource. Conflicts with: `host_system_ids`.
+:param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of
+the hosts to put in the cluster. Conflicts with: `host_managed`.
 :param pulumi.Input[str] name: The name of the cluster.
-:param pulumi.Input[str] proactive_ha_automation_level:
-
-
-
-:param pulumi.Input[
-
-
+:param pulumi.Input[str] proactive_ha_automation_level: Determines how the host
+quarantine, maintenance mode, or virtual machine migration recommendations
+made by proactive HA are to be handled. Can be one of `Automated` or
+`Manual`. Default: `Manual`. <sup>\\*</sup>
+:param pulumi.Input[bool] proactive_ha_enabled: Enables Proactive HA. Default: `false`.
+<sup>\\*</sup>
+:param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation
+for moderately degraded hosts. Can be one of `MaintenanceMode` or
+`QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
+`proactive_ha_severe_remediation` is set
+to `QuarantineMode`. Default: `QuarantineMode`.
+<sup>\\*</sup>
+:param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update
+providers configured for this cluster.
+<sup>\\*</sup>
+:param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for
+severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
+Note that this cannot be set to `QuarantineMode` when
+`proactive_ha_moderate_remediation` is
+set to `MaintenanceMode`. Default: `QuarantineMode`.
+<sup>\\*</sup>
 :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
-:param pulumi.Input[bool] vsan_compression_enabled:
-
-:param pulumi.Input[
-
-
-:param pulumi.Input[
-
-:param pulumi.Input[
-
-
-
-:param pulumi.Input[
-
-
+:param pulumi.Input[bool] vsan_compression_enabled: Enables vSAN compression on the
+cluster.
+:param pulumi.Input[bool] vsan_dedup_enabled: Enables vSAN deduplication on the cluster.
+Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
+compression must also be enabled.
+:param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]] vsan_disk_groups: Represents the configuration of a host disk
+group in the cluster.
+:param pulumi.Input[bool] vsan_dit_encryption_enabled: Enables vSAN data-in-transit
+encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
+vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
+at the same time.
+:param pulumi.Input[int] vsan_dit_rekey_interval: Indicates the rekey interval in
+minutes for data-in-transit encryption. The valid rekey interval is 30 to
+10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
+:param pulumi.Input[bool] vsan_enabled: Enables vSAN on the cluster.
+:param pulumi.Input[bool] vsan_esa_enabled: Enables vSAN ESA on the cluster.
+:param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: Configurations of vSAN fault domains.
+:param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Enables network
+diagnostic mode for vSAN performance service on the cluster.
+:param pulumi.Input[bool] vsan_performance_enabled: Enables vSAN performance service on
+the cluster. Default: `true`.
+:param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The remote vSAN datastore IDs to be
+mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
+`vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
+data-in-transit encryption feature at the same time.
+:param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: Configurations of vSAN stretched cluster.
+:param pulumi.Input[bool] vsan_unmap_enabled: Enables vSAN unmap on the cluster.
+You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
+:param pulumi.Input[bool] vsan_verbose_mode_enabled: Enables verbose mode for vSAN
+performance service on the cluster.
 """
 pulumi.set(__self__, "datacenter_id", datacenter_id)
 if custom_attributes is not None:
@@ -293,8 +402,6 @@ class ComputeClusterArgs:
 pulumi.set(__self__, "ha_vm_restart_timeout", ha_vm_restart_timeout)
 if host_cluster_exit_timeout is not None:
 pulumi.set(__self__, "host_cluster_exit_timeout", host_cluster_exit_timeout)
-if host_image is not None:
-pulumi.set(__self__, "host_image", host_image)
 if host_managed is not None:
 pulumi.set(__self__, "host_managed", host_managed)
 if host_system_ids is not None:
@@ -375,7 +482,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="dpmAutomationLevel")
 def dpm_automation_level(self) -> Optional[pulumi.Input[str]]:
 """
-The automation level for host power
+The automation level for host power
+operations in this cluster. Can be one of `manual` or `automated`. Default:
+`manual`.
 """
 return pulumi.get(self, "dpm_automation_level")
 
@@ -387,8 +496,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="dpmEnabled")
 def dpm_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-Enable DPM support for DRS
-
+Enable DPM support for DRS in this cluster.
+Requires `drs_enabled` to be `true` in order to be effective.
+Default: `false`.
 """
 return pulumi.get(self, "dpm_enabled")
 
@@ -400,9 +510,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="dpmThreshold")
 def dpm_threshold(self) -> Optional[pulumi.Input[int]]:
 """
-A value between 1 and 5 indicating the
-
-setting
+A value between `1` and `5` indicating the
+threshold of load within the cluster that influences host power operations.
+This affects both power on and power off operations - a lower setting will
+tolerate more of a surplus/deficit than a higher setting. Default: `3`.
 """
 return pulumi.get(self, "dpm_threshold")
 
@@ -414,7 +525,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="drsAdvancedOptions")
 def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
 """
-
+A key/value map that specifies advanced
+options for DRS and DPM.
 """
 return pulumi.get(self, "drs_advanced_options")
 
@@ -426,8 +538,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="drsAutomationLevel")
 def drs_automation_level(self) -> Optional[pulumi.Input[str]]:
 """
-The default automation level for all
-
+The default automation level for all
+virtual machines in this cluster. Can be one of `manual`,
+`partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
 """
 return pulumi.get(self, "drs_automation_level")
 
@@ -439,7 +552,11 @@ class ComputeClusterArgs:
 @pulumi.getter(name="drsEnablePredictiveDrs")
 def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[bool]]:
 """
-When true
+When `true`, enables DRS to use data
+from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
+recommendations. <sup>\\*</sup>
+
+[ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
 """
 return pulumi.get(self, "drs_enable_predictive_drs")
 
@@ -451,7 +568,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="drsEnableVmOverrides")
 def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[bool]]:
 """
-
+Allow individual DRS overrides to be
+set for virtual machines in the cluster. Default: `true`.
 """
 return pulumi.get(self, "drs_enable_vm_overrides")
 
@@ -463,7 +581,7 @@ class ComputeClusterArgs:
 @pulumi.getter(name="drsEnabled")
 def drs_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-Enable DRS for this cluster.
+Enable DRS for this cluster. Default: `false`.
 """
 return pulumi.get(self, "drs_enabled")
 
@@ -475,8 +593,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="drsMigrationThreshold")
 def drs_migration_threshold(self) -> Optional[pulumi.Input[int]]:
 """
-A value between 1 and 5 indicating
-
+A value between `1` and `5` indicating
+the threshold of imbalance tolerated between hosts. A lower setting will
+tolerate more imbalance while a higher setting will tolerate less. Default:
+`3`.
 """
 return pulumi.get(self, "drs_migration_threshold")
 
@@ -488,7 +608,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="drsScaleDescendantsShares")
 def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[str]]:
 """
-Enable scalable shares for all
+Enable scalable shares for all
+resource pools in the cluster. Can be one of `disabled` or
+`scaleCpuAndMemoryShares`. Default: `disabled`.
 """
 return pulumi.get(self, "drs_scale_descendants_shares")
 
@@ -517,8 +639,18 @@ class ComputeClusterArgs:
 @pulumi.getter(name="forceEvacuateOnDestroy")
 def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[bool]]:
 """
-
-
+When destroying the resource, setting this to
+`true` will auto-remove any hosts that are currently a member of the cluster,
+as if they were removed by taking their entry out of `host_system_ids` (see
+below. This is an advanced
+option and should only be used for testing. Default: `false`.
+
+> **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
+there are many pitfalls to its use when working with complex cluster
+configurations. Depending on the virtual machines currently on the cluster, and
+your DRS and HA settings, the full host evacuation may fail. Instead,
+incrementally remove hosts from your configuration by adjusting the contents of
+the `host_system_ids` attribute.
 """
 return pulumi.get(self, "force_evacuate_on_destroy")
 
@@ -530,9 +662,11 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
 def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
 """
-
-
-
+Defines the
+managed object IDs of hosts to use as dedicated failover
+hosts. These hosts are kept as available as possible - admission control will
+block access to the host, and DRS will ignore the host when making
+recommendations.
 """
 return pulumi.get(self, "ha_admission_control_failover_host_system_ids")
 
@@ -544,8 +678,11 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
 def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[int]]:
 """
-The maximum number
-
+The maximum number
+of failed hosts that admission control tolerates when making decisions on
+whether to permit virtual machine operations. The maximum is one less than
+the number of hosts in the cluster. Default: `1`.
+<sup>\\*</sup>
 """
 return pulumi.get(self, "ha_admission_control_host_failure_tolerance")
 
@@ -557,8 +694,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
 def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[int]]:
 """
-The percentage of
-
+The percentage of
+resource reduction that a cluster of virtual machines can tolerate in case of
+a failover. A value of 0 produces warnings only, whereas a value of 100
+disables the setting. Default: `100` (disabled).
 """
 return pulumi.get(self, "ha_admission_control_performance_tolerance")
 
@@ -570,10 +709,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haAdmissionControlPolicy")
 def ha_admission_control_policy(self) -> Optional[pulumi.Input[str]]:
 """
-The type of admission control
-
-slotPolicy
-issues.
+The type of admission control
+policy to use with vSphere HA. Can be one of `resourcePercentage`,
+`slotPolicy`, `failoverHosts`, or `disabled`. Default: `resourcePercentage`.
 """
 return pulumi.get(self, "ha_admission_control_policy")
 
@@ -585,9 +723,12 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
 def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[bool]]:
 """
-
-
-
+Automatically determine available resource percentages by subtracting the
+average number of host resources represented by the
+`ha_admission_control_host_failure_tolerance`
+setting from the total amount of resources in the cluster. Disable to supply
+user-defined values. Default: `true`.
+<sup>\\*</sup>
 """
 return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")
 
@@ -599,8 +740,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
 def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[int]]:
 """
-
-the cluster to reserve for
+Controls the
+user-defined percentage of CPU resources in the cluster to reserve for
+failover. Default: `100`.
 """
 return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")
 
@@ -612,8 +754,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
 def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[int]]:
 """
-
-the cluster to reserve for
+Controls the
+user-defined percentage of memory resources in the cluster to reserve for
+failover. Default: `100`.
 """
 return pulumi.get(self, "ha_admission_control_resource_percentage_memory")
 
@@ -625,7 +768,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
 def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[int]]:
 """
-
+Controls the
+user-defined CPU slot size, in MHz. Default: `32`.
 """
 return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")
 
@@ -637,7 +781,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
 def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[int]]:
 """
-
+Controls the
+user-defined memory slot size, in MB. Default: `100`.
 """
 return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")
 
@@ -649,9 +794,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
 def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[bool]]:
 """
-
-
-
+Controls
+whether or not you wish to supply explicit values to CPU and memory slot
+sizes. The default is `false`, which tells vSphere to gather a automatic
+average based on all powered-on virtual machines currently in the cluster.
 """
 return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")
 
@@ -663,7 +809,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haAdvancedOptions")
 def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
 """
-
+A key/value map that specifies advanced
+options for vSphere HA.
 """
 return pulumi.get(self, "ha_advanced_options")
 
@@ -675,8 +822,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haDatastoreApdRecoveryAction")
 def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[str]]:
 """
-
-
+Controls the action to take
+on virtual machines if an APD status on an affected datastore clears in the
+middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
+<sup>\\*</sup>
 """
 return pulumi.get(self, "ha_datastore_apd_recovery_action")
 
@@ -688,9 +837,11 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haDatastoreApdResponse")
 def ha_datastore_apd_response(self) -> Optional[pulumi.Input[str]]:
 """
-
-detected loss to all paths to a
-
+Controls the action to take on
+virtual machines when the cluster has detected loss to all paths to a
+relevant datastore. Can be one of `disabled`, `warning`,
+`restartConservative`, or `restartAggressive`. Default: `disabled`.
+<sup>\\*</sup>
 """
 return pulumi.get(self, "ha_datastore_apd_response")
 
@@ -702,8 +853,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haDatastoreApdResponseDelay")
 def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[int]]:
 """
-
-the response action defined in
+The time, in seconds,
+to wait after an APD timeout event to run the response action defined in
+`ha_datastore_apd_response`. Default: `180`
+seconds (3 minutes). <sup>\\*</sup>
 """
 return pulumi.get(self, "ha_datastore_apd_response_delay")
 
@@ -715,8 +868,11 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haDatastorePdlResponse")
 def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[str]]:
 """
-
-detected a permanent device loss to a
+Controls the action to take on
+virtual machines when the cluster has detected a permanent device loss to a
+relevant datastore. Can be one of `disabled`, `warning`, or
+`restartAggressive`. Default: `disabled`.
+<sup>\\*</sup>
 """
 return pulumi.get(self, "ha_datastore_pdl_response")
 
@@ -728,7 +884,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haEnabled")
 def ha_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-Enable vSphere HA for this cluster.
+Enable vSphere HA for this cluster. Default:
+`false`.
 """
 return pulumi.get(self, "ha_enabled")
 
@@ -740,8 +897,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haHeartbeatDatastoreIds")
 def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
 """
-The list of managed object IDs for
-
+The list of managed object IDs for
+preferred datastores to use for HA heartbeating. This setting is only useful
+when `ha_heartbeat_datastore_policy` is set
+to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
 """
 return pulumi.get(self, "ha_heartbeat_datastore_ids")
 
@@ -753,8 +912,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haHeartbeatDatastorePolicy")
 def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[str]]:
 """
-The selection policy for HA
-
+The selection policy for HA
+heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
+`allFeasibleDsWithUserPreference`. Default:
+`allFeasibleDsWithUserPreference`.
 """
 return pulumi.get(self, "ha_heartbeat_datastore_policy")
 
@@ -766,8 +927,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haHostIsolationResponse")
 def ha_host_isolation_response(self) -> Optional[pulumi.Input[str]]:
 """
-The action to take on virtual
-
+The action to take on virtual
+machines when a host has detected that it has been isolated from the rest of
+the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
+`none`.
 """
 return pulumi.get(self, "ha_host_isolation_response")
 
@@ -779,7 +942,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haHostMonitoring")
 def ha_host_monitoring(self) -> Optional[pulumi.Input[str]]:
 """
-Global setting that controls whether
+Global setting that controls whether
+vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
+or `disabled`. Default: `enabled`.
 """
 return pulumi.get(self, "ha_host_monitoring")
 
@@ -791,8 +956,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haVmComponentProtection")
 def ha_vm_component_protection(self) -> Optional[pulumi.Input[str]]:
 """
-Controls vSphere VM component
-
+Controls vSphere VM component
+protection for virtual machines in this cluster. Can be one of `enabled` or
+`disabled`. Default: `enabled`.
+<sup>\\*</sup>
 """
 return pulumi.get(self, "ha_vm_component_protection")
 
@@ -804,8 +971,13 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haVmDependencyRestartCondition")
 def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[str]]:
 """
-The condition used to
-
+The condition used to
+determine whether or not virtual machines in a certain restart priority class
+are online, allowing HA to move on to restarting virtual machines on the next
+priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
+`appHbStatusGreen`. The default is `none`, which means that a virtual machine
+is considered ready immediately after a host is found to start it on.
+<sup>\\*</sup>
 """
 return pulumi.get(self, "ha_vm_dependency_restart_condition")
 
@@ -817,8 +989,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haVmFailureInterval")
 def ha_vm_failure_interval(self) -> Optional[pulumi.Input[int]]:
 """
-
-
+The time interval, in seconds, a heartbeat
+from a virtual machine is not received within this configured interval,
+the virtual machine is marked as failed. Default: `30` seconds.
 """
 return pulumi.get(self, "ha_vm_failure_interval")
 
@@ -830,9 +1003,11 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haVmMaximumFailureWindow")
 def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[int]]:
 """
-The
-
-
+The time, in seconds, for the reset window in
+which `ha_vm_maximum_resets` can operate. When this
+window expires, no more resets are attempted regardless of the setting
+configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
+unlimited reset time is allotted. Default: `-1` (no window).
 """
 return pulumi.get(self, "ha_vm_maximum_failure_window")
 
@@ -844,7 +1019,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haVmMaximumResets")
 def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[int]]:
 """
-The maximum number of resets that HA will
+The maximum number of resets that HA will
+perform to a virtual machine when responding to a failure event. Default: `3`
 """
 return pulumi.get(self, "ha_vm_maximum_resets")
 
@@ -856,7 +1032,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haVmMinimumUptime")
 def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[int]]:
 """
-The time, in seconds, that HA waits after
+The time, in seconds, that HA waits after
+powering on a virtual machine before monitoring for heartbeats. Default:
+`120` seconds (2 minutes).
 """
 return pulumi.get(self, "ha_vm_minimum_uptime")
 
@@ -868,8 +1046,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haVmMonitoring")
 def ha_vm_monitoring(self) -> Optional[pulumi.Input[str]]:
 """
-The type of virtual machine monitoring to use
-
+The type of virtual machine monitoring to use
+when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
+`vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
 """
 return pulumi.get(self, "ha_vm_monitoring")
 
@@ -881,7 +1060,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haVmRestartAdditionalDelay")
 def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[int]]:
 """
-Additional delay in seconds
+Additional delay, in seconds,
+after ready condition is met. A VM is considered ready at this point.
+Default: `0` seconds (no delay). <sup>\\*</sup>
 """
 return pulumi.get(self, "ha_vm_restart_additional_delay")
 
@@ -893,8 +1074,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haVmRestartPriority")
 def ha_vm_restart_priority(self) -> Optional[pulumi.Input[str]]:
 """
-The default restart priority
-
+The default restart priority
+for affected virtual machines when vSphere detects a host failure. Can be one
+of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
 """
 return pulumi.get(self, "ha_vm_restart_priority")
 
@@ -906,8 +1088,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="haVmRestartTimeout")
 def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[int]]:
 """
-The maximum time, in seconds,
-
+The maximum time, in seconds,
+that vSphere HA will wait for virtual machines in one priority to be ready
+before proceeding with the next priority. Default: `600` seconds (10 minutes).
+<sup>\\*</sup>
 """
 return pulumi.get(self, "ha_vm_restart_timeout")
 
@@ -919,7 +1103,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="hostClusterExitTimeout")
 def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[int]]:
 """
-The timeout for each host maintenance
+The timeout, in seconds, for each host maintenance
+mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
 """
 return pulumi.get(self, "host_cluster_exit_timeout")
 
@@ -927,23 +1112,13 @@ class ComputeClusterArgs:
 def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[int]]):
 pulumi.set(self, "host_cluster_exit_timeout", value)
 
-@property
-@pulumi.getter(name="hostImage")
-def host_image(self) -> Optional[pulumi.Input['ComputeClusterHostImageArgs']]:
-"""
-Details about the host image which should be applied to the cluster.
-"""
-return pulumi.get(self, "host_image")
-
-@host_image.setter
-def host_image(self, value: Optional[pulumi.Input['ComputeClusterHostImageArgs']]):
-pulumi.set(self, "host_image", value)
-
 @property
 @pulumi.getter(name="hostManaged")
 def host_managed(self) -> Optional[pulumi.Input[bool]]:
 """
-
+Can be set to `true` if compute cluster
+membership will be managed through the `host` resource rather than the
+`compute_cluster` resource. Conflicts with: `host_system_ids`.
 """
 return pulumi.get(self, "host_managed")
 
@@ -955,7 +1130,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="hostSystemIds")
 def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
 """
-The managed object IDs of
+The managed object IDs of
+the hosts to put in the cluster. Conflicts with: `host_managed`.
 """
 return pulumi.get(self, "host_system_ids")
 
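The `host_managed` and `host_system_ids` properties documented above are mutually exclusive ways of controlling cluster membership. A rough sketch of the two styles follows; the hostnames and credentials are placeholders, and the `Host` resource arguments shown are assumptions based on the `host` resource the docstring refers to, not values confirmed by this diff.

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # hypothetical

# Style 1: the cluster lists its members directly via host_system_ids.
host = vsphere.get_host(name="esxi-01.example.com", datacenter_id=datacenter.id)
cluster_a = vsphere.ComputeCluster("cluster-a",
    datacenter_id=datacenter.id,
    host_system_ids=[host.id])

# Style 2: membership is managed from the host side; conflicts with host_system_ids.
cluster_b = vsphere.ComputeCluster("cluster-b",
    datacenter_id=datacenter.id,
    host_managed=True)
member = vsphere.Host("esxi-02",
    hostname="esxi-02.example.com",  # hypothetical host
    username="root",
    password="VMware1!",
    cluster=cluster_b.id)
```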
@@ -979,7 +1155,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="proactiveHaAutomationLevel")
 def proactive_ha_automation_level(self) -> Optional[pulumi.Input[str]]:
 """
-
+Determines how the host
+quarantine, maintenance mode, or virtual machine migration recommendations
+made by proactive HA are to be handled. Can be one of `Automated` or
+`Manual`. Default: `Manual`. <sup>\\*</sup>
 """
 return pulumi.get(self, "proactive_ha_automation_level")
 
@@ -991,7 +1170,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="proactiveHaEnabled")
 def proactive_ha_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-Enables
+Enables Proactive HA. Default: `false`.
+<sup>\\*</sup>
 """
 return pulumi.get(self, "proactive_ha_enabled")
 
@@ -1003,8 +1183,12 @@ class ComputeClusterArgs:
 @pulumi.getter(name="proactiveHaModerateRemediation")
 def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[str]]:
 """
-The configured remediation
-
+The configured remediation
+for moderately degraded hosts. Can be one of `MaintenanceMode` or
+`QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
+`proactive_ha_severe_remediation` is set
+to `QuarantineMode`. Default: `QuarantineMode`.
+<sup>\\*</sup>
 """
 return pulumi.get(self, "proactive_ha_moderate_remediation")
 
@@ -1016,7 +1200,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="proactiveHaProviderIds")
 def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
 """
-The list of IDs for health update
+The list of IDs for health update
+providers configured for this cluster.
+<sup>\\*</sup>
 """
 return pulumi.get(self, "proactive_ha_provider_ids")
 
@@ -1028,8 +1214,12 @@ class ComputeClusterArgs:
 @pulumi.getter(name="proactiveHaSevereRemediation")
 def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[str]]:
 """
-The configured remediation for
-
+The configured remediation for
+severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
+Note that this cannot be set to `QuarantineMode` when
+`proactive_ha_moderate_remediation` is
+set to `MaintenanceMode`. Default: `QuarantineMode`.
+<sup>\\*</sup>
 """
 return pulumi.get(self, "proactive_ha_severe_remediation")
 
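Proactive HA, documented above, is configured through the `proactive_ha_*` arguments. The sketch below stays within the documented constraint that the moderate remediation cannot be `MaintenanceMode` while the severe remediation is `QuarantineMode`; the datacenter name is a hypothetical placeholder.

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # hypothetical

cluster = vsphere.ComputeCluster("proactive-ha-cluster",
    datacenter_id=datacenter.id,
    drs_enabled=True,
    ha_enabled=True,
    proactive_ha_enabled=True,
    proactive_ha_automation_level="Automated",
    # Valid combination: moderate stays in quarantine, severe escalates.
    proactive_ha_moderate_remediation="QuarantineMode",
    proactive_ha_severe_remediation="MaintenanceMode",
    proactive_ha_provider_ids=[])  # health update provider IDs, if any are registered
```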
@@ -1053,7 +1243,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanCompressionEnabled")
 def vsan_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-
+Enables vSAN compression on the
+cluster.
 """
 return pulumi.get(self, "vsan_compression_enabled")
 
@@ -1065,7 +1256,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanDedupEnabled")
 def vsan_dedup_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-
+Enables vSAN deduplication on the cluster.
+Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
+compression must also be enabled.
 """
 return pulumi.get(self, "vsan_dedup_enabled")
 
@@ -1077,7 +1270,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanDiskGroups")
 def vsan_disk_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]:
 """
-
+Represents the configuration of a host disk
+group in the cluster.
 """
 return pulumi.get(self, "vsan_disk_groups")
 
@@ -1089,7 +1283,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanDitEncryptionEnabled")
 def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-
+Enables vSAN data-in-transit
+encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
+vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
+at the same time.
 """
 return pulumi.get(self, "vsan_dit_encryption_enabled")
 
@@ -1101,7 +1298,9 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanDitRekeyInterval")
 def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[int]]:
 """
-
+Indicates the rekey interval in
+minutes for data-in-transit encryption. The valid rekey interval is 30 to
+10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
 """
 return pulumi.get(self, "vsan_dit_rekey_interval")
 
@@ -1113,7 +1312,7 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanEnabled")
 def vsan_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-
+Enables vSAN on the cluster.
 """
 return pulumi.get(self, "vsan_enabled")
 
@@ -1125,7 +1324,7 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanEsaEnabled")
 def vsan_esa_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-
+Enables vSAN ESA on the cluster.
 """
 return pulumi.get(self, "vsan_esa_enabled")
 
@@ -1137,7 +1336,7 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanFaultDomains")
 def vsan_fault_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]:
 """
-
+Configurations of vSAN fault domains.
 """
 return pulumi.get(self, "vsan_fault_domains")
 
@@ -1149,7 +1348,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
 def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-
+Enables network
+diagnostic mode for vSAN performance service on the cluster.
 """
 return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")
 
@@ -1161,7 +1361,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanPerformanceEnabled")
 def vsan_performance_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-
+Enables vSAN performance service on
+the cluster. Default: `true`.
 """
 return pulumi.get(self, "vsan_performance_enabled")
 
@@ -1173,7 +1374,10 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanRemoteDatastoreIds")
 def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
 """
-The
+The remote vSAN datastore IDs to be
+mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
+`vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
+data-in-transit encryption feature at the same time.
 """
 return pulumi.get(self, "vsan_remote_datastore_ids")
 
@@ -1185,7 +1389,7 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanStretchedCluster")
 def vsan_stretched_cluster(self) -> Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]:
 """
-
+Configurations of vSAN stretched cluster.
 """
 return pulumi.get(self, "vsan_stretched_cluster")
 
@@ -1197,7 +1401,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanUnmapEnabled")
 def vsan_unmap_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-
+Enables vSAN unmap on the cluster.
+You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
 """
 return pulumi.get(self, "vsan_unmap_enabled")
 
@@ -1209,7 +1414,8 @@ class ComputeClusterArgs:
 @pulumi.getter(name="vsanVerboseModeEnabled")
 def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
 """
-
+Enables verbose mode for vSAN
+performance service on the cluster.
 """
 return pulumi.get(self, "vsan_verbose_mode_enabled")
 
@@ -1266,7 +1472,6 @@ class _ComputeClusterState:
|
|
|
1266
1472
|
ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
|
|
1267
1473
|
ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
|
|
1268
1474
|
host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
|
|
1269
|
-
host_image: Optional[pulumi.Input['ComputeClusterHostImageArgs']] = None,
|
|
1270
1475
|
host_managed: Optional[pulumi.Input[bool]] = None,
|
|
1271
1476
|
host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
|
|
1272
1477
|
name: Optional[pulumi.Input[str]] = None,
|
|
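The hunks above restore the docstrings for the cluster-level vSAN arguments of `ComputeCluster`. As a minimal, hypothetical sketch of how those arguments fit together (the datacenter name `dc-01` and the host managed object IDs are placeholders, not values taken from this diff), a vSAN-enabled cluster might be declared like this:

```python
import pulumi
import pulumi_vsphere as vsphere

# Assumption: a datacenter named "dc-01" already exists; the host IDs below
# are placeholders for real ESXi host managed object IDs.
datacenter = vsphere.get_datacenter(name="dc-01")

vsan_cluster = vsphere.ComputeCluster(
    "vsan-cluster",
    datacenter_id=datacenter.id,
    host_system_ids=["host-1001", "host-1002"],
    vsan_enabled=True,
    # Deduplication cannot be enabled on its own; compression must be on too,
    # matching the constraint documented in the restored docstring above.
    vsan_compression_enabled=True,
    vsan_dedup_enabled=True,
    # The performance service defaults to true; set explicitly here for clarity.
    vsan_performance_enabled=True,
)

pulumi.export("cluster_id", vsan_cluster.id)
```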
@@ -1300,120 +1505,230 @@ class _ComputeClusterState:
1300 1505 | and require vCenter Server.
1301 1506 | :param pulumi.Input[str] datacenter_id: The managed object ID of
1302 1507 | the datacenter to create the cluster in. Forces a new resource if changed.
1303      | - :param pulumi.Input[str] dpm_automation_level: The automation level for host power
1304      | -
1305      | -
1306      | - :param pulumi.Input[
1307      | -
1308      | -
1309      | - :param pulumi.Input[
1310      | -
1311      | -
1312      | -
1313      | - :param pulumi.Input[
1314      | -
1315      | - :param pulumi.Input[
1316      | -
1317      | -
     1508 | + :param pulumi.Input[str] dpm_automation_level: The automation level for host power
     1509 | + operations in this cluster. Can be one of `manual` or `automated`. Default:
     1510 | + `manual`.
     1511 | + :param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS in this cluster.
     1512 | + Requires `drs_enabled` to be `true` in order to be effective.
     1513 | + Default: `false`.
     1514 | + :param pulumi.Input[int] dpm_threshold: A value between `1` and `5` indicating the
     1515 | + threshold of load within the cluster that influences host power operations.
     1516 | + This affects both power on and power off operations - a lower setting will
     1517 | + tolerate more of a surplus/deficit than a higher setting. Default: `3`.
     1518 | + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: A key/value map that specifies advanced
     1519 | + options for DRS and DPM.
     1520 | + :param pulumi.Input[str] drs_automation_level: The default automation level for all
     1521 | + virtual machines in this cluster. Can be one of `manual`,
     1522 | + `partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
     1523 | + :param pulumi.Input[bool] drs_enable_predictive_drs: When `true`, enables DRS to use data
     1524 | + from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
     1525 | + recommendations. <sup>\\*</sup>
     1526 | +
     1527 | + [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
     1528 | + :param pulumi.Input[bool] drs_enable_vm_overrides: Allow individual DRS overrides to be
     1529 | + set for virtual machines in the cluster. Default: `true`.
     1530 | + :param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster. Default: `false`.
     1531 | + :param pulumi.Input[int] drs_migration_threshold: A value between `1` and `5` indicating
     1532 | + the threshold of imbalance tolerated between hosts. A lower setting will
     1533 | + tolerate more imbalance while a higher setting will tolerate less. Default:
     1534 | + `3`.
     1535 | + :param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all
     1536 | + resource pools in the cluster. Can be one of `disabled` or
     1537 | + `scaleCpuAndMemoryShares`. Default: `disabled`.
1318 1538 | :param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
1319 1539 | This is a path relative to the datacenter you are deploying the cluster to.
1320 1540 | Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
1321 1541 | The provider will place a cluster named `compute-cluster-test` in a
1322 1542 | host folder located at `/dc1/host/foo/bar`, with the final inventory path
1323 1543 | being `/dc1/host/foo/bar/datastore-cluster-test`.
1324      | - :param pulumi.Input[bool] force_evacuate_on_destroy:
1325      | -
1326      | -
1327      | -
1328      | -
1329      | -
1330      | -
1331      | -
1332      | -
1333      | -
1334      | -
1335      | -
1336      | -
1337      | -
1338      | -
1339      | -
1340      | -
1341      | -
1342      | -
1343      | -
1344      | -
1345      | -
1346      | - :param pulumi.Input[
1347      | -
1348      | -
1349      | -
1350      | - :param pulumi.Input[str]
1351      | -
1352      | -
1353      | -
1354      | -
1355      | -
1356      | - the
1357      | -
1358      | -
1359      | - :param pulumi.Input[
1360      | -
1361      | -
1362      | - :param pulumi.Input[
1363      | -
1364      | -
1365      | -
1366      | -
1367      | - :param pulumi.Input[
1368      | -
1369      | - :param pulumi.Input[
1370      | -
1371      | -
1372      | -
1373      | - :param pulumi.Input[
1374      | -
1375      | -
1376      | -
1377      | -
1378      | -
1379      | -
1380      | -
1381      | -
1382      | -
1383      | -
1384      | -
1385      | -
1386      | -
1387      | -
1388      | - :param pulumi.Input[
     1544 | + :param pulumi.Input[bool] force_evacuate_on_destroy: When destroying the resource, setting this to
     1545 | + `true` will auto-remove any hosts that are currently a member of the cluster,
     1546 | + as if they were removed by taking their entry out of `host_system_ids` (see
     1547 | + below. This is an advanced
     1548 | + option and should only be used for testing. Default: `false`.
     1549 | +
     1550 | + > **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
     1551 | + there are many pitfalls to its use when working with complex cluster
     1552 | + configurations. Depending on the virtual machines currently on the cluster, and
     1553 | + your DRS and HA settings, the full host evacuation may fail. Instead,
     1554 | + incrementally remove hosts from your configuration by adjusting the contents of
     1555 | + the `host_system_ids` attribute.
     1556 | + :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: Defines the
     1557 | + managed object IDs of hosts to use as dedicated failover
     1558 | + hosts. These hosts are kept as available as possible - admission control will
     1559 | + block access to the host, and DRS will ignore the host when making
     1560 | + recommendations.
     1561 | + :param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number
     1562 | + of failed hosts that admission control tolerates when making decisions on
     1563 | + whether to permit virtual machine operations. The maximum is one less than
     1564 | + the number of hosts in the cluster. Default: `1`.
     1565 | + <sup>\\*</sup>
     1566 | + :param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of
     1567 | + resource reduction that a cluster of virtual machines can tolerate in case of
     1568 | + a failover. A value of 0 produces warnings only, whereas a value of 100
     1569 | + disables the setting. Default: `100` (disabled).
     1570 | + :param pulumi.Input[str] ha_admission_control_policy: The type of admission control
     1571 | + policy to use with vSphere HA. Can be one of `resourcePercentage`,
     1572 | + `slotPolicy`, `failoverHosts`, or `disabled`. Default: `resourcePercentage`.
     1573 | + :param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: Automatically determine available resource percentages by subtracting the
     1574 | + average number of host resources represented by the
     1575 | + `ha_admission_control_host_failure_tolerance`
     1576 | + setting from the total amount of resources in the cluster. Disable to supply
     1577 | + user-defined values. Default: `true`.
     1578 | + <sup>\\*</sup>
     1579 | + :param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: Controls the
     1580 | + user-defined percentage of CPU resources in the cluster to reserve for
     1581 | + failover. Default: `100`.
     1582 | + :param pulumi.Input[int] ha_admission_control_resource_percentage_memory: Controls the
     1583 | + user-defined percentage of memory resources in the cluster to reserve for
     1584 | + failover. Default: `100`.
     1585 | + :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: Controls the
     1586 | + user-defined CPU slot size, in MHz. Default: `32`.
     1587 | + :param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: Controls the
     1588 | + user-defined memory slot size, in MB. Default: `100`.
     1589 | + :param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: Controls
     1590 | + whether or not you wish to supply explicit values to CPU and memory slot
     1591 | + sizes. The default is `false`, which tells vSphere to gather a automatic
     1592 | + average based on all powered-on virtual machines currently in the cluster.
     1593 | + :param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: A key/value map that specifies advanced
     1594 | + options for vSphere HA.
     1595 | + :param pulumi.Input[str] ha_datastore_apd_recovery_action: Controls the action to take
     1596 | + on virtual machines if an APD status on an affected datastore clears in the
     1597 | + middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
     1598 | + <sup>\\*</sup>
     1599 | + :param pulumi.Input[str] ha_datastore_apd_response: Controls the action to take on
     1600 | + virtual machines when the cluster has detected loss to all paths to a
     1601 | + relevant datastore. Can be one of `disabled`, `warning`,
     1602 | + `restartConservative`, or `restartAggressive`. Default: `disabled`.
     1603 | + <sup>\\*</sup>
     1604 | + :param pulumi.Input[int] ha_datastore_apd_response_delay: The time, in seconds,
     1605 | + to wait after an APD timeout event to run the response action defined in
     1606 | + `ha_datastore_apd_response`. Default: `180`
     1607 | + seconds (3 minutes). <sup>\\*</sup>
     1608 | + :param pulumi.Input[str] ha_datastore_pdl_response: Controls the action to take on
     1609 | + virtual machines when the cluster has detected a permanent device loss to a
     1610 | + relevant datastore. Can be one of `disabled`, `warning`, or
     1611 | + `restartAggressive`. Default: `disabled`.
     1612 | + <sup>\\*</sup>
     1613 | + :param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster. Default:
     1614 | + `false`.
     1615 | + :param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for
     1616 | + preferred datastores to use for HA heartbeating. This setting is only useful
     1617 | + when `ha_heartbeat_datastore_policy` is set
     1618 | + to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
     1619 | + :param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA
     1620 | + heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
     1621 | + `allFeasibleDsWithUserPreference`. Default:
     1622 | + `allFeasibleDsWithUserPreference`.
     1623 | + :param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual
     1624 | + machines when a host has detected that it has been isolated from the rest of
     1625 | + the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
     1626 | + `none`.
     1627 | + :param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether
     1628 | + vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
     1629 | + or `disabled`. Default: `enabled`.
     1630 | + :param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component
     1631 | + protection for virtual machines in this cluster. Can be one of `enabled` or
     1632 | + `disabled`. Default: `enabled`.
     1633 | + <sup>\\*</sup>
     1634 | + :param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to
     1635 | + determine whether or not virtual machines in a certain restart priority class
     1636 | + are online, allowing HA to move on to restarting virtual machines on the next
     1637 | + priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
     1638 | + `appHbStatusGreen`. The default is `none`, which means that a virtual machine
     1639 | + is considered ready immediately after a host is found to start it on.
     1640 | + <sup>\\*</sup>
     1641 | + :param pulumi.Input[int] ha_vm_failure_interval: The time interval, in seconds, a heartbeat
     1642 | + from a virtual machine is not received within this configured interval,
     1643 | + the virtual machine is marked as failed. Default: `30` seconds.
     1644 | + :param pulumi.Input[int] ha_vm_maximum_failure_window: The time, in seconds, for the reset window in
     1645 | + which `ha_vm_maximum_resets` can operate. When this
     1646 | + window expires, no more resets are attempted regardless of the setting
     1647 | + configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
     1648 | + unlimited reset time is allotted. Default: `-1` (no window).
     1649 | + :param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will
     1650 | + perform to a virtual machine when responding to a failure event. Default: `3`
     1651 | + :param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after
     1652 | + powering on a virtual machine before monitoring for heartbeats. Default:
     1653 | + `120` seconds (2 minutes).
     1654 | + :param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use
     1655 | + when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
     1656 | + `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
     1657 | + :param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay, in seconds,
     1658 | + after ready condition is met. A VM is considered ready at this point.
     1659 | + Default: `0` seconds (no delay). <sup>\\*</sup>
     1660 | + :param pulumi.Input[str] ha_vm_restart_priority: The default restart priority
     1661 | + for affected virtual machines when vSphere detects a host failure. Can be one
     1662 | + of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
     1663 | + :param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds,
     1664 | + that vSphere HA will wait for virtual machines in one priority to be ready
     1665 | + before proceeding with the next priority. Default: `600` seconds (10 minutes).
     1666 | + <sup>\\*</sup>
     1667 | + :param pulumi.Input[int] host_cluster_exit_timeout: The timeout, in seconds, for each host maintenance
     1668 | + mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
     1669 | + :param pulumi.Input[bool] host_managed: Can be set to `true` if compute cluster
     1670 | + membership will be managed through the `host` resource rather than the
     1671 | + `compute_cluster` resource. Conflicts with: `host_system_ids`.
     1672 | + :param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of
     1673 | + the hosts to put in the cluster. Conflicts with: `host_managed`.
1389 1674 | :param pulumi.Input[str] name: The name of the cluster.
1390      | - :param pulumi.Input[str] proactive_ha_automation_level:
1391      | -
1392      | -
1393      | -
1394      | - :param pulumi.Input[
1395      | -
1396      | -
     1675 | + :param pulumi.Input[str] proactive_ha_automation_level: Determines how the host
     1676 | + quarantine, maintenance mode, or virtual machine migration recommendations
     1677 | + made by proactive HA are to be handled. Can be one of `Automated` or
     1678 | + `Manual`. Default: `Manual`. <sup>\\*</sup>
     1679 | + :param pulumi.Input[bool] proactive_ha_enabled: Enables Proactive HA. Default: `false`.
     1680 | + <sup>\\*</sup>
     1681 | + :param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation
     1682 | + for moderately degraded hosts. Can be one of `MaintenanceMode` or
     1683 | + `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
     1684 | + `proactive_ha_severe_remediation` is set
     1685 | + to `QuarantineMode`. Default: `QuarantineMode`.
     1686 | + <sup>\\*</sup>
     1687 | + :param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update
     1688 | + providers configured for this cluster.
     1689 | + <sup>\\*</sup>
     1690 | + :param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for
     1691 | + severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
     1692 | + Note that this cannot be set to `QuarantineMode` when
     1693 | + `proactive_ha_moderate_remediation` is
     1694 | + set to `MaintenanceMode`. Default: `QuarantineMode`.
     1695 | + <sup>\\*</sup>
1397 1696 | :param pulumi.Input[str] resource_pool_id: The managed object ID of the primary
1398 1697 | resource pool for this cluster. This can be passed directly to the
1399 1698 | `resource_pool_id`
1400 1699 | attribute of the
1401 1700 | `VirtualMachine` resource.
1402 1701 | :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
1403      | - :param pulumi.Input[bool] vsan_compression_enabled:
1404      | -
1405      | - :param pulumi.Input[
1406      | -
1407      | -
1408      | - :param pulumi.Input[
1409      | -
1410      | - :param pulumi.Input[
1411      | -
1412      | -
1413      | -
1414      | - :param pulumi.Input[
1415      | -
1416      | -
     1702 | + :param pulumi.Input[bool] vsan_compression_enabled: Enables vSAN compression on the
     1703 | + cluster.
     1704 | + :param pulumi.Input[bool] vsan_dedup_enabled: Enables vSAN deduplication on the cluster.
     1705 | + Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
     1706 | + compression must also be enabled.
     1707 | + :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]] vsan_disk_groups: Represents the configuration of a host disk
     1708 | + group in the cluster.
     1709 | + :param pulumi.Input[bool] vsan_dit_encryption_enabled: Enables vSAN data-in-transit
     1710 | + encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
     1711 | + vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
     1712 | + at the same time.
     1713 | + :param pulumi.Input[int] vsan_dit_rekey_interval: Indicates the rekey interval in
     1714 | + minutes for data-in-transit encryption. The valid rekey interval is 30 to
     1715 | + 10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
     1716 | + :param pulumi.Input[bool] vsan_enabled: Enables vSAN on the cluster.
     1717 | + :param pulumi.Input[bool] vsan_esa_enabled: Enables vSAN ESA on the cluster.
     1718 | + :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: Configurations of vSAN fault domains.
     1719 | + :param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Enables network
     1720 | + diagnostic mode for vSAN performance service on the cluster.
     1721 | + :param pulumi.Input[bool] vsan_performance_enabled: Enables vSAN performance service on
     1722 | + the cluster. Default: `true`.
     1723 | + :param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The remote vSAN datastore IDs to be
     1724 | + mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
     1725 | + `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
     1726 | + data-in-transit encryption feature at the same time.
     1727 | + :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: Configurations of vSAN stretched cluster.
     1728 | + :param pulumi.Input[bool] vsan_unmap_enabled: Enables vSAN unmap on the cluster.
     1729 | + You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
     1730 | + :param pulumi.Input[bool] vsan_verbose_mode_enabled: Enables verbose mode for vSAN
     1731 | + performance service on the cluster.
1417 1732 | """
1418 1733 | if custom_attributes is not None:
1419 1734 | pulumi.set(__self__, "custom_attributes", custom_attributes)
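The hunk above restores the full `_ComputeClusterState` constructor documentation for the DPM, DRS, vSphere HA, and host-membership arguments. As a hedged, illustrative sketch of those arguments in use (the datacenter name is an assumption, not taken from this diff), a DRS- and HA-enabled cluster could look like this:

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # assumed datacenter name

cluster = vsphere.ComputeCluster(
    "drs-ha-cluster",
    datacenter_id=datacenter.id,
    # DRS with fully automated placement; DPM only takes effect when DRS is enabled.
    drs_enabled=True,
    drs_automation_level="fullyAutomated",
    dpm_enabled=True,
    dpm_automation_level="automated",
    # vSphere HA using the default resource-percentage admission control policy.
    ha_enabled=True,
    ha_admission_control_policy="resourcePercentage",
    ha_vm_restart_priority="medium",
    ha_host_isolation_response="powerOff",
)
```

The values shown are just one valid combination of the options documented in the docstrings above; any of the listed enum values could be substituted.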
@@ -1505,8 +1820,6 @@ class _ComputeClusterState:
1505 1820 | pulumi.set(__self__, "ha_vm_restart_timeout", ha_vm_restart_timeout)
1506 1821 | if host_cluster_exit_timeout is not None:
1507 1822 | pulumi.set(__self__, "host_cluster_exit_timeout", host_cluster_exit_timeout)
1508      | - if host_image is not None:
1509      | - pulumi.set(__self__, "host_image", host_image)
1510 1823 | if host_managed is not None:
1511 1824 | pulumi.set(__self__, "host_managed", host_managed)
1512 1825 | if host_system_ids is not None:

@@ -1589,7 +1902,9 @@ class _ComputeClusterState:
1589 1902 | @pulumi.getter(name="dpmAutomationLevel")
1590 1903 | def dpm_automation_level(self) -> Optional[pulumi.Input[str]]:
1591 1904 | """
1592      | - The automation level for host power
     1905 | + The automation level for host power
     1906 | + operations in this cluster. Can be one of `manual` or `automated`. Default:
     1907 | + `manual`.
1593 1908 | """
1594 1909 | return pulumi.get(self, "dpm_automation_level")
1595 1910 |

@@ -1601,8 +1916,9 @@ class _ComputeClusterState:
1601 1916 | @pulumi.getter(name="dpmEnabled")
1602 1917 | def dpm_enabled(self) -> Optional[pulumi.Input[bool]]:
1603 1918 | """
1604      | - Enable DPM support for DRS
1605      | -
     1919 | + Enable DPM support for DRS in this cluster.
     1920 | + Requires `drs_enabled` to be `true` in order to be effective.
     1921 | + Default: `false`.
1606 1922 | """
1607 1923 | return pulumi.get(self, "dpm_enabled")
1608 1924 |

@@ -1614,9 +1930,10 @@ class _ComputeClusterState:
1614 1930 | @pulumi.getter(name="dpmThreshold")
1615 1931 | def dpm_threshold(self) -> Optional[pulumi.Input[int]]:
1616 1932 | """
1617      | - A value between 1 and 5 indicating the
1618      | -
1619      | - setting
     1933 | + A value between `1` and `5` indicating the
     1934 | + threshold of load within the cluster that influences host power operations.
     1935 | + This affects both power on and power off operations - a lower setting will
     1936 | + tolerate more of a surplus/deficit than a higher setting. Default: `3`.
1620 1937 | """
1621 1938 | return pulumi.get(self, "dpm_threshold")
1622 1939 |

@@ -1628,7 +1945,8 @@ class _ComputeClusterState:
1628 1945 | @pulumi.getter(name="drsAdvancedOptions")
1629 1946 | def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
1630 1947 | """
1631      | -
     1948 | + A key/value map that specifies advanced
     1949 | + options for DRS and DPM.
1632 1950 | """
1633 1951 | return pulumi.get(self, "drs_advanced_options")
1634 1952 |

@@ -1640,8 +1958,9 @@ class _ComputeClusterState:
1640 1958 | @pulumi.getter(name="drsAutomationLevel")
1641 1959 | def drs_automation_level(self) -> Optional[pulumi.Input[str]]:
1642 1960 | """
1643      | - The default automation level for all
1644      | -
     1961 | + The default automation level for all
     1962 | + virtual machines in this cluster. Can be one of `manual`,
     1963 | + `partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
1645 1964 | """
1646 1965 | return pulumi.get(self, "drs_automation_level")
1647 1966 |

@@ -1653,7 +1972,11 @@ class _ComputeClusterState:
1653 1972 | @pulumi.getter(name="drsEnablePredictiveDrs")
1654 1973 | def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[bool]]:
1655 1974 | """
1656      | - When true
     1975 | + When `true`, enables DRS to use data
     1976 | + from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
     1977 | + recommendations. <sup>\\*</sup>
     1978 | +
     1979 | + [ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
1657 1980 | """
1658 1981 | return pulumi.get(self, "drs_enable_predictive_drs")
1659 1982 |

@@ -1665,7 +1988,8 @@ class _ComputeClusterState:
1665 1988 | @pulumi.getter(name="drsEnableVmOverrides")
1666 1989 | def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[bool]]:
1667 1990 | """
1668      | -
     1991 | + Allow individual DRS overrides to be
     1992 | + set for virtual machines in the cluster. Default: `true`.
1669 1993 | """
1670 1994 | return pulumi.get(self, "drs_enable_vm_overrides")
1671 1995 |

@@ -1677,7 +2001,7 @@ class _ComputeClusterState:
1677 2001 | @pulumi.getter(name="drsEnabled")
1678 2002 | def drs_enabled(self) -> Optional[pulumi.Input[bool]]:
1679 2003 | """
1680      | - Enable DRS for this cluster.
     2004 | + Enable DRS for this cluster. Default: `false`.
1681 2005 | """
1682 2006 | return pulumi.get(self, "drs_enabled")
1683 2007 |

@@ -1689,8 +2013,10 @@ class _ComputeClusterState:
1689 2013 | @pulumi.getter(name="drsMigrationThreshold")
1690 2014 | def drs_migration_threshold(self) -> Optional[pulumi.Input[int]]:
1691 2015 | """
1692      | - A value between 1 and 5 indicating
1693      | -
     2016 | + A value between `1` and `5` indicating
     2017 | + the threshold of imbalance tolerated between hosts. A lower setting will
     2018 | + tolerate more imbalance while a higher setting will tolerate less. Default:
     2019 | + `3`.
1694 2020 | """
1695 2021 | return pulumi.get(self, "drs_migration_threshold")
1696 2022 |

@@ -1702,7 +2028,9 @@ class _ComputeClusterState:
1702 2028 | @pulumi.getter(name="drsScaleDescendantsShares")
1703 2029 | def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[str]]:
1704 2030 | """
1705      | - Enable scalable shares for all
     2031 | + Enable scalable shares for all
     2032 | + resource pools in the cluster. Can be one of `disabled` or
     2033 | + `scaleCpuAndMemoryShares`. Default: `disabled`.
1706 2034 | """
1707 2035 | return pulumi.get(self, "drs_scale_descendants_shares")
1708 2036 |

@@ -1731,8 +2059,18 @@ class _ComputeClusterState:
1731 2059 | @pulumi.getter(name="forceEvacuateOnDestroy")
1732 2060 | def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[bool]]:
1733 2061 | """
1734      | -
1735      | -
     2062 | + When destroying the resource, setting this to
     2063 | + `true` will auto-remove any hosts that are currently a member of the cluster,
     2064 | + as if they were removed by taking their entry out of `host_system_ids` (see
     2065 | + below. This is an advanced
     2066 | + option and should only be used for testing. Default: `false`.
     2067 | +
     2068 | + > **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
     2069 | + there are many pitfalls to its use when working with complex cluster
     2070 | + configurations. Depending on the virtual machines currently on the cluster, and
     2071 | + your DRS and HA settings, the full host evacuation may fail. Instead,
     2072 | + incrementally remove hosts from your configuration by adjusting the contents of
     2073 | + the `host_system_ids` attribute.
1736 2074 | """
1737 2075 | return pulumi.get(self, "force_evacuate_on_destroy")
1738 2076 |

@@ -1744,9 +2082,11 @@ class _ComputeClusterState:
1744 2082 | @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
1745 2083 | def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
1746 2084 | """
1747      | -
1748      | -
1749      | -
     2085 | + Defines the
     2086 | + managed object IDs of hosts to use as dedicated failover
     2087 | + hosts. These hosts are kept as available as possible - admission control will
     2088 | + block access to the host, and DRS will ignore the host when making
     2089 | + recommendations.
1750 2090 | """
1751 2091 | return pulumi.get(self, "ha_admission_control_failover_host_system_ids")
1752 2092 |

@@ -1758,8 +2098,11 @@ class _ComputeClusterState:
1758 2098 | @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
1759 2099 | def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[int]]:
1760 2100 | """
1761      | - The maximum number
1762      | -
     2101 | + The maximum number
     2102 | + of failed hosts that admission control tolerates when making decisions on
     2103 | + whether to permit virtual machine operations. The maximum is one less than
     2104 | + the number of hosts in the cluster. Default: `1`.
     2105 | + <sup>\\*</sup>
1763 2106 | """
1764 2107 | return pulumi.get(self, "ha_admission_control_host_failure_tolerance")
1765 2108 |

@@ -1771,8 +2114,10 @@ class _ComputeClusterState:
1771 2114 | @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
1772 2115 | def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[int]]:
1773 2116 | """
1774      | - The percentage of
1775      | -
     2117 | + The percentage of
     2118 | + resource reduction that a cluster of virtual machines can tolerate in case of
     2119 | + a failover. A value of 0 produces warnings only, whereas a value of 100
     2120 | + disables the setting. Default: `100` (disabled).
1776 2121 | """
1777 2122 | return pulumi.get(self, "ha_admission_control_performance_tolerance")
1778 2123 |

@@ -1784,10 +2129,9 @@ class _ComputeClusterState:
1784 2129 | @pulumi.getter(name="haAdmissionControlPolicy")
1785 2130 | def ha_admission_control_policy(self) -> Optional[pulumi.Input[str]]:
1786 2131 | """
1787      | - The type of admission control
1788      | -
1789      | - slotPolicy
1790      | - issues.
     2132 | + The type of admission control
     2133 | + policy to use with vSphere HA. Can be one of `resourcePercentage`,
     2134 | + `slotPolicy`, `failoverHosts`, or `disabled`. Default: `resourcePercentage`.
1791 2135 | """
1792 2136 | return pulumi.get(self, "ha_admission_control_policy")
1793 2137 |

@@ -1799,9 +2143,12 @@ class _ComputeClusterState:
1799 2143 | @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
1800 2144 | def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[bool]]:
1801 2145 | """
1802      | -
1803      | -
1804      | -
     2146 | + Automatically determine available resource percentages by subtracting the
     2147 | + average number of host resources represented by the
     2148 | + `ha_admission_control_host_failure_tolerance`
     2149 | + setting from the total amount of resources in the cluster. Disable to supply
     2150 | + user-defined values. Default: `true`.
     2151 | + <sup>\\*</sup>
1805 2152 | """
1806 2153 | return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")
1807 2154 |

@@ -1813,8 +2160,9 @@ class _ComputeClusterState:
1813 2160 | @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
1814 2161 | def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[int]]:
1815 2162 | """
1816      | -
1817      | - the cluster to reserve for
     2163 | + Controls the
     2164 | + user-defined percentage of CPU resources in the cluster to reserve for
     2165 | + failover. Default: `100`.
1818 2166 | """
1819 2167 | return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")
1820 2168 |

@@ -1826,8 +2174,9 @@ class _ComputeClusterState:
1826 2174 | @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
1827 2175 | def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[int]]:
1828 2176 | """
1829      | -
1830      | - the cluster to reserve for
     2177 | + Controls the
     2178 | + user-defined percentage of memory resources in the cluster to reserve for
     2179 | + failover. Default: `100`.
1831 2180 | """
1832 2181 | return pulumi.get(self, "ha_admission_control_resource_percentage_memory")
1833 2182 |

@@ -1839,7 +2188,8 @@ class _ComputeClusterState:
1839 2188 | @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
1840 2189 | def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[int]]:
1841 2190 | """
1842      | -
     2191 | + Controls the
     2192 | + user-defined CPU slot size, in MHz. Default: `32`.
1843 2193 | """
1844 2194 | return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")
1845 2195 |

@@ -1851,7 +2201,8 @@ class _ComputeClusterState:
1851 2201 | @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
1852 2202 | def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[int]]:
1853 2203 | """
1854      | -
     2204 | + Controls the
     2205 | + user-defined memory slot size, in MB. Default: `100`.
1855 2206 | """
1856 2207 | return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")
1857 2208 |

@@ -1863,9 +2214,10 @@ class _ComputeClusterState:
1863 2214 | @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
1864 2215 | def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[bool]]:
1865 2216 | """
1866      | -
1867      | -
1868      | -
     2217 | + Controls
     2218 | + whether or not you wish to supply explicit values to CPU and memory slot
     2219 | + sizes. The default is `false`, which tells vSphere to gather a automatic
     2220 | + average based on all powered-on virtual machines currently in the cluster.
1869 2221 | """
1870 2222 | return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")
1871 2223 |

@@ -1877,7 +2229,8 @@ class _ComputeClusterState:
1877 2229 | @pulumi.getter(name="haAdvancedOptions")
1878 2230 | def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
1879 2231 | """
1880      | -
     2232 | + A key/value map that specifies advanced
     2233 | + options for vSphere HA.
1881 2234 | """
1882 2235 | return pulumi.get(self, "ha_advanced_options")
1883 2236 |

@@ -1889,8 +2242,10 @@ class _ComputeClusterState:
1889 2242 | @pulumi.getter(name="haDatastoreApdRecoveryAction")
1890 2243 | def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[str]]:
1891 2244 | """
1892      | -
1893      | -
     2245 | + Controls the action to take
     2246 | + on virtual machines if an APD status on an affected datastore clears in the
     2247 | + middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
     2248 | + <sup>\\*</sup>
1894 2249 | """
1895 2250 | return pulumi.get(self, "ha_datastore_apd_recovery_action")
1896 2251 |

@@ -1902,9 +2257,11 @@ class _ComputeClusterState:
1902 2257 | @pulumi.getter(name="haDatastoreApdResponse")
1903 2258 | def ha_datastore_apd_response(self) -> Optional[pulumi.Input[str]]:
1904 2259 | """
1905      | -
1906      | - detected loss to all paths to a
1907      | -
     2260 | + Controls the action to take on
     2261 | + virtual machines when the cluster has detected loss to all paths to a
     2262 | + relevant datastore. Can be one of `disabled`, `warning`,
     2263 | + `restartConservative`, or `restartAggressive`. Default: `disabled`.
     2264 | + <sup>\\*</sup>
1908 2265 | """
1909 2266 | return pulumi.get(self, "ha_datastore_apd_response")
1910 2267 |

@@ -1916,8 +2273,10 @@ class _ComputeClusterState:
1916 2273 | @pulumi.getter(name="haDatastoreApdResponseDelay")
1917 2274 | def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[int]]:
1918 2275 | """
1919      | -
1920      | - the response action defined in
     2276 | + The time, in seconds,
     2277 | + to wait after an APD timeout event to run the response action defined in
     2278 | + `ha_datastore_apd_response`. Default: `180`
     2279 | + seconds (3 minutes). <sup>\\*</sup>
1921 2280 | """
1922 2281 | return pulumi.get(self, "ha_datastore_apd_response_delay")
1923 2282 |

@@ -1929,8 +2288,11 @@ class _ComputeClusterState:
1929 2288 | @pulumi.getter(name="haDatastorePdlResponse")
1930 2289 | def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[str]]:
1931 2290 | """
1932      | -
1933      | - detected a permanent device loss to a
     2291 | + Controls the action to take on
     2292 | + virtual machines when the cluster has detected a permanent device loss to a
     2293 | + relevant datastore. Can be one of `disabled`, `warning`, or
     2294 | + `restartAggressive`. Default: `disabled`.
     2295 | + <sup>\\*</sup>
1934 2296 | """
1935 2297 | return pulumi.get(self, "ha_datastore_pdl_response")
1936 2298 |

@@ -1942,7 +2304,8 @@ class _ComputeClusterState:
1942 2304 | @pulumi.getter(name="haEnabled")
1943 2305 | def ha_enabled(self) -> Optional[pulumi.Input[bool]]:
1944 2306 | """
1945      | - Enable vSphere HA for this cluster.
     2307 | + Enable vSphere HA for this cluster. Default:
     2308 | + `false`.
1946 2309 | """
1947 2310 | return pulumi.get(self, "ha_enabled")
1948 2311 |

@@ -1954,8 +2317,10 @@ class _ComputeClusterState:
1954 2317 | @pulumi.getter(name="haHeartbeatDatastoreIds")
1955 2318 | def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
1956 2319 | """
1957      | - The list of managed object IDs for
1958      | -
     2320 | + The list of managed object IDs for
     2321 | + preferred datastores to use for HA heartbeating. This setting is only useful
     2322 | + when `ha_heartbeat_datastore_policy` is set
     2323 | + to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
1959 2324 | """
1960 2325 | return pulumi.get(self, "ha_heartbeat_datastore_ids")
1961 2326 |

@@ -1967,8 +2332,10 @@ class _ComputeClusterState:
1967 2332 | @pulumi.getter(name="haHeartbeatDatastorePolicy")
1968 2333 | def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[str]]:
1969 2334 | """
1970      | - The selection policy for HA
1971      | -
     2335 | + The selection policy for HA
     2336 | + heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
     2337 | + `allFeasibleDsWithUserPreference`. Default:
     2338 | + `allFeasibleDsWithUserPreference`.
1972 2339 | """
1973 2340 | return pulumi.get(self, "ha_heartbeat_datastore_policy")
1974 2341 |

@@ -1980,8 +2347,10 @@ class _ComputeClusterState:
1980 2347 | @pulumi.getter(name="haHostIsolationResponse")
1981 2348 | def ha_host_isolation_response(self) -> Optional[pulumi.Input[str]]:
1982 2349 | """
1983      | - The action to take on virtual
1984      | -
     2350 | + The action to take on virtual
     2351 | + machines when a host has detected that it has been isolated from the rest of
     2352 | + the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
     2353 | + `none`.
1985 2354 | """
1986 2355 | return pulumi.get(self, "ha_host_isolation_response")
1987 2356 |

@@ -1993,7 +2362,9 @@ class _ComputeClusterState:
1993 2362 | @pulumi.getter(name="haHostMonitoring")
1994 2363 | def ha_host_monitoring(self) -> Optional[pulumi.Input[str]]:
1995 2364 | """
1996      | - Global setting that controls whether
     2365 | + Global setting that controls whether
     2366 | + vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
     2367 | + or `disabled`. Default: `enabled`.
1997 2368 | """
1998 2369 | return pulumi.get(self, "ha_host_monitoring")
1999 2370 |

@@ -2005,8 +2376,10 @@ class _ComputeClusterState:
2005 2376 | @pulumi.getter(name="haVmComponentProtection")
2006 2377 | def ha_vm_component_protection(self) -> Optional[pulumi.Input[str]]:
2007 2378 | """
2008      | - Controls vSphere VM component
2009      | -
     2379 | + Controls vSphere VM component
     2380 | + protection for virtual machines in this cluster. Can be one of `enabled` or
     2381 | + `disabled`. Default: `enabled`.
     2382 | + <sup>\\*</sup>
2010 2383 | """
2011 2384 | return pulumi.get(self, "ha_vm_component_protection")
2012 2385 |

@@ -2018,8 +2391,13 @@ class _ComputeClusterState:
2018 2391 | @pulumi.getter(name="haVmDependencyRestartCondition")
2019 2392 | def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[str]]:
2020 2393 | """
2021      | - The condition used to
2022      | -
     2394 | + The condition used to
     2395 | + determine whether or not virtual machines in a certain restart priority class
     2396 | + are online, allowing HA to move on to restarting virtual machines on the next
     2397 | + priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
     2398 | + `appHbStatusGreen`. The default is `none`, which means that a virtual machine
     2399 | + is considered ready immediately after a host is found to start it on.
     2400 | + <sup>\\*</sup>
2023 2401 | """
2024 2402 | return pulumi.get(self, "ha_vm_dependency_restart_condition")
2025 2403 |

@@ -2031,8 +2409,9 @@ class _ComputeClusterState:
2031 2409 | @pulumi.getter(name="haVmFailureInterval")
2032 2410 | def ha_vm_failure_interval(self) -> Optional[pulumi.Input[int]]:
2033 2411 | """
2034      | -
2035      | -
     2412 | + The time interval, in seconds, a heartbeat
     2413 | + from a virtual machine is not received within this configured interval,
     2414 | + the virtual machine is marked as failed. Default: `30` seconds.
2036 2415 | """
2037 2416 | return pulumi.get(self, "ha_vm_failure_interval")
2038 2417 |

@@ -2044,9 +2423,11 @@ class _ComputeClusterState:
2044 2423 | @pulumi.getter(name="haVmMaximumFailureWindow")
2045 2424 | def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[int]]:
2046 2425 | """
2047      | - The
2048      | -
2049      | -
     2426 | + The time, in seconds, for the reset window in
     2427 | + which `ha_vm_maximum_resets` can operate. When this
     2428 | + window expires, no more resets are attempted regardless of the setting
     2429 | + configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
     2430 | + unlimited reset time is allotted. Default: `-1` (no window).
2050 2431 | """
2051 2432 | return pulumi.get(self, "ha_vm_maximum_failure_window")
2052 2433 |

@@ -2058,7 +2439,8 @@ class _ComputeClusterState:
2058 2439 | @pulumi.getter(name="haVmMaximumResets")
2059 2440 | def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[int]]:
2060 2441 | """
2061      | - The maximum number of resets that HA will
     2442 | + The maximum number of resets that HA will
     2443 | + perform to a virtual machine when responding to a failure event. Default: `3`
2062 2444 | """
2063 2445 | return pulumi.get(self, "ha_vm_maximum_resets")
2064 2446 |

@@ -2070,7 +2452,9 @@ class _ComputeClusterState:
2070 2452 | @pulumi.getter(name="haVmMinimumUptime")
2071 2453 | def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[int]]:
2072 2454 | """
2073      | - The time, in seconds, that HA waits after
     2455 | + The time, in seconds, that HA waits after
     2456 | + powering on a virtual machine before monitoring for heartbeats. Default:
     2457 | + `120` seconds (2 minutes).
2074 2458 | """
2075 2459 | return pulumi.get(self, "ha_vm_minimum_uptime")
2076 2460 |

@@ -2082,8 +2466,9 @@ class _ComputeClusterState:
2082 2466 | @pulumi.getter(name="haVmMonitoring")
2083 2467 | def ha_vm_monitoring(self) -> Optional[pulumi.Input[str]]:
2084 2468 | """
2085      | - The type of virtual machine monitoring to use
2086      | -
     2469 | + The type of virtual machine monitoring to use
     2470 | + when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
     2471 | + `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
2087 2472 | """
2088 2473 | return pulumi.get(self, "ha_vm_monitoring")
2089 2474 |

@@ -2095,7 +2480,9 @@ class _ComputeClusterState:
2095 2480 | @pulumi.getter(name="haVmRestartAdditionalDelay")
2096 2481 | def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[int]]:
2097 2482 | """
2098      | - Additional delay in seconds
     2483 | + Additional delay, in seconds,
     2484 | + after ready condition is met. A VM is considered ready at this point.
     2485 | + Default: `0` seconds (no delay). <sup>\\*</sup>
2099 2486 | """
2100 2487 | return pulumi.get(self, "ha_vm_restart_additional_delay")
2101 2488 |

@@ -2107,8 +2494,9 @@ class _ComputeClusterState:
2107 2494 | @pulumi.getter(name="haVmRestartPriority")
2108 2495 | def ha_vm_restart_priority(self) -> Optional[pulumi.Input[str]]:
2109 2496 | """
2110      | - The default restart priority
2111      | -
     2497 | + The default restart priority
     2498 | + for affected virtual machines when vSphere detects a host failure. Can be one
     2499 | + of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
2112 2500 | """
2113 2501 | return pulumi.get(self, "ha_vm_restart_priority")
2114 2502 |

@@ -2120,8 +2508,10 @@ class _ComputeClusterState:
2120 2508 | @pulumi.getter(name="haVmRestartTimeout")
2121 2509 | def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[int]]:
2122 2510 | """
2123      | - The maximum time, in seconds,
2124      | -
     2511 | + The maximum time, in seconds,
     2512 | + that vSphere HA will wait for virtual machines in one priority to be ready
     2513 | + before proceeding with the next priority. Default: `600` seconds (10 minutes).
     2514 | + <sup>\\*</sup>
2125 2515 | """
2126 2516 | return pulumi.get(self, "ha_vm_restart_timeout")
2127 2517 |

@@ -2133,7 +2523,8 @@ class _ComputeClusterState:
2133 2523 | @pulumi.getter(name="hostClusterExitTimeout")
2134 2524 | def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[int]]:
2135 2525 | """
2136      | - The timeout for each host maintenance
     2526 | + The timeout, in seconds, for each host maintenance
     2527 | + mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
2137 2528 | """
2138 2529 | return pulumi.get(self, "host_cluster_exit_timeout")
2139 2530 |

@@ -2141,23 +2532,13 @@ class _ComputeClusterState:
2141 2532 | def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[int]]):
2142 2533 | pulumi.set(self, "host_cluster_exit_timeout", value)
2143 2534 |
2144      | - @property
2145      | - @pulumi.getter(name="hostImage")
2146      | - def host_image(self) -> Optional[pulumi.Input['ComputeClusterHostImageArgs']]:
2147      | - """
2148      | - Details about the host image which should be applied to the cluster.
2149      | - """
2150      | - return pulumi.get(self, "host_image")
2151      | -
2152      | - @host_image.setter
2153      | - def host_image(self, value: Optional[pulumi.Input['ComputeClusterHostImageArgs']]):
2154      | - pulumi.set(self, "host_image", value)
2155      | -
2156 2535 | @property
2157 2536 | @pulumi.getter(name="hostManaged")
2158 2537 | def host_managed(self) -> Optional[pulumi.Input[bool]]:
2159 2538 | """
2160      | -
     2539 | + Can be set to `true` if compute cluster
     2540 | + membership will be managed through the `host` resource rather than the
     2541 | + `compute_cluster` resource. Conflicts with: `host_system_ids`.
2161 2542 | """
2162 2543 | return pulumi.get(self, "host_managed")
2163 2544 |

@@ -2169,7 +2550,8 @@ class _ComputeClusterState:
2169 2550 | @pulumi.getter(name="hostSystemIds")
2170 2551 | def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
2171 2552 | """
2172      | - The managed object IDs of
     2553 | + The managed object IDs of
     2554 | + the hosts to put in the cluster. Conflicts with: `host_managed`.
2173 2555 | """
2174 2556 | return pulumi.get(self, "host_system_ids")
2175 2557 |
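Several of the getters restored above document the slot-policy variant of HA admission control. A hedged sketch of that combination follows; the datacenter name and slot sizes are illustrative assumptions, chosen only to show which arguments work together, not values taken from this diff.

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # assumed datacenter name

cluster = vsphere.ComputeCluster(
    "slot-policy-cluster",
    datacenter_id=datacenter.id,
    ha_enabled=True,
    # Slot-policy admission control with explicit slot sizes instead of the
    # automatically computed average described in the docstrings above.
    ha_admission_control_policy="slotPolicy",
    ha_admission_control_slot_policy_use_explicit_size=True,
    ha_admission_control_slot_policy_explicit_cpu=500,       # MHz, illustrative
    ha_admission_control_slot_policy_explicit_memory=1024,   # MB, illustrative
    ha_admission_control_host_failure_tolerance=1,
)
```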
@@ -2193,7 +2575,10 @@ class _ComputeClusterState:
2193 2575 | @pulumi.getter(name="proactiveHaAutomationLevel")
2194 2576 | def proactive_ha_automation_level(self) -> Optional[pulumi.Input[str]]:
2195 2577 | """
2196      | -
     2578 | + Determines how the host
     2579 | + quarantine, maintenance mode, or virtual machine migration recommendations
     2580 | + made by proactive HA are to be handled. Can be one of `Automated` or
     2581 | + `Manual`. Default: `Manual`. <sup>\\*</sup>
2197 2582 | """
2198 2583 | return pulumi.get(self, "proactive_ha_automation_level")
2199 2584 |

@@ -2205,7 +2590,8 @@ class _ComputeClusterState:
2205 2590 | @pulumi.getter(name="proactiveHaEnabled")
2206 2591 | def proactive_ha_enabled(self) -> Optional[pulumi.Input[bool]]:
2207 2592 | """
2208      | - Enables
     2593 | + Enables Proactive HA. Default: `false`.
     2594 | + <sup>\\*</sup>
2209 2595 | """
2210 2596 | return pulumi.get(self, "proactive_ha_enabled")
2211 2597 |

@@ -2217,8 +2603,12 @@ class _ComputeClusterState:
2217 2603 | @pulumi.getter(name="proactiveHaModerateRemediation")
2218 2604 | def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[str]]:
2219 2605 | """
2220      | - The configured remediation
2221      | -
     2606 | + The configured remediation
     2607 | + for moderately degraded hosts. Can be one of `MaintenanceMode` or
     2608 | + `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
     2609 | + `proactive_ha_severe_remediation` is set
     2610 | + to `QuarantineMode`. Default: `QuarantineMode`.
     2611 | + <sup>\\*</sup>
2222 2612 | """
2223 2613 | return pulumi.get(self, "proactive_ha_moderate_remediation")
2224 2614 |

@@ -2230,7 +2620,9 @@ class _ComputeClusterState:
2230 2620 | @pulumi.getter(name="proactiveHaProviderIds")
2231 2621 | def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
2232 2622 | """
2233      | - The list of IDs for health update
     2623 | + The list of IDs for health update
     2624 | + providers configured for this cluster.
     2625 | + <sup>\\*</sup>
2234 2626 | """
2235 2627 | return pulumi.get(self, "proactive_ha_provider_ids")
2236 2628 |

@@ -2242,8 +2634,12 @@ class _ComputeClusterState:
2242 2634 | @pulumi.getter(name="proactiveHaSevereRemediation")
2243 2635 | def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[str]]:
2244 2636 | """
2245      | - The configured remediation for
2246      | -
     2637 | + The configured remediation for
     2638 | + severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
     2639 | + Note that this cannot be set to `QuarantineMode` when
     2640 | + `proactive_ha_moderate_remediation` is
     2641 | + set to `MaintenanceMode`. Default: `QuarantineMode`.
     2642 | + <sup>\\*</sup>
2247 2643 | """
2248 2644 | return pulumi.get(self, "proactive_ha_severe_remediation")
2249 2645 |
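The Proactive HA getters above spell out the remediation constraint between moderate and severe host degradation. A minimal, hypothetical sketch of a valid combination (datacenter name and provider ID are placeholders, not values from this diff):

```python
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc-01")  # assumed datacenter name

cluster = vsphere.ComputeCluster(
    "proactive-ha-cluster",
    datacenter_id=datacenter.id,
    ha_enabled=True,
    proactive_ha_enabled=True,
    proactive_ha_automation_level="Automated",
    # Moderately degraded hosts are quarantined while severely degraded hosts
    # go into maintenance mode; the reverse pairing is disallowed per the
    # docstrings restored above.
    proactive_ha_moderate_remediation="QuarantineMode",
    proactive_ha_severe_remediation="MaintenanceMode",
    proactive_ha_provider_ids=["example-provider-id"],  # placeholder provider ID
)
```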
@@ -2283,7 +2679,8 @@ class _ComputeClusterState:
|
|
|
2283
2679
|
@pulumi.getter(name="vsanCompressionEnabled")
|
|
2284
2680
|
def vsan_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
2285
2681
|
"""
|
|
2286
|
-
|
|
2682
|
+
Enables vSAN compression on the
|
|
2683
|
+
cluster.
|
|
2287
2684
|
"""
|
|
2288
2685
|
return pulumi.get(self, "vsan_compression_enabled")
|
|
2289
2686
|
|
|
@@ -2295,7 +2692,9 @@ class _ComputeClusterState:
|
|
|
2295
2692
|
@pulumi.getter(name="vsanDedupEnabled")
|
|
2296
2693
|
def vsan_dedup_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
2297
2694
|
"""
|
|
2298
|
-
|
|
2695
|
+
Enables vSAN deduplication on the cluster.
|
|
2696
|
+
Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
|
|
2697
|
+
compression must also be enabled.
|
|
2299
2698
|
"""
|
|
2300
2699
|
return pulumi.get(self, "vsan_dedup_enabled")
|
|
2301
2700
|
|
|
@@ -2307,7 +2706,8 @@ class _ComputeClusterState:
|
|
|
2307
2706
|
@pulumi.getter(name="vsanDiskGroups")
|
|
2308
2707
|
def vsan_disk_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]:
|
|
2309
2708
|
"""
|
|
2310
|
-
|
|
2709
|
+
Represents the configuration of a host disk
|
|
2710
|
+
group in the cluster.
|
|
2311
2711
|
"""
|
|
2312
2712
|
return pulumi.get(self, "vsan_disk_groups")
|
|
2313
2713
|
|
|
@@ -2319,7 +2719,10 @@ class _ComputeClusterState:
|
|
|
2319
2719
|
@pulumi.getter(name="vsanDitEncryptionEnabled")
|
|
2320
2720
|
def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
2321
2721
|
"""
|
|
2322
|
-
|
|
2722
|
+
Enables vSAN data-in-transit
|
|
2723
|
+
encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
|
|
2724
|
+
vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
|
|
2725
|
+
at the same time.
|
|
2323
2726
|
"""
|
|
2324
2727
|
return pulumi.get(self, "vsan_dit_encryption_enabled")
|
|
2325
2728
|
|
|
@@ -2331,7 +2734,9 @@ class _ComputeClusterState:
|
|
|
2331
2734
|
@pulumi.getter(name="vsanDitRekeyInterval")
|
|
2332
2735
|
def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[int]]:
|
|
2333
2736
|
"""
|
|
2334
|
-
|
|
2737
|
+
Indicates the rekey interval in
|
|
2738
|
+
minutes for data-in-transit encryption. The valid rekey interval is 30 to
|
|
2739
|
+
10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
|
|
2335
2740
|
"""
|
|
2336
2741
|
return pulumi.get(self, "vsan_dit_rekey_interval")
|
|
2337
2742
|
|
|
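To illustrate the conflict described above, a hedged sketch that enables data-in-transit encryption with a rekey interval while deliberately leaving HCI Mesh remote datastores unset; the datacenter name is an assumption.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # placeholder

encrypted_cluster = vsphere.ComputeCluster(
    "vsan-dit-cluster",
    datacenter_id=dc.id,
    vsan_enabled=True,
    vsan_dit_encryption_enabled=True,
    vsan_dit_rekey_interval=1440,  # minutes; the valid range is 30 to 10800
    # vsan_remote_datastore_ids is deliberately omitted: HCI Mesh and
    # data-in-transit encryption cannot be combined on the same cluster.
)
```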
@@ -2343,7 +2748,7 @@ class _ComputeClusterState:
|
|
|
2343
2748
|
@pulumi.getter(name="vsanEnabled")
|
|
2344
2749
|
def vsan_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
2345
2750
|
"""
|
|
2346
|
-
|
|
2751
|
+
Enables vSAN on the cluster.
|
|
2347
2752
|
"""
|
|
2348
2753
|
return pulumi.get(self, "vsan_enabled")
|
|
2349
2754
|
|
|
@@ -2355,7 +2760,7 @@ class _ComputeClusterState:
|
|
|
2355
2760
|
@pulumi.getter(name="vsanEsaEnabled")
|
|
2356
2761
|
def vsan_esa_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
2357
2762
|
"""
|
|
2358
|
-
|
|
2763
|
+
Enables vSAN ESA on the cluster.
|
|
2359
2764
|
"""
|
|
2360
2765
|
return pulumi.get(self, "vsan_esa_enabled")
|
|
2361
2766
|
|
|
@@ -2367,7 +2772,7 @@ class _ComputeClusterState:
|
|
|
2367
2772
|
@pulumi.getter(name="vsanFaultDomains")
|
|
2368
2773
|
def vsan_fault_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]:
|
|
2369
2774
|
"""
|
|
2370
|
-
|
|
2775
|
+
Configurations of vSAN fault domains.
|
|
2371
2776
|
"""
|
|
2372
2777
|
return pulumi.get(self, "vsan_fault_domains")
|
|
2373
2778
|
|
|
@@ -2379,7 +2784,8 @@ class _ComputeClusterState:
|
|
|
2379
2784
|
@pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
|
|
2380
2785
|
def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
2381
2786
|
"""
|
|
2382
|
-
|
|
2787
|
+
Enables network
|
|
2788
|
+
diagnostic mode for vSAN performance service on the cluster.
|
|
2383
2789
|
"""
|
|
2384
2790
|
return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")
|
|
2385
2791
|
|
|
@@ -2391,7 +2797,8 @@ class _ComputeClusterState:
|
|
|
2391
2797
|
@pulumi.getter(name="vsanPerformanceEnabled")
|
|
2392
2798
|
def vsan_performance_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
2393
2799
|
"""
|
|
2394
|
-
|
|
2800
|
+
Enables vSAN performance service on
|
|
2801
|
+
the cluster. Default: `true`.
|
|
2395
2802
|
"""
|
|
2396
2803
|
return pulumi.get(self, "vsan_performance_enabled")
|
|
2397
2804
|
|
|
@@ -2403,7 +2810,10 @@ class _ComputeClusterState:
|
|
|
2403
2810
|
@pulumi.getter(name="vsanRemoteDatastoreIds")
|
|
2404
2811
|
def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
|
|
2405
2812
|
"""
|
|
2406
|
-
The
|
|
2813
|
+
The remote vSAN datastore IDs to be
|
|
2814
|
+
mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
|
|
2815
|
+
`vsan_dit_rekey_interval`, i.e., the vSAN HCI Mesh feature cannot be enabled with the
|
|
2816
|
+
data-in-transit encryption feature at the same time.
|
|
2407
2817
|
"""
|
|
2408
2818
|
return pulumi.get(self, "vsan_remote_datastore_ids")
|
|
2409
2819
|
|
|
@@ -2415,7 +2825,7 @@ class _ComputeClusterState:
|
|
|
2415
2825
|
@pulumi.getter(name="vsanStretchedCluster")
|
|
2416
2826
|
def vsan_stretched_cluster(self) -> Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]:
|
|
2417
2827
|
"""
|
|
2418
|
-
|
|
2828
|
+
Configurations of vSAN stretched cluster.
|
|
2419
2829
|
"""
|
|
2420
2830
|
return pulumi.get(self, "vsan_stretched_cluster")
|
|
2421
2831
|
|
|
@@ -2427,7 +2837,8 @@ class _ComputeClusterState:
|
|
|
2427
2837
|
@pulumi.getter(name="vsanUnmapEnabled")
|
|
2428
2838
|
def vsan_unmap_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
2429
2839
|
"""
|
|
2430
|
-
|
|
2840
|
+
Enables vSAN unmap on the cluster.
|
|
2841
|
+
You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
|
|
2431
2842
|
"""
|
|
2432
2843
|
return pulumi.get(self, "vsan_unmap_enabled")
|
|
2433
2844
|
|
|
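A brief sketch of the ESA/unmap relationship noted above: enabling vSAN ESA does not switch unmap on implicitly, so it is set explicitly alongside it. The datacenter name is a placeholder.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # placeholder

esa_cluster = vsphere.ComputeCluster(
    "vsan-esa-cluster",
    datacenter_id=dc.id,
    vsan_enabled=True,
    vsan_esa_enabled=True,
    # Unmap must be enabled explicitly when ESA is enabled on the cluster.
    vsan_unmap_enabled=True,
)
```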
@@ -2439,7 +2850,8 @@ class _ComputeClusterState:
|
|
|
2439
2850
|
@pulumi.getter(name="vsanVerboseModeEnabled")
|
|
2440
2851
|
def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
|
|
2441
2852
|
"""
|
|
2442
|
-
|
|
2853
|
+
Enables verbose mode for vSAN
|
|
2854
|
+
performance service on the cluster.
|
|
2443
2855
|
"""
|
|
2444
2856
|
return pulumi.get(self, "vsan_verbose_mode_enabled")
|
|
2445
2857
|
|
|
@@ -2498,7 +2910,6 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
2498
2910
|
ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
|
|
2499
2911
|
ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
|
|
2500
2912
|
host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
|
|
2501
|
-
host_image: Optional[pulumi.Input[pulumi.InputType['ComputeClusterHostImageArgs']]] = None,
|
|
2502
2913
|
host_managed: Optional[pulumi.Input[bool]] = None,
|
|
2503
2914
|
host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
|
|
2504
2915
|
name: Optional[pulumi.Input[str]] = None,
|
|
@@ -2534,115 +2945,225 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
2534
2945
|
and require vCenter Server.
|
|
2535
2946
|
:param pulumi.Input[str] datacenter_id: The managed object ID of
|
|
2536
2947
|
the datacenter to create the cluster in. Forces a new resource if changed.
|
|
2537
|
-
:param pulumi.Input[str] dpm_automation_level: The automation level for host power
|
|
2538
|
-
|
|
2539
|
-
|
|
2540
|
-
:param pulumi.Input[
|
|
2541
|
-
|
|
2542
|
-
|
|
2543
|
-
:param pulumi.Input[
|
|
2544
|
-
|
|
2545
|
-
|
|
2546
|
-
|
|
2547
|
-
:param pulumi.Input[
|
|
2548
|
-
|
|
2549
|
-
:param pulumi.Input[
|
|
2550
|
-
|
|
2551
|
-
|
|
2948
|
+
:param pulumi.Input[str] dpm_automation_level: The automation level for host power
|
|
2949
|
+
operations in this cluster. Can be one of `manual` or `automated`. Default:
|
|
2950
|
+
`manual`.
|
|
2951
|
+
:param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS in this cluster.
|
|
2952
|
+
Requires `drs_enabled` to be `true` in order to be effective.
|
|
2953
|
+
Default: `false`.
|
|
2954
|
+
:param pulumi.Input[int] dpm_threshold: A value between `1` and `5` indicating the
|
|
2955
|
+
threshold of load within the cluster that influences host power operations.
|
|
2956
|
+
This affects both power on and power off operations - a lower setting will
|
|
2957
|
+
tolerate more of a surplus/deficit than a higher setting. Default: `3`.
|
|
2958
|
+
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: A key/value map that specifies advanced
|
|
2959
|
+
options for DRS and DPM.
|
|
2960
|
+
:param pulumi.Input[str] drs_automation_level: The default automation level for all
|
|
2961
|
+
virtual machines in this cluster. Can be one of `manual`,
|
|
2962
|
+
`partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
|
|
2963
|
+
:param pulumi.Input[bool] drs_enable_predictive_drs: When `true`, enables DRS to use data
|
|
2964
|
+
from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
|
|
2965
|
+
recommendations. <sup>\\*</sup>
|
|
2966
|
+
|
|
2967
|
+
[ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
|
|
2968
|
+
:param pulumi.Input[bool] drs_enable_vm_overrides: Allow individual DRS overrides to be
|
|
2969
|
+
set for virtual machines in the cluster. Default: `true`.
|
|
2970
|
+
:param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster. Default: `false`.
|
|
2971
|
+
:param pulumi.Input[int] drs_migration_threshold: A value between `1` and `5` indicating
|
|
2972
|
+
the threshold of imbalance tolerated between hosts. A lower setting will
|
|
2973
|
+
tolerate more imbalance while a higher setting will tolerate less. Default:
|
|
2974
|
+
`3`.
|
|
2975
|
+
:param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all
|
|
2976
|
+
resource pools in the cluster. Can be one of `disabled` or
|
|
2977
|
+
`scaleCpuAndMemoryShares`. Default: `disabled`.
|
|
2552
2978
|
:param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
|
|
2553
2979
|
This is a path relative to the datacenter you are deploying the cluster to.
|
|
2554
2980
|
Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
|
|
2555
2981
|
The provider will place a cluster named `compute-cluster-test` in a
|
|
2556
2982
|
host folder located at `/dc1/host/foo/bar`, with the final inventory path
|
|
2557
2983
|
being `/dc1/host/foo/bar/datastore-cluster-test`.
|
|
2558
|
-
:param pulumi.Input[bool] force_evacuate_on_destroy:
|
|
2559
|
-
|
|
2560
|
-
|
|
2561
|
-
|
|
2562
|
-
|
|
2563
|
-
|
|
2564
|
-
|
|
2565
|
-
|
|
2566
|
-
|
|
2567
|
-
|
|
2568
|
-
|
|
2569
|
-
|
|
2570
|
-
|
|
2571
|
-
|
|
2572
|
-
|
|
2573
|
-
|
|
2574
|
-
|
|
2575
|
-
|
|
2576
|
-
|
|
2577
|
-
|
|
2578
|
-
|
|
2579
|
-
|
|
2580
|
-
:param pulumi.Input[
|
|
2581
|
-
|
|
2582
|
-
|
|
2583
|
-
|
|
2584
|
-
:param pulumi.Input[str]
|
|
2585
|
-
|
|
2586
|
-
|
|
2587
|
-
|
|
2588
|
-
|
|
2589
|
-
|
|
2590
|
-
the
|
|
2591
|
-
|
|
2592
|
-
|
|
2593
|
-
:param pulumi.Input[
|
|
2594
|
-
|
|
2595
|
-
|
|
2596
|
-
:param pulumi.Input[
|
|
2597
|
-
|
|
2598
|
-
|
|
2599
|
-
|
|
2600
|
-
|
|
2601
|
-
:param pulumi.Input[
|
|
2602
|
-
|
|
2603
|
-
:param pulumi.Input[
|
|
2604
|
-
|
|
2605
|
-
|
|
2606
|
-
|
|
2607
|
-
:param pulumi.Input[
|
|
2608
|
-
|
|
2609
|
-
|
|
2610
|
-
|
|
2611
|
-
|
|
2612
|
-
|
|
2613
|
-
|
|
2614
|
-
|
|
2615
|
-
|
|
2616
|
-
|
|
2617
|
-
|
|
2618
|
-
|
|
2619
|
-
|
|
2620
|
-
|
|
2621
|
-
|
|
2622
|
-
:param pulumi.Input[
|
|
2984
|
+
:param pulumi.Input[bool] force_evacuate_on_destroy: When destroying the resource, setting this to
|
|
2985
|
+
`true` will auto-remove any hosts that are currently a member of the cluster,
|
|
2986
|
+
as if they were removed by taking their entry out of `host_system_ids` (see
|
|
2987
|
+
below). This is an advanced
|
|
2988
|
+
option and should only be used for testing. Default: `false`.
|
|
2989
|
+
|
|
2990
|
+
> **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
|
|
2991
|
+
there are many pitfalls to its use when working with complex cluster
|
|
2992
|
+
configurations. Depending on the virtual machines currently on the cluster, and
|
|
2993
|
+
your DRS and HA settings, the full host evacuation may fail. Instead,
|
|
2994
|
+
incrementally remove hosts from your configuration by adjusting the contents of
|
|
2995
|
+
the `host_system_ids` attribute.
|
|
2996
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: Defines the
|
|
2997
|
+
managed object IDs of hosts to use as dedicated failover
|
|
2998
|
+
hosts. These hosts are kept as available as possible - admission control will
|
|
2999
|
+
block access to the host, and DRS will ignore the host when making
|
|
3000
|
+
recommendations.
|
|
3001
|
+
:param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number
|
|
3002
|
+
of failed hosts that admission control tolerates when making decisions on
|
|
3003
|
+
whether to permit virtual machine operations. The maximum is one less than
|
|
3004
|
+
the number of hosts in the cluster. Default: `1`.
|
|
3005
|
+
<sup>\\*</sup>
|
|
3006
|
+
:param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of
|
|
3007
|
+
resource reduction that a cluster of virtual machines can tolerate in case of
|
|
3008
|
+
a failover. A value of 0 produces warnings only, whereas a value of 100
|
|
3009
|
+
disables the setting. Default: `100` (disabled).
|
|
3010
|
+
:param pulumi.Input[str] ha_admission_control_policy: The type of admission control
|
|
3011
|
+
policy to use with vSphere HA. Can be one of `resourcePercentage`,
|
|
3012
|
+
`slotPolicy`, `failoverHosts`, or `disabled`. Default: `resourcePercentage`.
|
|
3013
|
+
:param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: Automatically determine available resource percentages by subtracting the
|
|
3014
|
+
average number of host resources represented by the
|
|
3015
|
+
`ha_admission_control_host_failure_tolerance`
|
|
3016
|
+
setting from the total amount of resources in the cluster. Disable to supply
|
|
3017
|
+
user-defined values. Default: `true`.
|
|
3018
|
+
<sup>\\*</sup>
|
|
3019
|
+
:param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: Controls the
|
|
3020
|
+
user-defined percentage of CPU resources in the cluster to reserve for
|
|
3021
|
+
failover. Default: `100`.
|
|
3022
|
+
:param pulumi.Input[int] ha_admission_control_resource_percentage_memory: Controls the
|
|
3023
|
+
user-defined percentage of memory resources in the cluster to reserve for
|
|
3024
|
+
failover. Default: `100`.
|
|
3025
|
+
:param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: Controls the
|
|
3026
|
+
user-defined CPU slot size, in MHz. Default: `32`.
|
|
3027
|
+
:param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: Controls the
|
|
3028
|
+
user-defined memory slot size, in MB. Default: `100`.
|
|
3029
|
+
:param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: Controls
|
|
3030
|
+
whether or not you wish to supply explicit values to CPU and memory slot
|
|
3031
|
+
sizes. The default is `false`, which tells vSphere to gather an automatic
|
|
3032
|
+
average based on all powered-on virtual machines currently in the cluster.
|
|
3033
|
+
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: A key/value map that specifies advanced
|
|
3034
|
+
options for vSphere HA.
|
|
3035
|
+
:param pulumi.Input[str] ha_datastore_apd_recovery_action: Controls the action to take
|
|
3036
|
+
on virtual machines if an APD status on an affected datastore clears in the
|
|
3037
|
+
middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
|
|
3038
|
+
<sup>\\*</sup>
|
|
3039
|
+
:param pulumi.Input[str] ha_datastore_apd_response: Controls the action to take on
|
|
3040
|
+
virtual machines when the cluster has detected loss to all paths to a
|
|
3041
|
+
relevant datastore. Can be one of `disabled`, `warning`,
|
|
3042
|
+
`restartConservative`, or `restartAggressive`. Default: `disabled`.
|
|
3043
|
+
<sup>\\*</sup>
|
|
3044
|
+
:param pulumi.Input[int] ha_datastore_apd_response_delay: The time, in seconds,
|
|
3045
|
+
to wait after an APD timeout event to run the response action defined in
|
|
3046
|
+
`ha_datastore_apd_response`. Default: `180`
|
|
3047
|
+
seconds (3 minutes). <sup>\\*</sup>
|
|
3048
|
+
:param pulumi.Input[str] ha_datastore_pdl_response: Controls the action to take on
|
|
3049
|
+
virtual machines when the cluster has detected a permanent device loss to a
|
|
3050
|
+
relevant datastore. Can be one of `disabled`, `warning`, or
|
|
3051
|
+
`restartAggressive`. Default: `disabled`.
|
|
3052
|
+
<sup>\\*</sup>
|
|
3053
|
+
:param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster. Default:
|
|
3054
|
+
`false`.
|
|
3055
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for
|
|
3056
|
+
preferred datastores to use for HA heartbeating. This setting is only useful
|
|
3057
|
+
when `ha_heartbeat_datastore_policy` is set
|
|
3058
|
+
to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
|
|
3059
|
+
:param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA
|
|
3060
|
+
heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
|
|
3061
|
+
`allFeasibleDsWithUserPreference`. Default:
|
|
3062
|
+
`allFeasibleDsWithUserPreference`.
|
|
3063
|
+
:param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual
|
|
3064
|
+
machines when a host has detected that it has been isolated from the rest of
|
|
3065
|
+
the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
|
|
3066
|
+
`none`.
|
|
3067
|
+
:param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether
|
|
3068
|
+
vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
|
|
3069
|
+
or `disabled`. Default: `enabled`.
|
|
3070
|
+
:param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component
|
|
3071
|
+
protection for virtual machines in this cluster. Can be one of `enabled` or
|
|
3072
|
+
`disabled`. Default: `enabled`.
|
|
3073
|
+
<sup>\\*</sup>
|
|
3074
|
+
:param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to
|
|
3075
|
+
determine whether or not virtual machines in a certain restart priority class
|
|
3076
|
+
are online, allowing HA to move on to restarting virtual machines on the next
|
|
3077
|
+
priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
|
|
3078
|
+
`appHbStatusGreen`. The default is `none`, which means that a virtual machine
|
|
3079
|
+
is considered ready immediately after a host is found to start it on.
|
|
3080
|
+
<sup>\\*</sup>
|
|
3081
|
+
:param pulumi.Input[int] ha_vm_failure_interval: The time interval, in seconds, within which a heartbeat
|
|
3082
|
+
from a virtual machine must be received. If no heartbeat is received within this interval,
|
|
3083
|
+
the virtual machine is marked as failed. Default: `30` seconds.
|
|
3084
|
+
:param pulumi.Input[int] ha_vm_maximum_failure_window: The time, in seconds, for the reset window in
|
|
3085
|
+
which `ha_vm_maximum_resets` can operate. When this
|
|
3086
|
+
window expires, no more resets are attempted regardless of the setting
|
|
3087
|
+
configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
|
|
3088
|
+
unlimited reset time is allotted. Default: `-1` (no window).
|
|
3089
|
+
:param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will
|
|
3090
|
+
perform to a virtual machine when responding to a failure event. Default: `3`.
|
|
3091
|
+
:param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after
|
|
3092
|
+
powering on a virtual machine before monitoring for heartbeats. Default:
|
|
3093
|
+
`120` seconds (2 minutes).
|
|
3094
|
+
:param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use
|
|
3095
|
+
when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
|
|
3096
|
+
`vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
|
|
3097
|
+
:param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay, in seconds,
|
|
3098
|
+
after ready condition is met. A VM is considered ready at this point.
|
|
3099
|
+
Default: `0` seconds (no delay). <sup>\\*</sup>
|
|
3100
|
+
:param pulumi.Input[str] ha_vm_restart_priority: The default restart priority
|
|
3101
|
+
for affected virtual machines when vSphere detects a host failure. Can be one
|
|
3102
|
+
of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
|
|
3103
|
+
:param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds,
|
|
3104
|
+
that vSphere HA will wait for virtual machines in one priority to be ready
|
|
3105
|
+
before proceeding with the next priority. Default: `600` seconds (10 minutes).
|
|
3106
|
+
<sup>\\*</sup>
|
|
3107
|
+
:param pulumi.Input[int] host_cluster_exit_timeout: The timeout, in seconds, for each host maintenance
|
|
3108
|
+
mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
|
|
3109
|
+
:param pulumi.Input[bool] host_managed: Can be set to `true` if compute cluster
|
|
3110
|
+
membership will be managed through the `host` resource rather than the
|
|
3111
|
+
`compute_cluster` resource. Conflicts with: `host_system_ids`.
|
|
3112
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of
|
|
3113
|
+
the hosts to put in the cluster. Conflicts with: `host_managed`.
|
|
2623
3114
|
:param pulumi.Input[str] name: The name of the cluster.
|
|
2624
|
-
:param pulumi.Input[str] proactive_ha_automation_level:
|
|
2625
|
-
|
|
2626
|
-
|
|
2627
|
-
|
|
2628
|
-
:param pulumi.Input[
|
|
2629
|
-
|
|
2630
|
-
|
|
3115
|
+
:param pulumi.Input[str] proactive_ha_automation_level: Determines how the host
|
|
3116
|
+
quarantine, maintenance mode, or virtual machine migration recommendations
|
|
3117
|
+
made by proactive HA are to be handled. Can be one of `Automated` or
|
|
3118
|
+
`Manual`. Default: `Manual`. <sup>\\*</sup>
|
|
3119
|
+
:param pulumi.Input[bool] proactive_ha_enabled: Enables Proactive HA. Default: `false`.
|
|
3120
|
+
<sup>\\*</sup>
|
|
3121
|
+
:param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation
|
|
3122
|
+
for moderately degraded hosts. Can be one of `MaintenanceMode` or
|
|
3123
|
+
`QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
|
|
3124
|
+
`proactive_ha_severe_remediation` is set
|
|
3125
|
+
to `QuarantineMode`. Default: `QuarantineMode`.
|
|
3126
|
+
<sup>\\*</sup>
|
|
3127
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update
|
|
3128
|
+
providers configured for this cluster.
|
|
3129
|
+
<sup>\\*</sup>
|
|
3130
|
+
:param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for
|
|
3131
|
+
severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
|
|
3132
|
+
Note that this cannot be set to `QuarantineMode` when
|
|
3133
|
+
`proactive_ha_moderate_remediation` is
|
|
3134
|
+
set to `MaintenanceMode`. Default: `QuarantineMode`.
|
|
3135
|
+
<sup>\\*</sup>
|
|
2631
3136
|
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
|
|
2632
|
-
:param pulumi.Input[bool] vsan_compression_enabled:
|
|
2633
|
-
|
|
2634
|
-
:param pulumi.Input[
|
|
2635
|
-
|
|
2636
|
-
|
|
2637
|
-
:param pulumi.Input[
|
|
2638
|
-
|
|
2639
|
-
:param pulumi.Input[
|
|
2640
|
-
|
|
2641
|
-
|
|
2642
|
-
|
|
2643
|
-
:param pulumi.Input[
|
|
2644
|
-
|
|
2645
|
-
|
|
3137
|
+
:param pulumi.Input[bool] vsan_compression_enabled: Enables vSAN compression on the
|
|
3138
|
+
cluster.
|
|
3139
|
+
:param pulumi.Input[bool] vsan_dedup_enabled: Enables vSAN deduplication on the cluster.
|
|
3140
|
+
Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
|
|
3141
|
+
compression must also be enabled.
|
|
3142
|
+
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanDiskGroupArgs']]]] vsan_disk_groups: Represents the configuration of a host disk
|
|
3143
|
+
group in the cluster.
|
|
3144
|
+
:param pulumi.Input[bool] vsan_dit_encryption_enabled: Enables vSAN data-in-transit
|
|
3145
|
+
encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
|
|
3146
|
+
the vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
|
|
3147
|
+
at the same time.
|
|
3148
|
+
:param pulumi.Input[int] vsan_dit_rekey_interval: Indicates the rekey interval in
|
|
3149
|
+
minutes for data-in-transit encryption. The valid rekey interval is 30 to
|
|
3150
|
+
10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
|
|
3151
|
+
:param pulumi.Input[bool] vsan_enabled: Enables vSAN on the cluster.
|
|
3152
|
+
:param pulumi.Input[bool] vsan_esa_enabled: Enables vSAN ESA on the cluster.
|
|
3153
|
+
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanFaultDomainArgs']]]] vsan_fault_domains: Configurations of vSAN fault domains.
|
|
3154
|
+
:param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Enables network
|
|
3155
|
+
diagnostic mode for vSAN performance service on the cluster.
|
|
3156
|
+
:param pulumi.Input[bool] vsan_performance_enabled: Enables vSAN performance service on
|
|
3157
|
+
the cluster. Default: `true`.
|
|
3158
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The remote vSAN datastore IDs to be
|
|
3159
|
+
mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
|
|
3160
|
+
`vsan_dit_rekey_interval`, i.e., the vSAN HCI Mesh feature cannot be enabled with the
|
|
3161
|
+
data-in-transit encryption feature at the same time.
|
|
3162
|
+
:param pulumi.Input[pulumi.InputType['ComputeClusterVsanStretchedClusterArgs']] vsan_stretched_cluster: Configurations of vSAN stretched cluster.
|
|
3163
|
+
:param pulumi.Input[bool] vsan_unmap_enabled: Enables vSAN unmap on the cluster.
|
|
3164
|
+
You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
|
|
3165
|
+
:param pulumi.Input[bool] vsan_verbose_mode_enabled: Enables verbose mode for vSAN
|
|
3166
|
+
performance service on the cluster.
|
|
2646
3167
|
"""
|
|
2647
3168
|
...
|
|
2648
3169
|
@overload
|
|
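To make the long parameter list above more concrete, here is a hedged constructor sketch combining the DRS, DPM, and HA admission-control arguments it documents. The datacenter and ESXi host names are assumptions, not values from this package.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")          # assumed datacenter name
hosts = [
    vsphere.get_host(name=h, datacenter_id=dc.id)  # assumed ESXi host names
    for h in ["esxi-01.example.com", "esxi-02.example.com"]
]

cluster = vsphere.ComputeCluster(
    "compute-cluster",
    datacenter_id=dc.id,
    host_system_ids=[h.id for h in hosts],
    # DRS / DPM
    drs_enabled=True,
    drs_automation_level="fullyAutomated",
    drs_migration_threshold=3,
    dpm_enabled=True,
    dpm_automation_level="manual",
    # vSphere HA with percentage-based admission control
    ha_enabled=True,
    ha_admission_control_policy="resourcePercentage",
    ha_admission_control_host_failure_tolerance=1,
    ha_vm_restart_priority="medium",
)
```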
@@ -2712,7 +3233,6 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
2712
3233
|
ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
|
|
2713
3234
|
ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
|
|
2714
3235
|
host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
|
|
2715
|
-
host_image: Optional[pulumi.Input[pulumi.InputType['ComputeClusterHostImageArgs']]] = None,
|
|
2716
3236
|
host_managed: Optional[pulumi.Input[bool]] = None,
|
|
2717
3237
|
host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
|
|
2718
3238
|
name: Optional[pulumi.Input[str]] = None,
|
|
@@ -2792,7 +3312,6 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
2792
3312
|
__props__.__dict__["ha_vm_restart_priority"] = ha_vm_restart_priority
|
|
2793
3313
|
__props__.__dict__["ha_vm_restart_timeout"] = ha_vm_restart_timeout
|
|
2794
3314
|
__props__.__dict__["host_cluster_exit_timeout"] = host_cluster_exit_timeout
|
|
2795
|
-
__props__.__dict__["host_image"] = host_image
|
|
2796
3315
|
__props__.__dict__["host_managed"] = host_managed
|
|
2797
3316
|
__props__.__dict__["host_system_ids"] = host_system_ids
|
|
2798
3317
|
__props__.__dict__["name"] = name
|
|
@@ -2872,7 +3391,6 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
2872
3391
|
ha_vm_restart_priority: Optional[pulumi.Input[str]] = None,
|
|
2873
3392
|
ha_vm_restart_timeout: Optional[pulumi.Input[int]] = None,
|
|
2874
3393
|
host_cluster_exit_timeout: Optional[pulumi.Input[int]] = None,
|
|
2875
|
-
host_image: Optional[pulumi.Input[pulumi.InputType['ComputeClusterHostImageArgs']]] = None,
|
|
2876
3394
|
host_managed: Optional[pulumi.Input[bool]] = None,
|
|
2877
3395
|
host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
|
|
2878
3396
|
name: Optional[pulumi.Input[str]] = None,
|
|
@@ -2911,120 +3429,230 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
2911
3429
|
and require vCenter Server.
|
|
2912
3430
|
:param pulumi.Input[str] datacenter_id: The managed object ID of
|
|
2913
3431
|
the datacenter to create the cluster in. Forces a new resource if changed.
|
|
2914
|
-
:param pulumi.Input[str] dpm_automation_level: The automation level for host power
|
|
2915
|
-
|
|
2916
|
-
|
|
2917
|
-
:param pulumi.Input[
|
|
2918
|
-
|
|
2919
|
-
|
|
2920
|
-
:param pulumi.Input[
|
|
2921
|
-
|
|
2922
|
-
|
|
2923
|
-
|
|
2924
|
-
:param pulumi.Input[
|
|
2925
|
-
|
|
2926
|
-
:param pulumi.Input[
|
|
2927
|
-
|
|
2928
|
-
|
|
3432
|
+
:param pulumi.Input[str] dpm_automation_level: The automation level for host power
|
|
3433
|
+
operations in this cluster. Can be one of `manual` or `automated`. Default:
|
|
3434
|
+
`manual`.
|
|
3435
|
+
:param pulumi.Input[bool] dpm_enabled: Enable DPM support for DRS in this cluster.
|
|
3436
|
+
Requires `drs_enabled` to be `true` in order to be effective.
|
|
3437
|
+
Default: `false`.
|
|
3438
|
+
:param pulumi.Input[int] dpm_threshold: A value between `1` and `5` indicating the
|
|
3439
|
+
threshold of load within the cluster that influences host power operations.
|
|
3440
|
+
This affects both power on and power off operations - a lower setting will
|
|
3441
|
+
tolerate more of a surplus/deficit than a higher setting. Default: `3`.
|
|
3442
|
+
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] drs_advanced_options: A key/value map that specifies advanced
|
|
3443
|
+
options for DRS and DPM.
|
|
3444
|
+
:param pulumi.Input[str] drs_automation_level: The default automation level for all
|
|
3445
|
+
virtual machines in this cluster. Can be one of `manual`,
|
|
3446
|
+
`partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
|
|
3447
|
+
:param pulumi.Input[bool] drs_enable_predictive_drs: When `true`, enables DRS to use data
|
|
3448
|
+
from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
|
|
3449
|
+
recommendations. <sup>\\*</sup>
|
|
3450
|
+
|
|
3451
|
+
[ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
|
|
3452
|
+
:param pulumi.Input[bool] drs_enable_vm_overrides: Allow individual DRS overrides to be
|
|
3453
|
+
set for virtual machines in the cluster. Default: `true`.
|
|
3454
|
+
:param pulumi.Input[bool] drs_enabled: Enable DRS for this cluster. Default: `false`.
|
|
3455
|
+
:param pulumi.Input[int] drs_migration_threshold: A value between `1` and `5` indicating
|
|
3456
|
+
the threshold of imbalance tolerated between hosts. A lower setting will
|
|
3457
|
+
tolerate more imbalance while a higher setting will tolerate less. Default:
|
|
3458
|
+
`3`.
|
|
3459
|
+
:param pulumi.Input[str] drs_scale_descendants_shares: Enable scalable shares for all
|
|
3460
|
+
resource pools in the cluster. Can be one of `disabled` or
|
|
3461
|
+
`scaleCpuAndMemoryShares`. Default: `disabled`.
|
|
2929
3462
|
:param pulumi.Input[str] folder: The relative path to a folder to put this cluster in.
|
|
2930
3463
|
This is a path relative to the datacenter you are deploying the cluster to.
|
|
2931
3464
|
Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
|
|
2932
3465
|
The provider will place a cluster named `compute-cluster-test` in a
|
|
2933
3466
|
host folder located at `/dc1/host/foo/bar`, with the final inventory path
|
|
2934
3467
|
being `/dc1/host/foo/bar/datastore-cluster-test`.
|
|
2935
|
-
:param pulumi.Input[bool] force_evacuate_on_destroy:
|
|
2936
|
-
|
|
2937
|
-
|
|
2938
|
-
|
|
2939
|
-
|
|
2940
|
-
|
|
2941
|
-
|
|
2942
|
-
|
|
2943
|
-
|
|
2944
|
-
|
|
2945
|
-
|
|
2946
|
-
|
|
2947
|
-
|
|
2948
|
-
|
|
2949
|
-
|
|
2950
|
-
|
|
2951
|
-
|
|
2952
|
-
|
|
2953
|
-
|
|
2954
|
-
|
|
2955
|
-
|
|
2956
|
-
|
|
2957
|
-
:param pulumi.Input[
|
|
2958
|
-
|
|
2959
|
-
|
|
2960
|
-
|
|
2961
|
-
:param pulumi.Input[str]
|
|
2962
|
-
|
|
2963
|
-
|
|
2964
|
-
|
|
2965
|
-
|
|
2966
|
-
|
|
2967
|
-
the
|
|
2968
|
-
|
|
2969
|
-
|
|
2970
|
-
:param pulumi.Input[
|
|
2971
|
-
|
|
2972
|
-
|
|
2973
|
-
:param pulumi.Input[
|
|
2974
|
-
|
|
2975
|
-
|
|
2976
|
-
|
|
2977
|
-
|
|
2978
|
-
:param pulumi.Input[
|
|
2979
|
-
|
|
2980
|
-
:param pulumi.Input[
|
|
2981
|
-
|
|
2982
|
-
|
|
2983
|
-
|
|
2984
|
-
:param pulumi.Input[
|
|
2985
|
-
|
|
2986
|
-
|
|
2987
|
-
|
|
2988
|
-
|
|
2989
|
-
|
|
2990
|
-
|
|
2991
|
-
|
|
2992
|
-
|
|
2993
|
-
|
|
2994
|
-
|
|
2995
|
-
|
|
2996
|
-
|
|
2997
|
-
|
|
2998
|
-
|
|
2999
|
-
:param pulumi.Input[
|
|
3468
|
+
:param pulumi.Input[bool] force_evacuate_on_destroy: When destroying the resource, setting this to
|
|
3469
|
+
`true` will auto-remove any hosts that are currently a member of the cluster,
|
|
3470
|
+
as if they were removed by taking their entry out of `host_system_ids` (see
|
|
3471
|
+
below). This is an advanced
|
|
3472
|
+
option and should only be used for testing. Default: `false`.
|
|
3473
|
+
|
|
3474
|
+
> **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
|
|
3475
|
+
there are many pitfalls to its use when working with complex cluster
|
|
3476
|
+
configurations. Depending on the virtual machines currently on the cluster, and
|
|
3477
|
+
your DRS and HA settings, the full host evacuation may fail. Instead,
|
|
3478
|
+
incrementally remove hosts from your configuration by adjusting the contents of
|
|
3479
|
+
the `host_system_ids` attribute.
|
|
3480
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] ha_admission_control_failover_host_system_ids: Defines the
|
|
3481
|
+
managed object IDs of hosts to use as dedicated failover
|
|
3482
|
+
hosts. These hosts are kept as available as possible - admission control will
|
|
3483
|
+
block access to the host, and DRS will ignore the host when making
|
|
3484
|
+
recommendations.
|
|
3485
|
+
:param pulumi.Input[int] ha_admission_control_host_failure_tolerance: The maximum number
|
|
3486
|
+
of failed hosts that admission control tolerates when making decisions on
|
|
3487
|
+
whether to permit virtual machine operations. The maximum is one less than
|
|
3488
|
+
the number of hosts in the cluster. Default: `1`.
|
|
3489
|
+
<sup>\\*</sup>
|
|
3490
|
+
:param pulumi.Input[int] ha_admission_control_performance_tolerance: The percentage of
|
|
3491
|
+
resource reduction that a cluster of virtual machines can tolerate in case of
|
|
3492
|
+
a failover. A value of 0 produces warnings only, whereas a value of 100
|
|
3493
|
+
disables the setting. Default: `100` (disabled).
|
|
3494
|
+
:param pulumi.Input[str] ha_admission_control_policy: The type of admission control
|
|
3495
|
+
policy to use with vSphere HA. Can be one of `resourcePercentage`,
|
|
3496
|
+
`slotPolicy`, `failoverHosts`, or `disabled`. Default: `resourcePercentage`.
|
|
3497
|
+
:param pulumi.Input[bool] ha_admission_control_resource_percentage_auto_compute: Automatically determine available resource percentages by subtracting the
|
|
3498
|
+
average number of host resources represented by the
|
|
3499
|
+
`ha_admission_control_host_failure_tolerance`
|
|
3500
|
+
setting from the total amount of resources in the cluster. Disable to supply
|
|
3501
|
+
user-defined values. Default: `true`.
|
|
3502
|
+
<sup>\\*</sup>
|
|
3503
|
+
:param pulumi.Input[int] ha_admission_control_resource_percentage_cpu: Controls the
|
|
3504
|
+
user-defined percentage of CPU resources in the cluster to reserve for
|
|
3505
|
+
failover. Default: `100`.
|
|
3506
|
+
:param pulumi.Input[int] ha_admission_control_resource_percentage_memory: Controls the
|
|
3507
|
+
user-defined percentage of memory resources in the cluster to reserve for
|
|
3508
|
+
failover. Default: `100`.
|
|
3509
|
+
:param pulumi.Input[int] ha_admission_control_slot_policy_explicit_cpu: Controls the
|
|
3510
|
+
user-defined CPU slot size, in MHz. Default: `32`.
|
|
3511
|
+
:param pulumi.Input[int] ha_admission_control_slot_policy_explicit_memory: Controls the
|
|
3512
|
+
user-defined memory slot size, in MB. Default: `100`.
|
|
3513
|
+
:param pulumi.Input[bool] ha_admission_control_slot_policy_use_explicit_size: Controls
|
|
3514
|
+
whether or not you wish to supply explicit values to CPU and memory slot
|
|
3515
|
+
sizes. The default is `false`, which tells vSphere to gather an automatic
|
|
3516
|
+
average based on all powered-on virtual machines currently in the cluster.
|
|
3517
|
+
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] ha_advanced_options: A key/value map that specifies advanced
|
|
3518
|
+
options for vSphere HA.
|
|
3519
|
+
:param pulumi.Input[str] ha_datastore_apd_recovery_action: Controls the action to take
|
|
3520
|
+
on virtual machines if an APD status on an affected datastore clears in the
|
|
3521
|
+
middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
|
|
3522
|
+
<sup>\\*</sup>
|
|
3523
|
+
:param pulumi.Input[str] ha_datastore_apd_response: Controls the action to take on
|
|
3524
|
+
virtual machines when the cluster has detected loss to all paths to a
|
|
3525
|
+
relevant datastore. Can be one of `disabled`, `warning`,
|
|
3526
|
+
`restartConservative`, or `restartAggressive`. Default: `disabled`.
|
|
3527
|
+
<sup>\\*</sup>
|
|
3528
|
+
:param pulumi.Input[int] ha_datastore_apd_response_delay: The time, in seconds,
|
|
3529
|
+
to wait after an APD timeout event to run the response action defined in
|
|
3530
|
+
`ha_datastore_apd_response`. Default: `180`
|
|
3531
|
+
seconds (3 minutes). <sup>\\*</sup>
|
|
3532
|
+
:param pulumi.Input[str] ha_datastore_pdl_response: Controls the action to take on
|
|
3533
|
+
virtual machines when the cluster has detected a permanent device loss to a
|
|
3534
|
+
relevant datastore. Can be one of `disabled`, `warning`, or
|
|
3535
|
+
`restartAggressive`. Default: `disabled`.
|
|
3536
|
+
<sup>\\*</sup>
|
|
3537
|
+
:param pulumi.Input[bool] ha_enabled: Enable vSphere HA for this cluster. Default:
|
|
3538
|
+
`false`.
|
|
3539
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for
|
|
3540
|
+
preferred datastores to use for HA heartbeating. This setting is only useful
|
|
3541
|
+
when `ha_heartbeat_datastore_policy` is set
|
|
3542
|
+
to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
|
|
3543
|
+
:param pulumi.Input[str] ha_heartbeat_datastore_policy: The selection policy for HA
|
|
3544
|
+
heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
|
|
3545
|
+
`allFeasibleDsWithUserPreference`. Default:
|
|
3546
|
+
`allFeasibleDsWithUserPreference`.
|
|
3547
|
+
:param pulumi.Input[str] ha_host_isolation_response: The action to take on virtual
|
|
3548
|
+
machines when a host has detected that it has been isolated from the rest of
|
|
3549
|
+
the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
|
|
3550
|
+
`none`.
|
|
3551
|
+
:param pulumi.Input[str] ha_host_monitoring: Global setting that controls whether
|
|
3552
|
+
vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
|
|
3553
|
+
or `disabled`. Default: `enabled`.
|
|
3554
|
+
:param pulumi.Input[str] ha_vm_component_protection: Controls vSphere VM component
|
|
3555
|
+
protection for virtual machines in this cluster. Can be one of `enabled` or
|
|
3556
|
+
`disabled`. Default: `enabled`.
|
|
3557
|
+
<sup>\\*</sup>
|
|
3558
|
+
:param pulumi.Input[str] ha_vm_dependency_restart_condition: The condition used to
|
|
3559
|
+
determine whether or not virtual machines in a certain restart priority class
|
|
3560
|
+
are online, allowing HA to move on to restarting virtual machines on the next
|
|
3561
|
+
priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
|
|
3562
|
+
`appHbStatusGreen`. The default is `none`, which means that a virtual machine
|
|
3563
|
+
is considered ready immediately after a host is found to start it on.
|
|
3564
|
+
<sup>\\*</sup>
|
|
3565
|
+
:param pulumi.Input[int] ha_vm_failure_interval: The time interval, in seconds, within which a heartbeat
|
|
3566
|
+
from a virtual machine must be received. If no heartbeat is received within this interval,
|
|
3567
|
+
the virtual machine is marked as failed. Default: `30` seconds.
|
|
3568
|
+
:param pulumi.Input[int] ha_vm_maximum_failure_window: The time, in seconds, for the reset window in
|
|
3569
|
+
which `ha_vm_maximum_resets` can operate. When this
|
|
3570
|
+
window expires, no more resets are attempted regardless of the setting
|
|
3571
|
+
configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
|
|
3572
|
+
unlimited reset time is allotted. Default: `-1` (no window).
|
|
3573
|
+
:param pulumi.Input[int] ha_vm_maximum_resets: The maximum number of resets that HA will
|
|
3574
|
+
perform to a virtual machine when responding to a failure event. Default: `3`.
|
|
3575
|
+
:param pulumi.Input[int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after
|
|
3576
|
+
powering on a virtual machine before monitoring for heartbeats. Default:
|
|
3577
|
+
`120` seconds (2 minutes).
|
|
3578
|
+
:param pulumi.Input[str] ha_vm_monitoring: The type of virtual machine monitoring to use
|
|
3579
|
+
when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
|
|
3580
|
+
`vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
|
|
3581
|
+
:param pulumi.Input[int] ha_vm_restart_additional_delay: Additional delay, in seconds,
|
|
3582
|
+
after ready condition is met. A VM is considered ready at this point.
|
|
3583
|
+
Default: `0` seconds (no delay). <sup>\\*</sup>
|
|
3584
|
+
:param pulumi.Input[str] ha_vm_restart_priority: The default restart priority
|
|
3585
|
+
for affected virtual machines when vSphere detects a host failure. Can be one
|
|
3586
|
+
of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
|
|
3587
|
+
:param pulumi.Input[int] ha_vm_restart_timeout: The maximum time, in seconds,
|
|
3588
|
+
that vSphere HA will wait for virtual machines in one priority to be ready
|
|
3589
|
+
before proceeding with the next priority. Default: `600` seconds (10 minutes).
|
|
3590
|
+
<sup>\\*</sup>
|
|
3591
|
+
:param pulumi.Input[int] host_cluster_exit_timeout: The timeout, in seconds, for each host maintenance
|
|
3592
|
+
mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
|
|
3593
|
+
:param pulumi.Input[bool] host_managed: Can be set to `true` if compute cluster
|
|
3594
|
+
membership will be managed through the `host` resource rather than the
|
|
3595
|
+
`compute_cluster` resource. Conflicts with: `host_system_ids`.
|
|
3596
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_system_ids: The managed object IDs of
|
|
3597
|
+
the hosts to put in the cluster. Conflicts with: `host_managed`.
|
|
3000
3598
|
:param pulumi.Input[str] name: The name of the cluster.
|
|
3001
|
-
:param pulumi.Input[str] proactive_ha_automation_level:
|
|
3002
|
-
|
|
3003
|
-
|
|
3004
|
-
|
|
3005
|
-
:param pulumi.Input[
|
|
3006
|
-
|
|
3007
|
-
|
|
3599
|
+
:param pulumi.Input[str] proactive_ha_automation_level: Determines how the host
|
|
3600
|
+
quarantine, maintenance mode, or virtual machine migration recommendations
|
|
3601
|
+
made by proactive HA are to be handled. Can be one of `Automated` or
|
|
3602
|
+
`Manual`. Default: `Manual`. <sup>\\*</sup>
|
|
3603
|
+
:param pulumi.Input[bool] proactive_ha_enabled: Enables Proactive HA. Default: `false`.
|
|
3604
|
+
<sup>\\*</sup>
|
|
3605
|
+
:param pulumi.Input[str] proactive_ha_moderate_remediation: The configured remediation
|
|
3606
|
+
for moderately degraded hosts. Can be one of `MaintenanceMode` or
|
|
3607
|
+
`QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
|
|
3608
|
+
`proactive_ha_severe_remediation` is set
|
|
3609
|
+
to `QuarantineMode`. Default: `QuarantineMode`.
|
|
3610
|
+
<sup>\\*</sup>
|
|
3611
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] proactive_ha_provider_ids: The list of IDs for health update
|
|
3612
|
+
providers configured for this cluster.
|
|
3613
|
+
<sup>\\*</sup>
|
|
3614
|
+
:param pulumi.Input[str] proactive_ha_severe_remediation: The configured remediation for
|
|
3615
|
+
severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
|
|
3616
|
+
Note that this cannot be set to `QuarantineMode` when
|
|
3617
|
+
`proactive_ha_moderate_remediation` is
|
|
3618
|
+
set to `MaintenanceMode`. Default: `QuarantineMode`.
|
|
3619
|
+
<sup>\\*</sup>
|
|
3008
3620
|
:param pulumi.Input[str] resource_pool_id: The managed object ID of the primary
|
|
3009
3621
|
resource pool for this cluster. This can be passed directly to the
|
|
3010
3622
|
`resource_pool_id`
|
|
3011
3623
|
attribute of the
|
|
3012
3624
|
`VirtualMachine` resource.
|
|
3013
3625
|
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
|
|
3014
|
-
:param pulumi.Input[bool] vsan_compression_enabled:
|
|
3015
|
-
|
|
3016
|
-
:param pulumi.Input[
|
|
3017
|
-
|
|
3018
|
-
|
|
3019
|
-
:param pulumi.Input[
|
|
3020
|
-
|
|
3021
|
-
:param pulumi.Input[
|
|
3022
|
-
|
|
3023
|
-
|
|
3024
|
-
|
|
3025
|
-
:param pulumi.Input[
|
|
3026
|
-
|
|
3027
|
-
|
|
3626
|
+
:param pulumi.Input[bool] vsan_compression_enabled: Enables vSAN compression on the
|
|
3627
|
+
cluster.
|
|
3628
|
+
:param pulumi.Input[bool] vsan_dedup_enabled: Enables vSAN deduplication on the cluster.
|
|
3629
|
+
Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
|
|
3630
|
+
compression must also be enabled.
|
|
3631
|
+
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanDiskGroupArgs']]]] vsan_disk_groups: Represents the configuration of a host disk
|
|
3632
|
+
group in the cluster.
|
|
3633
|
+
:param pulumi.Input[bool] vsan_dit_encryption_enabled: Enables vSAN data-in-transit
|
|
3634
|
+
encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
|
|
3635
|
+
the vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
|
|
3636
|
+
at the same time.
|
|
3637
|
+
:param pulumi.Input[int] vsan_dit_rekey_interval: Indicates the rekey interval in
|
|
3638
|
+
minutes for data-in-transit encryption. The valid rekey interval is 30 to
|
|
3639
|
+
10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
|
|
3640
|
+
:param pulumi.Input[bool] vsan_enabled: Enables vSAN on the cluster.
|
|
3641
|
+
:param pulumi.Input[bool] vsan_esa_enabled: Enables vSAN ESA on the cluster.
|
|
3642
|
+
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ComputeClusterVsanFaultDomainArgs']]]] vsan_fault_domains: Configurations of vSAN fault domains.
|
|
3643
|
+
:param pulumi.Input[bool] vsan_network_diagnostic_mode_enabled: Enables network
|
|
3644
|
+
diagnostic mode for vSAN performance service on the cluster.
|
|
3645
|
+
:param pulumi.Input[bool] vsan_performance_enabled: Enables vSAN performance service on
|
|
3646
|
+
the cluster. Default: `true`.
|
|
3647
|
+
:param pulumi.Input[Sequence[pulumi.Input[str]]] vsan_remote_datastore_ids: The remote vSAN datastore IDs to be
|
|
3648
|
+
mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
|
|
3649
|
+
`vsan_dit_rekey_interval`, i.e., the vSAN HCI Mesh feature cannot be enabled with the
|
|
3650
|
+
data-in-transit encryption feature at the same time.
|
|
3651
|
+
:param pulumi.Input[pulumi.InputType['ComputeClusterVsanStretchedClusterArgs']] vsan_stretched_cluster: Configurations of vSAN stretched cluster.
|
|
3652
|
+
:param pulumi.Input[bool] vsan_unmap_enabled: Enables vSAN unmap on the cluster.
|
|
3653
|
+
You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
|
|
3654
|
+
:param pulumi.Input[bool] vsan_verbose_mode_enabled: Enables verbose mode for vSAN
|
|
3655
|
+
performance service on the cluster.
|
|
3028
3656
|
"""
|
|
3029
3657
|
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
|
|
3030
3658
|
|
|
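As a rough illustration of the lookup form these same parameters document, an existing cluster can be adopted by ID with the resource's `get` class method; the managed object ID below is a placeholder.

```python
import pulumi
import pulumi_vsphere as vsphere

# Placeholder managed object ID of an existing cluster.
existing = vsphere.ComputeCluster.get(
    "imported-cluster",
    id="domain-c123",
)

pulumi.export("drs_enabled", existing.drs_enabled)
```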
@@ -3075,7 +3703,6 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
3075
3703
|
__props__.__dict__["ha_vm_restart_priority"] = ha_vm_restart_priority
|
|
3076
3704
|
__props__.__dict__["ha_vm_restart_timeout"] = ha_vm_restart_timeout
|
|
3077
3705
|
__props__.__dict__["host_cluster_exit_timeout"] = host_cluster_exit_timeout
|
|
3078
|
-
__props__.__dict__["host_image"] = host_image
|
|
3079
3706
|
__props__.__dict__["host_managed"] = host_managed
|
|
3080
3707
|
__props__.__dict__["host_system_ids"] = host_system_ids
|
|
3081
3708
|
__props__.__dict__["name"] = name
|
|
@@ -3127,7 +3754,9 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
3127
3754
|
@pulumi.getter(name="dpmAutomationLevel")
|
|
3128
3755
|
def dpm_automation_level(self) -> pulumi.Output[Optional[str]]:
|
|
3129
3756
|
"""
|
|
3130
|
-
The automation level for host power
|
|
3757
|
+
The automation level for host power
|
|
3758
|
+
operations in this cluster. Can be one of `manual` or `automated`. Default:
|
|
3759
|
+
`manual`.
|
|
3131
3760
|
"""
|
|
3132
3761
|
return pulumi.get(self, "dpm_automation_level")
|
|
3133
3762
|
|
|
@@ -3135,8 +3764,9 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
3135
3764
|
@pulumi.getter(name="dpmEnabled")
|
|
3136
3765
|
def dpm_enabled(self) -> pulumi.Output[Optional[bool]]:
|
|
3137
3766
|
"""
|
|
3138
|
-
Enable DPM support for DRS
|
|
3139
|
-
|
|
3767
|
+
Enable DPM support for DRS in this cluster.
|
|
3768
|
+
Requires `drs_enabled` to be `true` in order to be effective.
|
|
3769
|
+
Default: `false`.
|
|
3140
3770
|
"""
|
|
3141
3771
|
return pulumi.get(self, "dpm_enabled")
|
|
3142
3772
|
|
|
@@ -3144,9 +3774,10 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
3144
3774
|
@pulumi.getter(name="dpmThreshold")
|
|
3145
3775
|
def dpm_threshold(self) -> pulumi.Output[Optional[int]]:
|
|
3146
3776
|
"""
|
|
3147
|
-
A value between 1 and 5 indicating the
|
|
3148
|
-
|
|
3149
|
-
setting
|
|
3777
|
+
A value between `1` and `5` indicating the
|
|
3778
|
+
threshold of load within the cluster that influences host power operations.
|
|
3779
|
+
This affects both power on and power off operations - a lower setting will
|
|
3780
|
+
tolerate more of a surplus/deficit than a higher setting. Default: `3`.
|
|
3150
3781
|
"""
|
|
3151
3782
|
return pulumi.get(self, "dpm_threshold")
|
|
3152
3783
|
|
|
@@ -3154,7 +3785,8 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
3154
3785
|
@pulumi.getter(name="drsAdvancedOptions")
|
|
3155
3786
|
def drs_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
|
|
3156
3787
|
"""
|
|
3157
|
-
|
|
3788
|
+
A key/value map that specifies advanced
|
|
3789
|
+
options for DRS and DPM.
|
|
3158
3790
|
"""
|
|
3159
3791
|
return pulumi.get(self, "drs_advanced_options")
|
|
3160
3792
|
|
|
@@ -3162,8 +3794,9 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
3162
3794
|
@pulumi.getter(name="drsAutomationLevel")
|
|
3163
3795
|
def drs_automation_level(self) -> pulumi.Output[Optional[str]]:
|
|
3164
3796
|
"""
|
|
3165
|
-
The default automation level for all
|
|
3166
|
-
|
|
3797
|
+
The default automation level for all
|
|
3798
|
+
virtual machines in this cluster. Can be one of `manual`,
|
|
3799
|
+
`partiallyAutomated`, or `fullyAutomated`. Default: `manual`.
|
|
3167
3800
|
"""
|
|
3168
3801
|
return pulumi.get(self, "drs_automation_level")
|
|
3169
3802
|
|
|
@@ -3171,7 +3804,11 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
3171
3804
|
@pulumi.getter(name="drsEnablePredictiveDrs")
|
|
3172
3805
|
def drs_enable_predictive_drs(self) -> pulumi.Output[Optional[bool]]:
|
|
3173
3806
|
"""
|
|
3174
|
-
When true
|
|
3807
|
+
When `true`, enables DRS to use data
|
|
3808
|
+
from [vRealize Operations Manager][ref-vsphere-vrops] to make proactive DRS
|
|
3809
|
+
recommendations. <sup>\\*</sup>
|
|
3810
|
+
|
|
3811
|
+
[ref-vsphere-vrops]: https://docs.vmware.com/en/vRealize-Operations-Manager/index.html
|
|
3175
3812
|
"""
|
|
3176
3813
|
return pulumi.get(self, "drs_enable_predictive_drs")
|
|
3177
3814
|
|
|
@@ -3179,7 +3816,8 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
3179
3816
|
@pulumi.getter(name="drsEnableVmOverrides")
|
|
3180
3817
|
def drs_enable_vm_overrides(self) -> pulumi.Output[Optional[bool]]:
|
|
3181
3818
|
"""
|
|
3182
|
-
|
|
3819
|
+
Allow individual DRS overrides to be
|
|
3820
|
+
set for virtual machines in the cluster. Default: `true`.
|
|
3183
3821
|
"""
|
|
3184
3822
|
return pulumi.get(self, "drs_enable_vm_overrides")
|
|
3185
3823
|
|
|
@@ -3187,7 +3825,7 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
3187
3825
|
@pulumi.getter(name="drsEnabled")
|
|
3188
3826
|
def drs_enabled(self) -> pulumi.Output[Optional[bool]]:
|
|
3189
3827
|
"""
|
|
3190
|
-
Enable DRS for this cluster.
|
|
3828
|
+
Enable DRS for this cluster. Default: `false`.
|
|
3191
3829
|
"""
|
|
3192
3830
|
return pulumi.get(self, "drs_enabled")
|
|
3193
3831
|
|
|
@@ -3195,8 +3833,10 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
3195
3833
|
@pulumi.getter(name="drsMigrationThreshold")
|
|
3196
3834
|
def drs_migration_threshold(self) -> pulumi.Output[Optional[int]]:
|
|
3197
3835
|
"""
|
|
3198
|
-
A value between 1 and 5 indicating
|
|
3199
|
-
|
|
3836
|
+
A value between `1` and `5` indicating
|
|
3837
|
+
the threshold of imbalance tolerated between hosts. A lower setting will
|
|
3838
|
+
tolerate more imbalance while a higher setting will tolerate less. Default:
|
|
3839
|
+
`3`.
|
|
3200
3840
|
"""
|
|
3201
3841
|
return pulumi.get(self, "drs_migration_threshold")
|
|
3202
3842
|
|
|
@@ -3204,7 +3844,9 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
3204
3844
|
@pulumi.getter(name="drsScaleDescendantsShares")
|
|
3205
3845
|
def drs_scale_descendants_shares(self) -> pulumi.Output[Optional[str]]:
|
|
3206
3846
|
"""
|
|
3207
|
-
Enable scalable shares for all
|
|
3847
|
+
Enable scalable shares for all
|
|
3848
|
+
resource pools in the cluster. Can be one of `disabled` or
|
|
3849
|
+
`scaleCpuAndMemoryShares`. Default: `disabled`.
|
|
3208
3850
|
"""
|
|
3209
3851
|
return pulumi.get(self, "drs_scale_descendants_shares")
|
|
3210
3852
|
|
|
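A small sketch tying these DRS properties together: per-VM overrides, scalable shares, and the free-form advanced options map. The advanced option key shown is only an illustrative entry, and the datacenter name is a placeholder.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # placeholder

drs_cluster = vsphere.ComputeCluster(
    "drs-cluster",
    datacenter_id=dc.id,
    drs_enabled=True,
    drs_enable_vm_overrides=True,
    drs_scale_descendants_shares="scaleCpuAndMemoryShares",
    # Free-form key/value options passed straight to DRS/DPM (example key).
    drs_advanced_options={"TryBalanceVmsPerHost": "1"},
)
```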
@@ -3225,8 +3867,18 @@ class ComputeCluster(pulumi.CustomResource):
|
|
|
3225
3867
|
@pulumi.getter(name="forceEvacuateOnDestroy")
|
|
3226
3868
|
def force_evacuate_on_destroy(self) -> pulumi.Output[Optional[bool]]:
|
|
3227
3869
|
"""
|
|
3228
|
-
|
|
3229
|
-
|
|
3870
|
+
When destroying the resource, setting this to
|
|
3871
|
+
`true` will auto-remove any hosts that are currently a member of the cluster,
|
|
3872
|
+
as if they were removed by taking their entry out of `host_system_ids` (see
|
|
3873
|
+
below). This is an advanced
|
|
3874
|
+
option and should only be used for testing. Default: `false`.
|
|
3875
|
+
|
|
3876
|
+
> **NOTE:** Do not set `force_evacuate_on_destroy` in production operation as
|
|
3877
|
+
there are many pitfalls to its use when working with complex cluster
|
|
3878
|
+
configurations. Depending on the virtual machines currently on the cluster, and
|
|
3879
|
+
your DRS and HA settings, the full host evacuation may fail. Instead,
|
|
3880
|
+
incrementally remove hosts from your configuration by adjusting the contents of
|
|
3881
|
+
the `host_system_ids` attribute.
|
|
3230
3882
|
"""
|
|
3231
3883
|
return pulumi.get(self, "force_evacuate_on_destroy")
|
|
3232
3884
|
|
|
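In line with the note above, a hedged sketch of the recommended pattern: keep `force_evacuate_on_destroy` off and shrink `host_system_ids` instead, so hosts are evacuated one configuration change at a time. Datacenter and host names are placeholders.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # placeholder

# Remove "esxi-03" from this list in a later update to evacuate just that host,
# rather than relying on force_evacuate_on_destroy at destroy time.
host_names = ["esxi-01.example.com", "esxi-02.example.com", "esxi-03.example.com"]
host_ids = [
    vsphere.get_host(name=n, datacenter_id=dc.id).id for n in host_names
]

cluster = vsphere.ComputeCluster(
    "managed-cluster",
    datacenter_id=dc.id,
    host_system_ids=host_ids,
    force_evacuate_on_destroy=False,
)
```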
@@ -3234,9 +3886,11 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
     def ha_admission_control_failover_host_system_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
         """
-
-
-
+        Defines the
+        managed object IDs of hosts to use as dedicated failover
+        hosts. These hosts are kept as available as possible - admission control will
+        block access to the host, and DRS will ignore the host when making
+        recommendations.
         """
         return pulumi.get(self, "ha_admission_control_failover_host_system_ids")

@@ -3244,8 +3898,11 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
     def ha_admission_control_host_failure_tolerance(self) -> pulumi.Output[Optional[int]]:
         """
-        The maximum number
-
+        The maximum number
+        of failed hosts that admission control tolerates when making decisions on
+        whether to permit virtual machine operations. The maximum is one less than
+        the number of hosts in the cluster. Default: `1`.
+        <sup>\\*</sup>
         """
         return pulumi.get(self, "ha_admission_control_host_failure_tolerance")

@@ -3253,8 +3910,10 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
     def ha_admission_control_performance_tolerance(self) -> pulumi.Output[Optional[int]]:
         """
-        The percentage of
-
+        The percentage of
+        resource reduction that a cluster of virtual machines can tolerate in case of
+        a failover. A value of 0 produces warnings only, whereas a value of 100
+        disables the setting. Default: `100` (disabled).
         """
         return pulumi.get(self, "ha_admission_control_performance_tolerance")

@@ -3262,10 +3921,9 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haAdmissionControlPolicy")
     def ha_admission_control_policy(self) -> pulumi.Output[Optional[str]]:
         """
-        The type of admission control
-
-        slotPolicy
-        issues.
+        The type of admission control
+        policy to use with vSphere HA. Can be one of `resourcePercentage`,
+        `slotPolicy`, `failoverHosts`, or `disabled`. Default: `resourcePercentage`.
         """
         return pulumi.get(self, "ha_admission_control_policy")

@@ -3273,9 +3931,12 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
     def ha_admission_control_resource_percentage_auto_compute(self) -> pulumi.Output[Optional[bool]]:
         """
-
-
-
+        Automatically determine available resource percentages by subtracting the
+        average number of host resources represented by the
+        `ha_admission_control_host_failure_tolerance`
+        setting from the total amount of resources in the cluster. Disable to supply
+        user-defined values. Default: `true`.
+        <sup>\\*</sup>
         """
         return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")

@@ -3283,8 +3944,9 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
     def ha_admission_control_resource_percentage_cpu(self) -> pulumi.Output[Optional[int]]:
         """
-
-        the cluster to reserve for
+        Controls the
+        user-defined percentage of CPU resources in the cluster to reserve for
+        failover. Default: `100`.
         """
         return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")

@@ -3292,8 +3954,9 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
     def ha_admission_control_resource_percentage_memory(self) -> pulumi.Output[Optional[int]]:
         """
-
-        the cluster to reserve for
+        Controls the
+        user-defined percentage of memory resources in the cluster to reserve for
+        failover. Default: `100`.
         """
         return pulumi.get(self, "ha_admission_control_resource_percentage_memory")

@@ -3301,7 +3964,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
     def ha_admission_control_slot_policy_explicit_cpu(self) -> pulumi.Output[Optional[int]]:
         """
-
+        Controls the
+        user-defined CPU slot size, in MHz. Default: `32`.
         """
         return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")

@@ -3309,7 +3973,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
     def ha_admission_control_slot_policy_explicit_memory(self) -> pulumi.Output[Optional[int]]:
         """
-
+        Controls the
+        user-defined memory slot size, in MB. Default: `100`.
         """
         return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")

@@ -3317,9 +3982,10 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
     def ha_admission_control_slot_policy_use_explicit_size(self) -> pulumi.Output[Optional[bool]]:
         """
-
-
-
+        Controls
+        whether or not you wish to supply explicit values to CPU and memory slot
+        sizes. The default is `false`, which tells vSphere to gather an automatic
+        average based on all powered-on virtual machines currently in the cluster.
         """
         return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")

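As a rough illustration of how the admission-control fields above combine, a hedged sketch follows; the datacenter and standby host lookups are placeholders and the values are arbitrary, not a recommended configuration.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")                    # placeholder
standby = vsphere.get_host(name="esxi-03.example.com",        # placeholder failover host
                           datacenter_id=dc.id)

cluster = vsphere.ComputeCluster(
    "ha-cluster",
    name="ha-cluster-01",
    datacenter_id=dc.id,
    ha_enabled=True,
    # Reserve one dedicated failover host instead of the default
    # resourcePercentage policy.
    ha_admission_control_policy="failoverHosts",
    ha_admission_control_failover_host_system_ids=[standby.id],
    ha_admission_control_host_failure_tolerance=1,
)
```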
@@ -3327,7 +3993,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haAdvancedOptions")
     def ha_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
         """
-
+        A key/value map that specifies advanced
+        options for vSphere HA.
         """
         return pulumi.get(self, "ha_advanced_options")

@@ -3335,8 +4002,10 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haDatastoreApdRecoveryAction")
     def ha_datastore_apd_recovery_action(self) -> pulumi.Output[Optional[str]]:
         """
-
-
+        Controls the action to take
+        on virtual machines if an APD status on an affected datastore clears in the
+        middle of an APD event. Can be one of `none` or `reset`. Default: `none`.
+        <sup>\\*</sup>
         """
         return pulumi.get(self, "ha_datastore_apd_recovery_action")

@@ -3344,9 +4013,11 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haDatastoreApdResponse")
     def ha_datastore_apd_response(self) -> pulumi.Output[Optional[str]]:
         """
-
-        detected loss to all paths to a
-
+        Controls the action to take on
+        virtual machines when the cluster has detected loss to all paths to a
+        relevant datastore. Can be one of `disabled`, `warning`,
+        `restartConservative`, or `restartAggressive`. Default: `disabled`.
+        <sup>\\*</sup>
         """
         return pulumi.get(self, "ha_datastore_apd_response")

@@ -3354,8 +4025,10 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haDatastoreApdResponseDelay")
     def ha_datastore_apd_response_delay(self) -> pulumi.Output[Optional[int]]:
         """
-
-        the response action defined in
+        The time, in seconds,
+        to wait after an APD timeout event to run the response action defined in
+        `ha_datastore_apd_response`. Default: `180`
+        seconds (3 minutes). <sup>\\*</sup>
         """
         return pulumi.get(self, "ha_datastore_apd_response_delay")

@@ -3363,8 +4036,11 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haDatastorePdlResponse")
     def ha_datastore_pdl_response(self) -> pulumi.Output[Optional[str]]:
         """
-
-        detected a permanent device loss to a
+        Controls the action to take on
+        virtual machines when the cluster has detected a permanent device loss to a
+        relevant datastore. Can be one of `disabled`, `warning`, or
+        `restartAggressive`. Default: `disabled`.
+        <sup>\\*</sup>
         """
         return pulumi.get(self, "ha_datastore_pdl_response")

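A hedged sketch of the datastore-protection fields above in use; the datacenter lookup is a placeholder and the chosen responses are illustrative, not prescriptive. VM Component Protection (documented further down in this diff) is enabled alongside them.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # placeholder

cluster = vsphere.ComputeCluster(
    "storage-aware-cluster",
    name="cluster-01",
    datacenter_id=dc.id,
    ha_enabled=True,
    ha_vm_component_protection="enabled",
    # Restart VMs when all paths to a datastore are lost (APD) or a
    # permanent device loss (PDL) is detected.
    ha_datastore_apd_response="restartConservative",
    ha_datastore_apd_response_delay=180,
    ha_datastore_apd_recovery_action="reset",
    ha_datastore_pdl_response="restartAggressive",
)
```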
@@ -3372,7 +4048,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haEnabled")
     def ha_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-        Enable vSphere HA for this cluster.
+        Enable vSphere HA for this cluster. Default:
+        `false`.
         """
         return pulumi.get(self, "ha_enabled")

@@ -3380,8 +4057,10 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haHeartbeatDatastoreIds")
     def ha_heartbeat_datastore_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
         """
-        The list of managed object IDs for
-
+        The list of managed object IDs for
+        preferred datastores to use for HA heartbeating. This setting is only useful
+        when `ha_heartbeat_datastore_policy` is set
+        to either `userSelectedDs` or `allFeasibleDsWithUserPreference`.
         """
         return pulumi.get(self, "ha_heartbeat_datastore_ids")

@@ -3389,8 +4068,10 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haHeartbeatDatastorePolicy")
     def ha_heartbeat_datastore_policy(self) -> pulumi.Output[Optional[str]]:
         """
-        The selection policy for HA
-
+        The selection policy for HA
+        heartbeat datastores. Can be one of `allFeasibleDs`, `userSelectedDs`, or
+        `allFeasibleDsWithUserPreference`. Default:
+        `allFeasibleDsWithUserPreference`.
         """
         return pulumi.get(self, "ha_heartbeat_datastore_policy")

@@ -3398,8 +4079,10 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haHostIsolationResponse")
     def ha_host_isolation_response(self) -> pulumi.Output[Optional[str]]:
         """
-        The action to take on virtual
-
+        The action to take on virtual
+        machines when a host has detected that it has been isolated from the rest of
+        the cluster. Can be one of `none`, `powerOff`, or `shutdown`. Default:
+        `none`.
         """
         return pulumi.get(self, "ha_host_isolation_response")

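A hedged sketch combining the heartbeat-datastore and isolation-response settings documented above; the datastore name (`vsanDatastore`) and datacenter are placeholders and the values are illustrative only.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")                # placeholder
ds = vsphere.get_datastore(name="vsanDatastore",          # placeholder
                           datacenter_id=dc.id)

cluster = vsphere.ComputeCluster(
    "ha-heartbeat-cluster",
    name="cluster-01",
    datacenter_id=dc.id,
    ha_enabled=True,
    # Prefer a specific datastore for HA heartbeating.
    ha_heartbeat_datastore_policy="allFeasibleDsWithUserPreference",
    ha_heartbeat_datastore_ids=[ds.id],
    # Shut down VMs on a host that becomes isolated from the cluster.
    ha_host_isolation_response="shutdown",
)
```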
@@ -3407,7 +4090,9 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haHostMonitoring")
     def ha_host_monitoring(self) -> pulumi.Output[Optional[str]]:
         """
-        Global setting that controls whether
+        Global setting that controls whether
+        vSphere HA remediates virtual machines on host failure. Can be one of `enabled`
+        or `disabled`. Default: `enabled`.
         """
         return pulumi.get(self, "ha_host_monitoring")

@@ -3415,8 +4100,10 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haVmComponentProtection")
     def ha_vm_component_protection(self) -> pulumi.Output[Optional[str]]:
         """
-        Controls vSphere VM component
-
+        Controls vSphere VM component
+        protection for virtual machines in this cluster. Can be one of `enabled` or
+        `disabled`. Default: `enabled`.
+        <sup>\\*</sup>
         """
         return pulumi.get(self, "ha_vm_component_protection")

@@ -3424,8 +4111,13 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haVmDependencyRestartCondition")
     def ha_vm_dependency_restart_condition(self) -> pulumi.Output[Optional[str]]:
         """
-        The condition used to
-
+        The condition used to
+        determine whether or not virtual machines in a certain restart priority class
+        are online, allowing HA to move on to restarting virtual machines on the next
+        priority. Can be one of `none`, `poweredOn`, `guestHbStatusGreen`, or
+        `appHbStatusGreen`. The default is `none`, which means that a virtual machine
+        is considered ready immediately after a host is found to start it on.
+        <sup>\\*</sup>
         """
         return pulumi.get(self, "ha_vm_dependency_restart_condition")

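To show how the host-monitoring and dependency-restart fields interact, a hedged sketch follows; the datacenter lookup is a placeholder, and `guestHbStatusGreen` is chosen only to illustrate waiting for the guest heartbeat to report green before HA moves to the next restart-priority class.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # placeholder

cluster = vsphere.ComputeCluster(
    "dependency-aware-cluster",
    name="cluster-01",
    datacenter_id=dc.id,
    ha_enabled=True,
    ha_host_monitoring="enabled",
    ha_vm_component_protection="enabled",
    # Wait for the guest heartbeat to report green before HA starts
    # restarting the next restart-priority class.
    ha_vm_dependency_restart_condition="guestHbStatusGreen",
)
```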
@@ -3433,8 +4125,9 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haVmFailureInterval")
     def ha_vm_failure_interval(self) -> pulumi.Output[Optional[int]]:
         """
-
-
+        The time interval, in seconds, within which a heartbeat
+        from a virtual machine must be received; if no heartbeat is received within
+        this interval, the virtual machine is marked as failed. Default: `30` seconds.
         """
         return pulumi.get(self, "ha_vm_failure_interval")

@@ -3442,9 +4135,11 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haVmMaximumFailureWindow")
     def ha_vm_maximum_failure_window(self) -> pulumi.Output[Optional[int]]:
         """
-        The
-
-
+        The time, in seconds, for the reset window in
+        which `ha_vm_maximum_resets` can operate. When this
+        window expires, no more resets are attempted regardless of the setting
+        configured in `ha_vm_maximum_resets`. `-1` means no window, meaning an
+        unlimited reset time is allotted. Default: `-1` (no window).
         """
         return pulumi.get(self, "ha_vm_maximum_failure_window")

@@ -3452,7 +4147,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haVmMaximumResets")
     def ha_vm_maximum_resets(self) -> pulumi.Output[Optional[int]]:
         """
-        The maximum number of resets that HA will
+        The maximum number of resets that HA will
+        perform to a virtual machine when responding to a failure event. Default: `3`.
         """
         return pulumi.get(self, "ha_vm_maximum_resets")

@@ -3460,7 +4156,9 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haVmMinimumUptime")
     def ha_vm_minimum_uptime(self) -> pulumi.Output[Optional[int]]:
         """
-        The time, in seconds, that HA waits after
+        The time, in seconds, that HA waits after
+        powering on a virtual machine before monitoring for heartbeats. Default:
+        `120` seconds (2 minutes).
         """
         return pulumi.get(self, "ha_vm_minimum_uptime")

@@ -3468,8 +4166,9 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haVmMonitoring")
     def ha_vm_monitoring(self) -> pulumi.Output[Optional[str]]:
         """
-        The type of virtual machine monitoring to use
-
+        The type of virtual machine monitoring to use
+        when HA is enabled in the cluster. Can be one of `vmMonitoringDisabled`,
+        `vmMonitoringOnly`, or `vmAndAppMonitoring`. Default: `vmMonitoringDisabled`.
         """
         return pulumi.get(self, "ha_vm_monitoring")

@@ -3477,7 +4176,9 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haVmRestartAdditionalDelay")
     def ha_vm_restart_additional_delay(self) -> pulumi.Output[Optional[int]]:
         """
-        Additional delay in seconds
+        Additional delay, in seconds,
+        after ready condition is met. A VM is considered ready at this point.
+        Default: `0` seconds (no delay). <sup>\\*</sup>
         """
         return pulumi.get(self, "ha_vm_restart_additional_delay")

@@ -3485,8 +4186,9 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haVmRestartPriority")
     def ha_vm_restart_priority(self) -> pulumi.Output[Optional[str]]:
         """
-        The default restart priority
-
+        The default restart priority
+        for affected virtual machines when vSphere detects a host failure. Can be one
+        of `lowest`, `low`, `medium`, `high`, or `highest`. Default: `medium`.
         """
         return pulumi.get(self, "ha_vm_restart_priority")

@@ -3494,8 +4196,10 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="haVmRestartTimeout")
     def ha_vm_restart_timeout(self) -> pulumi.Output[Optional[int]]:
         """
-        The maximum time, in seconds,
-
+        The maximum time, in seconds,
+        that vSphere HA will wait for virtual machines in one priority to be ready
+        before proceeding with the next priority. Default: `600` seconds (10 minutes).
+        <sup>\\*</sup>
         """
         return pulumi.get(self, "ha_vm_restart_timeout")

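A hedged sketch of the VM-monitoring and restart-ordering fields documented above, with illustrative values and a placeholder datacenter lookup.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # placeholder

cluster = vsphere.ComputeCluster(
    "vm-monitoring-cluster",
    name="cluster-01",
    datacenter_id=dc.id,
    ha_enabled=True,
    # Monitor VM (not application) heartbeats and reset unresponsive VMs.
    ha_vm_monitoring="vmMonitoringOnly",
    ha_vm_failure_interval=30,
    ha_vm_minimum_uptime=120,
    ha_vm_maximum_resets=3,
    ha_vm_maximum_failure_window=3600,
    # Restart ordering after a host failure.
    ha_vm_restart_priority="high",
    ha_vm_restart_timeout=600,
)
```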
@@ -3503,23 +4207,18 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="hostClusterExitTimeout")
     def host_cluster_exit_timeout(self) -> pulumi.Output[Optional[int]]:
         """
-        The timeout for each host maintenance
+        The timeout, in seconds, for each host maintenance
+        mode operation when removing hosts from a cluster. Default: `3600` seconds (1 hour).
         """
         return pulumi.get(self, "host_cluster_exit_timeout")

-    @property
-    @pulumi.getter(name="hostImage")
-    def host_image(self) -> pulumi.Output[Optional['outputs.ComputeClusterHostImage']]:
-        """
-        Details about the host image which should be applied to the cluster.
-        """
-        return pulumi.get(self, "host_image")
-
     @property
     @pulumi.getter(name="hostManaged")
     def host_managed(self) -> pulumi.Output[Optional[bool]]:
         """
-
+        Can be set to `true` if compute cluster
+        membership will be managed through the `host` resource rather than the
+        `compute_cluster` resource. Conflicts with: `host_system_ids`.
         """
         return pulumi.get(self, "host_managed")

@@ -3527,7 +4226,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="hostSystemIds")
     def host_system_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
         """
-        The managed object IDs of
+        The managed object IDs of
+        the hosts to put in the cluster. Conflicts with: `host_managed`.
         """
        return pulumi.get(self, "host_system_ids")

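The two membership styles above are mutually exclusive. Below is a hedged sketch of the `host_managed` style, in which each `Host` resource joins the cluster itself instead of the cluster listing `host_system_ids`; the hostnames and credentials are placeholders.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # placeholder

# Membership is driven by Host resources, so the cluster itself does not
# set host_system_ids.
cluster = vsphere.ComputeCluster(
    "host-managed-cluster",
    name="cluster-01",
    datacenter_id=dc.id,
    host_managed=True,
)

host = vsphere.Host(
    "esxi-01",
    hostname="esxi-01.example.com",  # placeholder host
    username="root",
    password="password",              # use stack config secrets in practice
    cluster=cluster.id,
)
```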
@@ -3543,7 +4243,10 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="proactiveHaAutomationLevel")
     def proactive_ha_automation_level(self) -> pulumi.Output[Optional[str]]:
         """
-
+        Determines how the host
+        quarantine, maintenance mode, or virtual machine migration recommendations
+        made by proactive HA are to be handled. Can be one of `Automated` or
+        `Manual`. Default: `Manual`. <sup>\\*</sup>
         """
         return pulumi.get(self, "proactive_ha_automation_level")

@@ -3551,7 +4254,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="proactiveHaEnabled")
     def proactive_ha_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-        Enables
+        Enables Proactive HA. Default: `false`.
+        <sup>\\*</sup>
         """
         return pulumi.get(self, "proactive_ha_enabled")

@@ -3559,8 +4263,12 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="proactiveHaModerateRemediation")
     def proactive_ha_moderate_remediation(self) -> pulumi.Output[Optional[str]]:
         """
-        The configured remediation
-
+        The configured remediation
+        for moderately degraded hosts. Can be one of `MaintenanceMode` or
+        `QuarantineMode`. Note that this cannot be set to `MaintenanceMode` when
+        `proactive_ha_severe_remediation` is set
+        to `QuarantineMode`. Default: `QuarantineMode`.
+        <sup>\\*</sup>
         """
         return pulumi.get(self, "proactive_ha_moderate_remediation")

@@ -3568,7 +4276,9 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="proactiveHaProviderIds")
     def proactive_ha_provider_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
         """
-        The list of IDs for health update
+        The list of IDs for health update
+        providers configured for this cluster.
+        <sup>\\*</sup>
         """
         return pulumi.get(self, "proactive_ha_provider_ids")

@@ -3576,8 +4286,12 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="proactiveHaSevereRemediation")
     def proactive_ha_severe_remediation(self) -> pulumi.Output[Optional[str]]:
         """
-        The configured remediation for
-
+        The configured remediation for
+        severely degraded hosts. Can be one of `MaintenanceMode` or `QuarantineMode`.
+        Note that this cannot be set to `QuarantineMode` when
+        `proactive_ha_moderate_remediation` is
+        set to `MaintenanceMode`. Default: `QuarantineMode`.
+        <sup>\\*</sup>
         """
         return pulumi.get(self, "proactive_ha_severe_remediation")

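A hedged sketch of Proactive HA that respects the ordering constraint noted above (the moderate remediation may not be stricter than the severe one); the health-update provider ID is a hypothetical value that would normally come from a hardware vendor plugin.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # placeholder

cluster = vsphere.ComputeCluster(
    "proactive-ha-cluster",
    name="cluster-01",
    datacenter_id=dc.id,
    ha_enabled=True,
    proactive_ha_enabled=True,
    proactive_ha_automation_level="Automated",
    # Moderate degradation quarantines the host; severe degradation
    # puts it into maintenance mode.
    proactive_ha_moderate_remediation="QuarantineMode",
    proactive_ha_severe_remediation="MaintenanceMode",
    proactive_ha_provider_ids=["com.example.health-provider"],  # placeholder
)
```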
@@ -3605,7 +4319,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanCompressionEnabled")
     def vsan_compression_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-
+        Enables vSAN compression on the
+        cluster.
         """
         return pulumi.get(self, "vsan_compression_enabled")

@@ -3613,7 +4328,9 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanDedupEnabled")
     def vsan_dedup_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-
+        Enables vSAN deduplication on the cluster.
+        Cannot be independently set to `true`. When vSAN deduplication is enabled, vSAN
+        compression must also be enabled.
         """
         return pulumi.get(self, "vsan_dedup_enabled")

@@ -3621,7 +4338,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanDiskGroups")
     def vsan_disk_groups(self) -> pulumi.Output[Sequence['outputs.ComputeClusterVsanDiskGroup']]:
         """
-
+        Represents the configuration of a host disk
+        group in the cluster.
         """
         return pulumi.get(self, "vsan_disk_groups")

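A hedged sketch of a vSAN-enabled cluster respecting the coupling described above (deduplication requires compression); the datacenter lookup is a placeholder and disk-group details are omitted.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # placeholder

cluster = vsphere.ComputeCluster(
    "vsan-cluster",
    name="vsan-cluster-01",
    datacenter_id=dc.id,
    vsan_enabled=True,
    # Deduplication cannot be enabled on its own; compression must be on too.
    vsan_compression_enabled=True,
    vsan_dedup_enabled=True,
    vsan_performance_enabled=True,
)
```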
@@ -3629,7 +4347,10 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanDitEncryptionEnabled")
     def vsan_dit_encryption_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-
+        Enables vSAN data-in-transit
+        encryption on the cluster. Conflicts with `vsan_remote_datastore_ids`, i.e.,
+        vSAN data-in-transit feature cannot be enabled with the vSAN HCI Mesh feature
+        at the same time.
         """
         return pulumi.get(self, "vsan_dit_encryption_enabled")

@@ -3637,7 +4358,9 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanDitRekeyInterval")
     def vsan_dit_rekey_interval(self) -> pulumi.Output[int]:
         """
-
+        Indicates the rekey interval in
+        minutes for data-in-transit encryption. The valid rekey interval is 30 to
+        10800 (feature defaults to 1440). Conflicts with `vsan_remote_datastore_ids`.
         """
         return pulumi.get(self, "vsan_dit_rekey_interval")

@@ -3645,7 +4368,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanEnabled")
     def vsan_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-
+        Enables vSAN on the cluster.
         """
         return pulumi.get(self, "vsan_enabled")

@@ -3653,7 +4376,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanEsaEnabled")
     def vsan_esa_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-
+        Enables vSAN ESA on the cluster.
         """
         return pulumi.get(self, "vsan_esa_enabled")

@@ -3661,7 +4384,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanFaultDomains")
     def vsan_fault_domains(self) -> pulumi.Output[Optional[Sequence['outputs.ComputeClusterVsanFaultDomain']]]:
         """
-
+        Configurations of vSAN fault domains.
         """
         return pulumi.get(self, "vsan_fault_domains")

@@ -3669,7 +4392,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
     def vsan_network_diagnostic_mode_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-
+        Enables network
+        diagnostic mode for vSAN performance service on the cluster.
         """
         return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")

@@ -3677,7 +4401,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanPerformanceEnabled")
     def vsan_performance_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-
+        Enables vSAN performance service on
+        the cluster. Default: `true`.
         """
         return pulumi.get(self, "vsan_performance_enabled")

@@ -3685,7 +4410,10 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanRemoteDatastoreIds")
     def vsan_remote_datastore_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
         """
-        The
+        The remote vSAN datastore IDs to be
+        mounted to this cluster. Conflicts with `vsan_dit_encryption_enabled` and
+        `vsan_dit_rekey_interval`, i.e., vSAN HCI Mesh feature cannot be enabled with
+        data-in-transit encryption feature at the same time.
         """
         return pulumi.get(self, "vsan_remote_datastore_ids")

@@ -3693,7 +4421,7 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanStretchedCluster")
     def vsan_stretched_cluster(self) -> pulumi.Output[Optional['outputs.ComputeClusterVsanStretchedCluster']]:
         """
-
+        Configurations of vSAN stretched cluster.
         """
         return pulumi.get(self, "vsan_stretched_cluster")

@@ -3701,7 +4429,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanUnmapEnabled")
     def vsan_unmap_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-
+        Enables vSAN unmap on the cluster.
+        You must explicitly enable vSAN unmap when you enable vSAN ESA on the cluster.
         """
         return pulumi.get(self, "vsan_unmap_enabled")

@@ -3709,7 +4438,8 @@ class ComputeCluster(pulumi.CustomResource):
     @pulumi.getter(name="vsanVerboseModeEnabled")
     def vsan_verbose_mode_enabled(self) -> pulumi.Output[Optional[bool]]:
         """
-
+        Enables verbose mode for vSAN
+        performance service on the cluster.
         """
         return pulumi.get(self, "vsan_verbose_mode_enabled")

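A hedged sketch of a vSAN ESA cluster following the notes above: unmap is enabled explicitly alongside ESA, and data-in-transit encryption is used instead of HCI Mesh remote datastores, since the two conflict. The rekey interval and names are illustrative placeholders.

```python
import pulumi_vsphere as vsphere

dc = vsphere.get_datacenter(name="dc-01")  # placeholder

cluster = vsphere.ComputeCluster(
    "vsan-esa-cluster",
    name="vsan-esa-cluster-01",
    datacenter_id=dc.id,
    vsan_enabled=True,
    vsan_esa_enabled=True,
    # vSAN unmap must be enabled explicitly when ESA is enabled.
    vsan_unmap_enabled=True,
    # Data-in-transit encryption; conflicts with vsan_remote_datastore_ids.
    vsan_dit_encryption_enabled=True,
    vsan_dit_rekey_interval=1440,
)
```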