@pulumi/vsphere 4.16.0 → 4.16.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. package/computeCluster.d.ts +87 -195
  2. package/computeCluster.js +141 -141
  3. package/computeCluster.js.map +1 -1
  4. package/computeClusterHostGroup.js +8 -8
  5. package/computeClusterHostGroup.js.map +1 -1
  6. package/computeClusterVmAffinityRule.js +13 -13
  7. package/computeClusterVmAffinityRule.js.map +1 -1
  8. package/computeClusterVmAntiAffinityRule.js +13 -13
  9. package/computeClusterVmAntiAffinityRule.js.map +1 -1
  10. package/computeClusterVmDependencyRule.js +16 -16
  11. package/computeClusterVmDependencyRule.js.map +1 -1
  12. package/computeClusterVmGroup.js +8 -8
  13. package/computeClusterVmGroup.js.map +1 -1
  14. package/computeClusterVmHostRule.js +17 -17
  15. package/computeClusterVmHostRule.js.map +1 -1
  16. package/config/vars.js +8 -16
  17. package/config/vars.js.map +1 -1
  18. package/configurationProfile.js +9 -9
  19. package/configurationProfile.js.map +1 -1
  20. package/contentLibrary.js +12 -12
  21. package/contentLibrary.js.map +1 -1
  22. package/contentLibraryItem.js +14 -14
  23. package/contentLibraryItem.js.map +1 -1
  24. package/customAttribute.js +5 -5
  25. package/customAttribute.js.map +1 -1
  26. package/datacenter.js +10 -10
  27. package/datacenter.js.map +1 -1
  28. package/datastoreCluster.d.ts +15 -30
  29. package/datastoreCluster.js +52 -52
  30. package/datastoreCluster.js.map +1 -1
  31. package/datastoreClusterVmAntiAffinityRule.js +13 -13
  32. package/datastoreClusterVmAntiAffinityRule.js.map +1 -1
  33. package/distributedPortGroup.d.ts +9 -18
  34. package/distributedPortGroup.js +96 -96
  35. package/distributedPortGroup.js.map +1 -1
  36. package/distributedVirtualSwitch.d.ts +15 -30
  37. package/distributedVirtualSwitch.js +191 -191
  38. package/distributedVirtualSwitch.js.map +1 -1
  39. package/distributedVirtualSwitchPvlanMapping.js +13 -13
  40. package/distributedVirtualSwitchPvlanMapping.js.map +1 -1
  41. package/dpmHostOverride.js +11 -11
  42. package/dpmHostOverride.js.map +1 -1
  43. package/drsVmOverride.js +11 -11
  44. package/drsVmOverride.js.map +1 -1
  45. package/entityPermissions.js +10 -10
  46. package/entityPermissions.js.map +1 -1
  47. package/file.js +18 -18
  48. package/file.js.map +1 -1
  49. package/folder.js +13 -13
  50. package/folder.js.map +1 -1
  51. package/getGuestOsCustomization.d.ts +4 -0
  52. package/getGuestOsCustomization.js +4 -0
  53. package/getGuestOsCustomization.js.map +1 -1
  54. package/guestOsCustomization.js +13 -13
  55. package/guestOsCustomization.js.map +1 -1
  56. package/haVmOverride.d.ts +33 -69
  57. package/haVmOverride.js +33 -33
  58. package/haVmOverride.js.map +1 -1
  59. package/host.js +34 -34
  60. package/host.js.map +1 -1
  61. package/hostPortGroup.d.ts +9 -18
  62. package/hostPortGroup.js +40 -40
  63. package/hostPortGroup.js.map +1 -1
  64. package/hostVirtualSwitch.d.ts +9 -18
  65. package/hostVirtualSwitch.js +46 -46
  66. package/hostVirtualSwitch.js.map +1 -1
  67. package/license.js +10 -10
  68. package/license.js.map +1 -1
  69. package/nasDatastore.js +34 -34
  70. package/nasDatastore.js.map +1 -1
  71. package/offlineSoftwareDepot.js +5 -5
  72. package/offlineSoftwareDepot.js.map +1 -1
  73. package/package.json +2 -2
  74. package/provider.js +13 -14
  75. package/provider.js.map +1 -1
  76. package/resourcePool.js +32 -32
  77. package/resourcePool.js.map +1 -1
  78. package/role.js +6 -6
  79. package/role.js.map +1 -1
  80. package/storageDrsVmOverride.js +13 -13
  81. package/storageDrsVmOverride.js.map +1 -1
  82. package/supervisor.js +51 -51
  83. package/supervisor.js.map +1 -1
  84. package/tag.js +8 -8
  85. package/tag.js.map +1 -1
  86. package/tagCategory.js +11 -11
  87. package/tagCategory.js.map +1 -1
  88. package/utilities.js +18 -29
  89. package/utilities.js.map +1 -1
  90. package/vappContainer.js +32 -32
  91. package/vappContainer.js.map +1 -1
  92. package/vappEntity.js +23 -23
  93. package/vappEntity.js.map +1 -1
  94. package/virtualDisk.js +18 -18
  95. package/virtualDisk.js.map +1 -1
  96. package/virtualMachine.d.ts +39 -84
  97. package/virtualMachine.js +167 -167
  98. package/virtualMachine.js.map +1 -1
  99. package/virtualMachineClass.js +15 -15
  100. package/virtualMachineClass.js.map +1 -1
  101. package/virtualMachineSnapshot.js +20 -20
  102. package/virtualMachineSnapshot.js.map +1 -1
  103. package/vmStoragePolicy.js +8 -8
  104. package/vmStoragePolicy.js.map +1 -1
  105. package/vmfsDatastore.js +24 -24
  106. package/vmfsDatastore.js.map +1 -1
  107. package/vnic.js +22 -22
  108. package/vnic.js.map +1 -1
@@ -126,14 +126,11 @@ export declare class ComputeCluster extends pulumi.CustomResource {
126
126
  */
127
127
  readonly dpmAutomationLevel: pulumi.Output<string | undefined>;
128
128
  /**
129
- * Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
130
- * machines in the cluster. Requires that DRS be enabled.
129
+ * Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual machines in the cluster. Requires that DRS be enabled.
131
130
  */
132
131
  readonly dpmEnabled: pulumi.Output<boolean | undefined>;
133
132
  /**
134
- * A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
135
- * affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
136
- * setting.
133
+ * A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting.
137
134
  */
138
135
  readonly dpmThreshold: pulumi.Output<number | undefined>;
139
136
  /**
@@ -143,8 +140,7 @@ export declare class ComputeCluster extends pulumi.CustomResource {
143
140
  [key: string]: string;
144
141
  } | undefined>;
145
142
  /**
146
- * The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
147
- * fullyAutomated.
143
+ * The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated.
148
144
  */
149
145
  readonly drsAutomationLevel: pulumi.Output<string | undefined>;
150
146
  /**
@@ -160,8 +156,7 @@ export declare class ComputeCluster extends pulumi.CustomResource {
160
156
  */
161
157
  readonly drsEnabled: pulumi.Output<boolean | undefined>;
162
158
  /**
163
- * A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
164
- * more imbalance while a higher setting will tolerate less.
159
+ * A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less.
165
160
  */
166
161
  readonly drsMigrationThreshold: pulumi.Output<number | undefined>;
167
162
  /**
@@ -178,47 +173,35 @@ export declare class ComputeCluster extends pulumi.CustomResource {
178
173
  */
179
174
  readonly folder: pulumi.Output<string | undefined>;
180
175
  /**
181
- * Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
182
- * for testing and is not recommended in normal use.
176
+ * Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists for testing and is not recommended in normal use.
183
177
  */
184
178
  readonly forceEvacuateOnDestroy: pulumi.Output<boolean | undefined>;
185
179
  /**
186
- * When haAdmissionControlPolicy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
187
- * failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
188
- * will ignore the host when making recommendations.
180
+ * When haAdmissionControlPolicy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
189
181
  */
190
182
  readonly haAdmissionControlFailoverHostSystemIds: pulumi.Output<string[] | undefined>;
191
183
  /**
192
- * The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
193
- * machine operations. The maximum is one less than the number of hosts in the cluster.
184
+ * The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster.
194
185
  */
195
186
  readonly haAdmissionControlHostFailureTolerance: pulumi.Output<number | undefined>;
196
187
  /**
197
- * The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
198
- * warnings only, whereas a value of 100 disables the setting.
188
+ * The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting.
199
189
  */
200
190
  readonly haAdmissionControlPerformanceTolerance: pulumi.Output<number | undefined>;
201
191
  /**
202
- * The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
203
- * permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
204
- * slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
205
- * issues.
192
+ * The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service issues.
206
193
  */
207
194
  readonly haAdmissionControlPolicy: pulumi.Output<string | undefined>;
208
195
  /**
209
- * When haAdmissionControlPolicy is resourcePercentage, automatically determine available resource percentages by
210
- * subtracting the average number of host resources represented by the haAdmissionControlHostFailureTolerance setting from
211
- * the total amount of resources in the cluster. Disable to supply user-defined values.
196
+ * When haAdmissionControlPolicy is resourcePercentage, automatically determine available resource percentages by subtracting the average number of host resources represented by the haAdmissionControlHostFailureTolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values.
212
197
  */
213
198
  readonly haAdmissionControlResourcePercentageAutoCompute: pulumi.Output<boolean | undefined>;
214
199
  /**
215
- * When haAdmissionControlPolicy is resourcePercentage, this controls the user-defined percentage of CPU resources in the
216
- * cluster to reserve for failover.
200
+ * When haAdmissionControlPolicy is resourcePercentage, this controls the user-defined percentage of CPU resources in the cluster to reserve for failover.
217
201
  */
218
202
  readonly haAdmissionControlResourcePercentageCpu: pulumi.Output<number | undefined>;
219
203
  /**
220
- * When haAdmissionControlPolicy is resourcePercentage, this controls the user-defined percentage of memory resources in
221
- * the cluster to reserve for failover.
204
+ * When haAdmissionControlPolicy is resourcePercentage, this controls the user-defined percentage of memory resources in the cluster to reserve for failover.
222
205
  */
223
206
  readonly haAdmissionControlResourcePercentageMemory: pulumi.Output<number | undefined>;
224
207
  /**
@@ -230,9 +213,7 @@ export declare class ComputeCluster extends pulumi.CustomResource {
230
213
  */
231
214
  readonly haAdmissionControlSlotPolicyExplicitMemory: pulumi.Output<number | undefined>;
232
215
  /**
233
- * When haAdmissionControlPolicy is slotPolicy, this setting controls whether or not you wish to supply explicit values to
234
- * CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
235
- * currently in the cluster.
216
+ * When haAdmissionControlPolicy is slotPolicy, this setting controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is to gather an automatic average based on all powered-on virtual machines currently in the cluster.
236
217
  */
237
218
  readonly haAdmissionControlSlotPolicyUseExplicitSize: pulumi.Output<boolean | undefined>;
238
219
  /**
@@ -242,23 +223,19 @@ export declare class ComputeCluster extends pulumi.CustomResource {
242
223
  [key: string]: string;
243
224
  } | undefined>;
244
225
  /**
245
- * When haVmComponentProtection is enabled, controls the action to take on virtual machines if an APD status on an affected
246
- * datastore clears in the middle of an APD event. Can be one of none or reset.
226
+ * When haVmComponentProtection is enabled, controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset.
247
227
  */
248
228
  readonly haDatastoreApdRecoveryAction: pulumi.Output<string | undefined>;
249
229
  /**
250
- * When haVmComponentProtection is enabled, controls the action to take on virtual machines when the cluster has detected
251
- * loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
230
+ * When haVmComponentProtection is enabled, controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
252
231
  */
253
232
  readonly haDatastoreApdResponse: pulumi.Output<string | undefined>;
254
233
  /**
255
- * When haVmComponentProtection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the
256
- * response action defined in ha_datastore_apd_response.
234
+ * When haVmComponentProtection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the response action defined in ha_datastore_apd_response.
257
235
  */
258
236
  readonly haDatastoreApdResponseDelay: pulumi.Output<number | undefined>;
259
237
  /**
260
- * When haVmComponentProtection is enabled, controls the action to take on virtual machines when the cluster has detected a
261
- * permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
238
+ * When haVmComponentProtection is enabled, controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
262
239
  */
263
240
  readonly haDatastorePdlResponse: pulumi.Output<string | undefined>;
264
241
  /**
@@ -266,18 +243,15 @@ export declare class ComputeCluster extends pulumi.CustomResource {
266
243
  */
267
244
  readonly haEnabled: pulumi.Output<boolean | undefined>;
268
245
  /**
269
- * The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
270
- * haHeartbeatDatastorePolicy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
246
+ * The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when haHeartbeatDatastorePolicy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
271
247
  */
272
248
  readonly haHeartbeatDatastoreIds: pulumi.Output<string[] | undefined>;
273
249
  /**
274
- * The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
275
- * allFeasibleDsWithUserPreference.
250
+ * The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference.
276
251
  */
277
252
  readonly haHeartbeatDatastorePolicy: pulumi.Output<string | undefined>;
278
253
  /**
279
- * The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
280
- * Can be one of none, powerOff, or shutdown.
254
+ * The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown.
281
255
  */
282
256
  readonly haHostIsolationResponse: pulumi.Output<string | undefined>;
283
257
  /**
@@ -285,24 +259,19 @@ export declare class ComputeCluster extends pulumi.CustomResource {
285
259
  */
286
260
  readonly haHostMonitoring: pulumi.Output<string | undefined>;
287
261
  /**
288
- * Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
289
- * failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
262
+ * Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
290
263
  */
291
264
  readonly haVmComponentProtection: pulumi.Output<string | undefined>;
292
265
  /**
293
- * The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
294
- * on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
266
+ * The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
295
267
  */
296
268
  readonly haVmDependencyRestartCondition: pulumi.Output<string | undefined>;
297
269
  /**
298
- * If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
299
- * failed. The value is in seconds.
270
+ * If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. The value is in seconds.
300
271
  */
301
272
  readonly haVmFailureInterval: pulumi.Output<number | undefined>;
302
273
  /**
303
- * The length of the reset window in which haVmMaximumResets can operate. When this window expires, no more resets are
304
- * attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
305
- * time is allotted.
274
+ * The length of the reset window in which haVmMaximumResets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted.
306
275
  */
307
276
  readonly haVmMaximumFailureWindow: pulumi.Output<number | undefined>;
308
277
  /**
@@ -314,8 +283,7 @@ export declare class ComputeCluster extends pulumi.CustomResource {
314
283
  */
315
284
  readonly haVmMinimumUptime: pulumi.Output<number | undefined>;
316
285
  /**
317
- * The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
318
- * vmMonitoringOnly, or vmAndAppMonitoring.
286
+ * The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring.
319
287
  */
320
288
  readonly haVmMonitoring: pulumi.Output<string | undefined>;
321
289
  /**
@@ -323,13 +291,11 @@ export declare class ComputeCluster extends pulumi.CustomResource {
323
291
  */
324
292
  readonly haVmRestartAdditionalDelay: pulumi.Output<number | undefined>;
325
293
  /**
326
- * The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
327
- * high, or highest.
294
+ * The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest.
328
295
  */
329
296
  readonly haVmRestartPriority: pulumi.Output<string | undefined>;
330
297
  /**
331
- * The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
332
- * proceeding with the next priority.
298
+ * The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority.
333
299
  */
334
300
  readonly haVmRestartTimeout: pulumi.Output<number | undefined>;
335
301
  /**
@@ -361,8 +327,7 @@ export declare class ComputeCluster extends pulumi.CustomResource {
361
327
  */
362
328
  readonly proactiveHaEnabled: pulumi.Output<boolean | undefined>;
363
329
  /**
364
- * The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
365
- * this cannot be set to MaintenanceMode when proactiveHaSevereRemediation is set to QuarantineMode.
330
+ * The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactiveHaSevereRemediation is set to QuarantineMode.
366
331
  */
367
332
  readonly proactiveHaModerateRemediation: pulumi.Output<string | undefined>;
368
333
  /**
@@ -370,8 +335,7 @@ export declare class ComputeCluster extends pulumi.CustomResource {
370
335
  */
371
336
  readonly proactiveHaProviderIds: pulumi.Output<string[] | undefined>;
372
337
  /**
373
- * The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
374
- * cannot be set to QuarantineMode when proactiveHaModerateRemediation is set to MaintenanceMode.
338
+ * The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactiveHaModerateRemediation is set to MaintenanceMode.
375
339
  */
376
340
  readonly proactiveHaSevereRemediation: pulumi.Output<string | undefined>;
377
341
  /**
@@ -475,14 +439,11 @@ export interface ComputeClusterState {
475
439
  */
476
440
  dpmAutomationLevel?: pulumi.Input<string>;
477
441
  /**
478
- * Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
479
- * machines in the cluster. Requires that DRS be enabled.
442
+ * Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual machines in the cluster. Requires that DRS be enabled.
480
443
  */
481
444
  dpmEnabled?: pulumi.Input<boolean>;
482
445
  /**
483
- * A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
484
- * affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
485
- * setting.
446
+ * A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting.
486
447
  */
487
448
  dpmThreshold?: pulumi.Input<number>;
488
449
  /**
@@ -492,8 +453,7 @@ export interface ComputeClusterState {
492
453
  [key: string]: pulumi.Input<string>;
493
454
  }>;
494
455
  /**
495
- * The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
496
- * fullyAutomated.
456
+ * The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated.
497
457
  */
498
458
  drsAutomationLevel?: pulumi.Input<string>;
499
459
  /**
@@ -509,8 +469,7 @@ export interface ComputeClusterState {
509
469
  */
510
470
  drsEnabled?: pulumi.Input<boolean>;
511
471
  /**
512
- * A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
513
- * more imbalance while a higher setting will tolerate less.
472
+ * A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less.
514
473
  */
515
474
  drsMigrationThreshold?: pulumi.Input<number>;
516
475
  /**
@@ -527,47 +486,35 @@ export interface ComputeClusterState {
527
486
  */
528
487
  folder?: pulumi.Input<string>;
529
488
  /**
530
- * Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
531
- * for testing and is not recommended in normal use.
489
+ * Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists for testing and is not recommended in normal use.
532
490
  */
533
491
  forceEvacuateOnDestroy?: pulumi.Input<boolean>;
534
492
  /**
535
- * When haAdmissionControlPolicy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
536
- * failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
537
- * will ignore the host when making recommendations.
493
+ * When haAdmissionControlPolicy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
538
494
  */
539
495
  haAdmissionControlFailoverHostSystemIds?: pulumi.Input<pulumi.Input<string>[]>;
540
496
  /**
541
- * The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
542
- * machine operations. The maximum is one less than the number of hosts in the cluster.
497
+ * The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster.
543
498
  */
544
499
  haAdmissionControlHostFailureTolerance?: pulumi.Input<number>;
545
500
  /**
546
- * The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
547
- * warnings only, whereas a value of 100 disables the setting.
501
+ * The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting.
548
502
  */
549
503
  haAdmissionControlPerformanceTolerance?: pulumi.Input<number>;
550
504
  /**
551
- * The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
552
- * permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
553
- * slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
554
- * issues.
505
+ * The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service issues.
555
506
  */
556
507
  haAdmissionControlPolicy?: pulumi.Input<string>;
557
508
  /**
558
- * When haAdmissionControlPolicy is resourcePercentage, automatically determine available resource percentages by
559
- * subtracting the average number of host resources represented by the haAdmissionControlHostFailureTolerance setting from
560
- * the total amount of resources in the cluster. Disable to supply user-defined values.
509
+ * When haAdmissionControlPolicy is resourcePercentage, automatically determine available resource percentages by subtracting the average number of host resources represented by the haAdmissionControlHostFailureTolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values.
561
510
  */
562
511
  haAdmissionControlResourcePercentageAutoCompute?: pulumi.Input<boolean>;
563
512
  /**
564
- * When haAdmissionControlPolicy is resourcePercentage, this controls the user-defined percentage of CPU resources in the
565
- * cluster to reserve for failover.
513
+ * When haAdmissionControlPolicy is resourcePercentage, this controls the user-defined percentage of CPU resources in the cluster to reserve for failover.
566
514
  */
567
515
  haAdmissionControlResourcePercentageCpu?: pulumi.Input<number>;
568
516
  /**
569
- * When haAdmissionControlPolicy is resourcePercentage, this controls the user-defined percentage of memory resources in
570
- * the cluster to reserve for failover.
517
+ * When haAdmissionControlPolicy is resourcePercentage, this controls the user-defined percentage of memory resources in the cluster to reserve for failover.
571
518
  */
572
519
  haAdmissionControlResourcePercentageMemory?: pulumi.Input<number>;
573
520
  /**
@@ -579,9 +526,7 @@ export interface ComputeClusterState {
579
526
  */
580
527
  haAdmissionControlSlotPolicyExplicitMemory?: pulumi.Input<number>;
581
528
  /**
582
- * When haAdmissionControlPolicy is slotPolicy, this setting controls whether or not you wish to supply explicit values to
583
- * CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
584
- * currently in the cluster.
529
+ * When haAdmissionControlPolicy is slotPolicy, this setting controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is to gather an automatic average based on all powered-on virtual machines currently in the cluster.
585
530
  */
586
531
  haAdmissionControlSlotPolicyUseExplicitSize?: pulumi.Input<boolean>;
587
532
  /**
@@ -591,23 +536,19 @@ export interface ComputeClusterState {
591
536
  [key: string]: pulumi.Input<string>;
592
537
  }>;
593
538
  /**
594
- * When haVmComponentProtection is enabled, controls the action to take on virtual machines if an APD status on an affected
595
- * datastore clears in the middle of an APD event. Can be one of none or reset.
539
+ * When haVmComponentProtection is enabled, controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset.
596
540
  */
597
541
  haDatastoreApdRecoveryAction?: pulumi.Input<string>;
598
542
  /**
599
- * When haVmComponentProtection is enabled, controls the action to take on virtual machines when the cluster has detected
600
- * loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
543
+ * When haVmComponentProtection is enabled, controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
601
544
  */
602
545
  haDatastoreApdResponse?: pulumi.Input<string>;
603
546
  /**
604
- * When haVmComponentProtection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the
605
- * response action defined in ha_datastore_apd_response.
547
+ * When haVmComponentProtection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the response action defined in ha_datastore_apd_response.
606
548
  */
607
549
  haDatastoreApdResponseDelay?: pulumi.Input<number>;
608
550
  /**
609
- * When haVmComponentProtection is enabled, controls the action to take on virtual machines when the cluster has detected a
610
- * permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
551
+ * When haVmComponentProtection is enabled, controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
611
552
  */
612
553
  haDatastorePdlResponse?: pulumi.Input<string>;
613
554
  /**
@@ -615,18 +556,15 @@ export interface ComputeClusterState {
615
556
  */
616
557
  haEnabled?: pulumi.Input<boolean>;
617
558
  /**
618
- * The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
619
- * haHeartbeatDatastorePolicy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
559
+ * The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when haHeartbeatDatastorePolicy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
620
560
  */
621
561
  haHeartbeatDatastoreIds?: pulumi.Input<pulumi.Input<string>[]>;
622
562
  /**
623
- * The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
624
- * allFeasibleDsWithUserPreference.
563
+ * The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference.
625
564
  */
626
565
  haHeartbeatDatastorePolicy?: pulumi.Input<string>;
627
566
  /**
628
- * The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
629
- * Can be one of none, powerOff, or shutdown.
567
+ * The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown.
630
568
  */
631
569
  haHostIsolationResponse?: pulumi.Input<string>;
632
570
  /**
@@ -634,24 +572,19 @@ export interface ComputeClusterState {
634
572
  */
635
573
  haHostMonitoring?: pulumi.Input<string>;
636
574
  /**
637
- * Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
638
- * failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
575
+ * Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
639
576
  */
640
577
  haVmComponentProtection?: pulumi.Input<string>;
641
578
  /**
642
- * The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
643
- * on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
579
+ * The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
644
580
  */
645
581
  haVmDependencyRestartCondition?: pulumi.Input<string>;
646
582
  /**
647
- * If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
648
- * failed. The value is in seconds.
583
+ * If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. The value is in seconds.
649
584
  */
650
585
  haVmFailureInterval?: pulumi.Input<number>;
651
586
  /**
652
- * The length of the reset window in which haVmMaximumResets can operate. When this window expires, no more resets are
653
- * attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
654
- * time is allotted.
587
+ * The length of the reset window in which haVmMaximumResets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted.
655
588
  */
656
589
  haVmMaximumFailureWindow?: pulumi.Input<number>;
657
590
  /**
@@ -663,8 +596,7 @@ export interface ComputeClusterState {
663
596
  */
664
597
  haVmMinimumUptime?: pulumi.Input<number>;
665
598
  /**
666
- * The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
667
- * vmMonitoringOnly, or vmAndAppMonitoring.
599
+ * The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring.
668
600
  */
669
601
  haVmMonitoring?: pulumi.Input<string>;
670
602
  /**
@@ -672,13 +604,11 @@ export interface ComputeClusterState {
672
604
  */
673
605
  haVmRestartAdditionalDelay?: pulumi.Input<number>;
674
606
  /**
675
- * The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
676
- * high, or highest.
607
+ * The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest.
677
608
  */
678
609
  haVmRestartPriority?: pulumi.Input<string>;
679
610
  /**
680
- * The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
681
- * proceeding with the next priority.
611
+ * The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority.
682
612
  */
683
613
  haVmRestartTimeout?: pulumi.Input<number>;
684
614
  /**
@@ -710,8 +640,7 @@ export interface ComputeClusterState {
710
640
  */
711
641
  proactiveHaEnabled?: pulumi.Input<boolean>;
712
642
  /**
713
- * The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
714
- * this cannot be set to MaintenanceMode when proactiveHaSevereRemediation is set to QuarantineMode.
643
+ * The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactiveHaSevereRemediation is set to QuarantineMode.
715
644
  */
716
645
  proactiveHaModerateRemediation?: pulumi.Input<string>;
717
646
  /**
@@ -719,8 +648,7 @@ export interface ComputeClusterState {
719
648
  */
720
649
  proactiveHaProviderIds?: pulumi.Input<pulumi.Input<string>[]>;
721
650
  /**
722
- * The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
723
- * cannot be set to QuarantineMode when proactiveHaModerateRemediation is set to MaintenanceMode.
651
+ * The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactiveHaModerateRemediation is set to MaintenanceMode.
724
652
  */
725
653
  proactiveHaSevereRemediation?: pulumi.Input<string>;
726
654
  /**
@@ -816,14 +744,11 @@ export interface ComputeClusterArgs {
816
744
  */
817
745
  dpmAutomationLevel?: pulumi.Input<string>;
818
746
  /**
819
- * Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
820
- * machines in the cluster. Requires that DRS be enabled.
747
+ * Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual machines in the cluster. Requires that DRS be enabled.
821
748
  */
822
749
  dpmEnabled?: pulumi.Input<boolean>;
823
750
  /**
824
- * A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
825
- * affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
826
- * setting.
751
+ * A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher setting.
827
752
  */
828
753
  dpmThreshold?: pulumi.Input<number>;
829
754
  /**
@@ -833,8 +758,7 @@ export interface ComputeClusterArgs {
833
758
  [key: string]: pulumi.Input<string>;
834
759
  }>;
835
760
  /**
836
- * The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
837
- * fullyAutomated.
761
+ * The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or fullyAutomated.
838
762
  */
839
763
  drsAutomationLevel?: pulumi.Input<string>;
840
764
  /**
@@ -850,8 +774,7 @@ export interface ComputeClusterArgs {
850
774
  */
851
775
  drsEnabled?: pulumi.Input<boolean>;
852
776
  /**
853
- * A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
854
- * more imbalance while a higher setting will tolerate less.
777
+ * A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate more imbalance while a higher setting will tolerate less.
855
778
  */
856
779
  drsMigrationThreshold?: pulumi.Input<number>;
857
780
  /**
@@ -868,47 +791,35 @@ export interface ComputeClusterArgs {
868
791
  */
869
792
  folder?: pulumi.Input<string>;
870
793
  /**
871
- * Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
872
- * for testing and is not recommended in normal use.
794
+ * Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists for testing and is not recommended in normal use.
873
795
  */
874
796
  forceEvacuateOnDestroy?: pulumi.Input<boolean>;
875
797
  /**
876
- * When haAdmissionControlPolicy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
877
- * failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
878
- * will ignore the host when making recommendations.
798
+ * When haAdmissionControlPolicy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS will ignore the host when making recommendations.
879
799
  */
880
800
  haAdmissionControlFailoverHostSystemIds?: pulumi.Input<pulumi.Input<string>[]>;
881
801
  /**
882
- * The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
883
- * machine operations. The maximum is one less than the number of hosts in the cluster.
802
+ * The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual machine operations. The maximum is one less than the number of hosts in the cluster.
884
803
  */
885
804
  haAdmissionControlHostFailureTolerance?: pulumi.Input<number>;
886
805
  /**
887
- * The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
888
- * warnings only, whereas a value of 100 disables the setting.
806
+ * The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces warnings only, whereas a value of 100 disables the setting.
889
807
  */
890
808
  haAdmissionControlPerformanceTolerance?: pulumi.Input<number>;
891
809
  /**
892
- * The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
893
- * permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
894
- * slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
895
- * issues.
810
+ * The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage, slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service issues.
896
811
  */
897
812
  haAdmissionControlPolicy?: pulumi.Input<string>;
898
813
  /**
899
- * When haAdmissionControlPolicy is resourcePercentage, automatically determine available resource percentages by
900
- * subtracting the average number of host resources represented by the haAdmissionControlHostFailureTolerance setting from
901
- * the total amount of resources in the cluster. Disable to supply user-defined values.
814
+ * When haAdmissionControlPolicy is resourcePercentage, automatically determine available resource percentages by subtracting the average number of host resources represented by the haAdmissionControlHostFailureTolerance setting from the total amount of resources in the cluster. Disable to supply user-defined values.
902
815
  */
903
816
  haAdmissionControlResourcePercentageAutoCompute?: pulumi.Input<boolean>;
904
817
  /**
905
- * When haAdmissionControlPolicy is resourcePercentage, this controls the user-defined percentage of CPU resources in the
906
- * cluster to reserve for failover.
818
+ * When haAdmissionControlPolicy is resourcePercentage, this controls the user-defined percentage of CPU resources in the cluster to reserve for failover.
907
819
  */
908
820
  haAdmissionControlResourcePercentageCpu?: pulumi.Input<number>;
909
821
  /**
910
- * When haAdmissionControlPolicy is resourcePercentage, this controls the user-defined percentage of memory resources in
911
- * the cluster to reserve for failover.
822
+ * When haAdmissionControlPolicy is resourcePercentage, this controls the user-defined percentage of memory resources in the cluster to reserve for failover.
912
823
  */
913
824
  haAdmissionControlResourcePercentageMemory?: pulumi.Input<number>;
914
825
  /**
@@ -920,9 +831,7 @@ export interface ComputeClusterArgs {
920
831
  */
921
832
  haAdmissionControlSlotPolicyExplicitMemory?: pulumi.Input<number>;
922
833
  /**
923
- * When haAdmissionControlPolicy is slotPolicy, this setting controls whether or not you wish to supply explicit values to
924
- * CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
925
- * currently in the cluster.
834
+ * When haAdmissionControlPolicy is slotPolicy, this setting controls whether or not you wish to supply explicit values to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines currently in the cluster.
926
835
  */
927
836
  haAdmissionControlSlotPolicyUseExplicitSize?: pulumi.Input<boolean>;
928
837
  /**
@@ -932,23 +841,19 @@ export interface ComputeClusterArgs {
932
841
  [key: string]: pulumi.Input<string>;
933
842
  }>;
934
843
  /**
935
- * When haVmComponentProtection is enabled, controls the action to take on virtual machines if an APD status on an affected
936
- * datastore clears in the middle of an APD event. Can be one of none or reset.
844
+ * When haVmComponentProtection is enabled, controls the action to take on virtual machines if an APD status on an affected datastore clears in the middle of an APD event. Can be one of none or reset.
937
845
  */
938
846
  haDatastoreApdRecoveryAction?: pulumi.Input<string>;
939
847
  /**
940
- * When haVmComponentProtection is enabled, controls the action to take on virtual machines when the cluster has detected
941
- * loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
848
+ * When haVmComponentProtection is enabled, controls the action to take on virtual machines when the cluster has detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or restartAggressive.
942
849
  */
943
850
  haDatastoreApdResponse?: pulumi.Input<string>;
944
851
  /**
945
- * When haVmComponentProtection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the
946
- * response action defined in ha_datastore_apd_response.
852
+ * When haVmComponentProtection is enabled, controls the delay in seconds to wait after an APD timeout event to execute the response action defined in ha_datastore_apd_response.
947
853
  */
948
854
  haDatastoreApdResponseDelay?: pulumi.Input<number>;
949
855
  /**
950
- * When haVmComponentProtection is enabled, controls the action to take on virtual machines when the cluster has detected a
951
- * permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
856
+ * When haVmComponentProtection is enabled, controls the action to take on virtual machines when the cluster has detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
952
857
  */
953
858
  haDatastorePdlResponse?: pulumi.Input<string>;
954
859
  /**
@@ -956,18 +861,15 @@ export interface ComputeClusterArgs {
956
861
  */
957
862
  haEnabled?: pulumi.Input<boolean>;
958
863
  /**
959
- * The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
960
- * haHeartbeatDatastorePolicy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
864
+ * The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when haHeartbeatDatastorePolicy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
961
865
  */
962
866
  haHeartbeatDatastoreIds?: pulumi.Input<pulumi.Input<string>[]>;
963
867
  /**
964
- * The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
965
- * allFeasibleDsWithUserPreference.
868
+ * The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or allFeasibleDsWithUserPreference.
966
869
  */
967
870
  haHeartbeatDatastorePolicy?: pulumi.Input<string>;
968
871
  /**
969
- * The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
970
- * Can be one of none, powerOff, or shutdown.
872
+ * The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster. Can be one of none, powerOff, or shutdown.
971
873
  */
972
874
  haHostIsolationResponse?: pulumi.Input<string>;
973
875
  /**
@@ -975,24 +877,19 @@ export interface ComputeClusterArgs {
975
877
  */
976
878
  haHostMonitoring?: pulumi.Input<string>;
977
879
  /**
978
- * Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
979
- * failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
880
+ * Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
980
881
  */
981
882
  haVmComponentProtection?: pulumi.Input<string>;
982
883
  /**
983
- * The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
984
- * on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
884
+ * The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
985
885
  */
986
886
  haVmDependencyRestartCondition?: pulumi.Input<string>;
987
887
  /**
988
- * If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
989
- * failed. The value is in seconds.
888
+ * If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as failed. The value is in seconds.
990
889
  */
991
890
  haVmFailureInterval?: pulumi.Input<number>;
992
891
  /**
993
- * The length of the reset window in which haVmMaximumResets can operate. When this window expires, no more resets are
994
- * attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
995
- * time is allotted.
892
+ * The length of the reset window in which haVmMaximumResets can operate. When this window expires, no more resets are attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset time is allotted.
996
893
  */
997
894
  haVmMaximumFailureWindow?: pulumi.Input<number>;
998
895
  /**
@@ -1004,8 +901,7 @@ export interface ComputeClusterArgs {
1004
901
  */
1005
902
  haVmMinimumUptime?: pulumi.Input<number>;
1006
903
  /**
1007
- * The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
1008
- * vmMonitoringOnly, or vmAndAppMonitoring.
904
+ * The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled, vmMonitoringOnly, or vmAndAppMonitoring.
1009
905
  */
1010
906
  haVmMonitoring?: pulumi.Input<string>;
1011
907
  /**
@@ -1013,13 +909,11 @@ export interface ComputeClusterArgs {
1013
909
  */
1014
910
  haVmRestartAdditionalDelay?: pulumi.Input<number>;
1015
911
  /**
1016
- * The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
1017
- * high, or highest.
912
+ * The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium, high, or highest.
1018
913
  */
1019
914
  haVmRestartPriority?: pulumi.Input<string>;
1020
915
  /**
1021
- * The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
1022
- * proceeding with the next priority.
916
+ * The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before proceeding with the next priority.
1023
917
  */
1024
918
  haVmRestartTimeout?: pulumi.Input<number>;
1025
919
  /**
@@ -1051,8 +945,7 @@ export interface ComputeClusterArgs {
1051
945
  */
1052
946
  proactiveHaEnabled?: pulumi.Input<boolean>;
1053
947
  /**
1054
- * The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
1055
- * this cannot be set to MaintenanceMode when proactiveHaSevereRemediation is set to QuarantineMode.
948
+ * The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to MaintenanceMode when proactiveHaSevereRemediation is set to QuarantineMode.
1056
949
  */
1057
950
  proactiveHaModerateRemediation?: pulumi.Input<string>;
1058
951
  /**
@@ -1060,8 +953,7 @@ export interface ComputeClusterArgs {
1060
953
  */
1061
954
  proactiveHaProviderIds?: pulumi.Input<pulumi.Input<string>[]>;
1062
955
  /**
1063
- * The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
1064
- * cannot be set to QuarantineMode when proactiveHaModerateRemediation is set to MaintenanceMode.
956
+ * The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this cannot be set to QuarantineMode when proactiveHaModerateRemediation is set to MaintenanceMode.
1065
957
  */
1066
958
  proactiveHaSevereRemediation?: pulumi.Input<string>;
1067
959
  /**