pulumi-vsphere 4.15.0a1753206257-py3-none-any.whl → 4.16.0a1753398270-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registries and is provided for informational purposes only.
Potentially problematic release: this version of pulumi-vsphere might be problematic.
- pulumi_vsphere/__init__.py +1 -1
- pulumi_vsphere/_inputs.py +1171 -1172
- pulumi_vsphere/compute_cluster.py +1127 -1128
- pulumi_vsphere/compute_cluster_host_group.py +52 -53
- pulumi_vsphere/compute_cluster_vm_affinity_rule.py +86 -87
- pulumi_vsphere/compute_cluster_vm_anti_affinity_rule.py +86 -87
- pulumi_vsphere/compute_cluster_vm_dependency_rule.py +103 -104
- pulumi_vsphere/compute_cluster_vm_group.py +52 -53
- pulumi_vsphere/compute_cluster_vm_host_rule.py +120 -121
- pulumi_vsphere/config/__init__.py +1 -1
- pulumi_vsphere/config/__init__.pyi +1 -2
- pulumi_vsphere/config/vars.py +14 -15
- pulumi_vsphere/content_library.py +58 -59
- pulumi_vsphere/content_library_item.py +103 -104
- pulumi_vsphere/custom_attribute.py +35 -36
- pulumi_vsphere/datacenter.py +78 -79
- pulumi_vsphere/datastore_cluster.py +426 -427
- pulumi_vsphere/datastore_cluster_vm_anti_affinity_rule.py +86 -87
- pulumi_vsphere/distributed_port_group.py +787 -788
- pulumi_vsphere/distributed_virtual_switch.py +1566 -1567
- pulumi_vsphere/distributed_virtual_switch_pvlan_mapping.py +69 -70
- pulumi_vsphere/dpm_host_override.py +69 -70
- pulumi_vsphere/drs_vm_override.py +69 -70
- pulumi_vsphere/entity_permissions.py +38 -39
- pulumi_vsphere/file.py +120 -121
- pulumi_vsphere/folder.py +86 -87
- pulumi_vsphere/get_compute_cluster.py +17 -18
- pulumi_vsphere/get_compute_cluster_host_group.py +17 -18
- pulumi_vsphere/get_content_library.py +9 -10
- pulumi_vsphere/get_content_library_item.py +21 -22
- pulumi_vsphere/get_custom_attribute.py +11 -12
- pulumi_vsphere/get_datacenter.py +11 -12
- pulumi_vsphere/get_datastore.py +21 -22
- pulumi_vsphere/get_datastore_cluster.py +17 -18
- pulumi_vsphere/get_datastore_stats.py +21 -22
- pulumi_vsphere/get_distributed_virtual_switch.py +17 -18
- pulumi_vsphere/get_dynamic.py +21 -22
- pulumi_vsphere/get_folder.py +9 -10
- pulumi_vsphere/get_guest_os_customization.py +18 -19
- pulumi_vsphere/get_host.py +17 -18
- pulumi_vsphere/get_host_base_images.py +5 -6
- pulumi_vsphere/get_host_pci_device.py +29 -30
- pulumi_vsphere/get_host_thumbprint.py +21 -22
- pulumi_vsphere/get_host_vgpu_profile.py +16 -17
- pulumi_vsphere/get_license.py +19 -20
- pulumi_vsphere/get_network.py +42 -43
- pulumi_vsphere/get_ovf_vm_template.py +121 -122
- pulumi_vsphere/get_policy.py +9 -10
- pulumi_vsphere/get_resource_pool.py +21 -22
- pulumi_vsphere/get_role.py +25 -26
- pulumi_vsphere/get_tag.py +17 -18
- pulumi_vsphere/get_tag_category.py +15 -16
- pulumi_vsphere/get_vapp_container.py +15 -16
- pulumi_vsphere/get_virtual_machine.py +272 -273
- pulumi_vsphere/get_vmfs_disks.py +23 -24
- pulumi_vsphere/guest_os_customization.py +73 -74
- pulumi_vsphere/ha_vm_override.py +256 -257
- pulumi_vsphere/host.py +242 -243
- pulumi_vsphere/host_port_group.py +310 -311
- pulumi_vsphere/host_virtual_switch.py +358 -359
- pulumi_vsphere/license.py +71 -72
- pulumi_vsphere/nas_datastore.py +260 -261
- pulumi_vsphere/offline_software_depot.py +20 -21
- pulumi_vsphere/outputs.py +981 -982
- pulumi_vsphere/provider.py +120 -121
- pulumi_vsphere/pulumi-plugin.json +1 -1
- pulumi_vsphere/resource_pool.py +256 -257
- pulumi_vsphere/role.py +44 -45
- pulumi_vsphere/storage_drs_vm_override.py +86 -87
- pulumi_vsphere/supervisor.py +206 -207
- pulumi_vsphere/tag.py +52 -53
- pulumi_vsphere/tag_category.py +69 -70
- pulumi_vsphere/vapp_container.py +256 -257
- pulumi_vsphere/vapp_entity.py +171 -172
- pulumi_vsphere/virtual_disk.py +120 -121
- pulumi_vsphere/virtual_machine.py +1293 -1294
- pulumi_vsphere/virtual_machine_class.py +103 -104
- pulumi_vsphere/virtual_machine_snapshot.py +120 -121
- pulumi_vsphere/vm_storage_policy.py +38 -39
- pulumi_vsphere/vmfs_datastore.py +183 -184
- pulumi_vsphere/vnic.py +143 -144
- {pulumi_vsphere-4.15.0a1753206257.dist-info → pulumi_vsphere-4.16.0a1753398270.dist-info}/METADATA +1 -1
- pulumi_vsphere-4.16.0a1753398270.dist-info/RECORD +87 -0
- pulumi_vsphere-4.15.0a1753206257.dist-info/RECORD +0 -87
- {pulumi_vsphere-4.15.0a1753206257.dist-info → pulumi_vsphere-4.16.0a1753398270.dist-info}/WHEEL +0 -0
- {pulumi_vsphere-4.15.0a1753206257.dist-info → pulumi_vsphere-4.16.0a1753398270.dist-info}/top_level.txt +0 -0
pulumi_vsphere/compute_cluster.py

@@ -2,8 +2,7 @@
 # *** WARNING: this file was generated by pulumi-language-python. ***
 # *** Do not edit by hand unless you're certain you know what you are doing! ***

-import builtins
-import copy
+import builtins as _builtins
 import warnings
 import sys
 import pulumi
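The functional change in this file, and, judging by the symmetric line counts in the file list above, across the rest of the wheel, is mechanical: the standard `builtins` module is now imported under the private alias `_builtins`, the `copy` import is dropped, and every reference in the generated code follows the alias (`_builtins.str`, `_builtins.bool`, `_builtins.int`, `@_builtins.property`, and so on). Below is a minimal sketch of the resulting pattern, using a hypothetical `ExampleArgs` input type rather than anything from this package; presumably the alias keeps generated attribute names from shadowing the import.

```python
import builtins as _builtins  # aliased, as in the generated 4.16.0 code

import pulumi


@pulumi.input_type
class ExampleArgs:  # hypothetical input type, not part of pulumi-vsphere
    def __init__(__self__, *, name: pulumi.Input[_builtins.str]):
        pulumi.set(__self__, "name", name)

    @_builtins.property  # the property builtin reached via the alias, not the bare @property
    @pulumi.getter
    def name(self) -> pulumi.Input[_builtins.str]:
        return pulumi.get(self, "name")
```

Apart from this renaming, the hunks shown below leave the argument names, types, and docstrings unchanged.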
@@ -22,193 +21,193 @@ __all__ = ['ComputeClusterArgs', 'ComputeCluster']
 @pulumi.input_type
 class ComputeClusterArgs:
     def __init__(__self__, *,
-                 datacenter_id: pulumi.Input[
-                 custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[
-                 dpm_automation_level: Optional[pulumi.Input[
-                 dpm_enabled: Optional[pulumi.Input[
-                 dpm_threshold: Optional[pulumi.Input[
-                 drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[
-                 drs_automation_level: Optional[pulumi.Input[
-                 drs_enable_predictive_drs: Optional[pulumi.Input[
-                 drs_enable_vm_overrides: Optional[pulumi.Input[
-                 drs_enabled: Optional[pulumi.Input[
-                 drs_migration_threshold: Optional[pulumi.Input[
-                 drs_scale_descendants_shares: Optional[pulumi.Input[
-                 folder: Optional[pulumi.Input[
-                 force_evacuate_on_destroy: Optional[pulumi.Input[
-                 ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[
-                 ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[
-                 ha_admission_control_performance_tolerance: Optional[pulumi.Input[
-                 ha_admission_control_policy: Optional[pulumi.Input[
-                 ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[
-                 ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[
-                 ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[
-                 ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[
-                 ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[
-                 ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[
-                 ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[
-                 ha_datastore_apd_recovery_action: Optional[pulumi.Input[
-                 ha_datastore_apd_response: Optional[pulumi.Input[
-                 ha_datastore_apd_response_delay: Optional[pulumi.Input[
-                 ha_datastore_pdl_response: Optional[pulumi.Input[
-                 ha_enabled: Optional[pulumi.Input[
-                 ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[
-                 ha_heartbeat_datastore_policy: Optional[pulumi.Input[
-                 ha_host_isolation_response: Optional[pulumi.Input[
-                 ha_host_monitoring: Optional[pulumi.Input[
-                 ha_vm_component_protection: Optional[pulumi.Input[
-                 ha_vm_dependency_restart_condition: Optional[pulumi.Input[
-                 ha_vm_failure_interval: Optional[pulumi.Input[
-                 ha_vm_maximum_failure_window: Optional[pulumi.Input[
-                 ha_vm_maximum_resets: Optional[pulumi.Input[
-                 ha_vm_minimum_uptime: Optional[pulumi.Input[
-                 ha_vm_monitoring: Optional[pulumi.Input[
-                 ha_vm_restart_additional_delay: Optional[pulumi.Input[
-                 ha_vm_restart_priority: Optional[pulumi.Input[
-                 ha_vm_restart_timeout: Optional[pulumi.Input[
-                 host_cluster_exit_timeout: Optional[pulumi.Input[
+                 datacenter_id: pulumi.Input[_builtins.str],
+                 custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+                 dpm_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+                 dpm_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 dpm_threshold: Optional[pulumi.Input[_builtins.int]] = None,
+                 drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+                 drs_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+                 drs_enable_predictive_drs: Optional[pulumi.Input[_builtins.bool]] = None,
+                 drs_enable_vm_overrides: Optional[pulumi.Input[_builtins.bool]] = None,
+                 drs_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 drs_migration_threshold: Optional[pulumi.Input[_builtins.int]] = None,
+                 drs_scale_descendants_shares: Optional[pulumi.Input[_builtins.str]] = None,
+                 folder: Optional[pulumi.Input[_builtins.str]] = None,
+                 force_evacuate_on_destroy: Optional[pulumi.Input[_builtins.bool]] = None,
+                 ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_performance_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_policy: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[_builtins.bool]] = None,
+                 ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[_builtins.bool]] = None,
+                 ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+                 ha_datastore_apd_recovery_action: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_datastore_apd_response: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_datastore_apd_response_delay: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_datastore_pdl_response: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 ha_heartbeat_datastore_policy: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_host_isolation_response: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_host_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_component_protection: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_dependency_restart_condition: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_failure_interval: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_maximum_failure_window: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_maximum_resets: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_minimum_uptime: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_restart_additional_delay: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_restart_priority: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_restart_timeout: Optional[pulumi.Input[_builtins.int]] = None,
+                 host_cluster_exit_timeout: Optional[pulumi.Input[_builtins.int]] = None,
                  host_image: Optional[pulumi.Input['ComputeClusterHostImageArgs']] = None,
-                 host_managed: Optional[pulumi.Input[
-                 host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[
-                 name: Optional[pulumi.Input[
-                 proactive_ha_automation_level: Optional[pulumi.Input[
-                 proactive_ha_enabled: Optional[pulumi.Input[
-                 proactive_ha_moderate_remediation: Optional[pulumi.Input[
-                 proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[
-                 proactive_ha_severe_remediation: Optional[pulumi.Input[
-                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[
-                 vsan_compression_enabled: Optional[pulumi.Input[
-                 vsan_dedup_enabled: Optional[pulumi.Input[
+                 host_managed: Optional[pulumi.Input[_builtins.bool]] = None,
+                 host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 name: Optional[pulumi.Input[_builtins.str]] = None,
+                 proactive_ha_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+                 proactive_ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 proactive_ha_moderate_remediation: Optional[pulumi.Input[_builtins.str]] = None,
+                 proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 proactive_ha_severe_remediation: Optional[pulumi.Input[_builtins.str]] = None,
+                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 vsan_compression_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_dedup_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
                  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]] = None,
-                 vsan_dit_encryption_enabled: Optional[pulumi.Input[
-                 vsan_dit_rekey_interval: Optional[pulumi.Input[
-                 vsan_enabled: Optional[pulumi.Input[
-                 vsan_esa_enabled: Optional[pulumi.Input[
+                 vsan_dit_encryption_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_dit_rekey_interval: Optional[pulumi.Input[_builtins.int]] = None,
+                 vsan_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_esa_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
                  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]] = None,
-                 vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[
-                 vsan_performance_enabled: Optional[pulumi.Input[
-                 vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[
+                 vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_performance_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                  vsan_stretched_cluster: Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']] = None,
-                 vsan_unmap_enabled: Optional[pulumi.Input[
-                 vsan_verbose_mode_enabled: Optional[pulumi.Input[
+                 vsan_unmap_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_verbose_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None):
         """
         The set of arguments for constructing a ComputeCluster resource.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.str] datacenter_id: The managed object ID of
               the datacenter to create the cluster in. Forces a new resource if changed.
-        :param pulumi.Input[Mapping[str, pulumi.Input[
+        :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] custom_attributes: A map of custom attribute ids to attribute
               value strings to set for the datastore cluster.

               > **NOTE:** Custom attributes are unsupported on direct ESXi connections
               and require vCenter Server.
-        :param pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
+        :param pulumi.Input[_builtins.bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
               machines in the cluster. Requires that DRS be enabled.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
               affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
               setting.
-        :param pulumi.Input[Mapping[str, pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
+        :param pulumi.Input[_builtins.str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
               fullyAutomated.
-        :param pulumi.Input[
-        :param pulumi.Input[
-        :param pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
+        :param pulumi.Input[_builtins.bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
+        :param pulumi.Input[_builtins.bool] drs_enabled: Enable DRS for this cluster.
+        :param pulumi.Input[_builtins.int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
               more imbalance while a higher setting will tolerate less.
-        :param pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
+        :param pulumi.Input[_builtins.str] folder: The relative path to a folder to put this cluster in.
               This is a path relative to the datacenter you are deploying the cluster to.
               Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
               The provider will place a cluster named `compute-cluster-test` in a
               host folder located at `/dc1/host/foo/bar`, with the final inventory path
               being `/dc1/host/foo/bar/datastore-cluster-test`.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
               for testing and is not recommended in normal use.
-        :param pulumi.Input[Sequence[pulumi.Input[
+        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
               failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
               will ignore the host when making recommendations.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
               machine operations. The maximum is one less than the number of hosts in the cluster.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
               warnings only, whereas a value of 100 disables the setting.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
               permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
               slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
               issues.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
               subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
               from the total amount of resources in the cluster. Disable to supply user-defined values.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
               the cluster to reserve for failover.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
               the cluster to reserve for failover.
-        :param pulumi.Input[
-        :param pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
+        :param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
+        :param pulumi.Input[_builtins.bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
               to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
               currently in the cluster.
-        :param pulumi.Input[Mapping[str, pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
+        :param pulumi.Input[_builtins.str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
               affected datastore clears in the middle of an APD event. Can be one of none or reset.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
               detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
               restartAggressive.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
               the response action defined in ha_datastore_apd_response.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
               detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
-        :param pulumi.Input[
-        :param pulumi.Input[Sequence[pulumi.Input[
+        :param pulumi.Input[_builtins.bool] ha_enabled: Enable vSphere HA for this cluster.
+        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
               ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
               allFeasibleDsWithUserPreference.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
               Can be one of none, powerOff, or shutdown.
-        :param pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
+        :param pulumi.Input[_builtins.str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
               failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
               on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
               failed. The value is in seconds.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
               attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
               time is allotted.
-        :param pulumi.Input[
-        :param pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
+        :param pulumi.Input[_builtins.int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
+        :param pulumi.Input[_builtins.str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
               vmMonitoringOnly, or vmAndAppMonitoring.
-        :param pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
+        :param pulumi.Input[_builtins.str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
               high, or highest.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
               proceeding with the next priority.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
         :param pulumi.Input['ComputeClusterHostImageArgs'] host_image: Details about the host image which should be applied to the cluster.
-        :param pulumi.Input[
-        :param pulumi.Input[Sequence[pulumi.Input[
-        :param pulumi.Input[
-        :param pulumi.Input[
-        :param pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.bool] host_managed: Must be set if cluster enrollment is managed from host resource.
+        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
+        :param pulumi.Input[_builtins.str] name: The name of the cluster.
+        :param pulumi.Input[_builtins.str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
+        :param pulumi.Input[_builtins.bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
+        :param pulumi.Input[_builtins.str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
               this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
-        :param pulumi.Input[Sequence[pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
+        :param pulumi.Input[_builtins.str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
               cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
-        :param pulumi.Input[Sequence[pulumi.Input[
-        :param pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] tags: The IDs of any tags to attach to this resource.
+        :param pulumi.Input[_builtins.bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
+        :param pulumi.Input[_builtins.bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
         :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
-        :param pulumi.Input[
-        :param pulumi.Input[
-        :param pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
+        :param pulumi.Input[_builtins.int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
+        :param pulumi.Input[_builtins.bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
+        :param pulumi.Input[_builtins.bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
         :param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: The configuration for vSAN fault domains.
-        :param pulumi.Input[
-        :param pulumi.Input[
-        :param pulumi.Input[Sequence[pulumi.Input[
+        :param pulumi.Input[_builtins.bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
+        :param pulumi.Input[_builtins.bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
+        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
         :param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: The configuration for stretched cluster.
-        :param pulumi.Input[
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
+        :param pulumi.Input[_builtins.bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
         """
         pulumi.set(__self__, "datacenter_id", datacenter_id)
         if custom_attributes is not None:
@@ -348,9 +347,9 @@ class ComputeClusterArgs:
         if vsan_verbose_mode_enabled is not None:
             pulumi.set(__self__, "vsan_verbose_mode_enabled", vsan_verbose_mode_enabled)

-    @property
+    @_builtins.property
     @pulumi.getter(name="datacenterId")
-    def datacenter_id(self) -> pulumi.Input[
+    def datacenter_id(self) -> pulumi.Input[_builtins.str]:
         """
         The managed object ID of
         the datacenter to create the cluster in. Forces a new resource if changed.
@@ -358,12 +357,12 @@ class ComputeClusterArgs:
         return pulumi.get(self, "datacenter_id")

     @datacenter_id.setter
-    def datacenter_id(self, value: pulumi.Input[
+    def datacenter_id(self, value: pulumi.Input[_builtins.str]):
         pulumi.set(self, "datacenter_id", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="customAttributes")
-    def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[
+    def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
         """
         A map of custom attribute ids to attribute
         value strings to set for the datastore cluster.
@@ -374,24 +373,24 @@ class ComputeClusterArgs:
         return pulumi.get(self, "custom_attributes")

     @custom_attributes.setter
-    def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[
+    def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
         pulumi.set(self, "custom_attributes", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="dpmAutomationLevel")
-    def dpm_automation_level(self) -> Optional[pulumi.Input[
+    def dpm_automation_level(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The automation level for host power operations in this cluster. Can be one of manual or automated.
         """
         return pulumi.get(self, "dpm_automation_level")

     @dpm_automation_level.setter
-    def dpm_automation_level(self, value: Optional[pulumi.Input[
+    def dpm_automation_level(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "dpm_automation_level", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="dpmEnabled")
-    def dpm_enabled(self) -> Optional[pulumi.Input[
+    def dpm_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
         machines in the cluster. Requires that DRS be enabled.
@@ -399,12 +398,12 @@ class ComputeClusterArgs:
         return pulumi.get(self, "dpm_enabled")

     @dpm_enabled.setter
-    def dpm_enabled(self, value: Optional[pulumi.Input[
+    def dpm_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "dpm_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="dpmThreshold")
-    def dpm_threshold(self) -> Optional[pulumi.Input[
+    def dpm_threshold(self) -> Optional[pulumi.Input[_builtins.int]]:
         """
         A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
         affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
@@ -413,24 +412,24 @@ class ComputeClusterArgs:
|
|
|
413
412
|
return pulumi.get(self, "dpm_threshold")
|
|
414
413
|
|
|
415
414
|
@dpm_threshold.setter
|
|
416
|
-
def dpm_threshold(self, value: Optional[pulumi.Input[
|
|
415
|
+
def dpm_threshold(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
417
416
|
pulumi.set(self, "dpm_threshold", value)
|
|
418
417
|
|
|
419
|
-
@property
|
|
418
|
+
@_builtins.property
|
|
420
419
|
@pulumi.getter(name="drsAdvancedOptions")
|
|
421
|
-
def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[
|
|
420
|
+
def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
|
|
422
421
|
"""
|
|
423
422
|
Advanced configuration options for DRS and DPM.
|
|
424
423
|
"""
|
|
425
424
|
return pulumi.get(self, "drs_advanced_options")
|
|
426
425
|
|
|
427
426
|
@drs_advanced_options.setter
|
|
428
|
-
def drs_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[
|
|
427
|
+
def drs_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
|
|
429
428
|
pulumi.set(self, "drs_advanced_options", value)
|
|
430
429
|
|
|
431
|
-
@property
|
|
430
|
+
@_builtins.property
|
|
432
431
|
@pulumi.getter(name="drsAutomationLevel")
|
|
433
|
-
def drs_automation_level(self) -> Optional[pulumi.Input[
|
|
432
|
+
def drs_automation_level(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
434
433
|
"""
|
|
435
434
|
The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
|
|
436
435
|
fullyAutomated.
|
|
@@ -438,48 +437,48 @@ class ComputeClusterArgs:
|
|
|
438
437
|
return pulumi.get(self, "drs_automation_level")
|
|
439
438
|
|
|
440
439
|
@drs_automation_level.setter
|
|
441
|
-
def drs_automation_level(self, value: Optional[pulumi.Input[
|
|
440
|
+
def drs_automation_level(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
442
441
|
pulumi.set(self, "drs_automation_level", value)
|
|
443
442
|
|
|
444
|
-
@property
|
|
443
|
+
@_builtins.property
|
|
445
444
|
@pulumi.getter(name="drsEnablePredictiveDrs")
|
|
446
|
-
def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[
|
|
445
|
+
def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[_builtins.bool]]:
|
|
447
446
|
"""
|
|
448
447
|
When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
|
|
449
448
|
"""
|
|
450
449
|
return pulumi.get(self, "drs_enable_predictive_drs")
|
|
451
450
|
|
|
452
451
|
@drs_enable_predictive_drs.setter
|
|
453
|
-
def drs_enable_predictive_drs(self, value: Optional[pulumi.Input[
|
|
452
|
+
def drs_enable_predictive_drs(self, value: Optional[pulumi.Input[_builtins.bool]]):
|
|
454
453
|
pulumi.set(self, "drs_enable_predictive_drs", value)
|
|
455
454
|
|
|
456
|
-
@property
|
|
455
|
+
@_builtins.property
|
|
457
456
|
@pulumi.getter(name="drsEnableVmOverrides")
|
|
458
|
-
def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[
|
|
457
|
+
def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[_builtins.bool]]:
|
|
459
458
|
"""
|
|
460
459
|
When true, allows individual VM overrides within this cluster to be set.
|
|
461
460
|
"""
|
|
462
461
|
return pulumi.get(self, "drs_enable_vm_overrides")
|
|
463
462
|
|
|
464
463
|
@drs_enable_vm_overrides.setter
|
|
465
|
-
def drs_enable_vm_overrides(self, value: Optional[pulumi.Input[
|
|
464
|
+
def drs_enable_vm_overrides(self, value: Optional[pulumi.Input[_builtins.bool]]):
|
|
466
465
|
pulumi.set(self, "drs_enable_vm_overrides", value)
|
|
467
466
|
|
|
468
|
-
@property
|
|
467
|
+
@_builtins.property
|
|
469
468
|
@pulumi.getter(name="drsEnabled")
|
|
470
|
-
def drs_enabled(self) -> Optional[pulumi.Input[
|
|
469
|
+
def drs_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
|
|
471
470
|
"""
|
|
472
471
|
Enable DRS for this cluster.
|
|
473
472
|
"""
|
|
474
473
|
return pulumi.get(self, "drs_enabled")
|
|
475
474
|
|
|
476
475
|
@drs_enabled.setter
|
|
477
|
-
def drs_enabled(self, value: Optional[pulumi.Input[
|
|
476
|
+
def drs_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
|
|
478
477
|
pulumi.set(self, "drs_enabled", value)
|
|
479
478
|
|
|
480
|
-
@property
|
|
479
|
+
@_builtins.property
|
|
481
480
|
@pulumi.getter(name="drsMigrationThreshold")
|
|
482
|
-
def drs_migration_threshold(self) -> Optional[pulumi.Input[
|
|
481
|
+
def drs_migration_threshold(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
483
482
|
"""
|
|
484
483
|
A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
|
|
485
484
|
more imbalance while a higher setting will tolerate less.
|
|
@@ -487,24 +486,24 @@ class ComputeClusterArgs:
|
|
|
487
486
|
return pulumi.get(self, "drs_migration_threshold")
|
|
488
487
|
|
|
489
488
|
@drs_migration_threshold.setter
|
|
490
|
-
def drs_migration_threshold(self, value: Optional[pulumi.Input[
|
|
489
|
+
def drs_migration_threshold(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
491
490
|
pulumi.set(self, "drs_migration_threshold", value)
|
|
492
491
|
|
|
493
|
-
@property
|
|
492
|
+
@_builtins.property
|
|
494
493
|
@pulumi.getter(name="drsScaleDescendantsShares")
|
|
495
|
-
def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[
|
|
494
|
+
def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
496
495
|
"""
|
|
497
496
|
Enable scalable shares for all descendants of this cluster.
|
|
498
497
|
"""
|
|
499
498
|
return pulumi.get(self, "drs_scale_descendants_shares")
|
|
500
499
|
|
|
501
500
|
@drs_scale_descendants_shares.setter
|
|
502
|
-
def drs_scale_descendants_shares(self, value: Optional[pulumi.Input[
|
|
501
|
+
def drs_scale_descendants_shares(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
503
502
|
pulumi.set(self, "drs_scale_descendants_shares", value)
|
|
504
503
|
|
|
505
|
-
@property
|
|
504
|
+
@_builtins.property
|
|
506
505
|
@pulumi.getter
|
|
507
|
-
def folder(self) -> Optional[pulumi.Input[
|
|
506
|
+
def folder(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
508
507
|
"""
|
|
509
508
|
The relative path to a folder to put this cluster in.
|
|
510
509
|
This is a path relative to the datacenter you are deploying the cluster to.
|
|
@@ -516,12 +515,12 @@ class ComputeClusterArgs:
|
|
|
516
515
|
return pulumi.get(self, "folder")
|
|
517
516
|
|
|
518
517
|
@folder.setter
|
|
519
|
-
def folder(self, value: Optional[pulumi.Input[
|
|
518
|
+
def folder(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
520
519
|
pulumi.set(self, "folder", value)
|
|
521
520
|
|
|
522
|
-
@property
|
|
521
|
+
@_builtins.property
|
|
523
522
|
@pulumi.getter(name="forceEvacuateOnDestroy")
|
|
524
|
-
def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[
|
|
523
|
+
def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[_builtins.bool]]:
|
|
525
524
|
"""
|
|
526
525
|
Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
|
|
527
526
|
for testing and is not recommended in normal use.
|
|
@@ -529,12 +528,12 @@ class ComputeClusterArgs:
|
|
|
529
528
|
return pulumi.get(self, "force_evacuate_on_destroy")
|
|
530
529
|
|
|
531
530
|
@force_evacuate_on_destroy.setter
|
|
532
|
-
def force_evacuate_on_destroy(self, value: Optional[pulumi.Input[
|
|
531
|
+
def force_evacuate_on_destroy(self, value: Optional[pulumi.Input[_builtins.bool]]):
|
|
533
532
|
pulumi.set(self, "force_evacuate_on_destroy", value)
|
|
534
533
|
|
|
535
|
-
@property
|
|
534
|
+
@_builtins.property
|
|
536
535
|
@pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
|
|
537
|
-
def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[
|
|
536
|
+
def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
|
|
538
537
|
"""
|
|
539
538
|
When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
|
|
540
539
|
failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
|
|
@@ -543,12 +542,12 @@ class ComputeClusterArgs:
|
|
|
543
542
|
return pulumi.get(self, "ha_admission_control_failover_host_system_ids")
|
|
544
543
|
|
|
545
544
|
@ha_admission_control_failover_host_system_ids.setter
|
|
546
|
-
def ha_admission_control_failover_host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[
|
|
545
|
+
def ha_admission_control_failover_host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
|
|
547
546
|
pulumi.set(self, "ha_admission_control_failover_host_system_ids", value)
|
|
548
547
|
|
|
549
|
-
@property
|
|
548
|
+
@_builtins.property
|
|
550
549
|
@pulumi.getter(name="haAdmissionControlHostFailureTolerance")
|
|
551
|
-
def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[
|
|
550
|
+
def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
552
551
|
"""
|
|
553
552
|
The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
|
|
554
553
|
machine operations. The maximum is one less than the number of hosts in the cluster.
|
|
@@ -556,12 +555,12 @@ class ComputeClusterArgs:
|
|
|
556
555
|
return pulumi.get(self, "ha_admission_control_host_failure_tolerance")
|
|
557
556
|
|
|
558
557
|
@ha_admission_control_host_failure_tolerance.setter
|
|
559
|
-
def ha_admission_control_host_failure_tolerance(self, value: Optional[pulumi.Input[
|
|
558
|
+
def ha_admission_control_host_failure_tolerance(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
560
559
|
pulumi.set(self, "ha_admission_control_host_failure_tolerance", value)
|
|
561
560
|
|
|
562
|
-
@property
|
|
561
|
+
@_builtins.property
|
|
563
562
|
@pulumi.getter(name="haAdmissionControlPerformanceTolerance")
|
|
564
|
-
def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[
|
|
563
|
+
def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
565
564
|
"""
|
|
566
565
|
The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
|
|
567
566
|
warnings only, whereas a value of 100 disables the setting.
|
|
@@ -569,12 +568,12 @@ class ComputeClusterArgs:
|
|
|
569
568
|
return pulumi.get(self, "ha_admission_control_performance_tolerance")
|
|
570
569
|
|
|
571
570
|
@ha_admission_control_performance_tolerance.setter
|
|
572
|
-
def ha_admission_control_performance_tolerance(self, value: Optional[pulumi.Input[
|
|
571
|
+
def ha_admission_control_performance_tolerance(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
573
572
|
pulumi.set(self, "ha_admission_control_performance_tolerance", value)
|
|
574
573
|
|
|
575
|
-
@property
|
|
574
|
+
@_builtins.property
|
|
576
575
|
@pulumi.getter(name="haAdmissionControlPolicy")
|
|
577
|
-
def ha_admission_control_policy(self) -> Optional[pulumi.Input[
|
|
576
|
+
def ha_admission_control_policy(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
578
577
|
"""
|
|
579
578
|
The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
|
|
580
579
|
permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
|
|
@@ -584,12 +583,12 @@ class ComputeClusterArgs:
|
|
|
584
583
|
return pulumi.get(self, "ha_admission_control_policy")
|
|
585
584
|
|
|
586
585
|
@ha_admission_control_policy.setter
|
|
587
|
-
def ha_admission_control_policy(self, value: Optional[pulumi.Input[
|
|
586
|
+
def ha_admission_control_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
588
587
|
pulumi.set(self, "ha_admission_control_policy", value)
|
|
589
588
|
|
|
590
|
-
@property
|
|
589
|
+
@_builtins.property
|
|
591
590
|
@pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
|
|
592
|
-
def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[
|
|
591
|
+
def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[_builtins.bool]]:
|
|
593
592
|
"""
|
|
594
593
|
When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
|
|
595
594
|
subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
|
|
@@ -598,12 +597,12 @@ class ComputeClusterArgs:
|
|
|
598
597
|
return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")
|
|
599
598
|
|
|
600
599
|
@ha_admission_control_resource_percentage_auto_compute.setter
|
|
601
|
-
def ha_admission_control_resource_percentage_auto_compute(self, value: Optional[pulumi.Input[
|
|
600
|
+
def ha_admission_control_resource_percentage_auto_compute(self, value: Optional[pulumi.Input[_builtins.bool]]):
|
|
602
601
|
pulumi.set(self, "ha_admission_control_resource_percentage_auto_compute", value)
|
|
603
602
|
|
|
604
|
-
@property
|
|
603
|
+
@_builtins.property
|
|
605
604
|
@pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
|
|
606
|
-
def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[
|
|
605
|
+
def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
607
606
|
"""
|
|
608
607
|
When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
|
|
609
608
|
the cluster to reserve for failover.
|
|
@@ -611,12 +610,12 @@ class ComputeClusterArgs:
|
|
|
611
610
|
return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")
|
|
612
611
|
|
|
613
612
|
@ha_admission_control_resource_percentage_cpu.setter
|
|
614
|
-
def ha_admission_control_resource_percentage_cpu(self, value: Optional[pulumi.Input[
|
|
613
|
+
def ha_admission_control_resource_percentage_cpu(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
615
614
|
pulumi.set(self, "ha_admission_control_resource_percentage_cpu", value)
|
|
616
615
|
|
|
617
|
-
@property
|
|
616
|
+
@_builtins.property
|
|
618
617
|
@pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
|
|
619
|
-
def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[
|
|
618
|
+
def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
620
619
|
"""
|
|
621
620
|
When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
|
|
622
621
|
the cluster to reserve for failover.
|
|
@@ -624,36 +623,36 @@ class ComputeClusterArgs:
|
|
|
624
623
|
return pulumi.get(self, "ha_admission_control_resource_percentage_memory")
|
|
625
624
|
|
|
626
625
|
@ha_admission_control_resource_percentage_memory.setter
|
|
627
|
-
def ha_admission_control_resource_percentage_memory(self, value: Optional[pulumi.Input[
|
|
626
|
+
def ha_admission_control_resource_percentage_memory(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
628
627
|
pulumi.set(self, "ha_admission_control_resource_percentage_memory", value)
|
|
629
628
|
|
|
630
|
-
@property
|
|
629
|
+
@_builtins.property
|
|
631
630
|
@pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
|
|
632
|
-
def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[
|
|
631
|
+
def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
633
632
|
"""
|
|
634
633
|
When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
|
|
635
634
|
"""
|
|
636
635
|
return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")
|
|
637
636
|
|
|
638
637
|
@ha_admission_control_slot_policy_explicit_cpu.setter
|
|
639
|
-
def ha_admission_control_slot_policy_explicit_cpu(self, value: Optional[pulumi.Input[
|
|
638
|
+
def ha_admission_control_slot_policy_explicit_cpu(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
640
639
|
pulumi.set(self, "ha_admission_control_slot_policy_explicit_cpu", value)
|
|
641
640
|
|
|
642
|
-
@property
|
|
641
|
+
@_builtins.property
|
|
643
642
|
@pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
|
|
644
|
-
def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[
|
|
643
|
+
def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[_builtins.int]]:
|
|
645
644
|
"""
|
|
646
645
|
When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
|
|
647
646
|
"""
|
|
648
647
|
return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")
|
|
649
648
|
|
|
650
649
|
@ha_admission_control_slot_policy_explicit_memory.setter
|
|
651
|
-
def ha_admission_control_slot_policy_explicit_memory(self, value: Optional[pulumi.Input[
|
|
650
|
+
def ha_admission_control_slot_policy_explicit_memory(self, value: Optional[pulumi.Input[_builtins.int]]):
|
|
652
651
|
pulumi.set(self, "ha_admission_control_slot_policy_explicit_memory", value)
|
|
653
652
|
|
|
654
|
-
@property
|
|
653
|
+
@_builtins.property
|
|
655
654
|
@pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
|
|
656
|
-
def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[
|
|
655
|
+
def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[_builtins.bool]]:
|
|
657
656
|
"""
|
|
658
657
|
When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
|
|
659
658
|
to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
|
|
@@ -662,24 +661,24 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")

     @ha_admission_control_slot_policy_use_explicit_size.setter
-    def ha_admission_control_slot_policy_use_explicit_size(self, value: Optional[pulumi.Input[
+    def ha_admission_control_slot_policy_use_explicit_size(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "ha_admission_control_slot_policy_use_explicit_size", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haAdvancedOptions")
-    def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[
+    def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
         """
         Advanced configuration options for vSphere HA.
         """
         return pulumi.get(self, "ha_advanced_options")

     @ha_advanced_options.setter
-    def ha_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[
+    def ha_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
         pulumi.set(self, "ha_advanced_options", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haDatastoreApdRecoveryAction")
-    def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[
+    def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
         affected datastore clears in the middle of an APD event. Can be one of none or reset.
@@ -687,12 +686,12 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_datastore_apd_recovery_action")

     @ha_datastore_apd_recovery_action.setter
-    def ha_datastore_apd_recovery_action(self, value: Optional[pulumi.Input[
+    def ha_datastore_apd_recovery_action(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "ha_datastore_apd_recovery_action", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haDatastoreApdResponse")
-    def ha_datastore_apd_response(self) -> Optional[pulumi.Input[
+    def ha_datastore_apd_response(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
         detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
@@ -701,12 +700,12 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_datastore_apd_response")

     @ha_datastore_apd_response.setter
-    def ha_datastore_apd_response(self, value: Optional[pulumi.Input[
+    def ha_datastore_apd_response(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "ha_datastore_apd_response", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haDatastoreApdResponseDelay")
-    def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[
+    def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[_builtins.int]]:
         """
         When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
         the response action defined in ha_datastore_apd_response.
@@ -714,12 +713,12 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_datastore_apd_response_delay")

     @ha_datastore_apd_response_delay.setter
-    def ha_datastore_apd_response_delay(self, value: Optional[pulumi.Input[
+    def ha_datastore_apd_response_delay(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "ha_datastore_apd_response_delay", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haDatastorePdlResponse")
-    def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[
+    def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
         detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
@@ -727,24 +726,24 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_datastore_pdl_response")

     @ha_datastore_pdl_response.setter
-    def ha_datastore_pdl_response(self, value: Optional[pulumi.Input[
+    def ha_datastore_pdl_response(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "ha_datastore_pdl_response", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haEnabled")
-    def ha_enabled(self) -> Optional[pulumi.Input[
+    def ha_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Enable vSphere HA for this cluster.
         """
         return pulumi.get(self, "ha_enabled")

     @ha_enabled.setter
-    def ha_enabled(self, value: Optional[pulumi.Input[
+    def ha_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "ha_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haHeartbeatDatastoreIds")
-    def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[
+    def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
         """
         The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
         ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
@@ -752,12 +751,12 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_heartbeat_datastore_ids")

     @ha_heartbeat_datastore_ids.setter
-    def ha_heartbeat_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[
+    def ha_heartbeat_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
         pulumi.set(self, "ha_heartbeat_datastore_ids", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haHeartbeatDatastorePolicy")
-    def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[
+    def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
         allFeasibleDsWithUserPreference.
@@ -765,12 +764,12 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_heartbeat_datastore_policy")

     @ha_heartbeat_datastore_policy.setter
-    def ha_heartbeat_datastore_policy(self, value: Optional[pulumi.Input[
+    def ha_heartbeat_datastore_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "ha_heartbeat_datastore_policy", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haHostIsolationResponse")
-    def ha_host_isolation_response(self) -> Optional[pulumi.Input[
+    def ha_host_isolation_response(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
         Can be one of none, powerOff, or shutdown.
@@ -778,24 +777,24 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_host_isolation_response")

     @ha_host_isolation_response.setter
-    def ha_host_isolation_response(self, value: Optional[pulumi.Input[
+    def ha_host_isolation_response(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "ha_host_isolation_response", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haHostMonitoring")
-    def ha_host_monitoring(self) -> Optional[pulumi.Input[
+    def ha_host_monitoring(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
         """
         return pulumi.get(self, "ha_host_monitoring")

     @ha_host_monitoring.setter
-    def ha_host_monitoring(self, value: Optional[pulumi.Input[
+    def ha_host_monitoring(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "ha_host_monitoring", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haVmComponentProtection")
-    def ha_vm_component_protection(self) -> Optional[pulumi.Input[
+    def ha_vm_component_protection(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
         failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
@@ -803,12 +802,12 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_vm_component_protection")

     @ha_vm_component_protection.setter
-    def ha_vm_component_protection(self, value: Optional[pulumi.Input[
+    def ha_vm_component_protection(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "ha_vm_component_protection", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haVmDependencyRestartCondition")
-    def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[
+    def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
         on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
@@ -816,12 +815,12 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_vm_dependency_restart_condition")

     @ha_vm_dependency_restart_condition.setter
-    def ha_vm_dependency_restart_condition(self, value: Optional[pulumi.Input[
+    def ha_vm_dependency_restart_condition(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "ha_vm_dependency_restart_condition", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haVmFailureInterval")
-    def ha_vm_failure_interval(self) -> Optional[pulumi.Input[
+    def ha_vm_failure_interval(self) -> Optional[pulumi.Input[_builtins.int]]:
         """
         If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
         failed. The value is in seconds.
@@ -829,12 +828,12 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_vm_failure_interval")

     @ha_vm_failure_interval.setter
-    def ha_vm_failure_interval(self, value: Optional[pulumi.Input[
+    def ha_vm_failure_interval(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "ha_vm_failure_interval", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haVmMaximumFailureWindow")
-    def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[
+    def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[_builtins.int]]:
         """
         The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
         attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
@@ -843,36 +842,36 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_vm_maximum_failure_window")

     @ha_vm_maximum_failure_window.setter
-    def ha_vm_maximum_failure_window(self, value: Optional[pulumi.Input[
+    def ha_vm_maximum_failure_window(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "ha_vm_maximum_failure_window", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haVmMaximumResets")
-    def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[
+    def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[_builtins.int]]:
         """
         The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
         """
         return pulumi.get(self, "ha_vm_maximum_resets")

     @ha_vm_maximum_resets.setter
-    def ha_vm_maximum_resets(self, value: Optional[pulumi.Input[
+    def ha_vm_maximum_resets(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "ha_vm_maximum_resets", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haVmMinimumUptime")
-    def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[
+    def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[_builtins.int]]:
         """
         The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
         """
         return pulumi.get(self, "ha_vm_minimum_uptime")

     @ha_vm_minimum_uptime.setter
-    def ha_vm_minimum_uptime(self, value: Optional[pulumi.Input[
+    def ha_vm_minimum_uptime(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "ha_vm_minimum_uptime", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haVmMonitoring")
-    def ha_vm_monitoring(self) -> Optional[pulumi.Input[
+    def ha_vm_monitoring(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
         vmMonitoringOnly, or vmAndAppMonitoring.
@@ -880,24 +879,24 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_vm_monitoring")

     @ha_vm_monitoring.setter
-    def ha_vm_monitoring(self, value: Optional[pulumi.Input[
+    def ha_vm_monitoring(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "ha_vm_monitoring", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haVmRestartAdditionalDelay")
-    def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[
+    def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[_builtins.int]]:
         """
         Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
         """
         return pulumi.get(self, "ha_vm_restart_additional_delay")

     @ha_vm_restart_additional_delay.setter
-    def ha_vm_restart_additional_delay(self, value: Optional[pulumi.Input[
+    def ha_vm_restart_additional_delay(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "ha_vm_restart_additional_delay", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haVmRestartPriority")
-    def ha_vm_restart_priority(self) -> Optional[pulumi.Input[
+    def ha_vm_restart_priority(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
         high, or highest.
@@ -905,12 +904,12 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_vm_restart_priority")

     @ha_vm_restart_priority.setter
-    def ha_vm_restart_priority(self, value: Optional[pulumi.Input[
+    def ha_vm_restart_priority(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "ha_vm_restart_priority", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="haVmRestartTimeout")
-    def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[
+    def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[_builtins.int]]:
         """
         The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
         proceeding with the next priority.
@@ -918,22 +917,22 @@ class ComputeClusterArgs:
         return pulumi.get(self, "ha_vm_restart_timeout")

     @ha_vm_restart_timeout.setter
-    def ha_vm_restart_timeout(self, value: Optional[pulumi.Input[
+    def ha_vm_restart_timeout(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "ha_vm_restart_timeout", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="hostClusterExitTimeout")
-    def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[
+    def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[_builtins.int]]:
         """
         The timeout for each host maintenance mode operation when removing hosts from a cluster.
         """
         return pulumi.get(self, "host_cluster_exit_timeout")

     @host_cluster_exit_timeout.setter
-    def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[
+    def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "host_cluster_exit_timeout", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="hostImage")
     def host_image(self) -> Optional[pulumi.Input['ComputeClusterHostImageArgs']]:
         """
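The docstrings above describe the HA VM monitoring and restart arguments individually. A hedged illustration of how they might be combined in a Pulumi Python program (the datacenter reference is an assumption and the values are placeholders, not part of this diff):

import pulumi_vsphere as vsphere

# Hypothetical sketch: VM monitoring with a bounded reset budget and a
# defined restart order, mirroring the argument docstrings in this file.
monitored_cluster = vsphere.ComputeCluster("monitored-cluster",
    datacenter_id=datacenter.id,          # assumed to be defined elsewhere
    ha_enabled=True,
    ha_vm_monitoring="vmMonitoringOnly",  # or vmMonitoringDisabled / vmAndAppMonitoring
    ha_vm_failure_interval=30,            # seconds without a heartbeat before a VM is marked failed
    ha_vm_maximum_resets=3,
    ha_vm_maximum_failure_window=3600,    # reset window in seconds; -1 means unlimited
    ha_vm_restart_priority="medium",
    ha_vm_restart_timeout=600)            # seconds to wait per restart priority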
@@ -945,69 +944,69 @@ class ComputeClusterArgs:
     def host_image(self, value: Optional[pulumi.Input['ComputeClusterHostImageArgs']]):
         pulumi.set(self, "host_image", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="hostManaged")
-    def host_managed(self) -> Optional[pulumi.Input[
+    def host_managed(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Must be set if cluster enrollment is managed from host resource.
         """
         return pulumi.get(self, "host_managed")

     @host_managed.setter
-    def host_managed(self, value: Optional[pulumi.Input[
+    def host_managed(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "host_managed", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="hostSystemIds")
-    def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[
+    def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
         """
         The managed object IDs of the hosts to put in the cluster.
         """
         return pulumi.get(self, "host_system_ids")

     @host_system_ids.setter
-    def host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[
+    def host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
         pulumi.set(self, "host_system_ids", value)

-    @property
+    @_builtins.property
     @pulumi.getter
-    def name(self) -> Optional[pulumi.Input[
+    def name(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The name of the cluster.
         """
         return pulumi.get(self, "name")

     @name.setter
-    def name(self, value: Optional[pulumi.Input[
+    def name(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "name", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="proactiveHaAutomationLevel")
-    def proactive_ha_automation_level(self) -> Optional[pulumi.Input[
+    def proactive_ha_automation_level(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
         """
         return pulumi.get(self, "proactive_ha_automation_level")

     @proactive_ha_automation_level.setter
-    def proactive_ha_automation_level(self, value: Optional[pulumi.Input[
+    def proactive_ha_automation_level(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "proactive_ha_automation_level", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="proactiveHaEnabled")
-    def proactive_ha_enabled(self) -> Optional[pulumi.Input[
+    def proactive_ha_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
         """
         return pulumi.get(self, "proactive_ha_enabled")

     @proactive_ha_enabled.setter
-    def proactive_ha_enabled(self, value: Optional[pulumi.Input[
+    def proactive_ha_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "proactive_ha_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="proactiveHaModerateRemediation")
-    def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[
+    def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
         this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
@@ -1015,24 +1014,24 @@ class ComputeClusterArgs:
         return pulumi.get(self, "proactive_ha_moderate_remediation")

     @proactive_ha_moderate_remediation.setter
-    def proactive_ha_moderate_remediation(self, value: Optional[pulumi.Input[
+    def proactive_ha_moderate_remediation(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "proactive_ha_moderate_remediation", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="proactiveHaProviderIds")
-    def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[
+    def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
         """
         The list of IDs for health update providers configured for this cluster.
         """
         return pulumi.get(self, "proactive_ha_provider_ids")

     @proactive_ha_provider_ids.setter
-    def proactive_ha_provider_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[
+    def proactive_ha_provider_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
         pulumi.set(self, "proactive_ha_provider_ids", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="proactiveHaSevereRemediation")
-    def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[
+    def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
         cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
@@ -1040,46 +1039,46 @@ class ComputeClusterArgs:
         return pulumi.get(self, "proactive_ha_severe_remediation")

     @proactive_ha_severe_remediation.setter
-    def proactive_ha_severe_remediation(self, value: Optional[pulumi.Input[
+    def proactive_ha_severe_remediation(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "proactive_ha_severe_remediation", value)

-    @property
+    @_builtins.property
     @pulumi.getter
-    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[
+    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
         """
         The IDs of any tags to attach to this resource.
         """
         return pulumi.get(self, "tags")

     @tags.setter
-    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[
+    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
         pulumi.set(self, "tags", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanCompressionEnabled")
-    def vsan_compression_enabled(self) -> Optional[pulumi.Input[
+    def vsan_compression_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN compression service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_compression_enabled")

     @vsan_compression_enabled.setter
-    def vsan_compression_enabled(self, value: Optional[pulumi.Input[
+    def vsan_compression_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_compression_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanDedupEnabled")
-    def vsan_dedup_enabled(self) -> Optional[pulumi.Input[
+    def vsan_dedup_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN deduplication service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_dedup_enabled")

     @vsan_dedup_enabled.setter
-    def vsan_dedup_enabled(self, value: Optional[pulumi.Input[
+    def vsan_dedup_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_dedup_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanDiskGroups")
     def vsan_disk_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]:
         """
@@ -1091,55 +1090,55 @@ class ComputeClusterArgs:
     def vsan_disk_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]):
         pulumi.set(self, "vsan_disk_groups", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanDitEncryptionEnabled")
-    def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[
+    def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN data-in-transit encryption is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_dit_encryption_enabled")

     @vsan_dit_encryption_enabled.setter
-    def vsan_dit_encryption_enabled(self, value: Optional[pulumi.Input[
+    def vsan_dit_encryption_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_dit_encryption_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanDitRekeyInterval")
-    def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[
+    def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[_builtins.int]]:
         """
         When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
         """
         return pulumi.get(self, "vsan_dit_rekey_interval")

     @vsan_dit_rekey_interval.setter
-    def vsan_dit_rekey_interval(self, value: Optional[pulumi.Input[
+    def vsan_dit_rekey_interval(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "vsan_dit_rekey_interval", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanEnabled")
-    def vsan_enabled(self) -> Optional[pulumi.Input[
+    def vsan_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_enabled")

     @vsan_enabled.setter
-    def vsan_enabled(self, value: Optional[pulumi.Input[
+    def vsan_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanEsaEnabled")
-    def vsan_esa_enabled(self) -> Optional[pulumi.Input[
+    def vsan_esa_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN ESA service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_esa_enabled")

     @vsan_esa_enabled.setter
-    def vsan_esa_enabled(self, value: Optional[pulumi.Input[
+    def vsan_esa_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_esa_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanFaultDomains")
     def vsan_fault_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]:
         """
@@ -1151,43 +1150,43 @@ class ComputeClusterArgs:
     def vsan_fault_domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]):
         pulumi.set(self, "vsan_fault_domains", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
-    def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[
+    def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN network diagnostic mode is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")

     @vsan_network_diagnostic_mode_enabled.setter
-    def vsan_network_diagnostic_mode_enabled(self, value: Optional[pulumi.Input[
+    def vsan_network_diagnostic_mode_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_network_diagnostic_mode_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanPerformanceEnabled")
-    def vsan_performance_enabled(self) -> Optional[pulumi.Input[
+    def vsan_performance_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN performance service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_performance_enabled")

     @vsan_performance_enabled.setter
-    def vsan_performance_enabled(self, value: Optional[pulumi.Input[
+    def vsan_performance_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_performance_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanRemoteDatastoreIds")
-    def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[
+    def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
         """
         The managed object IDs of the vSAN datastore to be mounted on the cluster.
         """
         return pulumi.get(self, "vsan_remote_datastore_ids")

     @vsan_remote_datastore_ids.setter
-    def vsan_remote_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[
+    def vsan_remote_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
         pulumi.set(self, "vsan_remote_datastore_ids", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanStretchedCluster")
     def vsan_stretched_cluster(self) -> Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]:
         """
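The vSAN arguments above are mostly independent boolean switches plus a rekey interval. A hedged sketch of a vSAN-enabled cluster using them (the datacenter and host references are assumptions, not part of this diff):

import pulumi_vsphere as vsphere

# Hypothetical sketch: enabling the vSAN services documented above.
vsan_cluster = vsphere.ComputeCluster("vsan-cluster",
    datacenter_id=datacenter.id,       # assumed to be defined elsewhere
    host_system_ids=host_ids,          # assumed list of host managed object IDs
    vsan_enabled=True,
    vsan_compression_enabled=True,
    vsan_dedup_enabled=True,           # deduplication is typically paired with compression
    vsan_dit_encryption_enabled=True,
    vsan_dit_rekey_interval=1440,      # minutes
    vsan_performance_enabled=True,
    vsan_unmap_enabled=True)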
@@ -1199,227 +1198,227 @@ class ComputeClusterArgs:
     def vsan_stretched_cluster(self, value: Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]):
         pulumi.set(self, "vsan_stretched_cluster", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanUnmapEnabled")
-    def vsan_unmap_enabled(self) -> Optional[pulumi.Input[
+    def vsan_unmap_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN unmap service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_unmap_enabled")

     @vsan_unmap_enabled.setter
-    def vsan_unmap_enabled(self, value: Optional[pulumi.Input[
+    def vsan_unmap_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_unmap_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanVerboseModeEnabled")
-    def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[
+    def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN verbose mode is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_verbose_mode_enabled")

     @vsan_verbose_mode_enabled.setter
-    def vsan_verbose_mode_enabled(self, value: Optional[pulumi.Input[
+    def vsan_verbose_mode_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_verbose_mode_enabled", value)


 @pulumi.input_type
 class _ComputeClusterState:
     def __init__(__self__, *,
-                 custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[
-                 datacenter_id: Optional[pulumi.Input[
-                 dpm_automation_level: Optional[pulumi.Input[
-                 dpm_enabled: Optional[pulumi.Input[
-                 dpm_threshold: Optional[pulumi.Input[
-                 drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[
-                 drs_automation_level: Optional[pulumi.Input[
-                 drs_enable_predictive_drs: Optional[pulumi.Input[
-                 drs_enable_vm_overrides: Optional[pulumi.Input[
-                 drs_enabled: Optional[pulumi.Input[
-                 drs_migration_threshold: Optional[pulumi.Input[
-                 drs_scale_descendants_shares: Optional[pulumi.Input[
-                 folder: Optional[pulumi.Input[
-                 force_evacuate_on_destroy: Optional[pulumi.Input[
-                 ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[
-                 ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[
-                 ha_admission_control_performance_tolerance: Optional[pulumi.Input[
-                 ha_admission_control_policy: Optional[pulumi.Input[
-                 ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[
-                 ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[
-                 ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[
-                 ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[
-                 ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[
-                 ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[
-                 ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[
-                 ha_datastore_apd_recovery_action: Optional[pulumi.Input[
-                 ha_datastore_apd_response: Optional[pulumi.Input[
-                 ha_datastore_apd_response_delay: Optional[pulumi.Input[
-                 ha_datastore_pdl_response: Optional[pulumi.Input[
-                 ha_enabled: Optional[pulumi.Input[
-                 ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[
-                 ha_heartbeat_datastore_policy: Optional[pulumi.Input[
-                 ha_host_isolation_response: Optional[pulumi.Input[
-                 ha_host_monitoring: Optional[pulumi.Input[
-                 ha_vm_component_protection: Optional[pulumi.Input[
-                 ha_vm_dependency_restart_condition: Optional[pulumi.Input[
-                 ha_vm_failure_interval: Optional[pulumi.Input[
-                 ha_vm_maximum_failure_window: Optional[pulumi.Input[
-                 ha_vm_maximum_resets: Optional[pulumi.Input[
-                 ha_vm_minimum_uptime: Optional[pulumi.Input[
-                 ha_vm_monitoring: Optional[pulumi.Input[
-                 ha_vm_restart_additional_delay: Optional[pulumi.Input[
-                 ha_vm_restart_priority: Optional[pulumi.Input[
-                 ha_vm_restart_timeout: Optional[pulumi.Input[
-                 host_cluster_exit_timeout: Optional[pulumi.Input[
+                 custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+                 datacenter_id: Optional[pulumi.Input[_builtins.str]] = None,
+                 dpm_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+                 dpm_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 dpm_threshold: Optional[pulumi.Input[_builtins.int]] = None,
+                 drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+                 drs_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+                 drs_enable_predictive_drs: Optional[pulumi.Input[_builtins.bool]] = None,
+                 drs_enable_vm_overrides: Optional[pulumi.Input[_builtins.bool]] = None,
+                 drs_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 drs_migration_threshold: Optional[pulumi.Input[_builtins.int]] = None,
+                 drs_scale_descendants_shares: Optional[pulumi.Input[_builtins.str]] = None,
+                 folder: Optional[pulumi.Input[_builtins.str]] = None,
+                 force_evacuate_on_destroy: Optional[pulumi.Input[_builtins.bool]] = None,
+                 ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_performance_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_policy: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[_builtins.bool]] = None,
+                 ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[_builtins.bool]] = None,
+                 ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+                 ha_datastore_apd_recovery_action: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_datastore_apd_response: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_datastore_apd_response_delay: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_datastore_pdl_response: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 ha_heartbeat_datastore_policy: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_host_isolation_response: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_host_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_component_protection: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_dependency_restart_condition: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_failure_interval: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_maximum_failure_window: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_maximum_resets: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_minimum_uptime: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_restart_additional_delay: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_restart_priority: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_restart_timeout: Optional[pulumi.Input[_builtins.int]] = None,
+                 host_cluster_exit_timeout: Optional[pulumi.Input[_builtins.int]] = None,
                  host_image: Optional[pulumi.Input['ComputeClusterHostImageArgs']] = None,
-                 host_managed: Optional[pulumi.Input[
-                 host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[
-                 name: Optional[pulumi.Input[
-                 proactive_ha_automation_level: Optional[pulumi.Input[
-                 proactive_ha_enabled: Optional[pulumi.Input[
-                 proactive_ha_moderate_remediation: Optional[pulumi.Input[
-                 proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[
-                 proactive_ha_severe_remediation: Optional[pulumi.Input[
-                 resource_pool_id: Optional[pulumi.Input[
-                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[
-                 vsan_compression_enabled: Optional[pulumi.Input[
-                 vsan_dedup_enabled: Optional[pulumi.Input[
+                 host_managed: Optional[pulumi.Input[_builtins.bool]] = None,
+                 host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 name: Optional[pulumi.Input[_builtins.str]] = None,
+                 proactive_ha_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+                 proactive_ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 proactive_ha_moderate_remediation: Optional[pulumi.Input[_builtins.str]] = None,
+                 proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 proactive_ha_severe_remediation: Optional[pulumi.Input[_builtins.str]] = None,
+                 resource_pool_id: Optional[pulumi.Input[_builtins.str]] = None,
+                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 vsan_compression_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_dedup_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
                  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]] = None,
-                 vsan_dit_encryption_enabled: Optional[pulumi.Input[
-                 vsan_dit_rekey_interval: Optional[pulumi.Input[
-                 vsan_enabled: Optional[pulumi.Input[
-                 vsan_esa_enabled: Optional[pulumi.Input[
+                 vsan_dit_encryption_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_dit_rekey_interval: Optional[pulumi.Input[_builtins.int]] = None,
+                 vsan_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_esa_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
                  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]] = None,
-                 vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[
-                 vsan_performance_enabled: Optional[pulumi.Input[
-                 vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[
+                 vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_performance_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                  vsan_stretched_cluster: Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']] = None,
-                 vsan_unmap_enabled: Optional[pulumi.Input[
-                 vsan_verbose_mode_enabled: Optional[pulumi.Input[
+                 vsan_unmap_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_verbose_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None):
"""
|
|
1301
1300
|
Input properties used for looking up and filtering ComputeCluster resources.
|
|
1302
|
-
:param pulumi.Input[Mapping[str, pulumi.Input[
|
|
1301
|
+
:param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] custom_attributes: A map of custom attribute ids to attribute
|
|
1303
1302
|
value strings to set for the datastore cluster.
|
|
1304
1303
|
|
|
1305
1304
|
> **NOTE:** Custom attributes are unsupported on direct ESXi connections
|
|
1306
1305
|
and require vCenter Server.
|
|
1307
|
-
:param pulumi.Input[
|
|
1306
|
+
:param pulumi.Input[_builtins.str] datacenter_id: The managed object ID of
|
|
1308
1307
|
the datacenter to create the cluster in. Forces a new resource if changed.
|
|
1309
|
-
:param pulumi.Input[
|
|
1310
|
-
:param pulumi.Input[
|
|
1308
|
+
:param pulumi.Input[_builtins.str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
|
|
1309
|
+
:param pulumi.Input[_builtins.bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
|
|
1311
1310
|
machines in the cluster. Requires that DRS be enabled.
|
|
1312
|
-
:param pulumi.Input[
|
|
1311
|
+
:param pulumi.Input[_builtins.int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
|
|
1313
1312
|
affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
|
|
1314
1313
|
setting.
|
|
1315
|
-
:param pulumi.Input[Mapping[str, pulumi.Input[
|
|
1316
|
-
:param pulumi.Input[
|
|
1314
|
+
:param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
|
|
1315
|
+
:param pulumi.Input[_builtins.str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
|
|
1317
1316
|
fullyAutomated.
|
|
1318
|
-
:param pulumi.Input[
|
|
1319
|
-
:param pulumi.Input[
|
|
1320
|
-
:param pulumi.Input[
|
|
1321
|
-
:param pulumi.Input[
|
|
1317
|
+
:param pulumi.Input[_builtins.bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
|
|
1318
|
+
:param pulumi.Input[_builtins.bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
|
|
1319
|
+
:param pulumi.Input[_builtins.bool] drs_enabled: Enable DRS for this cluster.
|
|
1320
|
+
:param pulumi.Input[_builtins.int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
|
|
1322
1321
|
more imbalance while a higher setting will tolerate less.
|
|
1323
|
-
:param pulumi.Input[
|
|
1324
|
-
:param pulumi.Input[
|
|
1322
|
+
:param pulumi.Input[_builtins.str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
|
|
1323
|
+
:param pulumi.Input[_builtins.str] folder: The relative path to a folder to put this cluster in.
|
|
1325
1324
|
This is a path relative to the datacenter you are deploying the cluster to.
|
|
1326
1325
|
Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
|
|
1327
1326
|
The provider will place a cluster named `compute-cluster-test` in a
|
|
1328
1327
|
host folder located at `/dc1/host/foo/bar`, with the final inventory path
|
|
1329
1328
|
being `/dc1/host/foo/bar/datastore-cluster-test`.
|
|
1330
|
-
:param pulumi.Input[
|
|
1329
|
+
:param pulumi.Input[_builtins.bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
|
|
1331
1330
|
for testing and is not recommended in normal use.
|
|
1332
|
-
:param pulumi.Input[Sequence[pulumi.Input[
|
|
1331
|
+
:param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
|
|
1333
1332
|
failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
|
|
1334
1333
|
will ignore the host when making recommendations.
|
|
1335
|
-
:param pulumi.Input[
|
|
1334
|
+
:param pulumi.Input[_builtins.int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
|
|
1336
1335
|
machine operations. The maximum is one less than the number of hosts in the cluster.
|
|
1337
|
-
:param pulumi.Input[
|
|
1336
|
+
:param pulumi.Input[_builtins.int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
|
|
1338
1337
|
warnings only, whereas a value of 100 disables the setting.
|
|
1339
|
-
:param pulumi.Input[
|
|
1338
|
+
:param pulumi.Input[_builtins.str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
|
|
1340
1339
|
permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
|
|
1341
1340
|
slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
|
|
1342
1341
|
issues.
|
|
1343
|
-
:param pulumi.Input[
|
|
1342
|
+
:param pulumi.Input[_builtins.bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
|
|
1344
1343
|
subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
|
|
1345
1344
|
from the total amount of resources in the cluster. Disable to supply user-defined values.
|
|
1346
|
-
:param pulumi.Input[
|
|
1345
|
+
:param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
|
|
1347
1346
|
the cluster to reserve for failover.
|
|
1348
|
-
:param pulumi.Input[
|
|
1347
|
+
:param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
|
|
1349
1348
|
the cluster to reserve for failover.
|
|
1350
|
-
:param pulumi.Input[
|
|
1351
|
-
:param pulumi.Input[
|
|
1352
|
-
:param pulumi.Input[
|
|
1349
|
+
:param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
|
|
1350
|
+
:param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
|
|
1351
|
+
:param pulumi.Input[_builtins.bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
|
|
1353
1352
|
to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
|
|
1354
1353
|
currently in the cluster.
|
|
1355
|
-
:param pulumi.Input[Mapping[str, pulumi.Input[
|
|
1356
|
-
:param pulumi.Input[
|
|
1354
|
+
:param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
|
|
1355
|
+
:param pulumi.Input[_builtins.str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
|
|
1357
1356
|
affected datastore clears in the middle of an APD event. Can be one of none or reset.
|
|
1358
|
-
:param pulumi.Input[
|
|
1357
|
+
:param pulumi.Input[_builtins.str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
|
|
1359
1358
|
detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
|
|
1360
1359
|
restartAggressive.
|
|
1361
|
-
:param pulumi.Input[
|
|
1360
|
+
:param pulumi.Input[_builtins.int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
|
|
1362
1361
|
the response action defined in ha_datastore_apd_response.
|
|
1363
|
-
:param pulumi.Input[
|
|
1362
|
+
:param pulumi.Input[_builtins.str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
|
|
1364
1363
|
detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
|
|
1365
|
-
:param pulumi.Input[
|
|
1366
|
-
:param pulumi.Input[Sequence[pulumi.Input[
|
|
1364
|
+
:param pulumi.Input[_builtins.bool] ha_enabled: Enable vSphere HA for this cluster.
|
|
1365
|
+
:param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
|
|
1367
1366
|
ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
|
|
1368
|
-
:param pulumi.Input[
|
|
1367
|
+
:param pulumi.Input[_builtins.str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
|
|
1369
1368
|
allFeasibleDsWithUserPreference.
|
|
1370
|
-
:param pulumi.Input[
|
|
1369
|
+
:param pulumi.Input[_builtins.str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
|
|
1371
1370
|
Can be one of none, powerOff, or shutdown.
|
|
1372
|
-
:param pulumi.Input[
|
|
1373
|
-
:param pulumi.Input[
|
|
1371
|
+
:param pulumi.Input[_builtins.str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
|
|
1372
|
+
:param pulumi.Input[_builtins.str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
|
|
1374
1373
|
failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
|
|
1375
|
-
:param pulumi.Input[
|
|
1374
|
+
:param pulumi.Input[_builtins.str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
|
|
1376
1375
|
on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
|
|
1377
|
-
:param pulumi.Input[
|
|
1376
|
+
:param pulumi.Input[_builtins.int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
|
|
1378
1377
|
failed. The value is in seconds.
|
|
1379
|
-
:param pulumi.Input[
|
|
1378
|
+
:param pulumi.Input[_builtins.int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
|
|
1380
1379
|
attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
|
|
1381
1380
|
time is allotted.
|
|
1382
|
-
:param pulumi.Input[
|
|
1383
|
-
:param pulumi.Input[
|
|
1384
|
-
:param pulumi.Input[
|
|
1381
|
+
:param pulumi.Input[_builtins.int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
|
|
1382
|
+
:param pulumi.Input[_builtins.int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
|
|
1383
|
+
:param pulumi.Input[_builtins.str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
|
|
1385
1384
|
vmMonitoringOnly, or vmAndAppMonitoring.
|
|
1386
|
-
:param pulumi.Input[
|
|
1387
|
-
:param pulumi.Input[
|
|
1385
|
+
:param pulumi.Input[_builtins.int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
|
|
1386
|
+
:param pulumi.Input[_builtins.str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
|
|
1388
1387
|
high, or highest.
|
|
1389
|
-
:param pulumi.Input[
|
|
1388
|
+
:param pulumi.Input[_builtins.int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
|
|
1390
1389
|
proceeding with the next priority.
|
|
1391
|
-
:param pulumi.Input[
|
|
1390
|
+
:param pulumi.Input[_builtins.int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
|
|
1392
1391
|
:param pulumi.Input['ComputeClusterHostImageArgs'] host_image: Details about the host image which should be applied to the cluster.
|
|
1393
|
-
:param pulumi.Input[
|
|
1394
|
-
:param pulumi.Input[Sequence[pulumi.Input[
|
|
1395
|
-
:param pulumi.Input[
|
|
1396
|
-
:param pulumi.Input[
|
|
1397
|
-
:param pulumi.Input[
|
|
1398
|
-
:param pulumi.Input[
|
|
1392
|
+
:param pulumi.Input[_builtins.bool] host_managed: Must be set if cluster enrollment is managed from host resource.
|
|
1393
|
+
:param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
|
|
1394
|
+
:param pulumi.Input[_builtins.str] name: The name of the cluster.
|
|
1395
|
+
:param pulumi.Input[_builtins.str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
|
|
1396
|
+
:param pulumi.Input[_builtins.bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
|
|
1397
|
+
:param pulumi.Input[_builtins.str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
|
|
1399
1398
|
this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
|
|
1400
|
-
:param pulumi.Input[Sequence[pulumi.Input[
|
|
1401
|
-
:param pulumi.Input[
|
|
1399
|
+
:param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
|
|
1400
|
+
:param pulumi.Input[_builtins.str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
|
|
1402
1401
|
cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
|
|
1403
|
-
:param pulumi.Input[
|
|
1402
|
+
:param pulumi.Input[_builtins.str] resource_pool_id: The managed object ID of the primary
|
|
1404
1403
|
resource pool for this cluster. This can be passed directly to the
|
|
1405
1404
|
`resource_pool_id`
|
|
1406
1405
|
attribute of the
|
|
1407
1406
|
`VirtualMachine` resource.
|
|
1408
|
-
:param pulumi.Input[Sequence[pulumi.Input[
|
|
1409
|
-
:param pulumi.Input[
|
|
1410
|
-
:param pulumi.Input[
|
|
1407
|
+
:param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] tags: The IDs of any tags to attach to this resource.
|
|
1408
|
+
:param pulumi.Input[_builtins.bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
|
|
1409
|
+
:param pulumi.Input[_builtins.bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
|
|
1411
1410
|
:param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
|
|
1412
|
-
:param pulumi.Input[
|
|
1413
|
-
:param pulumi.Input[
|
|
1414
|
-
:param pulumi.Input[
|
|
1415
|
-
:param pulumi.Input[
|
|
1411
|
+
:param pulumi.Input[_builtins.bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
|
|
1412
|
+
:param pulumi.Input[_builtins.int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
|
|
1413
|
+
:param pulumi.Input[_builtins.bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
|
|
1414
|
+
:param pulumi.Input[_builtins.bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
|
|
1416
1415
|
:param pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]] vsan_fault_domains: The configuration for vSAN fault domains.
|
|
1417
|
-
:param pulumi.Input[
|
|
1418
|
-
:param pulumi.Input[
|
|
1419
|
-
:param pulumi.Input[Sequence[pulumi.Input[
|
|
1416
|
+
:param pulumi.Input[_builtins.bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
|
|
1417
|
+
:param pulumi.Input[_builtins.bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
|
|
1418
|
+
:param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
|
|
1420
1419
|
:param pulumi.Input['ComputeClusterVsanStretchedClusterArgs'] vsan_stretched_cluster: The configuration for stretched cluster.
|
|
1421
|
-
:param pulumi.Input[
|
|
1422
|
-
:param pulumi.Input[
|
|
1420
|
+
:param pulumi.Input[_builtins.bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
|
|
1421
|
+
:param pulumi.Input[_builtins.bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
|
|
1423
1422
|
"""
|
|
1424
1423
|
if custom_attributes is not None:
|
|
1425
1424
|
pulumi.set(__self__, "custom_attributes", custom_attributes)
|
|
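For orientation, the parameters documented above map directly to keyword arguments on the ComputeCluster resource. A minimal sketch, not taken from the package itself; the datacenter name "dc-01" and all option values are placeholders:

    # Illustrative only: names and values are assumptions, not package defaults.
    import pulumi_vsphere as vsphere

    datacenter = vsphere.get_datacenter(name="dc-01")

    cluster = vsphere.ComputeCluster(
        "example-cluster",
        name="example-cluster",
        datacenter_id=datacenter.id,
        drs_enabled=True,
        drs_automation_level="fullyAutomated",
        ha_enabled=True,
        ha_admission_control_policy="resourcePercentage",
        ha_admission_control_resource_percentage_cpu=25,
        ha_admission_control_resource_percentage_memory=25,
        ha_vm_monitoring="vmMonitoringOnly",
    )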
@@ -1562,9 +1561,9 @@ class _ComputeClusterState:
 if vsan_verbose_mode_enabled is not None:
 pulumi.set(__self__, "vsan_verbose_mode_enabled", vsan_verbose_mode_enabled)

-@property
+@_builtins.property
 @pulumi.getter(name="customAttributes")
-def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[
+def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
 """
 A map of custom attribute ids to attribute
 value strings to set for the datastore cluster.
@@ -1575,12 +1574,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "custom_attributes")

 @custom_attributes.setter
-def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[
+def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
 pulumi.set(self, "custom_attributes", value)

-@property
+@_builtins.property
 @pulumi.getter(name="datacenterId")
-def datacenter_id(self) -> Optional[pulumi.Input[
+def datacenter_id(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 The managed object ID of
 the datacenter to create the cluster in. Forces a new resource if changed.
@@ -1588,24 +1587,24 @@ class _ComputeClusterState:
 return pulumi.get(self, "datacenter_id")

 @datacenter_id.setter
-def datacenter_id(self, value: Optional[pulumi.Input[
+def datacenter_id(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "datacenter_id", value)

-@property
+@_builtins.property
 @pulumi.getter(name="dpmAutomationLevel")
-def dpm_automation_level(self) -> Optional[pulumi.Input[
+def dpm_automation_level(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 The automation level for host power operations in this cluster. Can be one of manual or automated.
 """
 return pulumi.get(self, "dpm_automation_level")

 @dpm_automation_level.setter
-def dpm_automation_level(self, value: Optional[pulumi.Input[
+def dpm_automation_level(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "dpm_automation_level", value)

-@property
+@_builtins.property
 @pulumi.getter(name="dpmEnabled")
-def dpm_enabled(self) -> Optional[pulumi.Input[
+def dpm_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
 """
 Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
 machines in the cluster. Requires that DRS be enabled.
@@ -1613,12 +1612,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "dpm_enabled")

 @dpm_enabled.setter
-def dpm_enabled(self, value: Optional[pulumi.Input[
+def dpm_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
 pulumi.set(self, "dpm_enabled", value)

-@property
+@_builtins.property
 @pulumi.getter(name="dpmThreshold")
-def dpm_threshold(self) -> Optional[pulumi.Input[
+def dpm_threshold(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
 affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
@@ -1627,24 +1626,24 @@ class _ComputeClusterState:
 return pulumi.get(self, "dpm_threshold")

 @dpm_threshold.setter
-def dpm_threshold(self, value: Optional[pulumi.Input[
+def dpm_threshold(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "dpm_threshold", value)

-@property
+@_builtins.property
 @pulumi.getter(name="drsAdvancedOptions")
-def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[
+def drs_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
 """
 Advanced configuration options for DRS and DPM.
 """
 return pulumi.get(self, "drs_advanced_options")

 @drs_advanced_options.setter
-def drs_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[
+def drs_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
 pulumi.set(self, "drs_advanced_options", value)

-@property
+@_builtins.property
 @pulumi.getter(name="drsAutomationLevel")
-def drs_automation_level(self) -> Optional[pulumi.Input[
+def drs_automation_level(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
 fullyAutomated.
@@ -1652,48 +1651,48 @@ class _ComputeClusterState:
 return pulumi.get(self, "drs_automation_level")

 @drs_automation_level.setter
-def drs_automation_level(self, value: Optional[pulumi.Input[
+def drs_automation_level(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "drs_automation_level", value)

-@property
+@_builtins.property
 @pulumi.getter(name="drsEnablePredictiveDrs")
-def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[
+def drs_enable_predictive_drs(self) -> Optional[pulumi.Input[_builtins.bool]]:
 """
 When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
 """
 return pulumi.get(self, "drs_enable_predictive_drs")

 @drs_enable_predictive_drs.setter
-def drs_enable_predictive_drs(self, value: Optional[pulumi.Input[
+def drs_enable_predictive_drs(self, value: Optional[pulumi.Input[_builtins.bool]]):
 pulumi.set(self, "drs_enable_predictive_drs", value)

-@property
+@_builtins.property
 @pulumi.getter(name="drsEnableVmOverrides")
-def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[
+def drs_enable_vm_overrides(self) -> Optional[pulumi.Input[_builtins.bool]]:
 """
 When true, allows individual VM overrides within this cluster to be set.
 """
 return pulumi.get(self, "drs_enable_vm_overrides")

 @drs_enable_vm_overrides.setter
-def drs_enable_vm_overrides(self, value: Optional[pulumi.Input[
+def drs_enable_vm_overrides(self, value: Optional[pulumi.Input[_builtins.bool]]):
 pulumi.set(self, "drs_enable_vm_overrides", value)

-@property
+@_builtins.property
 @pulumi.getter(name="drsEnabled")
-def drs_enabled(self) -> Optional[pulumi.Input[
+def drs_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
 """
 Enable DRS for this cluster.
 """
 return pulumi.get(self, "drs_enabled")

 @drs_enabled.setter
-def drs_enabled(self, value: Optional[pulumi.Input[
+def drs_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
 pulumi.set(self, "drs_enabled", value)

-@property
+@_builtins.property
 @pulumi.getter(name="drsMigrationThreshold")
-def drs_migration_threshold(self) -> Optional[pulumi.Input[
+def drs_migration_threshold(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
 more imbalance while a higher setting will tolerate less.
@@ -1701,24 +1700,24 @@ class _ComputeClusterState:
 return pulumi.get(self, "drs_migration_threshold")

 @drs_migration_threshold.setter
-def drs_migration_threshold(self, value: Optional[pulumi.Input[
+def drs_migration_threshold(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "drs_migration_threshold", value)

-@property
+@_builtins.property
 @pulumi.getter(name="drsScaleDescendantsShares")
-def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[
+def drs_scale_descendants_shares(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 Enable scalable shares for all descendants of this cluster.
 """
 return pulumi.get(self, "drs_scale_descendants_shares")

 @drs_scale_descendants_shares.setter
-def drs_scale_descendants_shares(self, value: Optional[pulumi.Input[
+def drs_scale_descendants_shares(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "drs_scale_descendants_shares", value)

-@property
+@_builtins.property
 @pulumi.getter
-def folder(self) -> Optional[pulumi.Input[
+def folder(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 The relative path to a folder to put this cluster in.
 This is a path relative to the datacenter you are deploying the cluster to.
@@ -1730,12 +1729,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "folder")

 @folder.setter
-def folder(self, value: Optional[pulumi.Input[
+def folder(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "folder", value)

-@property
+@_builtins.property
 @pulumi.getter(name="forceEvacuateOnDestroy")
-def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[
+def force_evacuate_on_destroy(self) -> Optional[pulumi.Input[_builtins.bool]]:
 """
 Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
 for testing and is not recommended in normal use.
@@ -1743,12 +1742,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "force_evacuate_on_destroy")

 @force_evacuate_on_destroy.setter
-def force_evacuate_on_destroy(self, value: Optional[pulumi.Input[
+def force_evacuate_on_destroy(self, value: Optional[pulumi.Input[_builtins.bool]]):
 pulumi.set(self, "force_evacuate_on_destroy", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haAdmissionControlFailoverHostSystemIds")
-def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[
+def ha_admission_control_failover_host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
 """
 When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
 failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
@@ -1757,12 +1756,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_admission_control_failover_host_system_ids")

 @ha_admission_control_failover_host_system_ids.setter
-def ha_admission_control_failover_host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[
+def ha_admission_control_failover_host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
 pulumi.set(self, "ha_admission_control_failover_host_system_ids", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haAdmissionControlHostFailureTolerance")
-def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[
+def ha_admission_control_host_failure_tolerance(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
 machine operations. The maximum is one less than the number of hosts in the cluster.
@@ -1770,12 +1769,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_admission_control_host_failure_tolerance")

 @ha_admission_control_host_failure_tolerance.setter
-def ha_admission_control_host_failure_tolerance(self, value: Optional[pulumi.Input[
+def ha_admission_control_host_failure_tolerance(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_admission_control_host_failure_tolerance", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haAdmissionControlPerformanceTolerance")
-def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[
+def ha_admission_control_performance_tolerance(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
 warnings only, whereas a value of 100 disables the setting.
@@ -1783,12 +1782,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_admission_control_performance_tolerance")

 @ha_admission_control_performance_tolerance.setter
-def ha_admission_control_performance_tolerance(self, value: Optional[pulumi.Input[
+def ha_admission_control_performance_tolerance(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_admission_control_performance_tolerance", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haAdmissionControlPolicy")
-def ha_admission_control_policy(self) -> Optional[pulumi.Input[
+def ha_admission_control_policy(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
 permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
@@ -1798,12 +1797,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_admission_control_policy")

 @ha_admission_control_policy.setter
-def ha_admission_control_policy(self, value: Optional[pulumi.Input[
+def ha_admission_control_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "ha_admission_control_policy", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haAdmissionControlResourcePercentageAutoCompute")
-def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[
+def ha_admission_control_resource_percentage_auto_compute(self) -> Optional[pulumi.Input[_builtins.bool]]:
 """
 When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
 subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
@@ -1812,12 +1811,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_admission_control_resource_percentage_auto_compute")

 @ha_admission_control_resource_percentage_auto_compute.setter
-def ha_admission_control_resource_percentage_auto_compute(self, value: Optional[pulumi.Input[
+def ha_admission_control_resource_percentage_auto_compute(self, value: Optional[pulumi.Input[_builtins.bool]]):
 pulumi.set(self, "ha_admission_control_resource_percentage_auto_compute", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haAdmissionControlResourcePercentageCpu")
-def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[
+def ha_admission_control_resource_percentage_cpu(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
 the cluster to reserve for failover.
@@ -1825,12 +1824,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_admission_control_resource_percentage_cpu")

 @ha_admission_control_resource_percentage_cpu.setter
-def ha_admission_control_resource_percentage_cpu(self, value: Optional[pulumi.Input[
+def ha_admission_control_resource_percentage_cpu(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_admission_control_resource_percentage_cpu", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haAdmissionControlResourcePercentageMemory")
-def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[
+def ha_admission_control_resource_percentage_memory(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
 the cluster to reserve for failover.
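Every hunk in this file applies the same mechanical change: the generated state class now qualifies property, str, int, and bool through a standard-library alias (presumably introduced via import builtins as _builtins at the top of the module) so that resource properties whose names clash with built-ins cannot shadow them. A stripped-down sketch of the new pattern; the _ExampleState class is hypothetical and only mirrors the shape of the generated _ComputeClusterState:

    # Sketch of the generated annotation pattern, under the assumption that the
    # module aliases the standard library as `_builtins`.
    import builtins as _builtins
    from typing import Optional

    import pulumi


    class _ExampleState:
        @_builtins.property
        @pulumi.getter(name="drsEnabled")
        def drs_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
            """Enable DRS for this cluster."""
            return pulumi.get(self, "drs_enabled")

        @drs_enabled.setter
        def drs_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
            pulumi.set(self, "drs_enabled", value)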
@@ -1838,36 +1837,36 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_admission_control_resource_percentage_memory")

 @ha_admission_control_resource_percentage_memory.setter
-def ha_admission_control_resource_percentage_memory(self, value: Optional[pulumi.Input[
+def ha_admission_control_resource_percentage_memory(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_admission_control_resource_percentage_memory", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitCpu")
-def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[
+def ha_admission_control_slot_policy_explicit_cpu(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
 """
 return pulumi.get(self, "ha_admission_control_slot_policy_explicit_cpu")

 @ha_admission_control_slot_policy_explicit_cpu.setter
-def ha_admission_control_slot_policy_explicit_cpu(self, value: Optional[pulumi.Input[
+def ha_admission_control_slot_policy_explicit_cpu(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_admission_control_slot_policy_explicit_cpu", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haAdmissionControlSlotPolicyExplicitMemory")
-def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[
+def ha_admission_control_slot_policy_explicit_memory(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
 """
 return pulumi.get(self, "ha_admission_control_slot_policy_explicit_memory")

 @ha_admission_control_slot_policy_explicit_memory.setter
-def ha_admission_control_slot_policy_explicit_memory(self, value: Optional[pulumi.Input[
+def ha_admission_control_slot_policy_explicit_memory(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_admission_control_slot_policy_explicit_memory", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haAdmissionControlSlotPolicyUseExplicitSize")
-def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[
+def ha_admission_control_slot_policy_use_explicit_size(self) -> Optional[pulumi.Input[_builtins.bool]]:
 """
 When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
 to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
@@ -1876,24 +1875,24 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")

 @ha_admission_control_slot_policy_use_explicit_size.setter
-def ha_admission_control_slot_policy_use_explicit_size(self, value: Optional[pulumi.Input[
+def ha_admission_control_slot_policy_use_explicit_size(self, value: Optional[pulumi.Input[_builtins.bool]]):
 pulumi.set(self, "ha_admission_control_slot_policy_use_explicit_size", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haAdvancedOptions")
-def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[
+def ha_advanced_options(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
 """
 Advanced configuration options for vSphere HA.
 """
 return pulumi.get(self, "ha_advanced_options")

 @ha_advanced_options.setter
-def ha_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[
+def ha_advanced_options(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
 pulumi.set(self, "ha_advanced_options", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haDatastoreApdRecoveryAction")
-def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[
+def ha_datastore_apd_recovery_action(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
 affected datastore clears in the middle of an APD event. Can be one of none or reset.
@@ -1901,12 +1900,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_datastore_apd_recovery_action")

 @ha_datastore_apd_recovery_action.setter
-def ha_datastore_apd_recovery_action(self, value: Optional[pulumi.Input[
+def ha_datastore_apd_recovery_action(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "ha_datastore_apd_recovery_action", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haDatastoreApdResponse")
-def ha_datastore_apd_response(self) -> Optional[pulumi.Input[
+def ha_datastore_apd_response(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
 detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
@@ -1915,12 +1914,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_datastore_apd_response")

 @ha_datastore_apd_response.setter
-def ha_datastore_apd_response(self, value: Optional[pulumi.Input[
+def ha_datastore_apd_response(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "ha_datastore_apd_response", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haDatastoreApdResponseDelay")
-def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[
+def ha_datastore_apd_response_delay(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
 the response action defined in ha_datastore_apd_response.
@@ -1928,12 +1927,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_datastore_apd_response_delay")

 @ha_datastore_apd_response_delay.setter
-def ha_datastore_apd_response_delay(self, value: Optional[pulumi.Input[
+def ha_datastore_apd_response_delay(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_datastore_apd_response_delay", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haDatastorePdlResponse")
-def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[
+def ha_datastore_pdl_response(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
 detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
@@ -1941,24 +1940,24 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_datastore_pdl_response")

 @ha_datastore_pdl_response.setter
-def ha_datastore_pdl_response(self, value: Optional[pulumi.Input[
+def ha_datastore_pdl_response(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "ha_datastore_pdl_response", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haEnabled")
-def ha_enabled(self) -> Optional[pulumi.Input[
+def ha_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
 """
 Enable vSphere HA for this cluster.
 """
 return pulumi.get(self, "ha_enabled")

 @ha_enabled.setter
-def ha_enabled(self, value: Optional[pulumi.Input[
+def ha_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
 pulumi.set(self, "ha_enabled", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haHeartbeatDatastoreIds")
-def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[
+def ha_heartbeat_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
 """
 The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
 ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
@@ -1966,12 +1965,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_heartbeat_datastore_ids")

 @ha_heartbeat_datastore_ids.setter
-def ha_heartbeat_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[
+def ha_heartbeat_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
 pulumi.set(self, "ha_heartbeat_datastore_ids", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haHeartbeatDatastorePolicy")
-def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[
+def ha_heartbeat_datastore_policy(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
 allFeasibleDsWithUserPreference.
@@ -1979,12 +1978,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_heartbeat_datastore_policy")

 @ha_heartbeat_datastore_policy.setter
-def ha_heartbeat_datastore_policy(self, value: Optional[pulumi.Input[
+def ha_heartbeat_datastore_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "ha_heartbeat_datastore_policy", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haHostIsolationResponse")
-def ha_host_isolation_response(self) -> Optional[pulumi.Input[
+def ha_host_isolation_response(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
 Can be one of none, powerOff, or shutdown.
@@ -1992,24 +1991,24 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_host_isolation_response")

 @ha_host_isolation_response.setter
-def ha_host_isolation_response(self, value: Optional[pulumi.Input[
+def ha_host_isolation_response(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "ha_host_isolation_response", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haHostMonitoring")
-def ha_host_monitoring(self) -> Optional[pulumi.Input[
+def ha_host_monitoring(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
 """
 return pulumi.get(self, "ha_host_monitoring")

 @ha_host_monitoring.setter
-def ha_host_monitoring(self, value: Optional[pulumi.Input[
+def ha_host_monitoring(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "ha_host_monitoring", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haVmComponentProtection")
-def ha_vm_component_protection(self) -> Optional[pulumi.Input[
+def ha_vm_component_protection(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
 failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
@@ -2017,12 +2016,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_vm_component_protection")

 @ha_vm_component_protection.setter
-def ha_vm_component_protection(self, value: Optional[pulumi.Input[
+def ha_vm_component_protection(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "ha_vm_component_protection", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haVmDependencyRestartCondition")
-def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[
+def ha_vm_dependency_restart_condition(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
 on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
@@ -2030,12 +2029,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_vm_dependency_restart_condition")

 @ha_vm_dependency_restart_condition.setter
-def ha_vm_dependency_restart_condition(self, value: Optional[pulumi.Input[
+def ha_vm_dependency_restart_condition(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "ha_vm_dependency_restart_condition", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haVmFailureInterval")
-def ha_vm_failure_interval(self) -> Optional[pulumi.Input[
+def ha_vm_failure_interval(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
 failed. The value is in seconds.
@@ -2043,12 +2042,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_vm_failure_interval")

 @ha_vm_failure_interval.setter
-def ha_vm_failure_interval(self, value: Optional[pulumi.Input[
+def ha_vm_failure_interval(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_vm_failure_interval", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haVmMaximumFailureWindow")
-def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[
+def ha_vm_maximum_failure_window(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
 attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
@@ -2057,36 +2056,36 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_vm_maximum_failure_window")

 @ha_vm_maximum_failure_window.setter
-def ha_vm_maximum_failure_window(self, value: Optional[pulumi.Input[
+def ha_vm_maximum_failure_window(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_vm_maximum_failure_window", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haVmMaximumResets")
-def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[
+def ha_vm_maximum_resets(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
 """
 return pulumi.get(self, "ha_vm_maximum_resets")

 @ha_vm_maximum_resets.setter
-def ha_vm_maximum_resets(self, value: Optional[pulumi.Input[
+def ha_vm_maximum_resets(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_vm_maximum_resets", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haVmMinimumUptime")
-def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[
+def ha_vm_minimum_uptime(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
 """
 return pulumi.get(self, "ha_vm_minimum_uptime")

 @ha_vm_minimum_uptime.setter
-def ha_vm_minimum_uptime(self, value: Optional[pulumi.Input[
+def ha_vm_minimum_uptime(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_vm_minimum_uptime", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haVmMonitoring")
-def ha_vm_monitoring(self) -> Optional[pulumi.Input[
+def ha_vm_monitoring(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
 vmMonitoringOnly, or vmAndAppMonitoring.
@@ -2094,24 +2093,24 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_vm_monitoring")

 @ha_vm_monitoring.setter
-def ha_vm_monitoring(self, value: Optional[pulumi.Input[
+def ha_vm_monitoring(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "ha_vm_monitoring", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haVmRestartAdditionalDelay")
-def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[
+def ha_vm_restart_additional_delay(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
 """
 return pulumi.get(self, "ha_vm_restart_additional_delay")

 @ha_vm_restart_additional_delay.setter
-def ha_vm_restart_additional_delay(self, value: Optional[pulumi.Input[
+def ha_vm_restart_additional_delay(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_vm_restart_additional_delay", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haVmRestartPriority")
-def ha_vm_restart_priority(self) -> Optional[pulumi.Input[
+def ha_vm_restart_priority(self) -> Optional[pulumi.Input[_builtins.str]]:
 """
 The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
 high, or highest.
@@ -2119,12 +2118,12 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_vm_restart_priority")

 @ha_vm_restart_priority.setter
-def ha_vm_restart_priority(self, value: Optional[pulumi.Input[
+def ha_vm_restart_priority(self, value: Optional[pulumi.Input[_builtins.str]]):
 pulumi.set(self, "ha_vm_restart_priority", value)

-@property
+@_builtins.property
 @pulumi.getter(name="haVmRestartTimeout")
-def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[
+def ha_vm_restart_timeout(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
 proceeding with the next priority.
@@ -2132,22 +2131,22 @@ class _ComputeClusterState:
 return pulumi.get(self, "ha_vm_restart_timeout")

 @ha_vm_restart_timeout.setter
-def ha_vm_restart_timeout(self, value: Optional[pulumi.Input[
+def ha_vm_restart_timeout(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "ha_vm_restart_timeout", value)

-@property
+@_builtins.property
 @pulumi.getter(name="hostClusterExitTimeout")
-def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[
+def host_cluster_exit_timeout(self) -> Optional[pulumi.Input[_builtins.int]]:
 """
 The timeout for each host maintenance mode operation when removing hosts from a cluster.
 """
 return pulumi.get(self, "host_cluster_exit_timeout")

 @host_cluster_exit_timeout.setter
-def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[
+def host_cluster_exit_timeout(self, value: Optional[pulumi.Input[_builtins.int]]):
 pulumi.set(self, "host_cluster_exit_timeout", value)

-@property
+@_builtins.property
 @pulumi.getter(name="hostImage")
 def host_image(self) -> Optional[pulumi.Input['ComputeClusterHostImageArgs']]:
 """
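None of these signature changes alter how the properties are read back at runtime; the getter names and return values are unchanged. A usage sketch under that assumption, where the resource name and the managed object ID "domain-c123" are placeholders:

    # Sketch only: reads settings from an already-provisioned cluster by ID.
    import pulumi
    import pulumi_vsphere as vsphere

    existing = vsphere.ComputeCluster.get("imported-cluster", id="domain-c123")

    pulumi.export("clusterHaEnabled", existing.ha_enabled)
    pulumi.export("clusterDrsLevel", existing.drs_automation_level)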
@@ -2159,69 +2158,69 @@ class _ComputeClusterState:
|
|
|
2159
2158
|
def host_image(self, value: Optional[pulumi.Input['ComputeClusterHostImageArgs']]):
|
|
2160
2159
|
pulumi.set(self, "host_image", value)
|
|
2161
2160
|
|
|
2162
|
-
@property
|
|
2161
|
+
@_builtins.property
|
|
2163
2162
|
@pulumi.getter(name="hostManaged")
|
|
2164
|
-
def host_managed(self) -> Optional[pulumi.Input[
|
|
2163
|
+
def host_managed(self) -> Optional[pulumi.Input[_builtins.bool]]:
|
|
2165
2164
|
"""
|
|
2166
2165
|
Must be set if cluster enrollment is managed from host resource.
|
|
2167
2166
|
"""
|
|
2168
2167
|
return pulumi.get(self, "host_managed")
|
|
2169
2168
|
|
|
2170
2169
|
@host_managed.setter
|
|
2171
|
-
def host_managed(self, value: Optional[pulumi.Input[
|
|
2170
|
+
def host_managed(self, value: Optional[pulumi.Input[_builtins.bool]]):
|
|
2172
2171
|
pulumi.set(self, "host_managed", value)
|
|
2173
2172
|
|
|
2174
|
-
@property
|
|
2173
|
+
@_builtins.property
|
|
2175
2174
|
@pulumi.getter(name="hostSystemIds")
|
|
2176
|
-
def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[
|
|
2175
|
+
def host_system_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
|
|
2177
2176
|
"""
|
|
2178
2177
|
The managed object IDs of the hosts to put in the cluster.
|
|
2179
2178
|
"""
|
|
2180
2179
|
return pulumi.get(self, "host_system_ids")
|
|
2181
2180
|
|
|
2182
2181
|
@host_system_ids.setter
|
|
2183
|
-
def host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[
|
|
2182
|
+
def host_system_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
|
|
2184
2183
|
pulumi.set(self, "host_system_ids", value)
|
|
2185
2184
|
|
|
2186
|
-
@property
|
|
2185
|
+
@_builtins.property
|
|
2187
2186
|
@pulumi.getter
|
|
2188
|
-
def name(self) -> Optional[pulumi.Input[
|
|
2187
|
+
def name(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
2189
2188
|
"""
|
|
2190
2189
|
The name of the cluster.
|
|
2191
2190
|
"""
|
|
2192
2191
|
return pulumi.get(self, "name")
|
|
2193
2192
|
|
|
2194
2193
|
@name.setter
|
|
2195
|
-
def name(self, value: Optional[pulumi.Input[
|
|
2194
|
+
def name(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
2196
2195
|
pulumi.set(self, "name", value)
|
|
2197
2196
|
|
|
2198
|
-
@property
|
|
2197
|
+
@_builtins.property
|
|
2199
2198
|
@pulumi.getter(name="proactiveHaAutomationLevel")
|
|
2200
|
-
def proactive_ha_automation_level(self) -> Optional[pulumi.Input[
|
|
2199
|
+
def proactive_ha_automation_level(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
2201
2200
|
"""
|
|
2202
2201
|
The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
|
|
2203
2202
|
"""
|
|
2204
2203
|
return pulumi.get(self, "proactive_ha_automation_level")
|
|
2205
2204
|
|
|
2206
2205
|
@proactive_ha_automation_level.setter
|
|
2207
|
-
def proactive_ha_automation_level(self, value: Optional[pulumi.Input[
|
|
2206
|
+
def proactive_ha_automation_level(self, value: Optional[pulumi.Input[_builtins.str]]):
|
|
2208
2207
|
pulumi.set(self, "proactive_ha_automation_level", value)
|
|
2209
2208
|
|
|
2210
|
-
@property
|
|
2209
|
+
@_builtins.property
|
|
2211
2210
|
@pulumi.getter(name="proactiveHaEnabled")
|
|
2212
|
-
def proactive_ha_enabled(self) -> Optional[pulumi.Input[
|
|
2211
|
+
def proactive_ha_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
|
|
2213
2212
|
"""
|
|
2214
2213
|
Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
|
|
2215
2214
|
"""
|
|
2216
2215
|
return pulumi.get(self, "proactive_ha_enabled")
|
|
2217
2216
|
|
|
2218
2217
|
@proactive_ha_enabled.setter
|
|
2219
|
-
def proactive_ha_enabled(self, value: Optional[pulumi.Input[
|
|
2218
|
+
def proactive_ha_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
|
|
2220
2219
|
pulumi.set(self, "proactive_ha_enabled", value)
|
|
2221
2220
|
|
|
2222
|
-
@property
|
|
2221
|
+
@_builtins.property
|
|
2223
2222
|
@pulumi.getter(name="proactiveHaModerateRemediation")
|
|
2224
|
-
def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[
|
|
2223
|
+
def proactive_ha_moderate_remediation(self) -> Optional[pulumi.Input[_builtins.str]]:
|
|
2225
2224
|
"""
|
|
2226
2225
|
The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
|
|
2227
2226
|
this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
|
|
@@ -2229,24 +2228,24 @@ class _ComputeClusterState:
         return pulumi.get(self, "proactive_ha_moderate_remediation")

     @proactive_ha_moderate_remediation.setter
-    def proactive_ha_moderate_remediation(self, value: Optional[pulumi.Input[str]]):
+    def proactive_ha_moderate_remediation(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "proactive_ha_moderate_remediation", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="proactiveHaProviderIds")
-    def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+    def proactive_ha_provider_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
         """
         The list of IDs for health update providers configured for this cluster.
         """
         return pulumi.get(self, "proactive_ha_provider_ids")

     @proactive_ha_provider_ids.setter
-    def proactive_ha_provider_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
+    def proactive_ha_provider_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
         pulumi.set(self, "proactive_ha_provider_ids", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="proactiveHaSevereRemediation")
-    def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[str]]:
+    def proactive_ha_severe_remediation(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
         cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
@@ -2254,12 +2253,12 @@ class _ComputeClusterState:
         return pulumi.get(self, "proactive_ha_severe_remediation")

     @proactive_ha_severe_remediation.setter
-    def proactive_ha_severe_remediation(self, value: Optional[pulumi.Input[str]]):
+    def proactive_ha_severe_remediation(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "proactive_ha_severe_remediation", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="resourcePoolId")
-    def resource_pool_id(self) -> Optional[pulumi.Input[str]]:
+    def resource_pool_id(self) -> Optional[pulumi.Input[_builtins.str]]:
         """
         The managed object ID of the primary
         resource pool for this cluster. This can be passed directly to the
@@ -2270,46 +2269,46 @@ class _ComputeClusterState:
         return pulumi.get(self, "resource_pool_id")

     @resource_pool_id.setter
-    def resource_pool_id(self, value: Optional[pulumi.Input[str]]):
+    def resource_pool_id(self, value: Optional[pulumi.Input[_builtins.str]]):
         pulumi.set(self, "resource_pool_id", value)

-    @property
+    @_builtins.property
     @pulumi.getter
-    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
         """
         The IDs of any tags to attach to this resource.
         """
         return pulumi.get(self, "tags")

     @tags.setter
-    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
+    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
         pulumi.set(self, "tags", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanCompressionEnabled")
-    def vsan_compression_enabled(self) -> Optional[pulumi.Input[bool]]:
+    def vsan_compression_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN compression service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_compression_enabled")

     @vsan_compression_enabled.setter
-    def vsan_compression_enabled(self, value: Optional[pulumi.Input[bool]]):
+    def vsan_compression_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_compression_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanDedupEnabled")
-    def vsan_dedup_enabled(self) -> Optional[pulumi.Input[bool]]:
+    def vsan_dedup_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN deduplication service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_dedup_enabled")

     @vsan_dedup_enabled.setter
-    def vsan_dedup_enabled(self, value: Optional[pulumi.Input[bool]]):
+    def vsan_dedup_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_dedup_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanDiskGroups")
     def vsan_disk_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]:
         """
@@ -2321,55 +2320,55 @@ class _ComputeClusterState:
     def vsan_disk_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanDiskGroupArgs']]]]):
         pulumi.set(self, "vsan_disk_groups", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanDitEncryptionEnabled")
-    def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[bool]]:
+    def vsan_dit_encryption_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN data-in-transit encryption is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_dit_encryption_enabled")

     @vsan_dit_encryption_enabled.setter
-    def vsan_dit_encryption_enabled(self, value: Optional[pulumi.Input[bool]]):
+    def vsan_dit_encryption_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_dit_encryption_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanDitRekeyInterval")
-    def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[int]]:
+    def vsan_dit_rekey_interval(self) -> Optional[pulumi.Input[_builtins.int]]:
         """
         When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
         """
         return pulumi.get(self, "vsan_dit_rekey_interval")

     @vsan_dit_rekey_interval.setter
-    def vsan_dit_rekey_interval(self, value: Optional[pulumi.Input[int]]):
+    def vsan_dit_rekey_interval(self, value: Optional[pulumi.Input[_builtins.int]]):
         pulumi.set(self, "vsan_dit_rekey_interval", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanEnabled")
-    def vsan_enabled(self) -> Optional[pulumi.Input[bool]]:
+    def vsan_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_enabled")

     @vsan_enabled.setter
-    def vsan_enabled(self, value: Optional[pulumi.Input[bool]]):
+    def vsan_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanEsaEnabled")
-    def vsan_esa_enabled(self) -> Optional[pulumi.Input[bool]]:
+    def vsan_esa_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN ESA service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_esa_enabled")

     @vsan_esa_enabled.setter
-    def vsan_esa_enabled(self, value: Optional[pulumi.Input[bool]]):
+    def vsan_esa_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_esa_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanFaultDomains")
     def vsan_fault_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]:
         """
@@ -2381,43 +2380,43 @@ class _ComputeClusterState:
     def vsan_fault_domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ComputeClusterVsanFaultDomainArgs']]]]):
         pulumi.set(self, "vsan_fault_domains", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
-    def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
+    def vsan_network_diagnostic_mode_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN network diagnostic mode is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")

     @vsan_network_diagnostic_mode_enabled.setter
-    def vsan_network_diagnostic_mode_enabled(self, value: Optional[pulumi.Input[bool]]):
+    def vsan_network_diagnostic_mode_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_network_diagnostic_mode_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanPerformanceEnabled")
-    def vsan_performance_enabled(self) -> Optional[pulumi.Input[bool]]:
+    def vsan_performance_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN performance service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_performance_enabled")

     @vsan_performance_enabled.setter
-    def vsan_performance_enabled(self, value: Optional[pulumi.Input[bool]]):
+    def vsan_performance_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_performance_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanRemoteDatastoreIds")
-    def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+    def vsan_remote_datastore_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
         """
         The managed object IDs of the vSAN datastore to be mounted on the cluster.
         """
         return pulumi.get(self, "vsan_remote_datastore_ids")

     @vsan_remote_datastore_ids.setter
-    def vsan_remote_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
+    def vsan_remote_datastore_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
         pulumi.set(self, "vsan_remote_datastore_ids", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanStretchedCluster")
     def vsan_stretched_cluster(self) -> Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]:
         """
@@ -2429,28 +2428,28 @@ class _ComputeClusterState:
     def vsan_stretched_cluster(self, value: Optional[pulumi.Input['ComputeClusterVsanStretchedClusterArgs']]):
         pulumi.set(self, "vsan_stretched_cluster", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanUnmapEnabled")
-    def vsan_unmap_enabled(self) -> Optional[pulumi.Input[bool]]:
+    def vsan_unmap_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN unmap service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_unmap_enabled")

     @vsan_unmap_enabled.setter
-    def vsan_unmap_enabled(self, value: Optional[pulumi.Input[bool]]):
+    def vsan_unmap_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_unmap_enabled", value)

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanVerboseModeEnabled")
-    def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[bool]]:
+    def vsan_verbose_mode_enabled(self) -> Optional[pulumi.Input[_builtins.bool]]:
         """
         Whether the vSAN verbose mode is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_verbose_mode_enabled")

     @vsan_verbose_mode_enabled.setter
-    def vsan_verbose_mode_enabled(self, value: Optional[pulumi.Input[bool]]):
+    def vsan_verbose_mode_enabled(self, value: Optional[pulumi.Input[_builtins.bool]]):
         pulumi.set(self, "vsan_verbose_mode_enabled", value)

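Every hunk above makes the same two substitutions: the bare `@property` decorator becomes `@_builtins.property`, and bare `str`/`bool`/`int` annotations become `_builtins.str`/`_builtins.bool`/`_builtins.int`. A minimal sketch of the pattern, assuming the generated module aliases the standard `builtins` module as `_builtins` near the top of the file (that import is outside the hunks shown here), presumably so annotations keep resolving even when a local name such as `property` or `str` is shadowed elsewhere in the file; the class name `_ExampleState` is hypothetical:

import builtins as _builtins  # assumed alias; the import line is not visible in these hunks

from typing import Optional

import pulumi


class _ExampleState:
    # `_builtins.property` is the same object as the plain `property` builtin,
    # so getter/setter behaviour is unchanged by this release.
    @_builtins.property
    @pulumi.getter(name="name")
    def name(self) -> Optional[pulumi.Input[_builtins.str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "name", value)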
@@ -2460,75 +2459,75 @@ class ComputeCluster(pulumi.CustomResource):
     def __init__(__self__,
                  resource_name: str,
                  opts: Optional[pulumi.ResourceOptions] = None,
+                 custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+                 datacenter_id: Optional[pulumi.Input[_builtins.str]] = None,
+                 dpm_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+                 dpm_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 dpm_threshold: Optional[pulumi.Input[_builtins.int]] = None,
+                 drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+                 drs_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+                 drs_enable_predictive_drs: Optional[pulumi.Input[_builtins.bool]] = None,
+                 drs_enable_vm_overrides: Optional[pulumi.Input[_builtins.bool]] = None,
+                 drs_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 drs_migration_threshold: Optional[pulumi.Input[_builtins.int]] = None,
+                 drs_scale_descendants_shares: Optional[pulumi.Input[_builtins.str]] = None,
+                 folder: Optional[pulumi.Input[_builtins.str]] = None,
+                 force_evacuate_on_destroy: Optional[pulumi.Input[_builtins.bool]] = None,
+                 ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_performance_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_policy: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[_builtins.bool]] = None,
+                 ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[_builtins.bool]] = None,
+                 ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+                 ha_datastore_apd_recovery_action: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_datastore_apd_response: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_datastore_apd_response_delay: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_datastore_pdl_response: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 ha_heartbeat_datastore_policy: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_host_isolation_response: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_host_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_component_protection: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_dependency_restart_condition: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_failure_interval: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_maximum_failure_window: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_maximum_resets: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_minimum_uptime: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_restart_additional_delay: Optional[pulumi.Input[_builtins.int]] = None,
+                 ha_vm_restart_priority: Optional[pulumi.Input[_builtins.str]] = None,
+                 ha_vm_restart_timeout: Optional[pulumi.Input[_builtins.int]] = None,
+                 host_cluster_exit_timeout: Optional[pulumi.Input[_builtins.int]] = None,
                  host_image: Optional[pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']]] = None,
+                 host_managed: Optional[pulumi.Input[_builtins.bool]] = None,
+                 host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 name: Optional[pulumi.Input[_builtins.str]] = None,
+                 proactive_ha_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+                 proactive_ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 proactive_ha_moderate_remediation: Optional[pulumi.Input[_builtins.str]] = None,
+                 proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 proactive_ha_severe_remediation: Optional[pulumi.Input[_builtins.str]] = None,
+                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                 vsan_compression_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_dedup_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
                  vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]]] = None,
+                 vsan_dit_encryption_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_dit_rekey_interval: Optional[pulumi.Input[_builtins.int]] = None,
+                 vsan_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_esa_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
                  vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]]] = None,
+                 vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_performance_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                  vsan_stretched_cluster: Optional[pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']]] = None,
+                 vsan_unmap_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                 vsan_verbose_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
                  __props__=None):
         """
         > **A note on the naming of this resource:** VMware refers to clusters of
@@ -2621,122 +2620,122 @@ class ComputeCluster(pulumi.CustomResource):

         :param str resource_name: The name of the resource.
         :param pulumi.ResourceOptions opts: Options for the resource.
+        :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] custom_attributes: A map of custom attribute ids to attribute
                value strings to set for the datastore cluster.

                > **NOTE:** Custom attributes are unsupported on direct ESXi connections
                and require vCenter Server.
+        :param pulumi.Input[_builtins.str] datacenter_id: The managed object ID of
                the datacenter to create the cluster in. Forces a new resource if changed.
+        :param pulumi.Input[_builtins.str] dpm_automation_level: The automation level for host power operations in this cluster. Can be one of manual or automated.
+        :param pulumi.Input[_builtins.bool] dpm_enabled: Enable DPM support for DRS. This allows you to dynamically control the power of hosts depending on the needs of virtual
                machines in the cluster. Requires that DRS be enabled.
+        :param pulumi.Input[_builtins.int] dpm_threshold: A value between 1 and 5 indicating the threshold of load within the cluster that influences host power operations. This
                affects both power on and power off operations - a lower setting will tolerate more of a surplus/deficit than a higher
                setting.
+        :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] drs_advanced_options: Advanced configuration options for DRS and DPM.
+        :param pulumi.Input[_builtins.str] drs_automation_level: The default automation level for all virtual machines in this cluster. Can be one of manual, partiallyAutomated, or
                fullyAutomated.
+        :param pulumi.Input[_builtins.bool] drs_enable_predictive_drs: When true, enables DRS to use data from vRealize Operations Manager to make proactive DRS recommendations.
+        :param pulumi.Input[_builtins.bool] drs_enable_vm_overrides: When true, allows individual VM overrides within this cluster to be set.
+        :param pulumi.Input[_builtins.bool] drs_enabled: Enable DRS for this cluster.
+        :param pulumi.Input[_builtins.int] drs_migration_threshold: A value between 1 and 5 indicating the threshold of imbalance tolerated between hosts. A lower setting will tolerate
                more imbalance while a higher setting will tolerate less.
+        :param pulumi.Input[_builtins.str] drs_scale_descendants_shares: Enable scalable shares for all descendants of this cluster.
+        :param pulumi.Input[_builtins.str] folder: The relative path to a folder to put this cluster in.
                This is a path relative to the datacenter you are deploying the cluster to.
                Example: for the `dc1` datacenter, and a provided `folder` of `foo/bar`,
                The provider will place a cluster named `compute-cluster-test` in a
                host folder located at `/dc1/host/foo/bar`, with the final inventory path
                being `/dc1/host/foo/bar/datastore-cluster-test`.
+        :param pulumi.Input[_builtins.bool] force_evacuate_on_destroy: Force removal of all hosts in the cluster during destroy and make them standalone hosts. Use of this flag mainly exists
                for testing and is not recommended in normal use.
+        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_admission_control_failover_host_system_ids: When ha_admission_control_policy is failoverHosts, this defines the managed object IDs of hosts to use as dedicated
                failover hosts. These hosts are kept as available as possible - admission control will block access to the host, and DRS
                will ignore the host when making recommendations.
+        :param pulumi.Input[_builtins.int] ha_admission_control_host_failure_tolerance: The maximum number of failed hosts that admission control tolerates when making decisions on whether to permit virtual
                machine operations. The maximum is one less than the number of hosts in the cluster.
+        :param pulumi.Input[_builtins.int] ha_admission_control_performance_tolerance: The percentage of resource reduction that a cluster of VMs can tolerate in case of a failover. A value of 0 produces
                warnings only, whereas a value of 100 disables the setting.
+        :param pulumi.Input[_builtins.str] ha_admission_control_policy: The type of admission control policy to use with vSphere HA, which controls whether or not specific VM operations are
                permitted in the cluster in order to protect the reliability of the cluster. Can be one of resourcePercentage,
                slotPolicy, failoverHosts, or disabled. Note that disabling admission control is not recommended and can lead to service
                issues.
+        :param pulumi.Input[_builtins.bool] ha_admission_control_resource_percentage_auto_compute: When ha_admission_control_policy is resourcePercentage, automatically determine available resource percentages by
                subtracting the average number of host resources represented by the ha_admission_control_host_failure_tolerance setting
                from the total amount of resources in the cluster. Disable to supply user-defined values.
+        :param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_cpu: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of CPU resources in
                the cluster to reserve for failover.
+        :param pulumi.Input[_builtins.int] ha_admission_control_resource_percentage_memory: When ha_admission_control_policy is resourcePercentage, this controls the user-defined percentage of memory resources in
                the cluster to reserve for failover.
+        :param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_cpu: When ha_admission_control_policy is slotPolicy, this controls the user-defined CPU slot size, in MHz.
+        :param pulumi.Input[_builtins.int] ha_admission_control_slot_policy_explicit_memory: When ha_admission_control_policy is slotPolicy, this controls the user-defined memory slot size, in MB.
+        :param pulumi.Input[_builtins.bool] ha_admission_control_slot_policy_use_explicit_size: When ha_admission_control_policy is slotPolicy, this setting controls whether or not you wish to supply explicit values
                to CPU and memory slot sizes. The default is to gather a automatic average based on all powered-on virtual machines
                currently in the cluster.
+        :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] ha_advanced_options: Advanced configuration options for vSphere HA.
+        :param pulumi.Input[_builtins.str] ha_datastore_apd_recovery_action: When ha_vm_component_protection is enabled, controls the action to take on virtual machines if an APD status on an
                affected datastore clears in the middle of an APD event. Can be one of none or reset.
+        :param pulumi.Input[_builtins.str] ha_datastore_apd_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
                detected loss to all paths to a relevant datastore. Can be one of disabled, warning, restartConservative, or
                restartAggressive.
+        :param pulumi.Input[_builtins.int] ha_datastore_apd_response_delay: When ha_vm_component_protection is enabled, controls the delay in seconds to wait after an APD timeout event to execute
                the response action defined in ha_datastore_apd_response.
+        :param pulumi.Input[_builtins.str] ha_datastore_pdl_response: When ha_vm_component_protection is enabled, controls the action to take on virtual machines when the cluster has
                detected a permanent device loss to a relevant datastore. Can be one of disabled, warning, or restartAggressive.
+        :param pulumi.Input[_builtins.bool] ha_enabled: Enable vSphere HA for this cluster.
+        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] ha_heartbeat_datastore_ids: The list of managed object IDs for preferred datastores to use for HA heartbeating. This setting is only useful when
                ha_heartbeat_datastore_policy is set to either userSelectedDs or allFeasibleDsWithUserPreference.
+        :param pulumi.Input[_builtins.str] ha_heartbeat_datastore_policy: The selection policy for HA heartbeat datastores. Can be one of allFeasibleDs, userSelectedDs, or
                allFeasibleDsWithUserPreference.
+        :param pulumi.Input[_builtins.str] ha_host_isolation_response: The action to take on virtual machines when a host has detected that it has been isolated from the rest of the cluster.
                Can be one of none, powerOff, or shutdown.
+        :param pulumi.Input[_builtins.str] ha_host_monitoring: Global setting that controls whether vSphere HA remediates VMs on host failure. Can be one of enabled or disabled.
+        :param pulumi.Input[_builtins.str] ha_vm_component_protection: Controls vSphere VM component protection for virtual machines in this cluster. This allows vSphere HA to react to
                failures between hosts and specific virtual machine components, such as datastores. Can be one of enabled or disabled.
+        :param pulumi.Input[_builtins.str] ha_vm_dependency_restart_condition: The condition used to determine whether or not VMs in a certain restart priority class are online, allowing HA to move
                on to restarting VMs on the next priority. Can be one of none, poweredOn, guestHbStatusGreen, or appHbStatusGreen.
+        :param pulumi.Input[_builtins.int] ha_vm_failure_interval: If a heartbeat from a virtual machine is not received within this configured interval, the virtual machine is marked as
                failed. The value is in seconds.
+        :param pulumi.Input[_builtins.int] ha_vm_maximum_failure_window: The length of the reset window in which ha_vm_maximum_resets can operate. When this window expires, no more resets are
                attempted regardless of the setting configured in ha_vm_maximum_resets. -1 means no window, meaning an unlimited reset
                time is allotted.
+        :param pulumi.Input[_builtins.int] ha_vm_maximum_resets: The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
+        :param pulumi.Input[_builtins.int] ha_vm_minimum_uptime: The time, in seconds, that HA waits after powering on a virtual machine before monitoring for heartbeats.
+        :param pulumi.Input[_builtins.str] ha_vm_monitoring: The type of virtual machine monitoring to use when HA is enabled in the cluster. Can be one of vmMonitoringDisabled,
                vmMonitoringOnly, or vmAndAppMonitoring.
+        :param pulumi.Input[_builtins.int] ha_vm_restart_additional_delay: Additional delay in seconds after ready condition is met. A VM is considered ready at this point.
+        :param pulumi.Input[_builtins.str] ha_vm_restart_priority: The default restart priority for affected VMs when vSphere detects a host failure. Can be one of lowest, low, medium,
                high, or highest.
+        :param pulumi.Input[_builtins.int] ha_vm_restart_timeout: The maximum time, in seconds, that vSphere HA will wait for virtual machines in one priority to be ready before
                proceeding with the next priority.
+        :param pulumi.Input[_builtins.int] host_cluster_exit_timeout: The timeout for each host maintenance mode operation when removing hosts from a cluster.
         :param pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']] host_image: Details about the host image which should be applied to the cluster.
+        :param pulumi.Input[_builtins.bool] host_managed: Must be set if cluster enrollment is managed from host resource.
+        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] host_system_ids: The managed object IDs of the hosts to put in the cluster.
+        :param pulumi.Input[_builtins.str] name: The name of the cluster.
+        :param pulumi.Input[_builtins.str] proactive_ha_automation_level: The DRS behavior for proactive HA recommendations. Can be one of Automated or Manual.
+        :param pulumi.Input[_builtins.bool] proactive_ha_enabled: Enables proactive HA, allowing for vSphere to get HA data from external providers and use DRS to perform remediation.
+        :param pulumi.Input[_builtins.str] proactive_ha_moderate_remediation: The configured remediation for moderately degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that
                this cannot be set to MaintenanceMode when proactive_ha_severe_remediation is set to QuarantineMode.
+        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] proactive_ha_provider_ids: The list of IDs for health update providers configured for this cluster.
+        :param pulumi.Input[_builtins.str] proactive_ha_severe_remediation: The configured remediation for severely degraded hosts. Can be one of MaintenanceMode or QuarantineMode. Note that this
                cannot be set to QuarantineMode when proactive_ha_moderate_remediation is set to MaintenanceMode.
+        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] tags: The IDs of any tags to attach to this resource.
+        :param pulumi.Input[_builtins.bool] vsan_compression_enabled: Whether the vSAN compression service is enabled for the cluster.
+        :param pulumi.Input[_builtins.bool] vsan_dedup_enabled: Whether the vSAN deduplication service is enabled for the cluster.
         :param pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]] vsan_disk_groups: A list of disk UUIDs to add to the vSAN cluster.
+        :param pulumi.Input[_builtins.bool] vsan_dit_encryption_enabled: Whether the vSAN data-in-transit encryption is enabled for the cluster.
+        :param pulumi.Input[_builtins.int] vsan_dit_rekey_interval: When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
+        :param pulumi.Input[_builtins.bool] vsan_enabled: Whether the vSAN service is enabled for the cluster.
+        :param pulumi.Input[_builtins.bool] vsan_esa_enabled: Whether the vSAN ESA service is enabled for the cluster.
         :param pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]] vsan_fault_domains: The configuration for vSAN fault domains.
+        :param pulumi.Input[_builtins.bool] vsan_network_diagnostic_mode_enabled: Whether the vSAN network diagnostic mode is enabled for the cluster.
+        :param pulumi.Input[_builtins.bool] vsan_performance_enabled: Whether the vSAN performance service is enabled for the cluster.
+        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] vsan_remote_datastore_ids: The managed object IDs of the vSAN datastore to be mounted on the cluster.
         :param pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']] vsan_stretched_cluster: The configuration for stretched cluster.
+        :param pulumi.Input[_builtins.bool] vsan_unmap_enabled: Whether the vSAN unmap service is enabled for the cluster.
+        :param pulumi.Input[_builtins.bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
         """
         ...
     @overload
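The constructor arguments documented above map directly onto a Pulumi program. A minimal, hypothetical sketch showing only a few of them; the datacenter name, host managed object IDs, and resource names are placeholders for illustration, not values from this diff:

import pulumi
import pulumi_vsphere as vsphere

# Placeholder lookup; "dc-01" is an assumed datacenter name.
datacenter = vsphere.get_datacenter(name="dc-01")

cluster = vsphere.ComputeCluster(
    "compute-cluster-test",
    datacenter_id=datacenter.id,
    host_system_ids=["host-1234", "host-5678"],  # placeholder host IDs
    drs_enabled=True,
    drs_automation_level="fullyAutomated",
    ha_enabled=True,
)

# The cluster's root resource pool ID is exposed as an output property.
pulumi.export("clusterResourcePoolId", cluster.resource_pool_id)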
@@ -2848,75 +2847,75 @@ class ComputeCluster(pulumi.CustomResource):
     def _internal_init(__self__,
                        resource_name: str,
                        opts: Optional[pulumi.ResourceOptions] = None,
+                       custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+                       datacenter_id: Optional[pulumi.Input[_builtins.str]] = None,
+                       dpm_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+                       dpm_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                       dpm_threshold: Optional[pulumi.Input[_builtins.int]] = None,
+                       drs_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+                       drs_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+                       drs_enable_predictive_drs: Optional[pulumi.Input[_builtins.bool]] = None,
+                       drs_enable_vm_overrides: Optional[pulumi.Input[_builtins.bool]] = None,
+                       drs_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                       drs_migration_threshold: Optional[pulumi.Input[_builtins.int]] = None,
+                       drs_scale_descendants_shares: Optional[pulumi.Input[_builtins.str]] = None,
+                       folder: Optional[pulumi.Input[_builtins.str]] = None,
+                       force_evacuate_on_destroy: Optional[pulumi.Input[_builtins.bool]] = None,
+                       ha_admission_control_failover_host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                       ha_admission_control_host_failure_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
+                       ha_admission_control_performance_tolerance: Optional[pulumi.Input[_builtins.int]] = None,
+                       ha_admission_control_policy: Optional[pulumi.Input[_builtins.str]] = None,
+                       ha_admission_control_resource_percentage_auto_compute: Optional[pulumi.Input[_builtins.bool]] = None,
+                       ha_admission_control_resource_percentage_cpu: Optional[pulumi.Input[_builtins.int]] = None,
+                       ha_admission_control_resource_percentage_memory: Optional[pulumi.Input[_builtins.int]] = None,
+                       ha_admission_control_slot_policy_explicit_cpu: Optional[pulumi.Input[_builtins.int]] = None,
+                       ha_admission_control_slot_policy_explicit_memory: Optional[pulumi.Input[_builtins.int]] = None,
+                       ha_admission_control_slot_policy_use_explicit_size: Optional[pulumi.Input[_builtins.bool]] = None,
+                       ha_advanced_options: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
+                       ha_datastore_apd_recovery_action: Optional[pulumi.Input[_builtins.str]] = None,
+                       ha_datastore_apd_response: Optional[pulumi.Input[_builtins.str]] = None,
+                       ha_datastore_apd_response_delay: Optional[pulumi.Input[_builtins.int]] = None,
+                       ha_datastore_pdl_response: Optional[pulumi.Input[_builtins.str]] = None,
+                       ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                       ha_heartbeat_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                       ha_heartbeat_datastore_policy: Optional[pulumi.Input[_builtins.str]] = None,
+                       ha_host_isolation_response: Optional[pulumi.Input[_builtins.str]] = None,
+                       ha_host_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
+                       ha_vm_component_protection: Optional[pulumi.Input[_builtins.str]] = None,
+                       ha_vm_dependency_restart_condition: Optional[pulumi.Input[_builtins.str]] = None,
+                       ha_vm_failure_interval: Optional[pulumi.Input[_builtins.int]] = None,
+                       ha_vm_maximum_failure_window: Optional[pulumi.Input[_builtins.int]] = None,
+                       ha_vm_maximum_resets: Optional[pulumi.Input[_builtins.int]] = None,
+                       ha_vm_minimum_uptime: Optional[pulumi.Input[_builtins.int]] = None,
+                       ha_vm_monitoring: Optional[pulumi.Input[_builtins.str]] = None,
+                       ha_vm_restart_additional_delay: Optional[pulumi.Input[_builtins.int]] = None,
+                       ha_vm_restart_priority: Optional[pulumi.Input[_builtins.str]] = None,
+                       ha_vm_restart_timeout: Optional[pulumi.Input[_builtins.int]] = None,
+                       host_cluster_exit_timeout: Optional[pulumi.Input[_builtins.int]] = None,
                        host_image: Optional[pulumi.Input[Union['ComputeClusterHostImageArgs', 'ComputeClusterHostImageArgsDict']]] = None,
+                       host_managed: Optional[pulumi.Input[_builtins.bool]] = None,
+                       host_system_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                       name: Optional[pulumi.Input[_builtins.str]] = None,
+                       proactive_ha_automation_level: Optional[pulumi.Input[_builtins.str]] = None,
+                       proactive_ha_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                       proactive_ha_moderate_remediation: Optional[pulumi.Input[_builtins.str]] = None,
+                       proactive_ha_provider_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                       proactive_ha_severe_remediation: Optional[pulumi.Input[_builtins.str]] = None,
+                       tags: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
+                       vsan_compression_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                       vsan_dedup_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
                        vsan_disk_groups: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanDiskGroupArgs', 'ComputeClusterVsanDiskGroupArgsDict']]]]] = None,
+                       vsan_dit_encryption_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                       vsan_dit_rekey_interval: Optional[pulumi.Input[_builtins.int]] = None,
+                       vsan_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                       vsan_esa_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
                        vsan_fault_domains: Optional[pulumi.Input[Sequence[pulumi.Input[Union['ComputeClusterVsanFaultDomainArgs', 'ComputeClusterVsanFaultDomainArgsDict']]]]] = None,
+                       vsan_network_diagnostic_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                       vsan_performance_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                       vsan_remote_datastore_ids: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None,
                        vsan_stretched_cluster: Optional[pulumi.Input[Union['ComputeClusterVsanStretchedClusterArgs', 'ComputeClusterVsanStretchedClusterArgsDict']]] = None,
+                       vsan_unmap_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
+                       vsan_verbose_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None,
                        __props__=None):
         opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
         if not isinstance(opts, pulumi.ResourceOptions):
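Just before validation, the initializer merges provider-level defaults into the per-resource options with `pulumi.ResourceOptions.merge`. A small sketch of what callers typically feed into that merge; the provider alias, resource names, and the datacenter ID are hypothetical placeholders:

import pulumi
import pulumi_vsphere as vsphere

# Explicit provider instance and resource options; both names are placeholders.
vsphere_provider = vsphere.Provider("lab-vcenter")

opts = pulumi.ResourceOptions(
    provider=vsphere_provider,  # route this resource through the explicit provider
    protect=True,               # refuse to delete the cluster on `pulumi destroy`
)

cluster = vsphere.ComputeCluster(
    "protected-cluster",
    datacenter_id="datacenter-3",  # placeholder managed object ID
    opts=opts,
)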
@@ -3008,76 +3007,76 @@ class ComputeCluster(pulumi.CustomResource):
     def get(resource_name: str,
             id: pulumi.Input[str],
             opts: Optional[pulumi.ResourceOptions] = None,
-            custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[
+            custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
-            datacenter_id: Optional[pulumi.Input[
+            datacenter_id: Optional[pulumi.Input[_builtins.str]] = None,
-            dpm_automation_level: Optional[pulumi.Input[
+            dpm_automation_level: Optional[pulumi.Input[_builtins.str]] = None,

Every remaining parameter of get() is re-annotated in the same pattern; the parameter names and their order match the removed lines, and the new annotations are:

- pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]: custom_attributes, drs_advanced_options, ha_advanced_options
- pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]: ha_admission_control_failover_host_system_ids, ha_heartbeat_datastore_ids, host_system_ids, proactive_ha_provider_ids, tags, vsan_remote_datastore_ids
- pulumi.Input[_builtins.str]: datacenter_id, dpm_automation_level, drs_automation_level, drs_scale_descendants_shares, folder, ha_admission_control_policy, ha_datastore_apd_recovery_action, ha_datastore_apd_response, ha_datastore_pdl_response, ha_heartbeat_datastore_policy, ha_host_isolation_response, ha_host_monitoring, ha_vm_component_protection, ha_vm_dependency_restart_condition, ha_vm_monitoring, ha_vm_restart_priority, name, proactive_ha_automation_level, proactive_ha_moderate_remediation, proactive_ha_severe_remediation, resource_pool_id
- pulumi.Input[_builtins.bool]: dpm_enabled, drs_enable_predictive_drs, drs_enable_vm_overrides, drs_enabled, force_evacuate_on_destroy, ha_admission_control_resource_percentage_auto_compute, ha_admission_control_slot_policy_use_explicit_size, ha_enabled, host_managed, proactive_ha_enabled, vsan_compression_enabled, vsan_dedup_enabled, vsan_dit_encryption_enabled, vsan_enabled, vsan_esa_enabled, vsan_network_diagnostic_mode_enabled, vsan_performance_enabled, vsan_unmap_enabled, vsan_verbose_mode_enabled
- pulumi.Input[_builtins.int]: dpm_threshold, drs_migration_threshold, ha_admission_control_host_failure_tolerance, ha_admission_control_performance_tolerance, ha_admission_control_resource_percentage_cpu, ha_admission_control_resource_percentage_memory, ha_admission_control_slot_policy_explicit_cpu, ha_admission_control_slot_policy_explicit_memory, ha_datastore_apd_response_delay, ha_vm_failure_interval, ha_vm_maximum_failure_window, ha_vm_maximum_resets, ha_vm_minimum_uptime, ha_vm_restart_additional_delay, ha_vm_restart_timeout, host_cluster_exit_timeout, vsan_dit_rekey_interval

The parameters typed with Args/ArgsDict unions (host_image, vsan_disk_groups, vsan_fault_domains, vsan_stretched_cluster) appear only as unchanged context lines, and the signature still closes with:

+            vsan_verbose_mode_enabled: Optional[pulumi.Input[_builtins.bool]] = None) -> 'ComputeCluster':
         """
         Get an existing ComputeCluster resource's state with the given name, id, and optional extra
         properties used to qualify the lookup.
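This static lookup takes just a resource name and the cluster's ID, with the remaining keyword arguments available as optional extra properties to qualify the lookup. A minimal sketch; the managed object ID is a placeholder:

    import pulumi
    import pulumi_vsphere as vsphere

    # Adopt the state of a cluster that already exists in vCenter.
    # "domain-c81" stands in for a real cluster managed object ID.
    existing = vsphere.ComputeCluster.get("imported-cluster", id="domain-c81")

    pulumi.export("imported_drs_enabled", existing.drs_enabled)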
@@ -3085,127 +3084,127 @@ class ComputeCluster(pulumi.CustomResource):
         :param str resource_name: The unique name of the resulting resource.
         :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
         :param pulumi.ResourceOptions opts: Options for the resource.
-        :param pulumi.Input[Mapping[str, pulumi.Input[
+        :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] custom_attributes: A map of custom attribute ids to attribute
               value strings to set for the datastore cluster.

               > **NOTE:** Custom attributes are unsupported on direct ESXi connections
               and require vCenter Server.
-        :param pulumi.Input[
+        :param pulumi.Input[_builtins.str] datacenter_id: The managed object ID of
               the datacenter to create the cluster in. Forces a new resource if changed.

Every other :param entry in the get() docstring is rewritten the same way, using the _builtins-qualified annotations listed for the signature above. The description text itself is carried over unchanged (the DPM and DRS automation levels and thresholds, the HA admission control policies, resource percentages and slot sizes, the datastore APD/PDL responses and heartbeat datastore policies, the VM monitoring intervals and restart priorities, the proactive HA remediation modes, and the vSAN service toggles), and the entries for host_image, vsan_disk_groups, vsan_fault_domains, and vsan_stretched_cluster keep their Args/ArgsDict types. The hunk closes with:

+        :param pulumi.Input[_builtins.bool] vsan_verbose_mode_enabled: Whether the vSAN verbose mode is enabled for the cluster.
         """
         opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
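The documented value sets read directly as constructor arguments. A minimal sketch combining DRS with resource-percentage admission control; the resource names and percentages are chosen only for illustration:

    import pulumi_vsphere as vsphere

    # Placeholder datacenter lookup; "dc-01" is illustrative.
    datacenter = vsphere.get_datacenter(name="dc-01")

    # drs_automation_level accepts manual, partiallyAutomated, or fullyAutomated;
    # ha_admission_control_policy accepts resourcePercentage, slotPolicy,
    # failoverHosts, or disabled (disabling admission control is not recommended).
    cluster = vsphere.ComputeCluster(
        "drs-ha-cluster",
        datacenter_id=datacenter.id,
        drs_enabled=True,
        drs_automation_level="fullyAutomated",
        drs_migration_threshold=3,
        ha_enabled=True,
        ha_admission_control_policy="resourcePercentage",
        ha_admission_control_resource_percentage_auto_compute=False,
        ha_admission_control_resource_percentage_cpu=25,
        ha_admission_control_resource_percentage_memory=25,
    )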
@@ -3283,9 +3282,9 @@ class ComputeCluster(pulumi.CustomResource):
         __props__.__dict__["vsan_verbose_mode_enabled"] = vsan_verbose_mode_enabled
         return ComputeCluster(resource_name, opts=opts, __props__=__props__)

-    @property
+    @_builtins.property
     @pulumi.getter(name="customAttributes")
-    def custom_attributes(self) -> pulumi.Output[Optional[Mapping[str,
+    def custom_attributes(self) -> pulumi.Output[Optional[Mapping[str, _builtins.str]]]:
         """
         A map of custom attribute ids to attribute
         value strings to set for the datastore cluster.

The hunks @@ -3295,35 +3294,35 @@ through @@ -3460,43 +3459,43 @@ repeat this two-line change for each getter from datacenter_id through ha_admission_control_slot_policy_use_explicit_size: the @property decorator becomes @_builtins.property and the return annotation becomes the matching _builtins-qualified pulumi.Output type (datacenter_id stays a non-Optional pulumi.Output[_builtins.str]), while the @pulumi.getter names, docstrings, and return pulumi.get(...) bodies are untouched.
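Because each getter returns a pulumi.Output, the re-typed values are consumed exactly as before, for example as stack outputs. A small sketch against a hypothetical imported cluster (the managed object ID is a placeholder):

    import pulumi
    import pulumi_vsphere as vsphere

    # Placeholder managed object ID for an existing cluster.
    cluster = vsphere.ComputeCluster.get("existing", id="domain-c81")

    # Each export wraps the _builtins-typed value resolved from vCenter.
    pulumi.export("dpm_threshold", cluster.dpm_threshold)
    pulumi.export("admission_control_policy", cluster.ha_admission_control_policy)
    pulumi.export("failover_hosts", cluster.ha_admission_control_failover_host_system_ids)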
@@ -3504,26 +3503,26 @@ class ComputeCluster(pulumi.CustomResource):
         currently in the cluster.
         """
         return pulumi.get(self, "ha_admission_control_slot_policy_use_explicit_size")

-    @property
+    @_builtins.property
     @pulumi.getter(name="haAdvancedOptions")
-    def ha_advanced_options(self) -> pulumi.Output[Optional[Mapping[str,
+    def ha_advanced_options(self) -> pulumi.Output[Optional[Mapping[str, _builtins.str]]]:
         """
         Advanced configuration options for vSphere HA.
         """
         return pulumi.get(self, "ha_advanced_options")

This hunk and @@ -3531,97 +3530,97 @@ apply the same decorator and return-annotation change to the getters from ha_datastore_apd_recovery_action through ha_vm_maximum_failure_window (ha_enabled, the datastore APD/PDL responses and their delay, the heartbeat datastore settings, host isolation and host monitoring responses, VM component protection, the dependency restart condition, and the VM failure-interval and reset-window properties); their docstrings and bodies are unchanged.
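The datastore-response settings documented in these getters are set together on the resource. A hedged sketch; the datacenter MOID, delay, and chosen response values are assumptions picked from the documented option lists, not from this diff:

    import pulumi_vsphere as vsphere

    # Configure HA VM component protection with the documented APD/PDL responses.
    cluster = vsphere.ComputeCluster(
        "vmcp-cluster",
        datacenter_id="datacenter-3",  # placeholder datacenter managed object ID
        ha_enabled=True,
        ha_vm_component_protection="enabled",
        ha_datastore_apd_response="restartConservative",
        ha_datastore_apd_response_delay=180,
        ha_datastore_pdl_response="restartAggressive",
        ha_heartbeat_datastore_policy="allFeasibleDs",
    )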
@@ -3629,66 +3628,66 @@ class ComputeCluster(pulumi.CustomResource):
         """
         return pulumi.get(self, "ha_vm_maximum_failure_window")

-    @property
+    @_builtins.property
     @pulumi.getter(name="haVmMaximumResets")
-    def ha_vm_maximum_resets(self) -> pulumi.Output[Optional[
+    def ha_vm_maximum_resets(self) -> pulumi.Output[Optional[_builtins.int]]:
         """
         The maximum number of resets that HA will perform to a virtual machine when responding to a failure event.
         """
         return pulumi.get(self, "ha_vm_maximum_resets")

This hunk and @@ -3696,75 +3695,75 @@ carry the same change through the remaining getters: ha_vm_minimum_uptime, ha_vm_monitoring, ha_vm_restart_additional_delay, ha_vm_restart_priority, ha_vm_restart_timeout, host_cluster_exit_timeout, host_image (only its decorator changes; the outputs.ComputeClusterHostImage return type is an unchanged context line), host_managed, host_system_ids, name, the proactive_ha_* properties, and resource_pool_id, again leaving docstrings and bodies as they were:

-    @property
+    @_builtins.property
     @pulumi.getter(name="resourcePoolId")
-    def resource_pool_id(self) -> pulumi.Output[
+    def resource_pool_id(self) -> pulumi.Output[_builtins.str]:
         """
         The managed object ID of the primary
         resource pool for this cluster. This can be passed directly to the
|
|
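A minimal sketch of how the HA and proactive HA settings surfaced by the getters above might be set and read from a Pulumi Python program. This is illustrative only and not part of the package diff: the resource name, the "datacenter-123" ID, and the assumption that the constructor accepts inputs mirroring these getter names are placeholders.

    import pulumi
    import pulumi_vsphere as vsphere

    # Hypothetical cluster; "datacenter-123" stands in for a real datacenter
    # managed object ID.
    cluster = vsphere.ComputeCluster(
        "example-cluster",
        datacenter_id="datacenter-123",
        ha_enabled=True,
        # One of lowest, low, medium, high, or highest per the docstring above.
        ha_vm_restart_priority="medium",
        ha_vm_restart_additional_delay=30,
        proactive_ha_enabled=True,
        proactive_ha_automation_level="Automated",
        # Per the docstrings above, moderate remediation may not be MaintenanceMode
        # while severe remediation is QuarantineMode, so severe stays at least as strict.
        proactive_ha_moderate_remediation="QuarantineMode",
        proactive_ha_severe_remediation="MaintenanceMode",
    )

    # With this release the getters are annotated as pulumi.Output[_builtins.str],
    # pulumi.Output[_builtins.int], etc.; they still resolve to plain Python values.
    pulumi.export("restartPriority", cluster.ha_vm_restart_priority)
    pulumi.export("restartTimeout", cluster.ha_vm_restart_timeout)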
@@ -3774,31 +3773,31 @@ class ComputeCluster(pulumi.CustomResource):
         """
         return pulumi.get(self, "resource_pool_id")

-    @property
+    @_builtins.property
     @pulumi.getter
-    def tags(self) -> pulumi.Output[Optional[Sequence[
+    def tags(self) -> pulumi.Output[Optional[Sequence[_builtins.str]]]:
         """
         The IDs of any tags to attach to this resource.
         """
         return pulumi.get(self, "tags")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanCompressionEnabled")
-    def vsan_compression_enabled(self) -> pulumi.Output[Optional[
+    def vsan_compression_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
         """
         Whether the vSAN compression service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_compression_enabled")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanDedupEnabled")
-    def vsan_dedup_enabled(self) -> pulumi.Output[Optional[
+    def vsan_dedup_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
         """
         Whether the vSAN deduplication service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_dedup_enabled")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanDiskGroups")
     def vsan_disk_groups(self) -> pulumi.Output[Sequence['outputs.ComputeClusterVsanDiskGroup']]:
         """
@@ -3806,39 +3805,39 @@ class ComputeCluster(pulumi.CustomResource):
         """
         return pulumi.get(self, "vsan_disk_groups")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanDitEncryptionEnabled")
-    def vsan_dit_encryption_enabled(self) -> pulumi.Output[Optional[
+    def vsan_dit_encryption_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
         """
         Whether the vSAN data-in-transit encryption is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_dit_encryption_enabled")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanDitRekeyInterval")
-    def vsan_dit_rekey_interval(self) -> pulumi.Output[
+    def vsan_dit_rekey_interval(self) -> pulumi.Output[_builtins.int]:
         """
         When vsan_dit_encryption_enabled is enabled, sets the rekey interval of data-in-transit encryption (in minutes).
         """
         return pulumi.get(self, "vsan_dit_rekey_interval")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanEnabled")
-    def vsan_enabled(self) -> pulumi.Output[Optional[
+    def vsan_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
         """
         Whether the vSAN service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_enabled")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanEsaEnabled")
-    def vsan_esa_enabled(self) -> pulumi.Output[Optional[
+    def vsan_esa_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
         """
         Whether the vSAN ESA service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_esa_enabled")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanFaultDomains")
     def vsan_fault_domains(self) -> pulumi.Output[Optional[Sequence['outputs.ComputeClusterVsanFaultDomain']]]:
         """
@@ -3846,31 +3845,31 @@ class ComputeCluster(pulumi.CustomResource):
         """
         return pulumi.get(self, "vsan_fault_domains")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanNetworkDiagnosticModeEnabled")
-    def vsan_network_diagnostic_mode_enabled(self) -> pulumi.Output[Optional[
+    def vsan_network_diagnostic_mode_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
         """
         Whether the vSAN network diagnostic mode is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_network_diagnostic_mode_enabled")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanPerformanceEnabled")
-    def vsan_performance_enabled(self) -> pulumi.Output[Optional[
+    def vsan_performance_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
         """
         Whether the vSAN performance service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_performance_enabled")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanRemoteDatastoreIds")
-    def vsan_remote_datastore_ids(self) -> pulumi.Output[Optional[Sequence[
+    def vsan_remote_datastore_ids(self) -> pulumi.Output[Optional[Sequence[_builtins.str]]]:
         """
         The managed object IDs of the vSAN datastore to be mounted on the cluster.
         """
         return pulumi.get(self, "vsan_remote_datastore_ids")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanStretchedCluster")
     def vsan_stretched_cluster(self) -> pulumi.Output[Optional['outputs.ComputeClusterVsanStretchedCluster']]:
         """
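Similarly, an illustrative sketch (not part of the package diff) of the vSAN-related outputs above, under the assumption that the constructor accepts matching vsan_* inputs; the datacenter ID and resource name are placeholders.

    import pulumi
    import pulumi_vsphere as vsphere

    vsan_cluster = vsphere.ComputeCluster(
        "vsan-cluster",
        datacenter_id="datacenter-123",   # placeholder managed object ID
        vsan_enabled=True,
        # vSAN deduplication is used together with compression.
        vsan_dedup_enabled=True,
        vsan_compression_enabled=True,
        vsan_dit_encryption_enabled=True,
        vsan_dit_rekey_interval=1440,     # rekey data-in-transit encryption every 24 hours
    )

    # Boolean and integer getters such as vsan_unmap_enabled now carry the
    # _builtins.bool / _builtins.int annotations but still resolve to ordinary values.
    pulumi.export("vsanEsaEnabled", vsan_cluster.vsan_esa_enabled)
    pulumi.export("vsanDitRekeyInterval", vsan_cluster.vsan_dit_rekey_interval)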
@@ -3878,17 +3877,17 @@ class ComputeCluster(pulumi.CustomResource):
         """
         return pulumi.get(self, "vsan_stretched_cluster")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanUnmapEnabled")
-    def vsan_unmap_enabled(self) -> pulumi.Output[Optional[
+    def vsan_unmap_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
         """
         Whether the vSAN unmap service is enabled for the cluster.
         """
         return pulumi.get(self, "vsan_unmap_enabled")

-    @property
+    @_builtins.property
     @pulumi.getter(name="vsanVerboseModeEnabled")
-    def vsan_verbose_mode_enabled(self) -> pulumi.Output[Optional[
+    def vsan_verbose_mode_enabled(self) -> pulumi.Output[Optional[_builtins.bool]]:
         """
         Whether the vSAN verbose mode is enabled for the cluster.
         """