pulumi-gcp 8.13.0a1736231082__py3-none-any.whl → 8.13.0a1736263433__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pulumi_gcp/__init__.py +43 -0
- pulumi_gcp/accesscontextmanager/_inputs.py +3 -3
- pulumi_gcp/accesscontextmanager/outputs.py +2 -2
- pulumi_gcp/artifactregistry/_inputs.py +83 -0
- pulumi_gcp/artifactregistry/get_repository.py +15 -4
- pulumi_gcp/artifactregistry/outputs.py +112 -0
- pulumi_gcp/artifactregistry/repository.py +93 -7
- pulumi_gcp/backupdisasterrecovery/get_backup.py +38 -2
- pulumi_gcp/backupdisasterrecovery/get_data_source.py +38 -2
- pulumi_gcp/backupdisasterrecovery/outputs.py +8 -8
- pulumi_gcp/bigquery/_inputs.py +369 -0
- pulumi_gcp/bigquery/dataset_access.py +61 -0
- pulumi_gcp/bigquery/job.py +6 -18
- pulumi_gcp/bigquery/outputs.py +350 -0
- pulumi_gcp/bigquery/table.py +47 -0
- pulumi_gcp/chronicle/__init__.py +10 -0
- pulumi_gcp/chronicle/_inputs.py +169 -0
- pulumi_gcp/chronicle/outputs.py +107 -0
- pulumi_gcp/chronicle/watchlist.py +776 -0
- pulumi_gcp/cloudrunv2/_inputs.py +6 -3
- pulumi_gcp/cloudrunv2/outputs.py +10 -6
- pulumi_gcp/composer/_inputs.py +80 -5
- pulumi_gcp/composer/outputs.py +111 -4
- pulumi_gcp/compute/_inputs.py +181 -87
- pulumi_gcp/compute/firewall_policy_association.py +50 -39
- pulumi_gcp/compute/get_network.py +17 -2
- pulumi_gcp/compute/get_subnetwork.py +18 -4
- pulumi_gcp/compute/network.py +39 -2
- pulumi_gcp/compute/outputs.py +157 -58
- pulumi_gcp/compute/region_network_endpoint_group.py +1 -1
- pulumi_gcp/compute/resize_request.py +26 -40
- pulumi_gcp/config/__init__.pyi +2 -0
- pulumi_gcp/config/vars.py +4 -0
- pulumi_gcp/dataproc/batch.py +4 -18
- pulumi_gcp/datastream/_inputs.py +68 -0
- pulumi_gcp/datastream/outputs.py +41 -1
- pulumi_gcp/developerconnect/connection.py +58 -12
- pulumi_gcp/diagflow/_inputs.py +3 -3
- pulumi_gcp/diagflow/outputs.py +2 -2
- pulumi_gcp/filestore/backup.py +71 -3
- pulumi_gcp/firebase/hosting_custom_domain.py +4 -4
- pulumi_gcp/firestore/_inputs.py +3 -3
- pulumi_gcp/firestore/outputs.py +2 -2
- pulumi_gcp/gkehub/__init__.py +1 -0
- pulumi_gcp/gkehub/get_feature.py +226 -0
- pulumi_gcp/gkehub/outputs.py +1153 -0
- pulumi_gcp/gkeonprem/__init__.py +1 -0
- pulumi_gcp/gkeonprem/_inputs.py +2364 -0
- pulumi_gcp/gkeonprem/outputs.py +1780 -0
- pulumi_gcp/gkeonprem/vmware_admin_cluster.py +1715 -0
- pulumi_gcp/integrationconnectors/_inputs.py +6 -6
- pulumi_gcp/integrationconnectors/connection.py +7 -7
- pulumi_gcp/integrationconnectors/outputs.py +4 -4
- pulumi_gcp/kms/__init__.py +2 -0
- pulumi_gcp/kms/get_autokey_config.py +121 -0
- pulumi_gcp/kms/get_key_handle.py +185 -0
- pulumi_gcp/netapp/kmsconfig.py +2 -14
- pulumi_gcp/networkconnectivity/hub.py +108 -0
- pulumi_gcp/networkconnectivity/spoke.py +195 -0
- pulumi_gcp/networkmanagement/vpc_flow_logs_config.py +4 -0
- pulumi_gcp/networksecurity/__init__.py +2 -0
- pulumi_gcp/networksecurity/_inputs.py +78 -0
- pulumi_gcp/networksecurity/intercept_endpoint_group.py +723 -0
- pulumi_gcp/networksecurity/intercept_endpoint_group_association.py +815 -0
- pulumi_gcp/networksecurity/outputs.py +48 -0
- pulumi_gcp/oracledatabase/autonomous_database.py +2 -2
- pulumi_gcp/orgpolicy/policy.py +2 -2
- pulumi_gcp/parallelstore/instance.py +149 -0
- pulumi_gcp/projects/usage_export_bucket.py +42 -110
- pulumi_gcp/provider.py +20 -0
- pulumi_gcp/pubsub/subscription.py +6 -6
- pulumi_gcp/pulumi-plugin.json +1 -1
- pulumi_gcp/securesourcemanager/instance.py +4 -16
- pulumi_gcp/spanner/instance_iam_binding.py +14 -0
- pulumi_gcp/spanner/instance_iam_member.py +14 -0
- pulumi_gcp/sql/database_instance.py +7 -7
- pulumi_gcp/storage/__init__.py +1 -0
- pulumi_gcp/storage/_inputs.py +431 -0
- pulumi_gcp/storage/folder.py +483 -0
- pulumi_gcp/storage/outputs.py +363 -0
- pulumi_gcp/storage/transfer_job.py +84 -38
- pulumi_gcp/vertex/_inputs.py +26 -25
- pulumi_gcp/vertex/ai_endpoint.py +4 -4
- pulumi_gcp/vertex/ai_feature_online_store_featureview.py +4 -4
- pulumi_gcp/vertex/outputs.py +15 -14
- pulumi_gcp/workstations/workstation.py +55 -1
- {pulumi_gcp-8.13.0a1736231082.dist-info → pulumi_gcp-8.13.0a1736263433.dist-info}/METADATA +1 -1
- {pulumi_gcp-8.13.0a1736231082.dist-info → pulumi_gcp-8.13.0a1736263433.dist-info}/RECORD +90 -79
- {pulumi_gcp-8.13.0a1736231082.dist-info → pulumi_gcp-8.13.0a1736263433.dist-info}/WHEEL +0 -0
- {pulumi_gcp-8.13.0a1736231082.dist-info → pulumi_gcp-8.13.0a1736263433.dist-info}/top_level.txt +0 -0
pulumi_gcp/gkeonprem/outputs.py
CHANGED
@@ -139,6 +139,37 @@ __all__ = [
|
|
139
139
|
'VMwareNodePoolNodePoolAutoscaling',
|
140
140
|
'VMwareNodePoolStatus',
|
141
141
|
'VMwareNodePoolStatusCondition',
|
142
|
+
'VmwareAdminClusterAddonNode',
|
143
|
+
'VmwareAdminClusterAddonNodeAutoResizeConfig',
|
144
|
+
'VmwareAdminClusterAntiAffinityGroups',
|
145
|
+
'VmwareAdminClusterAuthorization',
|
146
|
+
'VmwareAdminClusterAuthorizationViewerUser',
|
147
|
+
'VmwareAdminClusterAutoRepairConfig',
|
148
|
+
'VmwareAdminClusterControlPlaneNode',
|
149
|
+
'VmwareAdminClusterFleet',
|
150
|
+
'VmwareAdminClusterLoadBalancer',
|
151
|
+
'VmwareAdminClusterLoadBalancerF5Config',
|
152
|
+
'VmwareAdminClusterLoadBalancerManualLbConfig',
|
153
|
+
'VmwareAdminClusterLoadBalancerMetalLbConfig',
|
154
|
+
'VmwareAdminClusterLoadBalancerVipConfig',
|
155
|
+
'VmwareAdminClusterNetworkConfig',
|
156
|
+
'VmwareAdminClusterNetworkConfigDhcpIpConfig',
|
157
|
+
'VmwareAdminClusterNetworkConfigHaControlPlaneConfig',
|
158
|
+
'VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlock',
|
159
|
+
'VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlockIp',
|
160
|
+
'VmwareAdminClusterNetworkConfigHostConfig',
|
161
|
+
'VmwareAdminClusterNetworkConfigStaticIpConfig',
|
162
|
+
'VmwareAdminClusterNetworkConfigStaticIpConfigIpBlock',
|
163
|
+
'VmwareAdminClusterNetworkConfigStaticIpConfigIpBlockIp',
|
164
|
+
'VmwareAdminClusterPlatformConfig',
|
165
|
+
'VmwareAdminClusterPlatformConfigBundle',
|
166
|
+
'VmwareAdminClusterPlatformConfigBundleStatus',
|
167
|
+
'VmwareAdminClusterPlatformConfigBundleStatusCondition',
|
168
|
+
'VmwareAdminClusterPlatformConfigStatus',
|
169
|
+
'VmwareAdminClusterPlatformConfigStatusCondition',
|
170
|
+
'VmwareAdminClusterStatus',
|
171
|
+
'VmwareAdminClusterStatusCondition',
|
172
|
+
'VmwareAdminClusterVcenter',
|
142
173
|
]
|
143
174
|
|
144
175
|
@pulumi.output_type
|
@@ -6769,3 +6800,1752 @@ class VMwareNodePoolStatusCondition(dict):
|
|
6769
6800
|
return pulumi.get(self, "type")
|
6770
6801
|
|
6771
6802
|
|
6803
|
+
@pulumi.output_type
|
6804
|
+
class VmwareAdminClusterAddonNode(dict):
|
6805
|
+
@staticmethod
|
6806
|
+
def __key_warning(key: str):
|
6807
|
+
suggest = None
|
6808
|
+
if key == "autoResizeConfig":
|
6809
|
+
suggest = "auto_resize_config"
|
6810
|
+
|
6811
|
+
if suggest:
|
6812
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterAddonNode. Access the value via the '{suggest}' property getter instead.")
|
6813
|
+
|
6814
|
+
def __getitem__(self, key: str) -> Any:
|
6815
|
+
VmwareAdminClusterAddonNode.__key_warning(key)
|
6816
|
+
return super().__getitem__(key)
|
6817
|
+
|
6818
|
+
def get(self, key: str, default = None) -> Any:
|
6819
|
+
VmwareAdminClusterAddonNode.__key_warning(key)
|
6820
|
+
return super().get(key, default)
|
6821
|
+
|
6822
|
+
def __init__(__self__, *,
|
6823
|
+
auto_resize_config: Optional['outputs.VmwareAdminClusterAddonNodeAutoResizeConfig'] = None):
|
6824
|
+
"""
|
6825
|
+
:param 'VmwareAdminClusterAddonNodeAutoResizeConfigArgs' auto_resize_config: Specifies auto resize config.
|
6826
|
+
Structure is documented below.
|
6827
|
+
"""
|
6828
|
+
if auto_resize_config is not None:
|
6829
|
+
pulumi.set(__self__, "auto_resize_config", auto_resize_config)
|
6830
|
+
|
6831
|
+
@property
|
6832
|
+
@pulumi.getter(name="autoResizeConfig")
|
6833
|
+
def auto_resize_config(self) -> Optional['outputs.VmwareAdminClusterAddonNodeAutoResizeConfig']:
|
6834
|
+
"""
|
6835
|
+
Specifies auto resize config.
|
6836
|
+
Structure is documented below.
|
6837
|
+
"""
|
6838
|
+
return pulumi.get(self, "auto_resize_config")
|
6839
|
+
|
6840
|
+
|
6841
|
+
@pulumi.output_type
|
6842
|
+
class VmwareAdminClusterAddonNodeAutoResizeConfig(dict):
|
6843
|
+
def __init__(__self__, *,
|
6844
|
+
enabled: bool):
|
6845
|
+
"""
|
6846
|
+
:param bool enabled: Whether to enable controle plane node auto resizing.
|
6847
|
+
"""
|
6848
|
+
pulumi.set(__self__, "enabled", enabled)
|
6849
|
+
|
6850
|
+
@property
|
6851
|
+
@pulumi.getter
|
6852
|
+
def enabled(self) -> bool:
|
6853
|
+
"""
|
6854
|
+
Whether to enable controle plane node auto resizing.
|
6855
|
+
"""
|
6856
|
+
return pulumi.get(self, "enabled")
|
6857
|
+
|
6858
|
+
|
6859
|
+
@pulumi.output_type
|
6860
|
+
class VmwareAdminClusterAntiAffinityGroups(dict):
|
6861
|
+
@staticmethod
|
6862
|
+
def __key_warning(key: str):
|
6863
|
+
suggest = None
|
6864
|
+
if key == "aagConfigDisabled":
|
6865
|
+
suggest = "aag_config_disabled"
|
6866
|
+
|
6867
|
+
if suggest:
|
6868
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterAntiAffinityGroups. Access the value via the '{suggest}' property getter instead.")
|
6869
|
+
|
6870
|
+
def __getitem__(self, key: str) -> Any:
|
6871
|
+
VmwareAdminClusterAntiAffinityGroups.__key_warning(key)
|
6872
|
+
return super().__getitem__(key)
|
6873
|
+
|
6874
|
+
def get(self, key: str, default = None) -> Any:
|
6875
|
+
VmwareAdminClusterAntiAffinityGroups.__key_warning(key)
|
6876
|
+
return super().get(key, default)
|
6877
|
+
|
6878
|
+
def __init__(__self__, *,
|
6879
|
+
aag_config_disabled: bool):
|
6880
|
+
"""
|
6881
|
+
:param bool aag_config_disabled: Spread nodes across at least three physical hosts (requires at least three
|
6882
|
+
hosts).
|
6883
|
+
Enabled by default.
|
6884
|
+
"""
|
6885
|
+
pulumi.set(__self__, "aag_config_disabled", aag_config_disabled)
|
6886
|
+
|
6887
|
+
@property
|
6888
|
+
@pulumi.getter(name="aagConfigDisabled")
|
6889
|
+
def aag_config_disabled(self) -> bool:
|
6890
|
+
"""
|
6891
|
+
Spread nodes across at least three physical hosts (requires at least three
|
6892
|
+
hosts).
|
6893
|
+
Enabled by default.
|
6894
|
+
"""
|
6895
|
+
return pulumi.get(self, "aag_config_disabled")
|
6896
|
+
|
6897
|
+
|
6898
|
+
@pulumi.output_type
|
6899
|
+
class VmwareAdminClusterAuthorization(dict):
|
6900
|
+
@staticmethod
|
6901
|
+
def __key_warning(key: str):
|
6902
|
+
suggest = None
|
6903
|
+
if key == "viewerUsers":
|
6904
|
+
suggest = "viewer_users"
|
6905
|
+
|
6906
|
+
if suggest:
|
6907
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterAuthorization. Access the value via the '{suggest}' property getter instead.")
|
6908
|
+
|
6909
|
+
def __getitem__(self, key: str) -> Any:
|
6910
|
+
VmwareAdminClusterAuthorization.__key_warning(key)
|
6911
|
+
return super().__getitem__(key)
|
6912
|
+
|
6913
|
+
def get(self, key: str, default = None) -> Any:
|
6914
|
+
VmwareAdminClusterAuthorization.__key_warning(key)
|
6915
|
+
return super().get(key, default)
|
6916
|
+
|
6917
|
+
def __init__(__self__, *,
|
6918
|
+
viewer_users: Optional[Sequence['outputs.VmwareAdminClusterAuthorizationViewerUser']] = None):
|
6919
|
+
"""
|
6920
|
+
:param Sequence['VmwareAdminClusterAuthorizationViewerUserArgs'] viewer_users: Users that will be granted the cluster-admin role on the cluster, providing
|
6921
|
+
full access to the cluster.
|
6922
|
+
Structure is documented below.
|
6923
|
+
"""
|
6924
|
+
if viewer_users is not None:
|
6925
|
+
pulumi.set(__self__, "viewer_users", viewer_users)
|
6926
|
+
|
6927
|
+
@property
|
6928
|
+
@pulumi.getter(name="viewerUsers")
|
6929
|
+
def viewer_users(self) -> Optional[Sequence['outputs.VmwareAdminClusterAuthorizationViewerUser']]:
|
6930
|
+
"""
|
6931
|
+
Users that will be granted the cluster-admin role on the cluster, providing
|
6932
|
+
full access to the cluster.
|
6933
|
+
Structure is documented below.
|
6934
|
+
"""
|
6935
|
+
return pulumi.get(self, "viewer_users")
|
6936
|
+
|
6937
|
+
|
6938
|
+
@pulumi.output_type
|
6939
|
+
class VmwareAdminClusterAuthorizationViewerUser(dict):
|
6940
|
+
def __init__(__self__, *,
|
6941
|
+
username: str):
|
6942
|
+
"""
|
6943
|
+
:param str username: The name of the user, e.g. `my-gcp-id@gmail.com`.
|
6944
|
+
"""
|
6945
|
+
pulumi.set(__self__, "username", username)
|
6946
|
+
|
6947
|
+
@property
|
6948
|
+
@pulumi.getter
|
6949
|
+
def username(self) -> str:
|
6950
|
+
"""
|
6951
|
+
The name of the user, e.g. `my-gcp-id@gmail.com`.
|
6952
|
+
"""
|
6953
|
+
return pulumi.get(self, "username")
|
6954
|
+
|
6955
|
+
|
6956
|
+
@pulumi.output_type
|
6957
|
+
class VmwareAdminClusterAutoRepairConfig(dict):
|
6958
|
+
def __init__(__self__, *,
|
6959
|
+
enabled: bool):
|
6960
|
+
"""
|
6961
|
+
:param bool enabled: Whether auto repair is enabled.
|
6962
|
+
"""
|
6963
|
+
pulumi.set(__self__, "enabled", enabled)
|
6964
|
+
|
6965
|
+
@property
|
6966
|
+
@pulumi.getter
|
6967
|
+
def enabled(self) -> bool:
|
6968
|
+
"""
|
6969
|
+
Whether auto repair is enabled.
|
6970
|
+
"""
|
6971
|
+
return pulumi.get(self, "enabled")
|
6972
|
+
|
6973
|
+
|
6974
|
+
@pulumi.output_type
|
6975
|
+
class VmwareAdminClusterControlPlaneNode(dict):
|
6976
|
+
def __init__(__self__, *,
|
6977
|
+
cpus: Optional[int] = None,
|
6978
|
+
memory: Optional[int] = None,
|
6979
|
+
replicas: Optional[int] = None):
|
6980
|
+
"""
|
6981
|
+
:param int cpus: The number of vCPUs for the control-plane node of the admin cluster.
|
6982
|
+
:param int memory: The number of mebibytes of memory for the control-plane node of the admin cluster.
|
6983
|
+
:param int replicas: The number of control plane nodes for this VMware admin cluster.
|
6984
|
+
"""
|
6985
|
+
if cpus is not None:
|
6986
|
+
pulumi.set(__self__, "cpus", cpus)
|
6987
|
+
if memory is not None:
|
6988
|
+
pulumi.set(__self__, "memory", memory)
|
6989
|
+
if replicas is not None:
|
6990
|
+
pulumi.set(__self__, "replicas", replicas)
|
6991
|
+
|
6992
|
+
@property
|
6993
|
+
@pulumi.getter
|
6994
|
+
def cpus(self) -> Optional[int]:
|
6995
|
+
"""
|
6996
|
+
The number of vCPUs for the control-plane node of the admin cluster.
|
6997
|
+
"""
|
6998
|
+
return pulumi.get(self, "cpus")
|
6999
|
+
|
7000
|
+
@property
|
7001
|
+
@pulumi.getter
|
7002
|
+
def memory(self) -> Optional[int]:
|
7003
|
+
"""
|
7004
|
+
The number of mebibytes of memory for the control-plane node of the admin cluster.
|
7005
|
+
"""
|
7006
|
+
return pulumi.get(self, "memory")
|
7007
|
+
|
7008
|
+
@property
|
7009
|
+
@pulumi.getter
|
7010
|
+
def replicas(self) -> Optional[int]:
|
7011
|
+
"""
|
7012
|
+
The number of control plane nodes for this VMware admin cluster.
|
7013
|
+
"""
|
7014
|
+
return pulumi.get(self, "replicas")
|
7015
|
+
|
7016
|
+
|
7017
|
+
@pulumi.output_type
|
7018
|
+
class VmwareAdminClusterFleet(dict):
|
7019
|
+
def __init__(__self__, *,
|
7020
|
+
membership: Optional[str] = None):
|
7021
|
+
"""
|
7022
|
+
:param str membership: (Output)
|
7023
|
+
The name of the managed Fleet Membership resource associated to this cluster.
|
7024
|
+
Membership names are formatted as
|
7025
|
+
`projects/<project-number>/locations/<location>/memberships/<cluster-id>`.
|
7026
|
+
"""
|
7027
|
+
if membership is not None:
|
7028
|
+
pulumi.set(__self__, "membership", membership)
|
7029
|
+
|
7030
|
+
@property
|
7031
|
+
@pulumi.getter
|
7032
|
+
def membership(self) -> Optional[str]:
|
7033
|
+
"""
|
7034
|
+
(Output)
|
7035
|
+
The name of the managed Fleet Membership resource associated to this cluster.
|
7036
|
+
Membership names are formatted as
|
7037
|
+
`projects/<project-number>/locations/<location>/memberships/<cluster-id>`.
|
7038
|
+
"""
|
7039
|
+
return pulumi.get(self, "membership")
|
7040
|
+
|
7041
|
+
|
7042
|
+
@pulumi.output_type
|
7043
|
+
class VmwareAdminClusterLoadBalancer(dict):
|
7044
|
+
@staticmethod
|
7045
|
+
def __key_warning(key: str):
|
7046
|
+
suggest = None
|
7047
|
+
if key == "vipConfig":
|
7048
|
+
suggest = "vip_config"
|
7049
|
+
elif key == "f5Config":
|
7050
|
+
suggest = "f5_config"
|
7051
|
+
elif key == "manualLbConfig":
|
7052
|
+
suggest = "manual_lb_config"
|
7053
|
+
elif key == "metalLbConfig":
|
7054
|
+
suggest = "metal_lb_config"
|
7055
|
+
|
7056
|
+
if suggest:
|
7057
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterLoadBalancer. Access the value via the '{suggest}' property getter instead.")
|
7058
|
+
|
7059
|
+
def __getitem__(self, key: str) -> Any:
|
7060
|
+
VmwareAdminClusterLoadBalancer.__key_warning(key)
|
7061
|
+
return super().__getitem__(key)
|
7062
|
+
|
7063
|
+
def get(self, key: str, default = None) -> Any:
|
7064
|
+
VmwareAdminClusterLoadBalancer.__key_warning(key)
|
7065
|
+
return super().get(key, default)
|
7066
|
+
|
7067
|
+
def __init__(__self__, *,
|
7068
|
+
vip_config: 'outputs.VmwareAdminClusterLoadBalancerVipConfig',
|
7069
|
+
f5_config: Optional['outputs.VmwareAdminClusterLoadBalancerF5Config'] = None,
|
7070
|
+
manual_lb_config: Optional['outputs.VmwareAdminClusterLoadBalancerManualLbConfig'] = None,
|
7071
|
+
metal_lb_config: Optional['outputs.VmwareAdminClusterLoadBalancerMetalLbConfig'] = None):
|
7072
|
+
"""
|
7073
|
+
:param 'VmwareAdminClusterLoadBalancerVipConfigArgs' vip_config: Specified the VMware Load Balancer Config
|
7074
|
+
Structure is documented below.
|
7075
|
+
:param 'VmwareAdminClusterLoadBalancerF5ConfigArgs' f5_config: Configuration for F5 Big IP typed load balancers.
|
7076
|
+
Structure is documented below.
|
7077
|
+
:param 'VmwareAdminClusterLoadBalancerManualLbConfigArgs' manual_lb_config: Manually configured load balancers.
|
7078
|
+
Structure is documented below.
|
7079
|
+
:param 'VmwareAdminClusterLoadBalancerMetalLbConfigArgs' metal_lb_config: Metal LB load balancers.
|
7080
|
+
Structure is documented below.
|
7081
|
+
"""
|
7082
|
+
pulumi.set(__self__, "vip_config", vip_config)
|
7083
|
+
if f5_config is not None:
|
7084
|
+
pulumi.set(__self__, "f5_config", f5_config)
|
7085
|
+
if manual_lb_config is not None:
|
7086
|
+
pulumi.set(__self__, "manual_lb_config", manual_lb_config)
|
7087
|
+
if metal_lb_config is not None:
|
7088
|
+
pulumi.set(__self__, "metal_lb_config", metal_lb_config)
|
7089
|
+
|
7090
|
+
@property
|
7091
|
+
@pulumi.getter(name="vipConfig")
|
7092
|
+
def vip_config(self) -> 'outputs.VmwareAdminClusterLoadBalancerVipConfig':
|
7093
|
+
"""
|
7094
|
+
Specified the VMware Load Balancer Config
|
7095
|
+
Structure is documented below.
|
7096
|
+
"""
|
7097
|
+
return pulumi.get(self, "vip_config")
|
7098
|
+
|
7099
|
+
@property
|
7100
|
+
@pulumi.getter(name="f5Config")
|
7101
|
+
def f5_config(self) -> Optional['outputs.VmwareAdminClusterLoadBalancerF5Config']:
|
7102
|
+
"""
|
7103
|
+
Configuration for F5 Big IP typed load balancers.
|
7104
|
+
Structure is documented below.
|
7105
|
+
"""
|
7106
|
+
return pulumi.get(self, "f5_config")
|
7107
|
+
|
7108
|
+
@property
|
7109
|
+
@pulumi.getter(name="manualLbConfig")
|
7110
|
+
def manual_lb_config(self) -> Optional['outputs.VmwareAdminClusterLoadBalancerManualLbConfig']:
|
7111
|
+
"""
|
7112
|
+
Manually configured load balancers.
|
7113
|
+
Structure is documented below.
|
7114
|
+
"""
|
7115
|
+
return pulumi.get(self, "manual_lb_config")
|
7116
|
+
|
7117
|
+
@property
|
7118
|
+
@pulumi.getter(name="metalLbConfig")
|
7119
|
+
def metal_lb_config(self) -> Optional['outputs.VmwareAdminClusterLoadBalancerMetalLbConfig']:
|
7120
|
+
"""
|
7121
|
+
Metal LB load balancers.
|
7122
|
+
Structure is documented below.
|
7123
|
+
"""
|
7124
|
+
return pulumi.get(self, "metal_lb_config")
|
7125
|
+
|
7126
|
+
|
7127
|
+
@pulumi.output_type
|
7128
|
+
class VmwareAdminClusterLoadBalancerF5Config(dict):
|
7129
|
+
@staticmethod
|
7130
|
+
def __key_warning(key: str):
|
7131
|
+
suggest = None
|
7132
|
+
if key == "snatPool":
|
7133
|
+
suggest = "snat_pool"
|
7134
|
+
|
7135
|
+
if suggest:
|
7136
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterLoadBalancerF5Config. Access the value via the '{suggest}' property getter instead.")
|
7137
|
+
|
7138
|
+
def __getitem__(self, key: str) -> Any:
|
7139
|
+
VmwareAdminClusterLoadBalancerF5Config.__key_warning(key)
|
7140
|
+
return super().__getitem__(key)
|
7141
|
+
|
7142
|
+
def get(self, key: str, default = None) -> Any:
|
7143
|
+
VmwareAdminClusterLoadBalancerF5Config.__key_warning(key)
|
7144
|
+
return super().get(key, default)
|
7145
|
+
|
7146
|
+
def __init__(__self__, *,
|
7147
|
+
address: Optional[str] = None,
|
7148
|
+
partition: Optional[str] = None,
|
7149
|
+
snat_pool: Optional[str] = None):
|
7150
|
+
"""
|
7151
|
+
:param str address: The load balancer's IP address.
|
7152
|
+
:param str partition: he preexisting partition to be used by the load balancer. T
|
7153
|
+
his partition is usually created for the admin cluster for example:
|
7154
|
+
'my-f5-admin-partition'.
|
7155
|
+
:param str snat_pool: The pool name. Only necessary, if using SNAT.
|
7156
|
+
"""
|
7157
|
+
if address is not None:
|
7158
|
+
pulumi.set(__self__, "address", address)
|
7159
|
+
if partition is not None:
|
7160
|
+
pulumi.set(__self__, "partition", partition)
|
7161
|
+
if snat_pool is not None:
|
7162
|
+
pulumi.set(__self__, "snat_pool", snat_pool)
|
7163
|
+
|
7164
|
+
@property
|
7165
|
+
@pulumi.getter
|
7166
|
+
def address(self) -> Optional[str]:
|
7167
|
+
"""
|
7168
|
+
The load balancer's IP address.
|
7169
|
+
"""
|
7170
|
+
return pulumi.get(self, "address")
|
7171
|
+
|
7172
|
+
@property
|
7173
|
+
@pulumi.getter
|
7174
|
+
def partition(self) -> Optional[str]:
|
7175
|
+
"""
|
7176
|
+
he preexisting partition to be used by the load balancer. T
|
7177
|
+
his partition is usually created for the admin cluster for example:
|
7178
|
+
'my-f5-admin-partition'.
|
7179
|
+
"""
|
7180
|
+
return pulumi.get(self, "partition")
|
7181
|
+
|
7182
|
+
@property
|
7183
|
+
@pulumi.getter(name="snatPool")
|
7184
|
+
def snat_pool(self) -> Optional[str]:
|
7185
|
+
"""
|
7186
|
+
The pool name. Only necessary, if using SNAT.
|
7187
|
+
"""
|
7188
|
+
return pulumi.get(self, "snat_pool")
|
7189
|
+
|
7190
|
+
|
7191
|
+
@pulumi.output_type
|
7192
|
+
class VmwareAdminClusterLoadBalancerManualLbConfig(dict):
|
7193
|
+
@staticmethod
|
7194
|
+
def __key_warning(key: str):
|
7195
|
+
suggest = None
|
7196
|
+
if key == "addonsNodePort":
|
7197
|
+
suggest = "addons_node_port"
|
7198
|
+
elif key == "controlPlaneNodePort":
|
7199
|
+
suggest = "control_plane_node_port"
|
7200
|
+
elif key == "ingressHttpNodePort":
|
7201
|
+
suggest = "ingress_http_node_port"
|
7202
|
+
elif key == "ingressHttpsNodePort":
|
7203
|
+
suggest = "ingress_https_node_port"
|
7204
|
+
elif key == "konnectivityServerNodePort":
|
7205
|
+
suggest = "konnectivity_server_node_port"
|
7206
|
+
|
7207
|
+
if suggest:
|
7208
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterLoadBalancerManualLbConfig. Access the value via the '{suggest}' property getter instead.")
|
7209
|
+
|
7210
|
+
def __getitem__(self, key: str) -> Any:
|
7211
|
+
VmwareAdminClusterLoadBalancerManualLbConfig.__key_warning(key)
|
7212
|
+
return super().__getitem__(key)
|
7213
|
+
|
7214
|
+
def get(self, key: str, default = None) -> Any:
|
7215
|
+
VmwareAdminClusterLoadBalancerManualLbConfig.__key_warning(key)
|
7216
|
+
return super().get(key, default)
|
7217
|
+
|
7218
|
+
def __init__(__self__, *,
|
7219
|
+
addons_node_port: Optional[int] = None,
|
7220
|
+
control_plane_node_port: Optional[int] = None,
|
7221
|
+
ingress_http_node_port: Optional[int] = None,
|
7222
|
+
ingress_https_node_port: Optional[int] = None,
|
7223
|
+
konnectivity_server_node_port: Optional[int] = None):
|
7224
|
+
"""
|
7225
|
+
:param int addons_node_port: NodePort for add-ons server in the admin cluster.
|
7226
|
+
:param int control_plane_node_port: NodePort for control plane service. The Kubernetes API server in the admin
|
7227
|
+
cluster is implemented as a Service of type NodePort (ex. 30968).
|
7228
|
+
:param int ingress_http_node_port: NodePort for ingress service's http. The ingress service in the admin
|
7229
|
+
cluster is implemented as a Service of type NodePort (ex. 32527).
|
7230
|
+
:param int ingress_https_node_port: NodePort for ingress service's https. The ingress service in the admin
|
7231
|
+
cluster is implemented as a Service of type NodePort (ex. 30139).
|
7232
|
+
:param int konnectivity_server_node_port: NodePort for konnectivity server service running as a sidecar in each
|
7233
|
+
kube-apiserver pod (ex. 30564).
|
7234
|
+
"""
|
7235
|
+
if addons_node_port is not None:
|
7236
|
+
pulumi.set(__self__, "addons_node_port", addons_node_port)
|
7237
|
+
if control_plane_node_port is not None:
|
7238
|
+
pulumi.set(__self__, "control_plane_node_port", control_plane_node_port)
|
7239
|
+
if ingress_http_node_port is not None:
|
7240
|
+
pulumi.set(__self__, "ingress_http_node_port", ingress_http_node_port)
|
7241
|
+
if ingress_https_node_port is not None:
|
7242
|
+
pulumi.set(__self__, "ingress_https_node_port", ingress_https_node_port)
|
7243
|
+
if konnectivity_server_node_port is not None:
|
7244
|
+
pulumi.set(__self__, "konnectivity_server_node_port", konnectivity_server_node_port)
|
7245
|
+
|
7246
|
+
@property
|
7247
|
+
@pulumi.getter(name="addonsNodePort")
|
7248
|
+
def addons_node_port(self) -> Optional[int]:
|
7249
|
+
"""
|
7250
|
+
NodePort for add-ons server in the admin cluster.
|
7251
|
+
"""
|
7252
|
+
return pulumi.get(self, "addons_node_port")
|
7253
|
+
|
7254
|
+
@property
|
7255
|
+
@pulumi.getter(name="controlPlaneNodePort")
|
7256
|
+
def control_plane_node_port(self) -> Optional[int]:
|
7257
|
+
"""
|
7258
|
+
NodePort for control plane service. The Kubernetes API server in the admin
|
7259
|
+
cluster is implemented as a Service of type NodePort (ex. 30968).
|
7260
|
+
"""
|
7261
|
+
return pulumi.get(self, "control_plane_node_port")
|
7262
|
+
|
7263
|
+
@property
|
7264
|
+
@pulumi.getter(name="ingressHttpNodePort")
|
7265
|
+
def ingress_http_node_port(self) -> Optional[int]:
|
7266
|
+
"""
|
7267
|
+
NodePort for ingress service's http. The ingress service in the admin
|
7268
|
+
cluster is implemented as a Service of type NodePort (ex. 32527).
|
7269
|
+
"""
|
7270
|
+
return pulumi.get(self, "ingress_http_node_port")
|
7271
|
+
|
7272
|
+
@property
|
7273
|
+
@pulumi.getter(name="ingressHttpsNodePort")
|
7274
|
+
def ingress_https_node_port(self) -> Optional[int]:
|
7275
|
+
"""
|
7276
|
+
NodePort for ingress service's https. The ingress service in the admin
|
7277
|
+
cluster is implemented as a Service of type NodePort (ex. 30139).
|
7278
|
+
"""
|
7279
|
+
return pulumi.get(self, "ingress_https_node_port")
|
7280
|
+
|
7281
|
+
@property
|
7282
|
+
@pulumi.getter(name="konnectivityServerNodePort")
|
7283
|
+
def konnectivity_server_node_port(self) -> Optional[int]:
|
7284
|
+
"""
|
7285
|
+
NodePort for konnectivity server service running as a sidecar in each
|
7286
|
+
kube-apiserver pod (ex. 30564).
|
7287
|
+
"""
|
7288
|
+
return pulumi.get(self, "konnectivity_server_node_port")
|
7289
|
+
|
7290
|
+
|
7291
|
+
@pulumi.output_type
|
7292
|
+
class VmwareAdminClusterLoadBalancerMetalLbConfig(dict):
|
7293
|
+
def __init__(__self__, *,
|
7294
|
+
enabled: Optional[bool] = None):
|
7295
|
+
"""
|
7296
|
+
:param bool enabled: Metal LB is enabled.
|
7297
|
+
"""
|
7298
|
+
if enabled is not None:
|
7299
|
+
pulumi.set(__self__, "enabled", enabled)
|
7300
|
+
|
7301
|
+
@property
|
7302
|
+
@pulumi.getter
|
7303
|
+
def enabled(self) -> Optional[bool]:
|
7304
|
+
"""
|
7305
|
+
Metal LB is enabled.
|
7306
|
+
"""
|
7307
|
+
return pulumi.get(self, "enabled")
|
7308
|
+
|
7309
|
+
|
7310
|
+
@pulumi.output_type
|
7311
|
+
class VmwareAdminClusterLoadBalancerVipConfig(dict):
|
7312
|
+
@staticmethod
|
7313
|
+
def __key_warning(key: str):
|
7314
|
+
suggest = None
|
7315
|
+
if key == "controlPlaneVip":
|
7316
|
+
suggest = "control_plane_vip"
|
7317
|
+
elif key == "addonsVip":
|
7318
|
+
suggest = "addons_vip"
|
7319
|
+
|
7320
|
+
if suggest:
|
7321
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterLoadBalancerVipConfig. Access the value via the '{suggest}' property getter instead.")
|
7322
|
+
|
7323
|
+
def __getitem__(self, key: str) -> Any:
|
7324
|
+
VmwareAdminClusterLoadBalancerVipConfig.__key_warning(key)
|
7325
|
+
return super().__getitem__(key)
|
7326
|
+
|
7327
|
+
def get(self, key: str, default = None) -> Any:
|
7328
|
+
VmwareAdminClusterLoadBalancerVipConfig.__key_warning(key)
|
7329
|
+
return super().get(key, default)
|
7330
|
+
|
7331
|
+
def __init__(__self__, *,
|
7332
|
+
control_plane_vip: str,
|
7333
|
+
addons_vip: Optional[str] = None):
|
7334
|
+
"""
|
7335
|
+
:param str control_plane_vip: The VIP which you previously set aside for the Kubernetes
|
7336
|
+
API of this VMware Admin Cluster.
|
7337
|
+
:param str addons_vip: The VIP to configure the load balancer for add-ons.
|
7338
|
+
|
7339
|
+
<a name="nested_f5_config"></a>The `f5_config` block supports:
|
7340
|
+
"""
|
7341
|
+
pulumi.set(__self__, "control_plane_vip", control_plane_vip)
|
7342
|
+
if addons_vip is not None:
|
7343
|
+
pulumi.set(__self__, "addons_vip", addons_vip)
|
7344
|
+
|
7345
|
+
@property
|
7346
|
+
@pulumi.getter(name="controlPlaneVip")
|
7347
|
+
def control_plane_vip(self) -> str:
|
7348
|
+
"""
|
7349
|
+
The VIP which you previously set aside for the Kubernetes
|
7350
|
+
API of this VMware Admin Cluster.
|
7351
|
+
"""
|
7352
|
+
return pulumi.get(self, "control_plane_vip")
|
7353
|
+
|
7354
|
+
@property
|
7355
|
+
@pulumi.getter(name="addonsVip")
|
7356
|
+
def addons_vip(self) -> Optional[str]:
|
7357
|
+
"""
|
7358
|
+
The VIP to configure the load balancer for add-ons.
|
7359
|
+
|
7360
|
+
<a name="nested_f5_config"></a>The `f5_config` block supports:
|
7361
|
+
"""
|
7362
|
+
return pulumi.get(self, "addons_vip")
|
7363
|
+
|
7364
|
+
|
7365
|
+
@pulumi.output_type
|
7366
|
+
class VmwareAdminClusterNetworkConfig(dict):
|
7367
|
+
@staticmethod
|
7368
|
+
def __key_warning(key: str):
|
7369
|
+
suggest = None
|
7370
|
+
if key == "podAddressCidrBlocks":
|
7371
|
+
suggest = "pod_address_cidr_blocks"
|
7372
|
+
elif key == "serviceAddressCidrBlocks":
|
7373
|
+
suggest = "service_address_cidr_blocks"
|
7374
|
+
elif key == "dhcpIpConfig":
|
7375
|
+
suggest = "dhcp_ip_config"
|
7376
|
+
elif key == "haControlPlaneConfig":
|
7377
|
+
suggest = "ha_control_plane_config"
|
7378
|
+
elif key == "hostConfig":
|
7379
|
+
suggest = "host_config"
|
7380
|
+
elif key == "staticIpConfig":
|
7381
|
+
suggest = "static_ip_config"
|
7382
|
+
elif key == "vcenterNetwork":
|
7383
|
+
suggest = "vcenter_network"
|
7384
|
+
|
7385
|
+
if suggest:
|
7386
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterNetworkConfig. Access the value via the '{suggest}' property getter instead.")
|
7387
|
+
|
7388
|
+
def __getitem__(self, key: str) -> Any:
|
7389
|
+
VmwareAdminClusterNetworkConfig.__key_warning(key)
|
7390
|
+
return super().__getitem__(key)
|
7391
|
+
|
7392
|
+
def get(self, key: str, default = None) -> Any:
|
7393
|
+
VmwareAdminClusterNetworkConfig.__key_warning(key)
|
7394
|
+
return super().get(key, default)
|
7395
|
+
|
7396
|
+
def __init__(__self__, *,
|
7397
|
+
pod_address_cidr_blocks: Sequence[str],
|
7398
|
+
service_address_cidr_blocks: Sequence[str],
|
7399
|
+
dhcp_ip_config: Optional['outputs.VmwareAdminClusterNetworkConfigDhcpIpConfig'] = None,
|
7400
|
+
ha_control_plane_config: Optional['outputs.VmwareAdminClusterNetworkConfigHaControlPlaneConfig'] = None,
|
7401
|
+
host_config: Optional['outputs.VmwareAdminClusterNetworkConfigHostConfig'] = None,
|
7402
|
+
static_ip_config: Optional['outputs.VmwareAdminClusterNetworkConfigStaticIpConfig'] = None,
|
7403
|
+
vcenter_network: Optional[str] = None):
|
7404
|
+
"""
|
7405
|
+
:param Sequence[str] pod_address_cidr_blocks: All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges.
|
7406
|
+
Only a single range is supported. This field cannot be changed after creation.
|
7407
|
+
:param Sequence[str] service_address_cidr_blocks: All services in the cluster are assigned an RFC1918 IPv4 address
|
7408
|
+
from these ranges. Only a single range is supported.. This field
|
7409
|
+
cannot be changed after creation.
|
7410
|
+
:param 'VmwareAdminClusterNetworkConfigDhcpIpConfigArgs' dhcp_ip_config: Configuration settings for a DHCP IP configuration.
|
7411
|
+
Structure is documented below.
|
7412
|
+
:param 'VmwareAdminClusterNetworkConfigHaControlPlaneConfigArgs' ha_control_plane_config: Configuration for HA admin cluster control plane.
|
7413
|
+
Structure is documented below.
|
7414
|
+
:param 'VmwareAdminClusterNetworkConfigHostConfigArgs' host_config: Represents common network settings irrespective of the host's IP address.
|
7415
|
+
Structure is documented below.
|
7416
|
+
:param 'VmwareAdminClusterNetworkConfigStaticIpConfigArgs' static_ip_config: Configuration settings for a static IP configuration.
|
7417
|
+
Structure is documented below.
|
7418
|
+
:param str vcenter_network: vcenter_network specifies vCenter network name.
|
7419
|
+
"""
|
7420
|
+
pulumi.set(__self__, "pod_address_cidr_blocks", pod_address_cidr_blocks)
|
7421
|
+
pulumi.set(__self__, "service_address_cidr_blocks", service_address_cidr_blocks)
|
7422
|
+
if dhcp_ip_config is not None:
|
7423
|
+
pulumi.set(__self__, "dhcp_ip_config", dhcp_ip_config)
|
7424
|
+
if ha_control_plane_config is not None:
|
7425
|
+
pulumi.set(__self__, "ha_control_plane_config", ha_control_plane_config)
|
7426
|
+
if host_config is not None:
|
7427
|
+
pulumi.set(__self__, "host_config", host_config)
|
7428
|
+
if static_ip_config is not None:
|
7429
|
+
pulumi.set(__self__, "static_ip_config", static_ip_config)
|
7430
|
+
if vcenter_network is not None:
|
7431
|
+
pulumi.set(__self__, "vcenter_network", vcenter_network)
|
7432
|
+
|
7433
|
+
@property
|
7434
|
+
@pulumi.getter(name="podAddressCidrBlocks")
|
7435
|
+
def pod_address_cidr_blocks(self) -> Sequence[str]:
|
7436
|
+
"""
|
7437
|
+
All pods in the cluster are assigned an RFC1918 IPv4 address from these ranges.
|
7438
|
+
Only a single range is supported. This field cannot be changed after creation.
|
7439
|
+
"""
|
7440
|
+
return pulumi.get(self, "pod_address_cidr_blocks")
|
7441
|
+
|
7442
|
+
@property
|
7443
|
+
@pulumi.getter(name="serviceAddressCidrBlocks")
|
7444
|
+
def service_address_cidr_blocks(self) -> Sequence[str]:
|
7445
|
+
"""
|
7446
|
+
All services in the cluster are assigned an RFC1918 IPv4 address
|
7447
|
+
from these ranges. Only a single range is supported.. This field
|
7448
|
+
cannot be changed after creation.
|
7449
|
+
"""
|
7450
|
+
return pulumi.get(self, "service_address_cidr_blocks")
|
7451
|
+
|
7452
|
+
@property
|
7453
|
+
@pulumi.getter(name="dhcpIpConfig")
|
7454
|
+
def dhcp_ip_config(self) -> Optional['outputs.VmwareAdminClusterNetworkConfigDhcpIpConfig']:
|
7455
|
+
"""
|
7456
|
+
Configuration settings for a DHCP IP configuration.
|
7457
|
+
Structure is documented below.
|
7458
|
+
"""
|
7459
|
+
return pulumi.get(self, "dhcp_ip_config")
|
7460
|
+
|
7461
|
+
@property
|
7462
|
+
@pulumi.getter(name="haControlPlaneConfig")
|
7463
|
+
def ha_control_plane_config(self) -> Optional['outputs.VmwareAdminClusterNetworkConfigHaControlPlaneConfig']:
|
7464
|
+
"""
|
7465
|
+
Configuration for HA admin cluster control plane.
|
7466
|
+
Structure is documented below.
|
7467
|
+
"""
|
7468
|
+
return pulumi.get(self, "ha_control_plane_config")
|
7469
|
+
|
7470
|
+
@property
|
7471
|
+
@pulumi.getter(name="hostConfig")
|
7472
|
+
def host_config(self) -> Optional['outputs.VmwareAdminClusterNetworkConfigHostConfig']:
|
7473
|
+
"""
|
7474
|
+
Represents common network settings irrespective of the host's IP address.
|
7475
|
+
Structure is documented below.
|
7476
|
+
"""
|
7477
|
+
return pulumi.get(self, "host_config")
|
7478
|
+
|
7479
|
+
@property
|
7480
|
+
@pulumi.getter(name="staticIpConfig")
|
7481
|
+
def static_ip_config(self) -> Optional['outputs.VmwareAdminClusterNetworkConfigStaticIpConfig']:
|
7482
|
+
"""
|
7483
|
+
Configuration settings for a static IP configuration.
|
7484
|
+
Structure is documented below.
|
7485
|
+
"""
|
7486
|
+
return pulumi.get(self, "static_ip_config")
|
7487
|
+
|
7488
|
+
@property
|
7489
|
+
@pulumi.getter(name="vcenterNetwork")
|
7490
|
+
def vcenter_network(self) -> Optional[str]:
|
7491
|
+
"""
|
7492
|
+
vcenter_network specifies vCenter network name.
|
7493
|
+
"""
|
7494
|
+
return pulumi.get(self, "vcenter_network")
|
7495
|
+
|
7496
|
+
|
7497
|
+
@pulumi.output_type
|
7498
|
+
class VmwareAdminClusterNetworkConfigDhcpIpConfig(dict):
|
7499
|
+
def __init__(__self__, *,
|
7500
|
+
enabled: bool):
|
7501
|
+
"""
|
7502
|
+
:param bool enabled: enabled is a flag to mark if DHCP IP allocation is
|
7503
|
+
used for VMware admin clusters.
|
7504
|
+
"""
|
7505
|
+
pulumi.set(__self__, "enabled", enabled)
|
7506
|
+
|
7507
|
+
@property
|
7508
|
+
@pulumi.getter
|
7509
|
+
def enabled(self) -> bool:
|
7510
|
+
"""
|
7511
|
+
enabled is a flag to mark if DHCP IP allocation is
|
7512
|
+
used for VMware admin clusters.
|
7513
|
+
"""
|
7514
|
+
return pulumi.get(self, "enabled")
|
7515
|
+
|
7516
|
+
|
7517
|
+
@pulumi.output_type
|
7518
|
+
class VmwareAdminClusterNetworkConfigHaControlPlaneConfig(dict):
|
7519
|
+
@staticmethod
|
7520
|
+
def __key_warning(key: str):
|
7521
|
+
suggest = None
|
7522
|
+
if key == "controlPlaneIpBlock":
|
7523
|
+
suggest = "control_plane_ip_block"
|
7524
|
+
|
7525
|
+
if suggest:
|
7526
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterNetworkConfigHaControlPlaneConfig. Access the value via the '{suggest}' property getter instead.")
|
7527
|
+
|
7528
|
+
def __getitem__(self, key: str) -> Any:
|
7529
|
+
VmwareAdminClusterNetworkConfigHaControlPlaneConfig.__key_warning(key)
|
7530
|
+
return super().__getitem__(key)
|
7531
|
+
|
7532
|
+
def get(self, key: str, default = None) -> Any:
|
7533
|
+
VmwareAdminClusterNetworkConfigHaControlPlaneConfig.__key_warning(key)
|
7534
|
+
return super().get(key, default)
|
7535
|
+
|
7536
|
+
def __init__(__self__, *,
|
7537
|
+
control_plane_ip_block: Optional['outputs.VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlock'] = None):
|
7538
|
+
"""
|
7539
|
+
:param 'VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlockArgs' control_plane_ip_block: Static IP addresses for the control plane nodes.
|
7540
|
+
Structure is documented below.
|
7541
|
+
"""
|
7542
|
+
if control_plane_ip_block is not None:
|
7543
|
+
pulumi.set(__self__, "control_plane_ip_block", control_plane_ip_block)
|
7544
|
+
|
7545
|
+
@property
|
7546
|
+
@pulumi.getter(name="controlPlaneIpBlock")
|
7547
|
+
def control_plane_ip_block(self) -> Optional['outputs.VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlock']:
|
7548
|
+
"""
|
7549
|
+
Static IP addresses for the control plane nodes.
|
7550
|
+
Structure is documented below.
|
7551
|
+
"""
|
7552
|
+
return pulumi.get(self, "control_plane_ip_block")
|
7553
|
+
|
7554
|
+
|
7555
|
+
@pulumi.output_type
|
7556
|
+
class VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlock(dict):
|
7557
|
+
def __init__(__self__, *,
|
7558
|
+
gateway: str,
|
7559
|
+
ips: Sequence['outputs.VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlockIp'],
|
7560
|
+
netmask: str):
|
7561
|
+
"""
|
7562
|
+
:param str gateway: The network gateway used by the VMware Admin Cluster.
|
7563
|
+
:param Sequence['VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlockIpArgs'] ips: The node's network configurations used by the VMware Admin Cluster.
|
7564
|
+
Structure is documented below.
|
7565
|
+
:param str netmask: The netmask used by the VMware Admin Cluster.
|
7566
|
+
"""
|
7567
|
+
pulumi.set(__self__, "gateway", gateway)
|
7568
|
+
pulumi.set(__self__, "ips", ips)
|
7569
|
+
pulumi.set(__self__, "netmask", netmask)
|
7570
|
+
|
7571
|
+
@property
|
7572
|
+
@pulumi.getter
|
7573
|
+
def gateway(self) -> str:
|
7574
|
+
"""
|
7575
|
+
The network gateway used by the VMware Admin Cluster.
|
7576
|
+
"""
|
7577
|
+
return pulumi.get(self, "gateway")
|
7578
|
+
|
7579
|
+
@property
|
7580
|
+
@pulumi.getter
|
7581
|
+
def ips(self) -> Sequence['outputs.VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlockIp']:
|
7582
|
+
"""
|
7583
|
+
The node's network configurations used by the VMware Admin Cluster.
|
7584
|
+
Structure is documented below.
|
7585
|
+
"""
|
7586
|
+
return pulumi.get(self, "ips")
|
7587
|
+
|
7588
|
+
@property
|
7589
|
+
@pulumi.getter
|
7590
|
+
def netmask(self) -> str:
|
7591
|
+
"""
|
7592
|
+
The netmask used by the VMware Admin Cluster.
|
7593
|
+
"""
|
7594
|
+
return pulumi.get(self, "netmask")
|
7595
|
+
|
7596
|
+
|
7597
|
+
@pulumi.output_type
|
7598
|
+
class VmwareAdminClusterNetworkConfigHaControlPlaneConfigControlPlaneIpBlockIp(dict):
|
7599
|
+
def __init__(__self__, *,
|
7600
|
+
ip: str,
|
7601
|
+
hostname: Optional[str] = None):
|
7602
|
+
"""
|
7603
|
+
:param str ip: IP could be an IP address (like 1.2.3.4) or a CIDR (like 1.2.3.0/24).
|
7604
|
+
:param str hostname: Hostname of the machine. VM's name will be used if this field is empty.
|
7605
|
+
|
7606
|
+
- - -
|
7607
|
+
"""
|
7608
|
+
pulumi.set(__self__, "ip", ip)
|
7609
|
+
if hostname is not None:
|
7610
|
+
pulumi.set(__self__, "hostname", hostname)
|
7611
|
+
|
7612
|
+
@property
|
7613
|
+
@pulumi.getter
|
7614
|
+
def ip(self) -> str:
|
7615
|
+
"""
|
7616
|
+
IP could be an IP address (like 1.2.3.4) or a CIDR (like 1.2.3.0/24).
|
7617
|
+
"""
|
7618
|
+
return pulumi.get(self, "ip")
|
7619
|
+
|
7620
|
+
@property
|
7621
|
+
@pulumi.getter
|
7622
|
+
def hostname(self) -> Optional[str]:
|
7623
|
+
"""
|
7624
|
+
Hostname of the machine. VM's name will be used if this field is empty.
|
7625
|
+
|
7626
|
+
- - -
|
7627
|
+
"""
|
7628
|
+
return pulumi.get(self, "hostname")
|
7629
|
+
|
7630
|
+
|
7631
|
+
@pulumi.output_type
|
7632
|
+
class VmwareAdminClusterNetworkConfigHostConfig(dict):
|
7633
|
+
@staticmethod
|
7634
|
+
def __key_warning(key: str):
|
7635
|
+
suggest = None
|
7636
|
+
if key == "dnsSearchDomains":
|
7637
|
+
suggest = "dns_search_domains"
|
7638
|
+
elif key == "dnsServers":
|
7639
|
+
suggest = "dns_servers"
|
7640
|
+
elif key == "ntpServers":
|
7641
|
+
suggest = "ntp_servers"
|
7642
|
+
|
7643
|
+
if suggest:
|
7644
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterNetworkConfigHostConfig. Access the value via the '{suggest}' property getter instead.")
|
7645
|
+
|
7646
|
+
def __getitem__(self, key: str) -> Any:
|
7647
|
+
VmwareAdminClusterNetworkConfigHostConfig.__key_warning(key)
|
7648
|
+
return super().__getitem__(key)
|
7649
|
+
|
7650
|
+
def get(self, key: str, default = None) -> Any:
|
7651
|
+
VmwareAdminClusterNetworkConfigHostConfig.__key_warning(key)
|
7652
|
+
return super().get(key, default)
|
7653
|
+
|
7654
|
+
def __init__(__self__, *,
|
7655
|
+
dns_search_domains: Optional[Sequence[str]] = None,
|
7656
|
+
dns_servers: Optional[Sequence[str]] = None,
|
7657
|
+
ntp_servers: Optional[Sequence[str]] = None):
|
7658
|
+
"""
|
7659
|
+
:param Sequence[str] dns_search_domains: DNS search domains.
|
7660
|
+
:param Sequence[str] dns_servers: DNS servers.
|
7661
|
+
:param Sequence[str] ntp_servers: NTP servers.
|
7662
|
+
"""
|
7663
|
+
if dns_search_domains is not None:
|
7664
|
+
pulumi.set(__self__, "dns_search_domains", dns_search_domains)
|
7665
|
+
if dns_servers is not None:
|
7666
|
+
pulumi.set(__self__, "dns_servers", dns_servers)
|
7667
|
+
if ntp_servers is not None:
|
7668
|
+
pulumi.set(__self__, "ntp_servers", ntp_servers)
|
7669
|
+
|
7670
|
+
@property
|
7671
|
+
@pulumi.getter(name="dnsSearchDomains")
|
7672
|
+
def dns_search_domains(self) -> Optional[Sequence[str]]:
|
7673
|
+
"""
|
7674
|
+
DNS search domains.
|
7675
|
+
"""
|
7676
|
+
return pulumi.get(self, "dns_search_domains")
|
7677
|
+
|
7678
|
+
@property
|
7679
|
+
@pulumi.getter(name="dnsServers")
|
7680
|
+
def dns_servers(self) -> Optional[Sequence[str]]:
|
7681
|
+
"""
|
7682
|
+
DNS servers.
|
7683
|
+
"""
|
7684
|
+
return pulumi.get(self, "dns_servers")
|
7685
|
+
|
7686
|
+
@property
|
7687
|
+
@pulumi.getter(name="ntpServers")
|
7688
|
+
def ntp_servers(self) -> Optional[Sequence[str]]:
|
7689
|
+
"""
|
7690
|
+
NTP servers.
|
7691
|
+
"""
|
7692
|
+
return pulumi.get(self, "ntp_servers")
|
7693
|
+
|
7694
|
+
|
7695
|
+
@pulumi.output_type
|
7696
|
+
class VmwareAdminClusterNetworkConfigStaticIpConfig(dict):
|
7697
|
+
@staticmethod
|
7698
|
+
def __key_warning(key: str):
|
7699
|
+
suggest = None
|
7700
|
+
if key == "ipBlocks":
|
7701
|
+
suggest = "ip_blocks"
|
7702
|
+
|
7703
|
+
if suggest:
|
7704
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterNetworkConfigStaticIpConfig. Access the value via the '{suggest}' property getter instead.")
|
7705
|
+
|
7706
|
+
def __getitem__(self, key: str) -> Any:
|
7707
|
+
VmwareAdminClusterNetworkConfigStaticIpConfig.__key_warning(key)
|
7708
|
+
return super().__getitem__(key)
|
7709
|
+
|
7710
|
+
def get(self, key: str, default = None) -> Any:
|
7711
|
+
VmwareAdminClusterNetworkConfigStaticIpConfig.__key_warning(key)
|
7712
|
+
return super().get(key, default)
|
7713
|
+
|
7714
|
+
def __init__(__self__, *,
|
7715
|
+
ip_blocks: Optional[Sequence['outputs.VmwareAdminClusterNetworkConfigStaticIpConfigIpBlock']] = None):
|
7716
|
+
"""
|
7717
|
+
:param Sequence['VmwareAdminClusterNetworkConfigStaticIpConfigIpBlockArgs'] ip_blocks: Represents the configuration values for static IP allocation to nodes.
|
7718
|
+
Structure is documented below.
|
7719
|
+
"""
|
7720
|
+
if ip_blocks is not None:
|
7721
|
+
pulumi.set(__self__, "ip_blocks", ip_blocks)
|
7722
|
+
|
7723
|
+
@property
|
7724
|
+
@pulumi.getter(name="ipBlocks")
|
7725
|
+
def ip_blocks(self) -> Optional[Sequence['outputs.VmwareAdminClusterNetworkConfigStaticIpConfigIpBlock']]:
|
7726
|
+
"""
|
7727
|
+
Represents the configuration values for static IP allocation to nodes.
|
7728
|
+
Structure is documented below.
|
7729
|
+
"""
|
7730
|
+
return pulumi.get(self, "ip_blocks")
|
7731
|
+
|
7732
|
+
|
7733
|
+
@pulumi.output_type
|
7734
|
+
class VmwareAdminClusterNetworkConfigStaticIpConfigIpBlock(dict):
|
7735
|
+
def __init__(__self__, *,
|
7736
|
+
gateway: str,
|
7737
|
+
ips: Sequence['outputs.VmwareAdminClusterNetworkConfigStaticIpConfigIpBlockIp'],
|
7738
|
+
netmask: str):
|
7739
|
+
"""
|
7740
|
+
:param str gateway: The network gateway used by the VMware Admin Cluster.
|
7741
|
+
:param Sequence['VmwareAdminClusterNetworkConfigStaticIpConfigIpBlockIpArgs'] ips: The node's network configurations used by the VMware Admin Cluster.
|
7742
|
+
Structure is documented below.
|
7743
|
+
:param str netmask: The netmask used by the VMware Admin Cluster.
|
7744
|
+
"""
|
7745
|
+
pulumi.set(__self__, "gateway", gateway)
|
7746
|
+
pulumi.set(__self__, "ips", ips)
|
7747
|
+
pulumi.set(__self__, "netmask", netmask)
|
7748
|
+
|
7749
|
+
@property
|
7750
|
+
@pulumi.getter
|
7751
|
+
def gateway(self) -> str:
|
7752
|
+
"""
|
7753
|
+
The network gateway used by the VMware Admin Cluster.
|
7754
|
+
"""
|
7755
|
+
return pulumi.get(self, "gateway")
|
7756
|
+
|
7757
|
+
@property
|
7758
|
+
@pulumi.getter
|
7759
|
+
def ips(self) -> Sequence['outputs.VmwareAdminClusterNetworkConfigStaticIpConfigIpBlockIp']:
|
7760
|
+
"""
|
7761
|
+
The node's network configurations used by the VMware Admin Cluster.
|
7762
|
+
Structure is documented below.
|
7763
|
+
"""
|
7764
|
+
return pulumi.get(self, "ips")
|
7765
|
+
|
7766
|
+
@property
|
7767
|
+
@pulumi.getter
|
7768
|
+
def netmask(self) -> str:
|
7769
|
+
"""
|
7770
|
+
The netmask used by the VMware Admin Cluster.
|
7771
|
+
"""
|
7772
|
+
return pulumi.get(self, "netmask")
|
7773
|
+
|
7774
|
+
|
7775
|
+
@pulumi.output_type
|
7776
|
+
class VmwareAdminClusterNetworkConfigStaticIpConfigIpBlockIp(dict):
|
7777
|
+
def __init__(__self__, *,
|
7778
|
+
ip: str,
|
7779
|
+
hostname: Optional[str] = None):
|
7780
|
+
"""
|
7781
|
+
:param str ip: IP could be an IP address (like 1.2.3.4) or a CIDR (like 1.2.3.0/24).
|
7782
|
+
:param str hostname: Hostname of the machine. VM's name will be used if this field is empty.
|
7783
|
+
|
7784
|
+
- - -
|
7785
|
+
"""
|
7786
|
+
pulumi.set(__self__, "ip", ip)
|
7787
|
+
if hostname is not None:
|
7788
|
+
pulumi.set(__self__, "hostname", hostname)
|
7789
|
+
|
7790
|
+
@property
|
7791
|
+
@pulumi.getter
|
7792
|
+
def ip(self) -> str:
|
7793
|
+
"""
|
7794
|
+
IP could be an IP address (like 1.2.3.4) or a CIDR (like 1.2.3.0/24).
|
7795
|
+
"""
|
7796
|
+
return pulumi.get(self, "ip")
|
7797
|
+
|
7798
|
+
@property
|
7799
|
+
@pulumi.getter
|
7800
|
+
def hostname(self) -> Optional[str]:
|
7801
|
+
"""
|
7802
|
+
Hostname of the machine. VM's name will be used if this field is empty.
|
7803
|
+
|
7804
|
+
- - -
|
7805
|
+
"""
|
7806
|
+
return pulumi.get(self, "hostname")
|
7807
|
+
|
7808
|
+
|
7809
|
+
@pulumi.output_type
|
7810
|
+
class VmwareAdminClusterPlatformConfig(dict):
|
7811
|
+
@staticmethod
|
7812
|
+
def __key_warning(key: str):
|
7813
|
+
suggest = None
|
7814
|
+
if key == "platformVersion":
|
7815
|
+
suggest = "platform_version"
|
7816
|
+
elif key == "requiredPlatformVersion":
|
7817
|
+
suggest = "required_platform_version"
|
7818
|
+
|
7819
|
+
if suggest:
|
7820
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterPlatformConfig. Access the value via the '{suggest}' property getter instead.")
|
7821
|
+
|
7822
|
+
def __getitem__(self, key: str) -> Any:
|
7823
|
+
VmwareAdminClusterPlatformConfig.__key_warning(key)
|
7824
|
+
return super().__getitem__(key)
|
7825
|
+
|
7826
|
+
def get(self, key: str, default = None) -> Any:
|
7827
|
+
VmwareAdminClusterPlatformConfig.__key_warning(key)
|
7828
|
+
return super().get(key, default)
|
7829
|
+
|
7830
|
+
def __init__(__self__, *,
|
7831
|
+
bundles: Optional[Sequence['outputs.VmwareAdminClusterPlatformConfigBundle']] = None,
|
7832
|
+
platform_version: Optional[str] = None,
|
7833
|
+
required_platform_version: Optional[str] = None,
|
7834
|
+
statuses: Optional[Sequence['outputs.VmwareAdminClusterPlatformConfigStatus']] = None):
|
7835
|
+
"""
|
7836
|
+
:param Sequence['VmwareAdminClusterPlatformConfigBundleArgs'] bundles: (Output)
|
7837
|
+
The list of bundles installed in the admin cluster.
|
7838
|
+
Structure is documented below.
|
7839
|
+
:param str platform_version: (Output)
|
7840
|
+
The platform version e.g. 1.13.2.
|
7841
|
+
:param str required_platform_version: The required platform version e.g. 1.13.1.
|
7842
|
+
If the current platform version is lower than the target version,
|
7843
|
+
the platform version will be updated to the target version.
|
7844
|
+
If the target version is not installed in the platform
|
7845
|
+
(bundle versions), download the target version bundle.
|
7846
|
+
:param Sequence['VmwareAdminClusterPlatformConfigStatusArgs'] statuses: (Output)
|
7847
|
+
ResourceStatus representing detailed cluster state.
|
7848
|
+
Structure is documented below.
|
7849
|
+
|
7850
|
+
|
7851
|
+
<a name="nested_status"></a>The `status` block contains:
|
7852
|
+
"""
|
7853
|
+
if bundles is not None:
|
7854
|
+
pulumi.set(__self__, "bundles", bundles)
|
7855
|
+
if platform_version is not None:
|
7856
|
+
pulumi.set(__self__, "platform_version", platform_version)
|
7857
|
+
if required_platform_version is not None:
|
7858
|
+
pulumi.set(__self__, "required_platform_version", required_platform_version)
|
7859
|
+
if statuses is not None:
|
7860
|
+
pulumi.set(__self__, "statuses", statuses)
|
7861
|
+
|
7862
|
+
@property
|
7863
|
+
@pulumi.getter
|
7864
|
+
def bundles(self) -> Optional[Sequence['outputs.VmwareAdminClusterPlatformConfigBundle']]:
|
7865
|
+
"""
|
7866
|
+
(Output)
|
7867
|
+
The list of bundles installed in the admin cluster.
|
7868
|
+
Structure is documented below.
|
7869
|
+
"""
|
7870
|
+
return pulumi.get(self, "bundles")
|
7871
|
+
|
7872
|
+
@property
|
7873
|
+
@pulumi.getter(name="platformVersion")
|
7874
|
+
def platform_version(self) -> Optional[str]:
|
7875
|
+
"""
|
7876
|
+
(Output)
|
7877
|
+
The platform version e.g. 1.13.2.
|
7878
|
+
"""
|
7879
|
+
return pulumi.get(self, "platform_version")
|
7880
|
+
|
7881
|
+
@property
|
7882
|
+
@pulumi.getter(name="requiredPlatformVersion")
|
7883
|
+
def required_platform_version(self) -> Optional[str]:
|
7884
|
+
"""
|
7885
|
+
The required platform version e.g. 1.13.1.
|
7886
|
+
If the current platform version is lower than the target version,
|
7887
|
+
the platform version will be updated to the target version.
|
7888
|
+
If the target version is not installed in the platform
|
7889
|
+
(bundle versions), download the target version bundle.
|
7890
|
+
"""
|
7891
|
+
return pulumi.get(self, "required_platform_version")
|
7892
|
+
|
7893
|
+
@property
|
7894
|
+
@pulumi.getter
|
7895
|
+
def statuses(self) -> Optional[Sequence['outputs.VmwareAdminClusterPlatformConfigStatus']]:
|
7896
|
+
"""
|
7897
|
+
(Output)
|
7898
|
+
ResourceStatus representing detailed cluster state.
|
7899
|
+
Structure is documented below.
|
7900
|
+
|
7901
|
+
|
7902
|
+
<a name="nested_status"></a>The `status` block contains:
|
7903
|
+
"""
|
7904
|
+
return pulumi.get(self, "statuses")
|
7905
|
+
|
7906
|
+
|
7907
|
+
@pulumi.output_type
|
7908
|
+
class VmwareAdminClusterPlatformConfigBundle(dict):
|
7909
|
+
def __init__(__self__, *,
|
7910
|
+
statuses: Optional[Sequence['outputs.VmwareAdminClusterPlatformConfigBundleStatus']] = None,
|
7911
|
+
version: Optional[str] = None):
|
7912
|
+
"""
|
7913
|
+
:param Sequence['VmwareAdminClusterPlatformConfigBundleStatusArgs'] statuses: ResourceStatus representing detailed cluster state.
|
7914
|
+
Structure is documented below.
|
7915
|
+
:param str version: The version of the bundle.
|
7916
|
+
"""
|
7917
|
+
if statuses is not None:
|
7918
|
+
pulumi.set(__self__, "statuses", statuses)
|
7919
|
+
if version is not None:
|
7920
|
+
pulumi.set(__self__, "version", version)
|
7921
|
+
|
7922
|
+
@property
|
7923
|
+
@pulumi.getter
|
7924
|
+
def statuses(self) -> Optional[Sequence['outputs.VmwareAdminClusterPlatformConfigBundleStatus']]:
|
7925
|
+
"""
|
7926
|
+
ResourceStatus representing detailed cluster state.
|
7927
|
+
Structure is documented below.
|
7928
|
+
"""
|
7929
|
+
return pulumi.get(self, "statuses")
|
7930
|
+
|
7931
|
+
@property
|
7932
|
+
@pulumi.getter
|
7933
|
+
def version(self) -> Optional[str]:
|
7934
|
+
"""
|
7935
|
+
The version of the bundle.
|
7936
|
+
"""
|
7937
|
+
return pulumi.get(self, "version")
|
7938
|
+
|
7939
|
+
|
7940
|
+
@pulumi.output_type
|
7941
|
+
class VmwareAdminClusterPlatformConfigBundleStatus(dict):
|
7942
|
+
@staticmethod
|
7943
|
+
def __key_warning(key: str):
|
7944
|
+
suggest = None
|
7945
|
+
if key == "errorMessage":
|
7946
|
+
suggest = "error_message"
|
7947
|
+
|
7948
|
+
if suggest:
|
7949
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterPlatformConfigBundleStatus. Access the value via the '{suggest}' property getter instead.")
|
7950
|
+
|
7951
|
+
def __getitem__(self, key: str) -> Any:
|
7952
|
+
VmwareAdminClusterPlatformConfigBundleStatus.__key_warning(key)
|
7953
|
+
return super().__getitem__(key)
|
7954
|
+
|
7955
|
+
def get(self, key: str, default = None) -> Any:
|
7956
|
+
VmwareAdminClusterPlatformConfigBundleStatus.__key_warning(key)
|
7957
|
+
return super().get(key, default)
|
7958
|
+
|
7959
|
+
def __init__(__self__, *,
|
7960
|
+
conditions: Optional[Sequence['outputs.VmwareAdminClusterPlatformConfigBundleStatusCondition']] = None,
|
7961
|
+
error_message: Optional[str] = None):
|
7962
|
+
"""
|
7963
|
+
:param Sequence['VmwareAdminClusterPlatformConfigBundleStatusConditionArgs'] conditions: (Output)
|
7964
|
+
ResourceConditions provide a standard mechanism for higher-level status reporting from admin cluster controller.
|
7965
|
+
Structure is documented below.
|
7966
|
+
:param str error_message: (Output)
|
7967
|
+
Human-friendly representation of the error message from the admin cluster
|
7968
|
+
controller. The error message can be temporary as the admin cluster
|
7969
|
+
controller creates a cluster or node pool. If the error message persists
|
7970
|
+
for a longer period of time, it can be used to surface error message to
|
7971
|
+
indicate real problems requiring user intervention.
|
7972
|
+
"""
|
7973
|
+
if conditions is not None:
|
7974
|
+
pulumi.set(__self__, "conditions", conditions)
|
7975
|
+
if error_message is not None:
|
7976
|
+
pulumi.set(__self__, "error_message", error_message)
|
7977
|
+
|
7978
|
+
@property
|
7979
|
+
@pulumi.getter
|
7980
|
+
def conditions(self) -> Optional[Sequence['outputs.VmwareAdminClusterPlatformConfigBundleStatusCondition']]:
|
7981
|
+
"""
|
7982
|
+
(Output)
|
7983
|
+
ResourceConditions provide a standard mechanism for higher-level status reporting from admin cluster controller.
|
7984
|
+
Structure is documented below.
|
7985
|
+
"""
|
7986
|
+
return pulumi.get(self, "conditions")
|
7987
|
+
|
7988
|
+
@property
|
7989
|
+
@pulumi.getter(name="errorMessage")
|
7990
|
+
def error_message(self) -> Optional[str]:
|
7991
|
+
"""
|
7992
|
+
(Output)
|
7993
|
+
Human-friendly representation of the error message from the admin cluster
|
7994
|
+
controller. The error message can be temporary as the admin cluster
|
7995
|
+
controller creates a cluster or node pool. If the error message persists
|
7996
|
+
for a longer period of time, it can be used to surface error message to
|
7997
|
+
indicate real problems requiring user intervention.
|
7998
|
+
"""
|
7999
|
+
return pulumi.get(self, "error_message")
|
8000
|
+
|
8001
|
+
|
8002
|
+
@pulumi.output_type
|
8003
|
+
class VmwareAdminClusterPlatformConfigBundleStatusCondition(dict):
|
8004
|
+
@staticmethod
|
8005
|
+
def __key_warning(key: str):
|
8006
|
+
suggest = None
|
8007
|
+
if key == "lastTransitionTime":
|
8008
|
+
suggest = "last_transition_time"
|
8009
|
+
|
8010
|
+
if suggest:
|
8011
|
+
pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterPlatformConfigBundleStatusCondition. Access the value via the '{suggest}' property getter instead.")
|
8012
|
+
|
8013
|
+
def __getitem__(self, key: str) -> Any:
|
8014
|
+
VmwareAdminClusterPlatformConfigBundleStatusCondition.__key_warning(key)
|
8015
|
+
return super().__getitem__(key)
|
8016
|
+
|
8017
|
+
def get(self, key: str, default = None) -> Any:
|
8018
|
+
VmwareAdminClusterPlatformConfigBundleStatusCondition.__key_warning(key)
|
8019
|
+
return super().get(key, default)
|
8020
|
+
|
8021
|
+
def __init__(__self__, *,
|
8022
|
+
last_transition_time: Optional[str] = None,
|
8023
|
+
message: Optional[str] = None,
|
8024
|
+
reason: Optional[str] = None,
|
8025
|
+
state: Optional[str] = None,
|
8026
|
+
type: Optional[str] = None):
|
8027
|
+
"""
|
8028
|
+
:param str last_transition_time: (Output)
|
8029
|
+
Last time the condition transit from one status to another.
|
8030
|
+
:param str message: (Output)
|
8031
|
+
Human-readable message indicating details about last transition.
|
8032
|
+
:param str reason: (Output)
|
8033
|
+
Machine-readable message indicating details about last transition.
|
8034
|
+
:param str state: (Output)
|
8035
|
+
The lifecycle state of the condition.
|
8036
|
+
:param str type: (Output)
|
8037
|
+
Type of the condition.
|
8038
|
+
(e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
|
8039
|
+
"""
|
8040
|
+
if last_transition_time is not None:
|
8041
|
+
pulumi.set(__self__, "last_transition_time", last_transition_time)
|
8042
|
+
if message is not None:
|
8043
|
+
pulumi.set(__self__, "message", message)
|
8044
|
+
if reason is not None:
|
8045
|
+
pulumi.set(__self__, "reason", reason)
|
8046
|
+
if state is not None:
|
8047
|
+
pulumi.set(__self__, "state", state)
|
8048
|
+
if type is not None:
|
8049
|
+
pulumi.set(__self__, "type", type)
|
8050
|
+
|
8051
|
+
@property
|
8052
|
+
@pulumi.getter(name="lastTransitionTime")
|
8053
|
+
def last_transition_time(self) -> Optional[str]:
|
8054
|
+
"""
|
8055
|
+
(Output)
|
8056
|
+
Last time the condition transit from one status to another.
|
8057
|
+
"""
|
8058
|
+
return pulumi.get(self, "last_transition_time")
|
8059
|
+
|
8060
|
+
@property
|
8061
|
+
@pulumi.getter
|
8062
|
+
def message(self) -> Optional[str]:
|
8063
|
+
"""
|
8064
|
+
(Output)
|
8065
|
+
Human-readable message indicating details about last transition.
|
8066
|
+
"""
|
8067
|
+
return pulumi.get(self, "message")
|
8068
|
+
|
8069
|
+
@property
|
8070
|
+
@pulumi.getter
|
8071
|
+
def reason(self) -> Optional[str]:
|
8072
|
+
"""
|
8073
|
+
(Output)
|
8074
|
+
Machine-readable message indicating details about last transition.
|
8075
|
+
"""
|
8076
|
+
return pulumi.get(self, "reason")
|
8077
|
+
|
8078
|
+
@property
|
8079
|
+
@pulumi.getter
|
8080
|
+
def state(self) -> Optional[str]:
|
8081
|
+
"""
|
8082
|
+
(Output)
|
8083
|
+
The lifecycle state of the condition.
|
8084
|
+
"""
|
8085
|
+
return pulumi.get(self, "state")
|
8086
|
+
|
8087
|
+
@property
|
8088
|
+
@pulumi.getter
|
8089
|
+
def type(self) -> Optional[str]:
|
8090
|
+
"""
|
8091
|
+
(Output)
|
8092
|
+
Type of the condition.
|
8093
|
+
(e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
|
8094
|
+
"""
|
8095
|
+
return pulumi.get(self, "type")
|
8096
|
+
|
8097
|
+
|
8098
|
+
@pulumi.output_type
class VmwareAdminClusterPlatformConfigStatus(dict):
    """Platform-level status reported for a VMware admin cluster."""

    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property names on this type.
        suggest = {
            "errorMessage": "error_message",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterPlatformConfigStatus. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        VmwareAdminClusterPlatformConfigStatus.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        VmwareAdminClusterPlatformConfigStatus.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 conditions: Optional[Sequence['outputs.VmwareAdminClusterPlatformConfigStatusCondition']] = None,
                 error_message: Optional[str] = None):
        """
        :param Sequence['VmwareAdminClusterPlatformConfigStatusConditionArgs'] conditions: (Output)
               ResourceConditions carry higher-level status reported by the
               admin cluster controller.
               Structure is documented below.
        :param str error_message: (Output)
               Human-friendly error message from the admin cluster controller.
               It can be transient while the controller is creating a cluster
               or node pool; a message that persists indicates a real problem
               requiring user intervention.
        """
        # Record only the values the caller actually supplied.
        for prop, value in (("conditions", conditions),
                            ("error_message", error_message)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def conditions(self) -> Optional[Sequence['outputs.VmwareAdminClusterPlatformConfigStatusCondition']]:
        """
        (Output)
        ResourceConditions carry higher-level status reported by the admin
        cluster controller.
        Structure is documented below.
        """
        return pulumi.get(self, "conditions")

    @property
    @pulumi.getter(name="errorMessage")
    def error_message(self) -> Optional[str]:
        """
        (Output)
        Human-friendly error message from the admin cluster controller. It can
        be transient while the controller is creating a cluster or node pool;
        a message that persists indicates a real problem requiring user
        intervention.
        """
        return pulumi.get(self, "error_message")
@pulumi.output_type
class VmwareAdminClusterPlatformConfigStatusCondition(dict):
    """A single status condition on the platform config of a VMware admin cluster."""

    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property names on this type.
        suggest = {
            "lastTransitionTime": "last_transition_time",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterPlatformConfigStatusCondition. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        VmwareAdminClusterPlatformConfigStatusCondition.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        VmwareAdminClusterPlatformConfigStatusCondition.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 last_transition_time: Optional[str] = None,
                 message: Optional[str] = None,
                 reason: Optional[str] = None,
                 state: Optional[str] = None,
                 type: Optional[str] = None):
        """
        :param str last_transition_time: (Output)
               Last time the condition transitioned from one status to another.
        :param str message: (Output)
               Human-readable message with details about the last transition.
        :param str reason: (Output)
               Machine-readable message with details about the last transition.
        :param str state: (Output)
               The lifecycle state of the condition.
        :param str type: (Output)
               Type of the condition.
               (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
        """
        # Record only the values the caller actually supplied.
        for prop, value in (("last_transition_time", last_transition_time),
                            ("message", message),
                            ("reason", reason),
                            ("state", state),
                            ("type", type)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="lastTransitionTime")
    def last_transition_time(self) -> Optional[str]:
        """
        (Output)
        Last time the condition transitioned from one status to another.
        """
        return pulumi.get(self, "last_transition_time")

    @property
    @pulumi.getter
    def message(self) -> Optional[str]:
        """
        (Output)
        Human-readable message with details about the last transition.
        """
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def reason(self) -> Optional[str]:
        """
        (Output)
        Machine-readable message with details about the last transition.
        """
        return pulumi.get(self, "reason")

    @property
    @pulumi.getter
    def state(self) -> Optional[str]:
        """
        (Output)
        The lifecycle state of the condition.
        """
        return pulumi.get(self, "state")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        (Output)
        Type of the condition.
        (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
        """
        return pulumi.get(self, "type")
@pulumi.output_type
class VmwareAdminClusterStatus(dict):
    """Top-level status reported for a VMware admin cluster."""

    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property names on this type.
        suggest = {
            "errorMessage": "error_message",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterStatus. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        VmwareAdminClusterStatus.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        VmwareAdminClusterStatus.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 conditions: Optional[Sequence['outputs.VmwareAdminClusterStatusCondition']] = None,
                 error_message: Optional[str] = None):
        """
        :param Sequence['VmwareAdminClusterStatusConditionArgs'] conditions: (Output)
               ResourceConditions carry higher-level status reported by the
               admin cluster controller.
               Structure is documented below.
        :param str error_message: (Output)
               Human-friendly error message from the admin cluster controller.
               It can be transient while the controller is creating a cluster
               or node pool; a message that persists indicates a real problem
               requiring user intervention.
        """
        # Record only the values the caller actually supplied.
        for prop, value in (("conditions", conditions),
                            ("error_message", error_message)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def conditions(self) -> Optional[Sequence['outputs.VmwareAdminClusterStatusCondition']]:
        """
        (Output)
        ResourceConditions carry higher-level status reported by the admin
        cluster controller.
        Structure is documented below.
        """
        return pulumi.get(self, "conditions")

    @property
    @pulumi.getter(name="errorMessage")
    def error_message(self) -> Optional[str]:
        """
        (Output)
        Human-friendly error message from the admin cluster controller. It can
        be transient while the controller is creating a cluster or node pool;
        a message that persists indicates a real problem requiring user
        intervention.
        """
        return pulumi.get(self, "error_message")
@pulumi.output_type
class VmwareAdminClusterStatusCondition(dict):
    """A single status condition reported on a VMware admin cluster."""

    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property names on this type.
        suggest = {
            "lastTransitionTime": "last_transition_time",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterStatusCondition. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        VmwareAdminClusterStatusCondition.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        VmwareAdminClusterStatusCondition.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 last_transition_time: Optional[str] = None,
                 message: Optional[str] = None,
                 reason: Optional[str] = None,
                 state: Optional[str] = None,
                 type: Optional[str] = None):
        """
        :param str last_transition_time: (Output)
               Last time the condition transitioned from one status to another.
        :param str message: (Output)
               Human-readable message with details about the last transition.
        :param str reason: (Output)
               Machine-readable message with details about the last transition.
        :param str state: (Output)
               The lifecycle state of the condition.
        :param str type: (Output)
               Type of the condition.
               (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
        """
        # Record only the values the caller actually supplied.
        for prop, value in (("last_transition_time", last_transition_time),
                            ("message", message),
                            ("reason", reason),
                            ("state", state),
                            ("type", type)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="lastTransitionTime")
    def last_transition_time(self) -> Optional[str]:
        """
        (Output)
        Last time the condition transitioned from one status to another.
        """
        return pulumi.get(self, "last_transition_time")

    @property
    @pulumi.getter
    def message(self) -> Optional[str]:
        """
        (Output)
        Human-readable message with details about the last transition.
        """
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def reason(self) -> Optional[str]:
        """
        (Output)
        Machine-readable message with details about the last transition.
        """
        return pulumi.get(self, "reason")

    @property
    @pulumi.getter
    def state(self) -> Optional[str]:
        """
        (Output)
        The lifecycle state of the condition.
        """
        return pulumi.get(self, "state")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        (Output)
        Type of the condition.
        (e.g., ClusterRunning, NodePoolRunning or ServerSidePreflightReady)
        """
        return pulumi.get(self, "type")
@pulumi.output_type
class VmwareAdminClusterVcenter(dict):
    """vCenter connection and placement settings for a VMware admin cluster."""

    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to the snake_case property names on this type.
        suggest = {
            "caCertData": "ca_cert_data",
            "dataDisk": "data_disk",
            "resourcePool": "resource_pool",
            "storagePolicyName": "storage_policy_name",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in VmwareAdminClusterVcenter. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        VmwareAdminClusterVcenter.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        VmwareAdminClusterVcenter.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 address: Optional[str] = None,
                 ca_cert_data: Optional[str] = None,
                 cluster: Optional[str] = None,
                 data_disk: Optional[str] = None,
                 datacenter: Optional[str] = None,
                 datastore: Optional[str] = None,
                 folder: Optional[str] = None,
                 resource_pool: Optional[str] = None,
                 storage_policy_name: Optional[str] = None):
        """
        :param str address: The vCenter IP address.
        :param str ca_cert_data: Contains the vCenter CA certificate public key for SSL verification.
        :param str cluster: The name of the vCenter cluster for the admin cluster.
        :param str data_disk: The name of the virtual machine disk (VMDK) for the admin cluster.
        :param str datacenter: The name of the vCenter datacenter for the admin cluster.
        :param str datastore: The name of the vCenter datastore for the admin cluster.
        :param str folder: The name of the vCenter folder for the admin cluster.
        :param str resource_pool: The name of the vCenter resource pool for the admin cluster.
        :param str storage_policy_name: The name of the vCenter storage policy for the user cluster.
        """
        # Record only the values the caller actually supplied.
        for prop, value in (("address", address),
                            ("ca_cert_data", ca_cert_data),
                            ("cluster", cluster),
                            ("data_disk", data_disk),
                            ("datacenter", datacenter),
                            ("datastore", datastore),
                            ("folder", folder),
                            ("resource_pool", resource_pool),
                            ("storage_policy_name", storage_policy_name)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def address(self) -> Optional[str]:
        """
        The vCenter IP address.
        """
        return pulumi.get(self, "address")

    @property
    @pulumi.getter(name="caCertData")
    def ca_cert_data(self) -> Optional[str]:
        """
        Contains the vCenter CA certificate public key for SSL verification.
        """
        return pulumi.get(self, "ca_cert_data")

    @property
    @pulumi.getter
    def cluster(self) -> Optional[str]:
        """
        The name of the vCenter cluster for the admin cluster.
        """
        return pulumi.get(self, "cluster")

    @property
    @pulumi.getter(name="dataDisk")
    def data_disk(self) -> Optional[str]:
        """
        The name of the virtual machine disk (VMDK) for the admin cluster.
        """
        return pulumi.get(self, "data_disk")

    @property
    @pulumi.getter
    def datacenter(self) -> Optional[str]:
        """
        The name of the vCenter datacenter for the admin cluster.
        """
        return pulumi.get(self, "datacenter")

    @property
    @pulumi.getter
    def datastore(self) -> Optional[str]:
        """
        The name of the vCenter datastore for the admin cluster.
        """
        return pulumi.get(self, "datastore")

    @property
    @pulumi.getter
    def folder(self) -> Optional[str]:
        """
        The name of the vCenter folder for the admin cluster.
        """
        return pulumi.get(self, "folder")

    @property
    @pulumi.getter(name="resourcePool")
    def resource_pool(self) -> Optional[str]:
        """
        The name of the vCenter resource pool for the admin cluster.
        """
        return pulumi.get(self, "resource_pool")

    @property
    @pulumi.getter(name="storagePolicyName")
    def storage_policy_name(self) -> Optional[str]:
        """
        The name of the vCenter storage policy for the user cluster.
        """
        return pulumi.get(self, "storage_policy_name")