pulumi_gcp-7.8.0a1706805960-py3-none-any.whl → pulumi_gcp-7.8.0a1706829616-py3-none-any.whl
- pulumi_gcp/_utilities.py +6 -2
- pulumi_gcp/alloydb/outputs.py +16 -0
- pulumi_gcp/artifactregistry/outputs.py +204 -0
- pulumi_gcp/assuredworkloads/_inputs.py +34 -0
- pulumi_gcp/assuredworkloads/outputs.py +34 -0
- pulumi_gcp/backupdisasterrecovery/outputs.py +20 -0
- pulumi_gcp/beyondcorp/outputs.py +54 -0
- pulumi_gcp/bigquery/_inputs.py +18 -0
- pulumi_gcp/bigquery/outputs.py +172 -0
- pulumi_gcp/bigtable/_inputs.py +4 -0
- pulumi_gcp/bigtable/outputs.py +4 -0
- pulumi_gcp/certificateauthority/outputs.py +378 -0
- pulumi_gcp/certificatemanager/outputs.py +32 -0
- pulumi_gcp/cloudbuild/outputs.py +1009 -3
- pulumi_gcp/cloudbuildv2/_inputs.py +14 -0
- pulumi_gcp/cloudbuildv2/outputs.py +14 -0
- pulumi_gcp/clouddeploy/_inputs.py +36 -0
- pulumi_gcp/clouddeploy/outputs.py +36 -0
- pulumi_gcp/cloudfunctions/_inputs.py +4 -0
- pulumi_gcp/cloudfunctions/outputs.py +54 -0
- pulumi_gcp/cloudfunctionsv2/outputs.py +302 -0
- pulumi_gcp/cloudidentity/outputs.py +54 -0
- pulumi_gcp/cloudrun/outputs.py +704 -0
- pulumi_gcp/cloudrunv2/outputs.py +929 -5
- pulumi_gcp/composer/_inputs.py +430 -0
- pulumi_gcp/composer/outputs.py +860 -0
- pulumi_gcp/compute/_inputs.py +924 -0
- pulumi_gcp/compute/outputs.py +4091 -1
- pulumi_gcp/container/_inputs.py +542 -0
- pulumi_gcp/container/outputs.py +2137 -3
- pulumi_gcp/dataplex/_inputs.py +66 -0
- pulumi_gcp/dataplex/outputs.py +66 -0
- pulumi_gcp/dataproc/_inputs.py +174 -0
- pulumi_gcp/dataproc/outputs.py +174 -0
- pulumi_gcp/dns/_inputs.py +34 -0
- pulumi_gcp/dns/outputs.py +34 -0
- pulumi_gcp/filestore/outputs.py +88 -0
- pulumi_gcp/firebaserules/_inputs.py +6 -0
- pulumi_gcp/firebaserules/outputs.py +6 -0
- pulumi_gcp/folder/_inputs.py +4 -0
- pulumi_gcp/folder/iam_audit_config.py +2 -2
- pulumi_gcp/folder/iam_member.py +2 -2
- pulumi_gcp/folder/iam_policy.py +2 -2
- pulumi_gcp/folder/outputs.py +54 -0
- pulumi_gcp/gkehub/_inputs.py +8 -0
- pulumi_gcp/gkehub/outputs.py +8 -0
- pulumi_gcp/iam/outputs.py +92 -0
- pulumi_gcp/kms/outputs.py +16 -0
- pulumi_gcp/logging/_inputs.py +108 -0
- pulumi_gcp/logging/outputs.py +108 -0
- pulumi_gcp/monitoring/outputs.py +14 -0
- pulumi_gcp/networkconnectivity/_inputs.py +6 -0
- pulumi_gcp/networkconnectivity/outputs.py +6 -0
- pulumi_gcp/organizations/_inputs.py +4 -0
- pulumi_gcp/organizations/iam_audit_config.py +2 -2
- pulumi_gcp/organizations/iam_member.py +2 -2
- pulumi_gcp/organizations/iam_policy.py +2 -2
- pulumi_gcp/organizations/outputs.py +4 -0
- pulumi_gcp/projects/_inputs.py +4 -0
- pulumi_gcp/projects/iam_audit_config.py +2 -2
- pulumi_gcp/projects/iam_binding.py +2 -2
- pulumi_gcp/projects/iam_member.py +2 -2
- pulumi_gcp/projects/iam_policy.py +2 -2
- pulumi_gcp/projects/outputs.py +54 -0
- pulumi_gcp/pubsub/outputs.py +282 -0
- pulumi_gcp/redis/outputs.py +204 -0
- pulumi_gcp/secretmanager/outputs.py +88 -0
- pulumi_gcp/sourcerepo/outputs.py +20 -0
- pulumi_gcp/spanner/outputs.py +68 -0
- pulumi_gcp/sql/_inputs.py +50 -0
- pulumi_gcp/sql/outputs.py +904 -0
- pulumi_gcp/storage/outputs.py +188 -0
- pulumi_gcp/vertex/outputs.py +124 -0
- pulumi_gcp/vmwareengine/outputs.py +208 -0
- pulumi_gcp/vpcaccess/outputs.py +4 -0
- {pulumi_gcp-7.8.0a1706805960.dist-info → pulumi_gcp-7.8.0a1706829616.dist-info}/METADATA +2 -1
- {pulumi_gcp-7.8.0a1706805960.dist-info → pulumi_gcp-7.8.0a1706829616.dist-info}/RECORD +79 -79
- {pulumi_gcp-7.8.0a1706805960.dist-info → pulumi_gcp-7.8.0a1706829616.dist-info}/WHEEL +0 -0
- {pulumi_gcp-7.8.0a1706805960.dist-info → pulumi_gcp-7.8.0a1706829616.dist-info}/top_level.txt +0 -0
pulumi_gcp/container/_inputs.py
CHANGED
@@ -1482,6 +1482,11 @@ class AwsClusterWorkloadIdentityConfigArgs:
                  identity_provider: Optional[pulumi.Input[str]] = None,
                  issuer_uri: Optional[pulumi.Input[str]] = None,
                  workload_pool: Optional[pulumi.Input[str]] = None):
+        """
+        :param pulumi.Input[str] identity_provider: The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.
+        :param pulumi.Input[str] issuer_uri: The OIDC issuer URL for this cluster.
+        :param pulumi.Input[str] workload_pool: The Workload Identity Pool associated to the cluster.
+        """
         if identity_provider is not None:
             pulumi.set(__self__, "identity_provider", identity_provider)
         if issuer_uri is not None:
@@ -1492,6 +1497,9 @@ class AwsClusterWorkloadIdentityConfigArgs:
     @property
     @pulumi.getter(name="identityProvider")
     def identity_provider(self) -> Optional[pulumi.Input[str]]:
+        """
+        The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.
+        """
         return pulumi.get(self, "identity_provider")
 
     @identity_provider.setter
@@ -1501,6 +1509,9 @@ class AwsClusterWorkloadIdentityConfigArgs:
     @property
     @pulumi.getter(name="issuerUri")
     def issuer_uri(self) -> Optional[pulumi.Input[str]]:
+        """
+        The OIDC issuer URL for this cluster.
+        """
         return pulumi.get(self, "issuer_uri")
 
     @issuer_uri.setter
@@ -1510,6 +1521,9 @@ class AwsClusterWorkloadIdentityConfigArgs:
     @property
     @pulumi.getter(name="workloadPool")
     def workload_pool(self) -> Optional[pulumi.Input[str]]:
+        """
+        The Workload Identity Pool associated to the cluster.
+        """
         return pulumi.get(self, "workload_pool")
 
     @workload_pool.setter
@@ -2791,6 +2805,11 @@ class AzureClusterWorkloadIdentityConfigArgs:
                  identity_provider: Optional[pulumi.Input[str]] = None,
                  issuer_uri: Optional[pulumi.Input[str]] = None,
                  workload_pool: Optional[pulumi.Input[str]] = None):
+        """
+        :param pulumi.Input[str] identity_provider: The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.
+        :param pulumi.Input[str] issuer_uri: The OIDC issuer URL for this cluster.
+        :param pulumi.Input[str] workload_pool: The Workload Identity Pool associated to the cluster.
+        """
         if identity_provider is not None:
             pulumi.set(__self__, "identity_provider", identity_provider)
         if issuer_uri is not None:
@@ -2801,6 +2820,9 @@ class AzureClusterWorkloadIdentityConfigArgs:
     @property
     @pulumi.getter(name="identityProvider")
     def identity_provider(self) -> Optional[pulumi.Input[str]]:
+        """
+        The ID of the OIDC Identity Provider (IdP) associated to the Workload Identity Pool.
+        """
         return pulumi.get(self, "identity_provider")
 
     @identity_provider.setter
@@ -2810,6 +2832,9 @@ class AzureClusterWorkloadIdentityConfigArgs:
     @property
     @pulumi.getter(name="issuerUri")
     def issuer_uri(self) -> Optional[pulumi.Input[str]]:
+        """
+        The OIDC issuer URL for this cluster.
+        """
         return pulumi.get(self, "issuer_uri")
 
     @issuer_uri.setter
@@ -2819,6 +2844,9 @@ class AzureClusterWorkloadIdentityConfigArgs:
     @property
     @pulumi.getter(name="workloadPool")
     def workload_pool(self) -> Optional[pulumi.Input[str]]:
+        """
+        The Workload Identity Pool associated to the cluster.
+        """
         return pulumi.get(self, "workload_pool")
 
     @workload_pool.setter
@@ -4041,6 +4069,7 @@ class ClusterClusterAutoscalingAutoProvisioningDefaultsManagementArgs:
 
         This block also contains several computed attributes, documented below.
         :param pulumi.Input[bool] auto_upgrade: Specifies whether node auto-upgrade is enabled for the node pool. If enabled, node auto-upgrade helps keep the nodes in your node pool up to date with the latest release version of Kubernetes.
+        :param pulumi.Input[Sequence[pulumi.Input['ClusterClusterAutoscalingAutoProvisioningDefaultsManagementUpgradeOptionArgs']]] upgrade_options: Specifies the Auto Upgrade knobs for the node pool.
         """
         if auto_repair is not None:
             pulumi.set(__self__, "auto_repair", auto_repair)
@@ -4078,6 +4107,9 @@ class ClusterClusterAutoscalingAutoProvisioningDefaultsManagementArgs:
     @property
     @pulumi.getter(name="upgradeOptions")
     def upgrade_options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterClusterAutoscalingAutoProvisioningDefaultsManagementUpgradeOptionArgs']]]]:
+        """
+        Specifies the Auto Upgrade knobs for the node pool.
+        """
         return pulumi.get(self, "upgrade_options")
 
     @upgrade_options.setter
@@ -4091,6 +4123,7 @@ class ClusterClusterAutoscalingAutoProvisioningDefaultsManagementUpgradeOptionAr
                  auto_upgrade_start_time: Optional[pulumi.Input[str]] = None,
                  description: Optional[pulumi.Input[str]] = None):
         """
+        :param pulumi.Input[str] auto_upgrade_start_time: This field is set when upgrades are about to commence with the approximate start time for the upgrades, in RFC3339 text format.
         :param pulumi.Input[str] description: Description of the cluster.
         """
         if auto_upgrade_start_time is not None:
@@ -4101,6 +4134,9 @@ class ClusterClusterAutoscalingAutoProvisioningDefaultsManagementUpgradeOptionAr
     @property
     @pulumi.getter(name="autoUpgradeStartTime")
     def auto_upgrade_start_time(self) -> Optional[pulumi.Input[str]]:
+        """
+        This field is set when upgrades are about to commence with the approximate start time for the upgrades, in RFC3339 text format.
+        """
         return pulumi.get(self, "auto_upgrade_start_time")
 
     @auto_upgrade_start_time.setter
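Note: the management block above feeds GKE node auto-provisioning defaults, with `upgrade_options` computed by the API. A minimal Pulumi sketch of how these args compose (resource names and limits are illustrative, not taken from this diff):

```python
import pulumi_gcp as gcp

# Sketch: node auto-provisioning with repair/upgrade defaults; the
# computed `upgrade_options` attribute is populated by the API.
cluster = gcp.container.Cluster("autoprovisioned",
    location="us-central1",
    initial_node_count=1,
    cluster_autoscaling=gcp.container.ClusterClusterAutoscalingArgs(
        enabled=True,
        resource_limits=[gcp.container.ClusterClusterAutoscalingResourceLimitArgs(
            resource_type="cpu", minimum=1, maximum=16)],
        auto_provisioning_defaults=gcp.container.ClusterClusterAutoscalingAutoProvisioningDefaultsArgs(
            management=gcp.container.ClusterClusterAutoscalingAutoProvisioningDefaultsManagementArgs(
                auto_repair=True,
                auto_upgrade=True,
            ),
        ),
    ))
```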
@@ -4612,6 +4648,8 @@ class ClusterFleetArgs:
                  pre_registered: Optional[pulumi.Input[bool]] = None,
                  project: Optional[pulumi.Input[str]] = None):
         """
+        :param pulumi.Input[str] membership: Full resource name of the registered fleet membership of the cluster.
+        :param pulumi.Input[bool] pre_registered: Whether the cluster has been registered via the fleet API.
         :param pulumi.Input[str] project: The name of the Fleet host project where this cluster will be registered.
         """
         if membership is not None:
@@ -4624,6 +4662,9 @@ class ClusterFleetArgs:
     @property
     @pulumi.getter
     def membership(self) -> Optional[pulumi.Input[str]]:
+        """
+        Full resource name of the registered fleet membership of the cluster.
+        """
         return pulumi.get(self, "membership")
 
     @membership.setter
@@ -4633,6 +4674,9 @@ class ClusterFleetArgs:
     @property
     @pulumi.getter(name="preRegistered")
     def pre_registered(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Whether the cluster has been registered via the fleet API.
+        """
         return pulumi.get(self, "pre_registered")
 
     @pre_registered.setter
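A minimal sketch of the fleet block these docstrings describe; the host project ID is illustrative, and `membership`/`pre_registered` are returned by the API rather than set:

```python
import pulumi_gcp as gcp

# Sketch: registering a cluster with a fleet host project.
cluster = gcp.container.Cluster("fleet-member",
    location="us-central1",
    initial_node_count=1,
    fleet=gcp.container.ClusterFleetArgs(project="my-fleet-host-project"))
```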
@@ -4719,6 +4763,7 @@ class ClusterIpAllocationPolicyArgs:
         :param pulumi.Input[str] cluster_secondary_range_name: The name of the existing secondary
                range in the cluster's subnetwork to use for pod IP addresses. Alternatively,
                `cluster_ipv4_cidr_block` can be used to automatically create a GKE-managed one.
+        :param pulumi.Input['ClusterIpAllocationPolicyPodCidrOverprovisionConfigArgs'] pod_cidr_overprovision_config: Configuration for cluster level pod cidr overprovision. Default is disabled=false.
         :param pulumi.Input[str] services_ipv4_cidr_block: The IP address range of the services IPs in this cluster.
                Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14)
                to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14)
@@ -4794,6 +4839,9 @@ class ClusterIpAllocationPolicyArgs:
     @property
     @pulumi.getter(name="podCidrOverprovisionConfig")
     def pod_cidr_overprovision_config(self) -> Optional[pulumi.Input['ClusterIpAllocationPolicyPodCidrOverprovisionConfigArgs']]:
+        """
+        Configuration for cluster level pod cidr overprovision. Default is disabled=false.
+        """
         return pulumi.get(self, "pod_cidr_overprovision_config")
 
     @pod_cidr_overprovision_config.setter
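A sketch of the cluster-level pod CIDR overprovision knob documented above, assuming the nested arg exposes a `disabled` field as in the upstream Terraform schema (range names are illustrative):

```python
import pulumi_gcp as gcp

# Sketch: disable pod CIDR overprovisioning on a VPC-native cluster.
cluster = gcp.container.Cluster("vpc-native",
    location="us-central1",
    initial_node_count=1,
    networking_mode="VPC_NATIVE",
    ip_allocation_policy=gcp.container.ClusterIpAllocationPolicyArgs(
        cluster_secondary_range_name="pods",
        services_secondary_range_name="services",
        pod_cidr_overprovision_config=gcp.container.ClusterIpAllocationPolicyPodCidrOverprovisionConfigArgs(
            disabled=True,  # assumed field name, mirroring the GKE API
        ),
    ))
```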
@@ -5196,6 +5244,9 @@ class ClusterMasterAuthArgs:
         ```
 
         This block also contains several computed attributes, documented below.
+        :param pulumi.Input[str] client_certificate: Base64 encoded public certificate used by clients to authenticate to the cluster endpoint.
+        :param pulumi.Input[str] client_key: Base64 encoded private key used by clients to authenticate to the cluster endpoint.
+        :param pulumi.Input[str] cluster_ca_certificate: Base64 encoded public certificate that is the root of trust for the cluster.
         """
         pulumi.set(__self__, "client_certificate_config", client_certificate_config)
         if client_certificate is not None:
@@ -5226,6 +5277,9 @@ class ClusterMasterAuthArgs:
     @property
     @pulumi.getter(name="clientCertificate")
     def client_certificate(self) -> Optional[pulumi.Input[str]]:
+        """
+        Base64 encoded public certificate used by clients to authenticate to the cluster endpoint.
+        """
         return pulumi.get(self, "client_certificate")
 
     @client_certificate.setter
@@ -5235,6 +5289,9 @@ class ClusterMasterAuthArgs:
     @property
     @pulumi.getter(name="clientKey")
     def client_key(self) -> Optional[pulumi.Input[str]]:
+        """
+        Base64 encoded private key used by clients to authenticate to the cluster endpoint.
+        """
         return pulumi.get(self, "client_key")
 
     @client_key.setter
@@ -5244,6 +5301,9 @@ class ClusterMasterAuthArgs:
     @property
     @pulumi.getter(name="clusterCaCertificate")
     def cluster_ca_certificate(self) -> Optional[pulumi.Input[str]]:
+        """
+        Base64 encoded public certificate that is the root of trust for the cluster.
+        """
         return pulumi.get(self, "cluster_ca_certificate")
 
     @cluster_ca_certificate.setter
@@ -5255,11 +5315,17 @@ class ClusterMasterAuthClientCertificateConfigArgs:
 class ClusterMasterAuthClientCertificateConfigArgs:
     def __init__(__self__, *,
                  issue_client_certificate: pulumi.Input[bool]):
+        """
+        :param pulumi.Input[bool] issue_client_certificate: Whether client certificate authorization is enabled for this cluster.
+        """
         pulumi.set(__self__, "issue_client_certificate", issue_client_certificate)
 
     @property
     @pulumi.getter(name="issueClientCertificate")
     def issue_client_certificate(self) -> pulumi.Input[bool]:
+        """
+        Whether client certificate authorization is enabled for this cluster.
+        """
         return pulumi.get(self, "issue_client_certificate")
 
     @issue_client_certificate.setter
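A sketch showing how the newly documented `master_auth` fields surface; the computed CA certificate is read back with `apply`:

```python
import pulumi
import pulumi_gcp as gcp

# Sketch: client certificates disabled; computed fields read back.
cluster = gcp.container.Cluster("hardened",
    location="us-central1-a",
    initial_node_count=1,
    master_auth=gcp.container.ClusterMasterAuthArgs(
        client_certificate_config=gcp.container.ClusterMasterAuthClientCertificateConfigArgs(
            issue_client_certificate=False,
        ),
    ))

pulumi.export("clusterCaCertificate",
              cluster.master_auth.apply(lambda auth: auth.cluster_ca_certificate))
```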
@@ -5574,6 +5640,7 @@ class ClusterNodeConfigArgs:
                in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
         :param pulumi.Input[str] disk_type: Type of the disk attached to each node
                (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-standard'
+        :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node.
         :param pulumi.Input[bool] enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
         :param pulumi.Input['ClusterNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
 
@@ -5611,6 +5678,7 @@ class ClusterNodeConfigArgs:
         ```python
         import pulumi
         ```
+        :param pulumi.Input['ClusterNodeConfigHostMaintenancePolicyArgs'] host_maintenance_policy: The maintenance policy for the hosts on which the GKE VMs run on.
         :param pulumi.Input[str] image_type: The image type to use for this node. Note that changing the image type
                will delete and recreate all nodes in the node pool.
         :param pulumi.Input['ClusterNodeConfigKubeletConfigArgs'] kubelet_config: Kubelet configuration, currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
@@ -5651,6 +5719,7 @@ class ClusterNodeConfigArgs:
         :param pulumi.Input['ClusterNodeConfigReservationAffinityArgs'] reservation_affinity: The configuration of the desired reservation which instances could take capacity from. Structure is documented below.
         :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_labels: The GCP labels (key/value pairs) to be applied to each node. Refer [here](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-managing-labels)
                for how these labels are applied to clusters, node pools and nodes.
+        :param pulumi.Input['ClusterNodeConfigSandboxConfigArgs'] sandbox_config: Sandbox configuration for this node.
         :param pulumi.Input[str] service_account: The service account to be used by the Node VMs.
                If not specified, the "default" service account is used.
         :param pulumi.Input['ClusterNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options. Structure is documented below.
@@ -5816,6 +5885,9 @@ class ClusterNodeConfigArgs:
     @property
     @pulumi.getter(name="effectiveTaints")
     def effective_taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigEffectiveTaintArgs']]]]:
+        """
+        List of kubernetes taints applied to each node.
+        """
         return pulumi.get(self, "effective_taints")
 
     @effective_taints.setter
@@ -5939,6 +6011,9 @@ class ClusterNodeConfigArgs:
     @property
     @pulumi.getter(name="hostMaintenancePolicy")
     def host_maintenance_policy(self) -> Optional[pulumi.Input['ClusterNodeConfigHostMaintenancePolicyArgs']]:
+        """
+        The maintenance policy for the hosts on which the GKE VMs run on.
+        """
         return pulumi.get(self, "host_maintenance_policy")
 
     @host_maintenance_policy.setter
@@ -6153,6 +6228,9 @@ class ClusterNodeConfigArgs:
     @property
     @pulumi.getter(name="sandboxConfig")
     def sandbox_config(self) -> Optional[pulumi.Input['ClusterNodeConfigSandboxConfigArgs']]:
+        """
+        Sandbox configuration for this node.
+        """
         return pulumi.get(self, "sandbox_config")
 
     @sandbox_config.setter
@@ -6633,11 +6711,17 @@ class ClusterNodeConfigGvnicArgs:
 class ClusterNodeConfigHostMaintenancePolicyArgs:
     def __init__(__self__, *,
                  maintenance_interval: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] maintenance_interval: .
+        """
         pulumi.set(__self__, "maintenance_interval", maintenance_interval)
 
     @property
     @pulumi.getter(name="maintenanceInterval")
     def maintenance_interval(self) -> pulumi.Input[str]:
+        """
+        .
+        """
         return pulumi.get(self, "maintenance_interval")
 
     @maintenance_interval.setter
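Upstream documents `maintenance_interval` with a bare "." here. As an assumption, "PERIODIC" is used below since it is a value the GKE host-maintenance API is known to accept:

```python
import pulumi_gcp as gcp

# Sketch: request periodic host maintenance for the default node pool.
cluster = gcp.container.Cluster("stable-hosts",
    location="us-central1",
    initial_node_count=1,
    node_config=gcp.container.ClusterNodeConfigArgs(
        host_maintenance_policy=gcp.container.ClusterNodeConfigHostMaintenancePolicyArgs(
            maintenance_interval="PERIODIC",  # assumed valid value
        ),
    ))
```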
@@ -6961,11 +7045,17 @@ class ClusterNodeConfigShieldedInstanceConfigArgs:
 class ClusterNodeConfigSoleTenantConfigArgs:
     def __init__(__self__, *,
                  node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
+        """
+        :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: .
+        """
         pulumi.set(__self__, "node_affinities", node_affinities)
 
     @property
     @pulumi.getter(name="nodeAffinities")
     def node_affinities(self) -> pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]]:
+        """
+        .
+        """
         return pulumi.get(self, "node_affinities")
 
     @node_affinities.setter
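The `node_affinities` docstring is likewise a bare "." upstream. A sketch pinning nodes to a sole-tenant node group (group name is illustrative), using the Compute Engine node-affinity shape of key/operator/values:

```python
import pulumi_gcp as gcp

# Sketch: schedule cluster nodes onto a sole-tenant node group.
cluster = gcp.container.Cluster("sole-tenant",
    location="us-central1",
    initial_node_count=1,
    node_config=gcp.container.ClusterNodeConfigArgs(
        sole_tenant_config=gcp.container.ClusterNodeConfigSoleTenantConfigArgs(
            node_affinities=[gcp.container.ClusterNodeConfigSoleTenantConfigNodeAffinityArgs(
                key="compute.googleapis.com/node-group-name",
                operator="IN",
                values=["my-node-group"],
            )],
        ),
    ))
```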
@@ -7127,17 +7217,22 @@ class ClusterNodePoolArgs:
                  upgrade_settings: Optional[pulumi.Input['ClusterNodePoolUpgradeSettingsArgs']] = None,
                  version: Optional[pulumi.Input[str]] = None):
         """
+        :param pulumi.Input['ClusterNodePoolAutoscalingArgs'] autoscaling: Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage.
         :param pulumi.Input[int] initial_node_count: The number of nodes to create in this
                cluster's default node pool. In regional or multi-zonal clusters, this is the
                number of nodes per zone. Must be set if `node_pool` is not set. If you're using
                `container.NodePool` objects with no default node pool, you'll need to
                set this to a value of at least `1`, alongside setting
                `remove_default_node_pool` to `true`.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] instance_group_urls: The resource URLs of the managed instance groups associated with this node pool.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] managed_instance_group_urls: List of instance group URLs which have been assigned to this node pool.
         :param pulumi.Input['ClusterNodePoolManagementArgs'] management: NodeManagement configuration for this NodePool. Structure is documented below.
+        :param pulumi.Input[int] max_pods_per_node: The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled.
         :param pulumi.Input[str] name: The name of the cluster, unique within the project and
                location.
 
                - - -
+        :param pulumi.Input[str] name_prefix: Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
         :param pulumi.Input['ClusterNodePoolNetworkConfigArgs'] network_config: Configuration for
                [Adding Pod IP address ranges](https://cloud.google.com/kubernetes-engine/docs/how-to/multi-pod-cidr)) to the node pool. Structure is documented below
         :param pulumi.Input['ClusterNodePoolNodeConfigArgs'] node_config: Parameters used in creating the default node pool.
@@ -7145,6 +7240,7 @@ class ClusterNodePoolArgs:
                `container.NodePool` or a `node_pool` block; this configuration
                manages the default node pool, which isn't recommended to be used.
                Structure is documented below.
+        :param pulumi.Input[int] node_count: The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
         :param pulumi.Input[Sequence[pulumi.Input[str]]] node_locations: The list of zones in which the cluster's nodes
                are located. Nodes must be in the region of their regional cluster or in the
                same region as their cluster's zone for zonal clusters. If this is specified for
@@ -7156,6 +7252,8 @@ class ClusterNodePoolArgs:
                locations. In contrast, in a regional cluster, cluster master nodes are present
                in multiple zones in the region. For that reason, regional clusters should be
                preferred.
+        :param pulumi.Input['ClusterNodePoolPlacementPolicyArgs'] placement_policy: Specifies the node placement policy
+        :param pulumi.Input['ClusterNodePoolQueuedProvisioningArgs'] queued_provisioning: Specifies the configuration of queued provisioning
         :param pulumi.Input['ClusterNodePoolUpgradeSettingsArgs'] upgrade_settings: Specifies the upgrade settings for NAP created node pools. Structure is documented below.
         """
         if autoscaling is not None:
@@ -7194,6 +7292,9 @@ class ClusterNodePoolArgs:
     @property
     @pulumi.getter
     def autoscaling(self) -> Optional[pulumi.Input['ClusterNodePoolAutoscalingArgs']]:
+        """
+        Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage.
+        """
         return pulumi.get(self, "autoscaling")
 
     @autoscaling.setter
@@ -7220,6 +7321,9 @@ class ClusterNodePoolArgs:
     @property
     @pulumi.getter(name="instanceGroupUrls")
     def instance_group_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+        """
+        The resource URLs of the managed instance groups associated with this node pool.
+        """
         return pulumi.get(self, "instance_group_urls")
 
     @instance_group_urls.setter
@@ -7229,6 +7333,9 @@ class ClusterNodePoolArgs:
     @property
     @pulumi.getter(name="managedInstanceGroupUrls")
     def managed_instance_group_urls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+        """
+        List of instance group URLs which have been assigned to this node pool.
+        """
         return pulumi.get(self, "managed_instance_group_urls")
 
     @managed_instance_group_urls.setter
@@ -7250,6 +7357,9 @@ class ClusterNodePoolArgs:
     @property
     @pulumi.getter(name="maxPodsPerNode")
     def max_pods_per_node(self) -> Optional[pulumi.Input[int]]:
+        """
+        The maximum number of pods per node in this node pool. Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled.
+        """
         return pulumi.get(self, "max_pods_per_node")
 
     @max_pods_per_node.setter
@@ -7274,6 +7384,9 @@ class ClusterNodePoolArgs:
     @property
     @pulumi.getter(name="namePrefix")
     def name_prefix(self) -> Optional[pulumi.Input[str]]:
+        """
+        Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.
+        """
         return pulumi.get(self, "name_prefix")
 
     @name_prefix.setter
@@ -7312,6 +7425,9 @@ class ClusterNodePoolArgs:
     @property
     @pulumi.getter(name="nodeCount")
     def node_count(self) -> Optional[pulumi.Input[int]]:
+        """
+        The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.
+        """
         return pulumi.get(self, "node_count")
 
     @node_count.setter
@@ -7343,6 +7459,9 @@ class ClusterNodePoolArgs:
     @property
     @pulumi.getter(name="placementPolicy")
     def placement_policy(self) -> Optional[pulumi.Input['ClusterNodePoolPlacementPolicyArgs']]:
+        """
+        Specifies the node placement policy
+        """
         return pulumi.get(self, "placement_policy")
 
     @placement_policy.setter
@@ -7352,6 +7471,9 @@ class ClusterNodePoolArgs:
     @property
     @pulumi.getter(name="queuedProvisioning")
     def queued_provisioning(self) -> Optional[pulumi.Input['ClusterNodePoolQueuedProvisioningArgs']]:
+        """
+        Specifies the configuration of queued provisioning
+        """
         return pulumi.get(self, "queued_provisioning")
 
     @queued_provisioning.setter
@@ -7442,6 +7564,13 @@ class ClusterNodePoolAutoscalingArgs:
                  min_node_count: Optional[pulumi.Input[int]] = None,
                  total_max_node_count: Optional[pulumi.Input[int]] = None,
                  total_min_node_count: Optional[pulumi.Input[int]] = None):
+        """
+        :param pulumi.Input[str] location_policy: Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs.
+        :param pulumi.Input[int] max_node_count: Maximum number of nodes per zone in the node pool. Must be >= min_node_count. Cannot be used with total limits.
+        :param pulumi.Input[int] min_node_count: Minimum number of nodes per zone in the node pool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
+        :param pulumi.Input[int] total_max_node_count: Maximum number of all nodes in the node pool. Must be >= total_min_node_count. Cannot be used with per zone limits.
+        :param pulumi.Input[int] total_min_node_count: Minimum number of all nodes in the node pool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits.
+        """
         if location_policy is not None:
             pulumi.set(__self__, "location_policy", location_policy)
         if max_node_count is not None:
@@ -7456,6 +7585,9 @@ class ClusterNodePoolAutoscalingArgs:
     @property
     @pulumi.getter(name="locationPolicy")
     def location_policy(self) -> Optional[pulumi.Input[str]]:
+        """
+        Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs.
+        """
         return pulumi.get(self, "location_policy")
 
     @location_policy.setter
@@ -7465,6 +7597,9 @@ class ClusterNodePoolAutoscalingArgs:
     @property
     @pulumi.getter(name="maxNodeCount")
     def max_node_count(self) -> Optional[pulumi.Input[int]]:
+        """
+        Maximum number of nodes per zone in the node pool. Must be >= min_node_count. Cannot be used with total limits.
+        """
         return pulumi.get(self, "max_node_count")
 
     @max_node_count.setter
@@ -7474,6 +7609,9 @@ class ClusterNodePoolAutoscalingArgs:
     @property
     @pulumi.getter(name="minNodeCount")
     def min_node_count(self) -> Optional[pulumi.Input[int]]:
+        """
+        Minimum number of nodes per zone in the node pool. Must be >=0 and <= max_node_count. Cannot be used with total limits.
+        """
         return pulumi.get(self, "min_node_count")
 
     @min_node_count.setter
@@ -7483,6 +7621,9 @@ class ClusterNodePoolAutoscalingArgs:
     @property
     @pulumi.getter(name="totalMaxNodeCount")
     def total_max_node_count(self) -> Optional[pulumi.Input[int]]:
+        """
+        Maximum number of all nodes in the node pool. Must be >= total_min_node_count. Cannot be used with per zone limits.
+        """
         return pulumi.get(self, "total_max_node_count")
 
     @total_max_node_count.setter
@@ -7492,6 +7633,9 @@ class ClusterNodePoolAutoscalingArgs:
     @property
     @pulumi.getter(name="totalMinNodeCount")
     def total_min_node_count(self) -> Optional[pulumi.Input[int]]:
+        """
+        Minimum number of all nodes in the node pool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits.
+        """
         return pulumi.get(self, "total_min_node_count")
 
     @total_min_node_count.setter
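A sketch of the autoscaling limits documented above; the `total_*` bounds cap the pool across all zones and cannot be combined with the per-zone `min_node_count`/`max_node_count` pair:

```python
import pulumi_gcp as gcp

# Sketch: total limits plus the "ANY" location policy, which favors
# unused reservations and lowers Spot preemption risk.
cluster = gcp.container.Cluster("autoscaled",
    location="us-central1",
    node_pools=[gcp.container.ClusterNodePoolArgs(
        name="default",
        initial_node_count=1,
        autoscaling=gcp.container.ClusterNodePoolAutoscalingArgs(
            total_min_node_count=1,
            total_max_node_count=10,
            location_policy="ANY",
        ),
    )])
```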
@@ -7638,12 +7782,15 @@ class ClusterNodePoolNetworkConfigArgs:
                  pod_ipv4_cidr_block: Optional[pulumi.Input[str]] = None,
                  pod_range: Optional[pulumi.Input[str]] = None):
         """
+        :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNetworkConfigAdditionalNodeNetworkConfigArgs']]] additional_node_network_configs: We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface
+        :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs']]] additional_pod_network_configs: We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node
         :param pulumi.Input[bool] create_pod_range: Whether to create a new range for pod IPs in this node pool. Defaults are provided for `pod_range` and `pod_ipv4_cidr_block` if they are not specified.
         :param pulumi.Input[bool] enable_private_nodes: Enables the private cluster feature,
                creating a private endpoint on the cluster. In a private cluster, nodes only
                have RFC 1918 private addresses and communicate with the master's private
                endpoint via private networking.
         :param pulumi.Input['ClusterNodePoolNetworkConfigNetworkPerformanceConfigArgs'] network_performance_config: Network bandwidth tier configuration.
+        :param pulumi.Input['ClusterNodePoolNetworkConfigPodCidrOverprovisionConfigArgs'] pod_cidr_overprovision_config: Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited
         :param pulumi.Input[str] pod_ipv4_cidr_block: The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
         :param pulumi.Input[str] pod_range: The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID.
         """
@@ -7667,6 +7814,9 @@ class ClusterNodePoolNetworkConfigArgs:
     @property
     @pulumi.getter(name="additionalNodeNetworkConfigs")
     def additional_node_network_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNetworkConfigAdditionalNodeNetworkConfigArgs']]]]:
+        """
+        We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface
+        """
         return pulumi.get(self, "additional_node_network_configs")
 
     @additional_node_network_configs.setter
@@ -7676,6 +7826,9 @@ class ClusterNodePoolNetworkConfigArgs:
     @property
     @pulumi.getter(name="additionalPodNetworkConfigs")
     def additional_pod_network_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs']]]]:
+        """
+        We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node
+        """
         return pulumi.get(self, "additional_pod_network_configs")
 
     @additional_pod_network_configs.setter
@@ -7724,6 +7877,9 @@ class ClusterNodePoolNetworkConfigArgs:
     @property
     @pulumi.getter(name="podCidrOverprovisionConfig")
     def pod_cidr_overprovision_config(self) -> Optional[pulumi.Input['ClusterNodePoolNetworkConfigPodCidrOverprovisionConfigArgs']]:
+        """
+        Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited
+        """
         return pulumi.get(self, "pod_cidr_overprovision_config")
 
     @pod_cidr_overprovision_config.setter
@@ -7807,6 +7963,8 @@ class ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs:
                  secondary_pod_range: Optional[pulumi.Input[str]] = None,
                  subnetwork: Optional[pulumi.Input[str]] = None):
         """
+        :param pulumi.Input[int] max_pods_per_node: The maximum number of pods per node which use this pod network.
+        :param pulumi.Input[str] secondary_pod_range: The name of the secondary range on the subnet which provides IP address for this pod range.
         :param pulumi.Input[str] subnetwork: The name or self_link of the Google Compute Engine
                subnetwork in which the cluster's instances are launched.
         """
@@ -7820,6 +7978,9 @@ class ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs:
     @property
     @pulumi.getter(name="maxPodsPerNode")
     def max_pods_per_node(self) -> Optional[pulumi.Input[int]]:
+        """
+        The maximum number of pods per node which use this pod network.
+        """
         return pulumi.get(self, "max_pods_per_node")
 
     @max_pods_per_node.setter
@@ -7829,6 +7990,9 @@ class ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs:
     @property
     @pulumi.getter(name="secondaryPodRange")
     def secondary_pod_range(self) -> Optional[pulumi.Input[str]]:
+        """
+        The name of the secondary range on the subnet which provides IP address for this pod range.
+        """
         return pulumi.get(self, "secondary_pod_range")
 
     @secondary_pod_range.setter
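A sketch of an additional pod network on an in-cluster node pool (subnet and range names are illustrative; the cluster must have multi-networking enabled for this to take effect):

```python
import pulumi_gcp as gcp

# Sketch: a second pod network backed by another subnet's secondary range.
node_pool = gcp.container.ClusterNodePoolArgs(
    name="multi-net",
    network_config=gcp.container.ClusterNodePoolNetworkConfigArgs(
        additional_pod_network_configs=[
            gcp.container.ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs(
                subnetwork="extra-subnet",
                secondary_pod_range="extra-pods",
                max_pods_per_node=32,
            ),
        ],
    ))
```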
@@ -7946,6 +8110,7 @@ class ClusterNodePoolNodeConfigArgs:
                in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
         :param pulumi.Input[str] disk_type: Type of the disk attached to each node
                (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-standard'
+        :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node.
         :param pulumi.Input[bool] enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
         :param pulumi.Input['ClusterNodePoolNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
 
@@ -7983,6 +8148,7 @@ class ClusterNodePoolNodeConfigArgs:
         ```python
         import pulumi
         ```
+        :param pulumi.Input['ClusterNodePoolNodeConfigHostMaintenancePolicyArgs'] host_maintenance_policy: The maintenance policy for the hosts on which the GKE VMs run on.
         :param pulumi.Input[str] image_type: The image type to use for this node. Note that changing the image type
                will delete and recreate all nodes in the node pool.
         :param pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigArgs'] kubelet_config: Kubelet configuration, currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
@@ -8023,6 +8189,7 @@ class ClusterNodePoolNodeConfigArgs:
         :param pulumi.Input['ClusterNodePoolNodeConfigReservationAffinityArgs'] reservation_affinity: The configuration of the desired reservation which instances could take capacity from. Structure is documented below.
         :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_labels: The GCP labels (key/value pairs) to be applied to each node. Refer [here](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-managing-labels)
                for how these labels are applied to clusters, node pools and nodes.
+        :param pulumi.Input['ClusterNodePoolNodeConfigSandboxConfigArgs'] sandbox_config: Sandbox configuration for this node.
         :param pulumi.Input[str] service_account: The service account to be used by the Node VMs.
                If not specified, the "default" service account is used.
         :param pulumi.Input['ClusterNodePoolNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options. Structure is documented below.
@@ -8188,6 +8355,9 @@ class ClusterNodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="effectiveTaints")
     def effective_taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigEffectiveTaintArgs']]]]:
+        """
+        List of kubernetes taints applied to each node.
+        """
         return pulumi.get(self, "effective_taints")
 
     @effective_taints.setter
@@ -8311,6 +8481,9 @@ class ClusterNodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="hostMaintenancePolicy")
     def host_maintenance_policy(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigHostMaintenancePolicyArgs']]:
+        """
+        The maintenance policy for the hosts on which the GKE VMs run on.
+        """
         return pulumi.get(self, "host_maintenance_policy")
 
     @host_maintenance_policy.setter
@@ -8525,6 +8698,9 @@ class ClusterNodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="sandboxConfig")
     def sandbox_config(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigSandboxConfigArgs']]:
+        """
+        Sandbox configuration for this node.
+        """
         return pulumi.get(self, "sandbox_config")
 
     @sandbox_config.setter
@@ -9005,11 +9181,17 @@ class ClusterNodePoolNodeConfigGvnicArgs:
 class ClusterNodePoolNodeConfigHostMaintenancePolicyArgs:
     def __init__(__self__, *,
                  maintenance_interval: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] maintenance_interval: .
+        """
         pulumi.set(__self__, "maintenance_interval", maintenance_interval)
 
     @property
     @pulumi.getter(name="maintenanceInterval")
     def maintenance_interval(self) -> pulumi.Input[str]:
+        """
+        .
+        """
         return pulumi.get(self, "maintenance_interval")
 
     @maintenance_interval.setter
@@ -9333,11 +9515,17 @@ class ClusterNodePoolNodeConfigShieldedInstanceConfigArgs:
 class ClusterNodePoolNodeConfigSoleTenantConfigArgs:
     def __init__(__self__, *,
                  node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
+        """
+        :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: .
+        """
         pulumi.set(__self__, "node_affinities", node_affinities)
 
     @property
     @pulumi.getter(name="nodeAffinities")
     def node_affinities(self) -> pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]:
+        """
+        .
+        """
         return pulumi.get(self, "node_affinities")
 
     @node_affinities.setter
@@ -9488,6 +9676,8 @@ class ClusterNodePoolPlacementPolicyArgs:
         """
         :param pulumi.Input[str] type: Telemetry integration for the cluster. Supported values (`ENABLED, DISABLED, SYSTEM_ONLY`);
                `SYSTEM_ONLY` (Only system components are monitored and logged) is only available in GKE versions 1.15 and later.
+        :param pulumi.Input[str] policy_name: If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned.
+        :param pulumi.Input[str] tpu_topology: TPU placement topology for pod slice node pool. https://cloud.google.com/tpu/docs/types-topologies#tpu_topologies
         """
         pulumi.set(__self__, "type", type)
         if policy_name is not None:
@@ -9511,6 +9701,9 @@ class ClusterNodePoolPlacementPolicyArgs:
     @property
     @pulumi.getter(name="policyName")
     def policy_name(self) -> Optional[pulumi.Input[str]]:
+        """
+        If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned.
+        """
         return pulumi.get(self, "policy_name")
 
     @policy_name.setter
@@ -9520,6 +9713,9 @@ class ClusterNodePoolPlacementPolicyArgs:
     @property
     @pulumi.getter(name="tpuTopology")
     def tpu_topology(self) -> Optional[pulumi.Input[str]]:
+        """
+        TPU placement topology for pod slice node pool. https://cloud.google.com/tpu/docs/types-topologies#tpu_topologies
+        """
         return pulumi.get(self, "tpu_topology")
 
     @tpu_topology.setter
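A sketch of the placement-policy fields documented above; the `tpu_topology` value is illustrative, see the linked topology docs for valid values:

```python
import pulumi_gcp as gcp

# Sketch: a compact placement policy on an in-cluster node pool.
pool = gcp.container.ClusterNodePoolArgs(
    name="tpu-slice",
    placement_policy=gcp.container.ClusterNodePoolPlacementPolicyArgs(
        type="COMPACT",
        tpu_topology="2x2x2",  # illustrative topology
    ))
```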
@@ -10211,11 +10407,17 @@ class ClusterResourceUsageExportConfigArgs:
 class ClusterResourceUsageExportConfigBigqueryDestinationArgs:
     def __init__(__self__, *,
                  dataset_id: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] dataset_id: The ID of a BigQuery Dataset.
+        """
         pulumi.set(__self__, "dataset_id", dataset_id)
 
     @property
     @pulumi.getter(name="datasetId")
     def dataset_id(self) -> pulumi.Input[str]:
+        """
+        The ID of a BigQuery Dataset.
+        """
         return pulumi.get(self, "dataset_id")
 
     @dataset_id.setter
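A sketch wiring the `dataset_id` documented above into GKE resource-usage export:

```python
import pulumi_gcp as gcp

# Sketch: export GKE resource usage to a BigQuery dataset.
dataset = gcp.bigquery.Dataset("usage", dataset_id="gke_usage_metering")

cluster = gcp.container.Cluster("metered",
    location="us-central1",
    initial_node_count=1,
    resource_usage_export_config=gcp.container.ClusterResourceUsageExportConfigArgs(
        enable_network_egress_metering=False,
        bigquery_destination=gcp.container.ClusterResourceUsageExportConfigBigqueryDestinationArgs(
            dataset_id=dataset.dataset_id,
        ),
    ))
```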
@@ -10292,6 +10494,8 @@ class ClusterTpuConfigArgs:
                  use_service_networking: Optional[pulumi.Input[bool]] = None):
         """
         :param pulumi.Input[bool] enabled: Enable Binary Authorization for this cluster. Deprecated in favor of `evaluation_mode`.
+        :param pulumi.Input[str] ipv4_cidr_block: IPv4 CIDR block reserved for Cloud TPU in the VPC.
+        :param pulumi.Input[bool] use_service_networking: Whether to use service networking for Cloud TPU or not
         """
         pulumi.set(__self__, "enabled", enabled)
         if ipv4_cidr_block is not None:
@@ -10314,6 +10518,9 @@ class ClusterTpuConfigArgs:
     @property
     @pulumi.getter(name="ipv4CidrBlock")
     def ipv4_cidr_block(self) -> Optional[pulumi.Input[str]]:
+        """
+        IPv4 CIDR block reserved for Cloud TPU in the VPC.
+        """
         return pulumi.get(self, "ipv4_cidr_block")
 
     @ipv4_cidr_block.setter
@@ -10323,6 +10530,9 @@ class ClusterTpuConfigArgs:
     @property
     @pulumi.getter(name="useServiceNetworking")
     def use_service_networking(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Whether to use service networking for Cloud TPU or not
+        """
         return pulumi.get(self, "use_service_networking")
 
     @use_service_networking.setter
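A sketch of the TPU block; note the upstream `enabled` docstring appears copy-pasted from Binary Authorization, while in this class it simply toggles Cloud TPU integration, and `ipv4_cidr_block` is computed when service networking is used:

```python
import pulumi_gcp as gcp

# Sketch: Cloud TPU integration via service networking (this arg may
# require the beta GKE surface depending on provider version).
cluster = gcp.container.Cluster("tpu-enabled",
    location="us-central1",
    initial_node_count=1,
    tpu_config=gcp.container.ClusterTpuConfigArgs(
        enabled=True,
        use_service_networking=True,
    ))
```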
@@ -10569,6 +10779,8 @@ class NodePoolNetworkConfigArgs:
                Structure is documented below
         :param pulumi.Input[bool] create_pod_range: Whether to create a new range for pod IPs in this node pool. Defaults are provided for `pod_range` and `pod_ipv4_cidr_block` if they are not specified.
         :param pulumi.Input[bool] enable_private_nodes: Whether nodes have internal IP addresses only.
+        :param pulumi.Input['NodePoolNetworkConfigNetworkPerformanceConfigArgs'] network_performance_config: Network bandwidth tier configuration.
+        :param pulumi.Input['NodePoolNetworkConfigPodCidrOverprovisionConfigArgs'] pod_cidr_overprovision_config: Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited
         :param pulumi.Input[str] pod_ipv4_cidr_block: The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
         :param pulumi.Input[str] pod_range: The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID.
         """
@@ -10642,6 +10854,9 @@ class NodePoolNetworkConfigArgs:
     @property
     @pulumi.getter(name="networkPerformanceConfig")
     def network_performance_config(self) -> Optional[pulumi.Input['NodePoolNetworkConfigNetworkPerformanceConfigArgs']]:
+        """
+        Network bandwidth tier configuration.
+        """
         return pulumi.get(self, "network_performance_config")
 
     @network_performance_config.setter
@@ -10651,6 +10866,9 @@ class NodePoolNetworkConfigArgs:
     @property
     @pulumi.getter(name="podCidrOverprovisionConfig")
     def pod_cidr_overprovision_config(self) -> Optional[pulumi.Input['NodePoolNetworkConfigPodCidrOverprovisionConfigArgs']]:
+        """
+        Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited
+        """
         return pulumi.get(self, "pod_cidr_overprovision_config")
 
     @pod_cidr_overprovision_config.setter
@@ -10780,11 +10998,17 @@ class NodePoolNetworkConfigAdditionalPodNetworkConfigArgs:
 class NodePoolNetworkConfigNetworkPerformanceConfigArgs:
     def __init__(__self__, *,
                  total_egress_bandwidth_tier: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] total_egress_bandwidth_tier: Specifies the total network bandwidth tier for the NodePool.
+        """
         pulumi.set(__self__, "total_egress_bandwidth_tier", total_egress_bandwidth_tier)
 
     @property
     @pulumi.getter(name="totalEgressBandwidthTier")
     def total_egress_bandwidth_tier(self) -> pulumi.Input[str]:
+        """
+        Specifies the total network bandwidth tier for the NodePool.
+        """
         return pulumi.get(self, "total_egress_bandwidth_tier")
 
     @total_egress_bandwidth_tier.setter
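A sketch opting a standalone node pool into the documented bandwidth tier; "TIER_1" is the value the API accepts for Tier_1 networking (cluster name is illustrative):

```python
import pulumi_gcp as gcp

# Sketch: higher egress bandwidth tier on a standalone node pool.
pool = gcp.container.NodePool("fast-egress",
    cluster="my-cluster",
    location="us-central1",
    node_count=1,
    network_config=gcp.container.NodePoolNetworkConfigArgs(
        network_performance_config=gcp.container.NodePoolNetworkConfigNetworkPerformanceConfigArgs(
            total_egress_bandwidth_tier="TIER_1",
        ),
    ))
```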
@@ -10849,7 +11073,43 @@ class NodePoolNodeConfigArgs:
|
|
10849
11073
|
taints: Optional[pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigTaintArgs']]]] = None,
|
10850
11074
|
                  workload_metadata_config: Optional[pulumi.Input['NodePoolNodeConfigWorkloadMetadataConfigArgs']] = None):
         """
+        :param pulumi.Input['NodePoolNodeConfigAdvancedMachineFeaturesArgs'] advanced_machine_features: Specifies options for controlling advanced machine features.
+        :param pulumi.Input[str] boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
         :param pulumi.Input['NodePoolNodeConfigConfidentialNodesArgs'] confidential_nodes: Configuration for Confidential Nodes feature. Structure is documented below.
+        :param pulumi.Input[int] disk_size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
+        :param pulumi.Input[str] disk_type: Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
+        :param pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigEffectiveTaintArgs']]] effective_taints: List of Kubernetes taints applied to each node.
+        :param pulumi.Input[bool] enable_confidential_storage: If enabled, boot disks are configured with confidential mode.
+        :param pulumi.Input['NodePoolNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
+        :param pulumi.Input['NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs'] ephemeral_storage_local_ssd_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
+        :param pulumi.Input['NodePoolNodeConfigFastSocketArgs'] fast_socket: Enable or disable NCCL Fast Socket in the node pool.
+        :param pulumi.Input['NodePoolNodeConfigGcfsConfigArgs'] gcfs_config: GCFS configuration for this node.
+        :param pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigGuestAcceleratorArgs']]] guest_accelerators: List of the type and count of accelerator cards attached to the instance.
+        :param pulumi.Input['NodePoolNodeConfigGvnicArgs'] gvnic: Enable or disable gvnic in the node pool.
+        :param pulumi.Input['NodePoolNodeConfigHostMaintenancePolicyArgs'] host_maintenance_policy: The maintenance policy for the hosts on which the GKE VMs run.
+        :param pulumi.Input[str] image_type: The image type to use for this node. Note that for a given image type, the latest version of it will be used.
+        :param pulumi.Input['NodePoolNodeConfigKubeletConfigArgs'] kubelet_config: Node kubelet configs.
+        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
+        :param pulumi.Input['NodePoolNodeConfigLinuxNodeConfigArgs'] linux_node_config: Parameters that can be configured on Linux nodes.
+        :param pulumi.Input['NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs'] local_nvme_ssd_block_config: Parameters for raw-block local NVMe SSDs.
+        :param pulumi.Input[int] local_ssd_count: The number of local SSD disks to be attached to the node.
+        :param pulumi.Input[str] logging_variant: Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
+        :param pulumi.Input[str] machine_type: The name of a Google Compute Engine machine type.
+        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: The metadata key/value pairs assigned to instances in the cluster.
+        :param pulumi.Input[str] min_cpu_platform: Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
+        :param pulumi.Input[str] node_group: Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] oauth_scopes: The set of Google API scopes to be made available on all of the node VMs.
+        :param pulumi.Input[bool] preemptible: Whether the nodes are created as preemptible VM instances.
+        :param pulumi.Input['NodePoolNodeConfigReservationAffinityArgs'] reservation_affinity: The reservation affinity configuration for the node pool.
+        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_labels: The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
+        :param pulumi.Input['NodePoolNodeConfigSandboxConfigArgs'] sandbox_config: Sandbox configuration for this node.
+        :param pulumi.Input[str] service_account: The Google Cloud Platform Service Account to be used by the node VMs.
+        :param pulumi.Input['NodePoolNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options.
+        :param pulumi.Input['NodePoolNodeConfigSoleTenantConfigArgs'] sole_tenant_config: Node affinity options for sole tenant node pools.
+        :param pulumi.Input[bool] spot: Whether the nodes are created as spot VM instances.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The list of instance tags applied to all nodes.
+        :param pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigTaintArgs']]] taints: List of Kubernetes taints to be applied to each node.
+        :param pulumi.Input['NodePoolNodeConfigWorkloadMetadataConfigArgs'] workload_metadata_config: The workload metadata configuration for this node.
         """
         if advanced_machine_features is not None:
             pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
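Taken together, these parameters mirror the node_config block of a gcp.container.NodePool resource. A minimal sketch of how a few of them fit together — the cluster name and all values are hypothetical, not taken from this diff:

    import pulumi_gcp as gcp

    # "my-cluster" is a placeholder for an existing GKE cluster.
    pool = gcp.container.NodePool(
        "example-pool",
        cluster="my-cluster",
        node_count=1,
        node_config=gcp.container.NodePoolNodeConfigArgs(
            machine_type="e2-standard-4",
            disk_size_gb=100,
            disk_type="pd-balanced",
            spot=True,
            labels={"team": "platform"},
            oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
        ),
    )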
@@ -10929,6 +11189,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="advancedMachineFeatures")
     def advanced_machine_features(self) -> Optional[pulumi.Input['NodePoolNodeConfigAdvancedMachineFeaturesArgs']]:
+        """
+        Specifies options for controlling advanced machine features.
+        """
         return pulumi.get(self, "advanced_machine_features")

     @advanced_machine_features.setter
@@ -10938,6 +11201,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="bootDiskKmsKey")
     def boot_disk_kms_key(self) -> Optional[pulumi.Input[str]]:
+        """
+        The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
+        """
         return pulumi.get(self, "boot_disk_kms_key")

     @boot_disk_kms_key.setter
@@ -10959,6 +11225,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="diskSizeGb")
     def disk_size_gb(self) -> Optional[pulumi.Input[int]]:
+        """
+        Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
+        """
         return pulumi.get(self, "disk_size_gb")

     @disk_size_gb.setter
@@ -10968,6 +11237,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="diskType")
     def disk_type(self) -> Optional[pulumi.Input[str]]:
+        """
+        Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
+        """
         return pulumi.get(self, "disk_type")

     @disk_type.setter
@@ -10977,6 +11249,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="effectiveTaints")
     def effective_taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigEffectiveTaintArgs']]]]:
+        """
+        List of Kubernetes taints applied to each node.
+        """
         return pulumi.get(self, "effective_taints")

     @effective_taints.setter
@@ -10986,6 +11261,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="enableConfidentialStorage")
     def enable_confidential_storage(self) -> Optional[pulumi.Input[bool]]:
+        """
+        If enabled, boot disks are configured with confidential mode.
+        """
         return pulumi.get(self, "enable_confidential_storage")

     @enable_confidential_storage.setter
@@ -10995,6 +11273,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="ephemeralStorageConfig")
     def ephemeral_storage_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigEphemeralStorageConfigArgs']]:
+        """
+        Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
+        """
         return pulumi.get(self, "ephemeral_storage_config")

     @ephemeral_storage_config.setter
@@ -11004,6 +11285,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="ephemeralStorageLocalSsdConfig")
     def ephemeral_storage_local_ssd_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs']]:
+        """
+        Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.
+        """
         return pulumi.get(self, "ephemeral_storage_local_ssd_config")

     @ephemeral_storage_local_ssd_config.setter
@@ -11013,6 +11297,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="fastSocket")
     def fast_socket(self) -> Optional[pulumi.Input['NodePoolNodeConfigFastSocketArgs']]:
+        """
+        Enable or disable NCCL Fast Socket in the node pool.
+        """
         return pulumi.get(self, "fast_socket")

     @fast_socket.setter
@@ -11022,6 +11309,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="gcfsConfig")
     def gcfs_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigGcfsConfigArgs']]:
+        """
+        GCFS configuration for this node.
+        """
         return pulumi.get(self, "gcfs_config")

     @gcfs_config.setter
@@ -11031,6 +11321,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="guestAccelerators")
     def guest_accelerators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigGuestAcceleratorArgs']]]]:
+        """
+        List of the type and count of accelerator cards attached to the instance.
+        """
         return pulumi.get(self, "guest_accelerators")

     @guest_accelerators.setter
@@ -11040,6 +11333,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter
     def gvnic(self) -> Optional[pulumi.Input['NodePoolNodeConfigGvnicArgs']]:
+        """
+        Enable or disable gvnic in the node pool.
+        """
         return pulumi.get(self, "gvnic")

     @gvnic.setter
@@ -11049,6 +11345,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="hostMaintenancePolicy")
     def host_maintenance_policy(self) -> Optional[pulumi.Input['NodePoolNodeConfigHostMaintenancePolicyArgs']]:
+        """
+        The maintenance policy for the hosts on which the GKE VMs run.
+        """
         return pulumi.get(self, "host_maintenance_policy")

     @host_maintenance_policy.setter
@@ -11058,6 +11357,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="imageType")
     def image_type(self) -> Optional[pulumi.Input[str]]:
+        """
+        The image type to use for this node. Note that for a given image type, the latest version of it will be used.
+        """
         return pulumi.get(self, "image_type")

     @image_type.setter
@@ -11067,6 +11369,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="kubeletConfig")
     def kubelet_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigArgs']]:
+        """
+        Node kubelet configs.
+        """
         return pulumi.get(self, "kubelet_config")

     @kubelet_config.setter
@@ -11076,6 +11381,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter
     def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
+        """
+        The map of Kubernetes labels (key/value pairs) to be applied to each node. These will be added in addition to any default label(s) that Kubernetes may apply to the node.
+        """
         return pulumi.get(self, "labels")

     @labels.setter
@@ -11085,6 +11393,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="linuxNodeConfig")
     def linux_node_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigLinuxNodeConfigArgs']]:
+        """
+        Parameters that can be configured on Linux nodes.
+        """
         return pulumi.get(self, "linux_node_config")

     @linux_node_config.setter
@@ -11094,6 +11405,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="localNvmeSsdBlockConfig")
     def local_nvme_ssd_block_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs']]:
+        """
+        Parameters for raw-block local NVMe SSDs.
+        """
         return pulumi.get(self, "local_nvme_ssd_block_config")

     @local_nvme_ssd_block_config.setter
@@ -11103,6 +11417,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="localSsdCount")
     def local_ssd_count(self) -> Optional[pulumi.Input[int]]:
+        """
+        The number of local SSD disks to be attached to the node.
+        """
         return pulumi.get(self, "local_ssd_count")

     @local_ssd_count.setter
@@ -11112,6 +11429,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="loggingVariant")
     def logging_variant(self) -> Optional[pulumi.Input[str]]:
+        """
+        Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
+        """
         return pulumi.get(self, "logging_variant")

     @logging_variant.setter
@@ -11121,6 +11441,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="machineType")
     def machine_type(self) -> Optional[pulumi.Input[str]]:
+        """
+        The name of a Google Compute Engine machine type.
+        """
         return pulumi.get(self, "machine_type")

     @machine_type.setter
@@ -11130,6 +11453,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter
     def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
+        """
+        The metadata key/value pairs assigned to instances in the cluster.
+        """
         return pulumi.get(self, "metadata")

     @metadata.setter
@@ -11139,6 +11465,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="minCpuPlatform")
     def min_cpu_platform(self) -> Optional[pulumi.Input[str]]:
+        """
+        Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.
+        """
         return pulumi.get(self, "min_cpu_platform")

     @min_cpu_platform.setter
@@ -11148,6 +11477,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="nodeGroup")
     def node_group(self) -> Optional[pulumi.Input[str]]:
+        """
+        Setting this field will assign instances of this pool to run on the specified node group. This is useful for running workloads on sole tenant nodes.
+        """
         return pulumi.get(self, "node_group")

     @node_group.setter
@@ -11157,6 +11489,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="oauthScopes")
     def oauth_scopes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+        """
+        The set of Google API scopes to be made available on all of the node VMs.
+        """
         return pulumi.get(self, "oauth_scopes")

     @oauth_scopes.setter
@@ -11166,6 +11501,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter
     def preemptible(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Whether the nodes are created as preemptible VM instances.
+        """
         return pulumi.get(self, "preemptible")

     @preemptible.setter
@@ -11175,6 +11513,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="reservationAffinity")
     def reservation_affinity(self) -> Optional[pulumi.Input['NodePoolNodeConfigReservationAffinityArgs']]:
+        """
+        The reservation affinity configuration for the node pool.
+        """
         return pulumi.get(self, "reservation_affinity")

     @reservation_affinity.setter
@@ -11184,6 +11525,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="resourceLabels")
     def resource_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
+        """
+        The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
+        """
         return pulumi.get(self, "resource_labels")

     @resource_labels.setter
@@ -11193,6 +11537,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="sandboxConfig")
     def sandbox_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigSandboxConfigArgs']]:
+        """
+        Sandbox configuration for this node.
+        """
         return pulumi.get(self, "sandbox_config")

     @sandbox_config.setter
@@ -11202,6 +11549,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="serviceAccount")
     def service_account(self) -> Optional[pulumi.Input[str]]:
+        """
+        The Google Cloud Platform Service Account to be used by the node VMs.
+        """
         return pulumi.get(self, "service_account")

     @service_account.setter
@@ -11211,6 +11561,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="shieldedInstanceConfig")
     def shielded_instance_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigShieldedInstanceConfigArgs']]:
+        """
+        Shielded Instance options.
+        """
         return pulumi.get(self, "shielded_instance_config")

     @shielded_instance_config.setter
@@ -11220,6 +11573,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="soleTenantConfig")
     def sole_tenant_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigSoleTenantConfigArgs']]:
+        """
+        Node affinity options for sole tenant node pools.
+        """
         return pulumi.get(self, "sole_tenant_config")

     @sole_tenant_config.setter
@@ -11229,6 +11585,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter
     def spot(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Whether the nodes are created as spot VM instances.
+        """
         return pulumi.get(self, "spot")

     @spot.setter
@@ -11238,6 +11597,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter
     def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+        """
+        The list of instance tags applied to all nodes.
+        """
         return pulumi.get(self, "tags")

     @tags.setter
@@ -11247,6 +11609,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter
     def taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigTaintArgs']]]]:
+        """
+        List of Kubernetes taints to be applied to each node.
+        """
         return pulumi.get(self, "taints")

     @taints.setter
@@ -11256,6 +11621,9 @@ class NodePoolNodeConfigArgs:
     @property
     @pulumi.getter(name="workloadMetadataConfig")
     def workload_metadata_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigWorkloadMetadataConfigArgs']]:
+        """
+        The workload metadata configuration for this node.
+        """
         return pulumi.get(self, "workload_metadata_config")

     @workload_metadata_config.setter
@@ -11267,11 +11635,17 @@ class NodePoolNodeConfigArgs:

 class NodePoolNodeConfigAdvancedMachineFeaturesArgs:
     def __init__(__self__, *,
                  threads_per_core: pulumi.Input[int]):
+        """
+        :param pulumi.Input[int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
+        """
         pulumi.set(__self__, "threads_per_core", threads_per_core)

     @property
     @pulumi.getter(name="threadsPerCore")
     def threads_per_core(self) -> pulumi.Input[int]:
+        """
+        The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
+        """
         return pulumi.get(self, "threads_per_core")

     @threads_per_core.setter
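Where this matters in practice: a minimal sketch of disabling SMT on a pool's nodes via advanced_machine_features (the machine type is an illustrative assumption):

    import pulumi_gcp as gcp

    # Pin one thread per physical core to disable simultaneous multithreading.
    node_config = gcp.container.NodePoolNodeConfigArgs(
        machine_type="n2-standard-8",
        advanced_machine_features=gcp.container.NodePoolNodeConfigAdvancedMachineFeaturesArgs(
            threads_per_core=1,
        ),
    )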
@@ -11309,6 +11683,11 @@ class NodePoolNodeConfigEffectiveTaintArgs:
                  effect: Optional[pulumi.Input[str]] = None,
                  key: Optional[pulumi.Input[str]] = None,
                  value: Optional[pulumi.Input[str]] = None):
+        """
+        :param pulumi.Input[str] effect: Effect for taint.
+        :param pulumi.Input[str] key: Key for taint.
+        :param pulumi.Input[str] value: Value for taint.
+        """
         if effect is not None:
             pulumi.set(__self__, "effect", effect)
         if key is not None:
@@ -11319,6 +11698,9 @@ class NodePoolNodeConfigEffectiveTaintArgs:
     @property
     @pulumi.getter
     def effect(self) -> Optional[pulumi.Input[str]]:
+        """
+        Effect for taint.
+        """
         return pulumi.get(self, "effect")

     @effect.setter
@@ -11328,6 +11710,9 @@ class NodePoolNodeConfigEffectiveTaintArgs:
     @property
     @pulumi.getter
     def key(self) -> Optional[pulumi.Input[str]]:
+        """
+        Key for taint.
+        """
         return pulumi.get(self, "key")

     @key.setter
@@ -11337,6 +11722,9 @@ class NodePoolNodeConfigEffectiveTaintArgs:
     @property
     @pulumi.getter
     def value(self) -> Optional[pulumi.Input[str]]:
+        """
+        Value for taint.
+        """
         return pulumi.get(self, "value")

     @value.setter
@@ -11348,11 +11736,17 @@ class NodePoolNodeConfigEffectiveTaintArgs:

 class NodePoolNodeConfigEphemeralStorageConfigArgs:
     def __init__(__self__, *,
                  local_ssd_count: pulumi.Input[int]):
+        """
+        :param pulumi.Input[int] local_ssd_count: Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
+        """
         pulumi.set(__self__, "local_ssd_count", local_ssd_count)

     @property
     @pulumi.getter(name="localSsdCount")
     def local_ssd_count(self) -> pulumi.Input[int]:
+        """
+        Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
+        """
         return pulumi.get(self, "local_ssd_count")

     @local_ssd_count.setter
@@ -11364,11 +11758,17 @@ class NodePoolNodeConfigEphemeralStorageConfigArgs:

 class NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs:
     def __init__(__self__, *,
                  local_ssd_count: pulumi.Input[int]):
+        """
+        :param pulumi.Input[int] local_ssd_count: Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
+        """
         pulumi.set(__self__, "local_ssd_count", local_ssd_count)

     @property
     @pulumi.getter(name="localSsdCount")
     def local_ssd_count(self) -> pulumi.Input[int]:
+        """
+        Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.
+        """
         return pulumi.get(self, "local_ssd_count")

     @local_ssd_count.setter
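A short sketch of how the local-SSD-backed ephemeral storage option above might be used; the count must match what the chosen machine type supports, and both values here are illustrative assumptions:

    import pulumi_gcp as gcp

    # Back pod ephemeral storage with one local NVMe SSD instead of the boot disk.
    node_config = gcp.container.NodePoolNodeConfigArgs(
        machine_type="n2-standard-8",
        ephemeral_storage_local_ssd_config=gcp.container.NodePoolNodeConfigEphemeralStorageLocalSsdConfigArgs(
            local_ssd_count=1,
        ),
    )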
@@ -11433,9 +11833,13 @@ class NodePoolNodeConfigGuestAcceleratorArgs:
                  gpu_partition_size: Optional[pulumi.Input[str]] = None,
                  gpu_sharing_config: Optional[pulumi.Input['NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs']] = None):
         """
+        :param pulumi.Input[int] count: The number of the accelerator cards exposed to an instance.
         :param pulumi.Input[str] type: The type of the policy. Supports a single value: COMPACT.
                Specifying COMPACT placement policy type places node pool's nodes in a closer
                physical proximity in order to reduce network latency between nodes.
+        :param pulumi.Input['NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs'] gpu_driver_installation_config: Configuration for auto installation of GPU driver.
+        :param pulumi.Input[str] gpu_partition_size: Size of partitions to create on the GPU. Valid values are described in the NVIDIA MIG user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
+        :param pulumi.Input['NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs'] gpu_sharing_config: Configuration for GPU sharing.
         """
         pulumi.set(__self__, "count", count)
         pulumi.set(__self__, "type", type)
@@ -11449,6 +11853,9 @@ class NodePoolNodeConfigGuestAcceleratorArgs:
     @property
     @pulumi.getter
     def count(self) -> pulumi.Input[int]:
+        """
+        The number of the accelerator cards exposed to an instance.
+        """
         return pulumi.get(self, "count")

     @count.setter
@@ -11472,6 +11879,9 @@ class NodePoolNodeConfigGuestAcceleratorArgs:
     @property
     @pulumi.getter(name="gpuDriverInstallationConfig")
     def gpu_driver_installation_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs']]:
+        """
+        Configuration for auto installation of GPU driver.
+        """
         return pulumi.get(self, "gpu_driver_installation_config")

     @gpu_driver_installation_config.setter
@@ -11481,6 +11891,9 @@ class NodePoolNodeConfigGuestAcceleratorArgs:
     @property
     @pulumi.getter(name="gpuPartitionSize")
     def gpu_partition_size(self) -> Optional[pulumi.Input[str]]:
+        """
+        Size of partitions to create on the GPU. Valid values are described in the NVIDIA MIG user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)
+        """
         return pulumi.get(self, "gpu_partition_size")

     @gpu_partition_size.setter
@@ -11490,6 +11903,9 @@ class NodePoolNodeConfigGuestAcceleratorArgs:
     @property
     @pulumi.getter(name="gpuSharingConfig")
     def gpu_sharing_config(self) -> Optional[pulumi.Input['NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs']]:
+        """
+        Configuration for GPU sharing.
+        """
         return pulumi.get(self, "gpu_sharing_config")

     @gpu_sharing_config.setter
@@ -11501,11 +11917,17 @@ class NodePoolNodeConfigGuestAcceleratorArgs:

 class NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs:
     def __init__(__self__, *,
                  gpu_driver_version: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] gpu_driver_version: Mode for how the GPU driver is installed.
+        """
         pulumi.set(__self__, "gpu_driver_version", gpu_driver_version)

     @property
     @pulumi.getter(name="gpuDriverVersion")
     def gpu_driver_version(self) -> pulumi.Input[str]:
+        """
+        Mode for how the GPU driver is installed.
+        """
         return pulumi.get(self, "gpu_driver_version")

     @gpu_driver_version.setter
@@ -11518,12 +11940,19 @@ class NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs:
     def __init__(__self__, *,
                  gpu_sharing_strategy: pulumi.Input[str],
                  max_shared_clients_per_gpu: pulumi.Input[int]):
+        """
+        :param pulumi.Input[str] gpu_sharing_strategy: The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
+        :param pulumi.Input[int] max_shared_clients_per_gpu: The maximum number of containers that can share a GPU.
+        """
         pulumi.set(__self__, "gpu_sharing_strategy", gpu_sharing_strategy)
         pulumi.set(__self__, "max_shared_clients_per_gpu", max_shared_clients_per_gpu)

     @property
     @pulumi.getter(name="gpuSharingStrategy")
     def gpu_sharing_strategy(self) -> pulumi.Input[str]:
+        """
+        The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)
+        """
         return pulumi.get(self, "gpu_sharing_strategy")

     @gpu_sharing_strategy.setter
@@ -11533,6 +11962,9 @@ class NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs:
     @property
     @pulumi.getter(name="maxSharedClientsPerGpu")
     def max_shared_clients_per_gpu(self) -> pulumi.Input[int]:
+        """
+        The maximum number of containers that can share a GPU.
+        """
         return pulumi.get(self, "max_shared_clients_per_gpu")

     @max_shared_clients_per_gpu.setter
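The accelerator, driver-installation, and GPU-sharing args above compose as nested inputs. A hedged sketch; the accelerator type, driver version, and sharing strategy values are assumptions, not taken from this diff:

    import pulumi_gcp as gcp

    # One T4 GPU per node, time-shared by up to two containers, with the
    # driver installed automatically.
    accelerator = gcp.container.NodePoolNodeConfigGuestAcceleratorArgs(
        type="nvidia-tesla-t4",
        count=1,
        gpu_driver_installation_config=gcp.container.NodePoolNodeConfigGuestAcceleratorGpuDriverInstallationConfigArgs(
            gpu_driver_version="DEFAULT",
        ),
        gpu_sharing_config=gcp.container.NodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs(
            gpu_sharing_strategy="TIME_SHARING",
            max_shared_clients_per_gpu=2,
        ),
    )
    node_config = gcp.container.NodePoolNodeConfigArgs(guest_accelerators=[accelerator])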
@@ -11568,11 +12000,17 @@ class NodePoolNodeConfigGvnicArgs:

 class NodePoolNodeConfigHostMaintenancePolicyArgs:
     def __init__(__self__, *,
                  maintenance_interval: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] maintenance_interval: .
+        """
         pulumi.set(__self__, "maintenance_interval", maintenance_interval)

     @property
     @pulumi.getter(name="maintenanceInterval")
     def maintenance_interval(self) -> pulumi.Input[str]:
+        """
+        .
+        """
         return pulumi.get(self, "maintenance_interval")

     @maintenance_interval.setter
@@ -11587,6 +12025,12 @@ class NodePoolNodeConfigKubeletConfigArgs:
                  cpu_cfs_quota: Optional[pulumi.Input[bool]] = None,
                  cpu_cfs_quota_period: Optional[pulumi.Input[str]] = None,
                  pod_pids_limit: Optional[pulumi.Input[int]] = None):
+        """
+        :param pulumi.Input[str] cpu_manager_policy: Control the CPU management policy on the node.
+        :param pulumi.Input[bool] cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
+        :param pulumi.Input[str] cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
+        :param pulumi.Input[int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
+        """
         pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
         if cpu_cfs_quota is not None:
             pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
@@ -11598,6 +12042,9 @@ class NodePoolNodeConfigKubeletConfigArgs:
     @property
     @pulumi.getter(name="cpuManagerPolicy")
     def cpu_manager_policy(self) -> pulumi.Input[str]:
+        """
+        Control the CPU management policy on the node.
+        """
         return pulumi.get(self, "cpu_manager_policy")

     @cpu_manager_policy.setter
@@ -11607,6 +12054,9 @@ class NodePoolNodeConfigKubeletConfigArgs:
     @property
     @pulumi.getter(name="cpuCfsQuota")
     def cpu_cfs_quota(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Enable CPU CFS quota enforcement for containers that specify CPU limits.
+        """
         return pulumi.get(self, "cpu_cfs_quota")

     @cpu_cfs_quota.setter
@@ -11616,6 +12066,9 @@ class NodePoolNodeConfigKubeletConfigArgs:
     @property
     @pulumi.getter(name="cpuCfsQuotaPeriod")
     def cpu_cfs_quota_period(self) -> Optional[pulumi.Input[str]]:
+        """
+        Set the CPU CFS quota period value 'cpu.cfs_period_us'.
+        """
         return pulumi.get(self, "cpu_cfs_quota_period")

     @cpu_cfs_quota_period.setter
@@ -11625,6 +12078,9 @@ class NodePoolNodeConfigKubeletConfigArgs:
     @property
     @pulumi.getter(name="podPidsLimit")
     def pod_pids_limit(self) -> Optional[pulumi.Input[int]]:
+        """
+        Controls the maximum number of processes allowed to run in a pod.
+        """
         return pulumi.get(self, "pod_pids_limit")

     @pod_pids_limit.setter
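A sketch of the kubelet options documented above; cpu_manager_policy is the one required field, and all values are illustrative:

    import pulumi_gcp as gcp

    # Static CPU pinning with CFS quota enforcement and a per-pod PID cap.
    kubelet = gcp.container.NodePoolNodeConfigKubeletConfigArgs(
        cpu_manager_policy="static",
        cpu_cfs_quota=True,
        cpu_cfs_quota_period="100ms",
        pod_pids_limit=4096,
    )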
@@ -11637,6 +12093,10 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
     def __init__(__self__, *,
                  cgroup_mode: Optional[pulumi.Input[str]] = None,
                  sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
+        """
+        :param pulumi.Input[str] cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
+        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
+        """
         if cgroup_mode is not None:
             pulumi.set(__self__, "cgroup_mode", cgroup_mode)
         if sysctls is not None:
@@ -11645,6 +12105,9 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
     @property
     @pulumi.getter(name="cgroupMode")
     def cgroup_mode(self) -> Optional[pulumi.Input[str]]:
+        """
+        cgroupMode specifies the cgroup mode to be used on the node.
+        """
         return pulumi.get(self, "cgroup_mode")

     @cgroup_mode.setter
@@ -11654,6 +12117,9 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
     @property
     @pulumi.getter
     def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
+        """
+        The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
+        """
         return pulumi.get(self, "sysctls")

     @sysctls.setter
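A sketch of the Linux node options above; the cgroup mode and sysctl key/value are illustrative assumptions, and GKE accepts only a limited allowlist of sysctls:

    import pulumi_gcp as gcp

    # Kernel tuning plus cgroup v2 on the pool's nodes.
    linux = gcp.container.NodePoolNodeConfigLinuxNodeConfigArgs(
        cgroup_mode="CGROUP_MODE_V2",
        sysctls={"net.core.somaxconn": "4096"},
    )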
@@ -11665,11 +12131,17 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:

 class NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs:
     def __init__(__self__, *,
                  local_ssd_count: pulumi.Input[int]):
+        """
+        :param pulumi.Input[int] local_ssd_count: Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
+        """
         pulumi.set(__self__, "local_ssd_count", local_ssd_count)

     @property
     @pulumi.getter(name="localSsdCount")
     def local_ssd_count(self) -> pulumi.Input[int]:
+        """
+        Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.
+        """
         return pulumi.get(self, "local_ssd_count")

     @local_ssd_count.setter
@@ -11683,6 +12155,11 @@ class NodePoolNodeConfigReservationAffinityArgs:
                  consume_reservation_type: pulumi.Input[str],
                  key: Optional[pulumi.Input[str]] = None,
                  values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
+        """
+        :param pulumi.Input[str] consume_reservation_type: Corresponds to the type of reservation consumption.
+        :param pulumi.Input[str] key: The label key of a reservation resource.
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] values: The label values of the reservation resource.
+        """
         pulumi.set(__self__, "consume_reservation_type", consume_reservation_type)
         if key is not None:
             pulumi.set(__self__, "key", key)
@@ -11692,6 +12169,9 @@ class NodePoolNodeConfigReservationAffinityArgs:
     @property
     @pulumi.getter(name="consumeReservationType")
     def consume_reservation_type(self) -> pulumi.Input[str]:
+        """
+        Corresponds to the type of reservation consumption.
+        """
         return pulumi.get(self, "consume_reservation_type")

     @consume_reservation_type.setter
@@ -11701,6 +12181,9 @@ class NodePoolNodeConfigReservationAffinityArgs:
     @property
     @pulumi.getter
     def key(self) -> Optional[pulumi.Input[str]]:
+        """
+        The label key of a reservation resource.
+        """
         return pulumi.get(self, "key")

     @key.setter
@@ -11710,6 +12193,9 @@ class NodePoolNodeConfigReservationAffinityArgs:
     @property
     @pulumi.getter
     def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
+        """
+        The label values of the reservation resource.
+        """
         return pulumi.get(self, "values")

     @values.setter
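A sketch of consuming a specific Compute Engine reservation with the args above; the reservation name is a placeholder and the label key is an assumption based on common Compute Engine usage:

    import pulumi_gcp as gcp

    # Consume a specific named reservation. "my-reservation" is hypothetical.
    affinity = gcp.container.NodePoolNodeConfigReservationAffinityArgs(
        consume_reservation_type="SPECIFIC_RESERVATION",
        key="compute.googleapis.com/reservation-name",
        values=["my-reservation"],
    )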
@@ -11721,11 +12207,17 @@ class NodePoolNodeConfigReservationAffinityArgs:

 class NodePoolNodeConfigSandboxConfigArgs:
     def __init__(__self__, *,
                  sandbox_type: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] sandbox_type: Type of the sandbox to use for the node (e.g. 'gvisor')
+        """
         pulumi.set(__self__, "sandbox_type", sandbox_type)

     @property
     @pulumi.getter(name="sandboxType")
     def sandbox_type(self) -> pulumi.Input[str]:
+        """
+        Type of the sandbox to use for the node (e.g. 'gvisor')
+        """
         return pulumi.get(self, "sandbox_type")

     @sandbox_type.setter
@@ -11738,6 +12230,10 @@ class NodePoolNodeConfigShieldedInstanceConfigArgs:
     def __init__(__self__, *,
                  enable_integrity_monitoring: Optional[pulumi.Input[bool]] = None,
                  enable_secure_boot: Optional[pulumi.Input[bool]] = None):
+        """
+        :param pulumi.Input[bool] enable_integrity_monitoring: Defines whether the instance has integrity monitoring enabled.
+        :param pulumi.Input[bool] enable_secure_boot: Defines whether the instance has Secure Boot enabled.
+        """
         if enable_integrity_monitoring is not None:
             pulumi.set(__self__, "enable_integrity_monitoring", enable_integrity_monitoring)
         if enable_secure_boot is not None:
@@ -11746,6 +12242,9 @@ class NodePoolNodeConfigShieldedInstanceConfigArgs:
     @property
     @pulumi.getter(name="enableIntegrityMonitoring")
     def enable_integrity_monitoring(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Defines whether the instance has integrity monitoring enabled.
+        """
         return pulumi.get(self, "enable_integrity_monitoring")

     @enable_integrity_monitoring.setter
@@ -11755,6 +12254,9 @@ class NodePoolNodeConfigShieldedInstanceConfigArgs:
     @property
     @pulumi.getter(name="enableSecureBoot")
     def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Defines whether the instance has Secure Boot enabled.
+        """
         return pulumi.get(self, "enable_secure_boot")

     @enable_secure_boot.setter
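A sketch of the Shielded Instance options above; both fields are optional booleans:

    import pulumi_gcp as gcp

    # Turn on both Shielded VM protections for the pool's nodes.
    shielded = gcp.container.NodePoolNodeConfigShieldedInstanceConfigArgs(
        enable_integrity_monitoring=True,
        enable_secure_boot=True,
    )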
@@ -11766,11 +12268,17 @@ class NodePoolNodeConfigShieldedInstanceConfigArgs:

 class NodePoolNodeConfigSoleTenantConfigArgs:
     def __init__(__self__, *,
                  node_affinities: pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
+        """
+        :param pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: .
+        """
         pulumi.set(__self__, "node_affinities", node_affinities)

     @property
     @pulumi.getter(name="nodeAffinities")
     def node_affinities(self) -> pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]:
+        """
+        .
+        """
         return pulumi.get(self, "node_affinities")

     @node_affinities.setter
@@ -11784,6 +12292,11 @@ class NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs:
                  key: pulumi.Input[str],
                  operator: pulumi.Input[str],
                  values: pulumi.Input[Sequence[pulumi.Input[str]]]):
+        """
+        :param pulumi.Input[str] key: .
+        :param pulumi.Input[str] operator: .
+        :param pulumi.Input[Sequence[pulumi.Input[str]]] values: .
+        """
         pulumi.set(__self__, "key", key)
         pulumi.set(__self__, "operator", operator)
         pulumi.set(__self__, "values", values)
@@ -11791,6 +12304,9 @@ class NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs:
     @property
     @pulumi.getter
     def key(self) -> pulumi.Input[str]:
+        """
+        .
+        """
         return pulumi.get(self, "key")

     @key.setter
@@ -11800,6 +12316,9 @@ class NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs:
     @property
     @pulumi.getter
     def operator(self) -> pulumi.Input[str]:
+        """
+        .
+        """
         return pulumi.get(self, "operator")

     @operator.setter
@@ -11809,6 +12328,9 @@ class NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs:
     @property
     @pulumi.getter
     def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
+        """
+        .
+        """
         return pulumi.get(self, "values")

     @values.setter
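A sketch of pinning a pool to a sole-tenant node group with the node-affinity args above. Since the field descriptions are left blank in this diff, the key, operator, and values follow Compute Engine node-affinity conventions and are assumptions; "my-node-group" is a placeholder:

    import pulumi_gcp as gcp

    # Schedule the pool's VMs onto a specific sole-tenant node group.
    sole_tenant = gcp.container.NodePoolNodeConfigSoleTenantConfigArgs(
        node_affinities=[gcp.container.NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs(
            key="compute.googleapis.com/node-group-name",
            operator="IN",
            values=["my-node-group"],
        )],
    )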
@@ -11822,6 +12344,11 @@ class NodePoolNodeConfigTaintArgs:
                  effect: pulumi.Input[str],
                  key: pulumi.Input[str],
                  value: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] effect: Effect for taint.
+        :param pulumi.Input[str] key: Key for taint.
+        :param pulumi.Input[str] value: Value for taint.
+        """
         pulumi.set(__self__, "effect", effect)
         pulumi.set(__self__, "key", key)
         pulumi.set(__self__, "value", value)
@@ -11829,6 +12356,9 @@ class NodePoolNodeConfigTaintArgs:
     @property
     @pulumi.getter
     def effect(self) -> pulumi.Input[str]:
+        """
+        Effect for taint.
+        """
         return pulumi.get(self, "effect")

     @effect.setter
@@ -11838,6 +12368,9 @@ class NodePoolNodeConfigTaintArgs:
     @property
     @pulumi.getter
     def key(self) -> pulumi.Input[str]:
+        """
+        Key for taint.
+        """
         return pulumi.get(self, "key")

     @key.setter
@@ -11847,6 +12380,9 @@ class NodePoolNodeConfigTaintArgs:
     @property
     @pulumi.getter
     def value(self) -> pulumi.Input[str]:
+        """
+        Value for taint.
+        """
         return pulumi.get(self, "value")

     @value.setter
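A sketch of a node taint using the args above; all three fields are required, and the key/value pair is illustrative:

    import pulumi_gcp as gcp

    # Keep ordinary pods off these nodes unless they tolerate the taint.
    taint = gcp.container.NodePoolNodeConfigTaintArgs(
        key="dedicated",
        value="gpu",
        effect="NO_SCHEDULE",
    )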
@@ -11858,11 +12394,17 @@ class NodePoolNodeConfigTaintArgs:

 class NodePoolNodeConfigWorkloadMetadataConfigArgs:
     def __init__(__self__, *,
                  mode: pulumi.Input[str]):
+        """
+        :param pulumi.Input[str] mode: Mode is the configuration for how to expose metadata to workloads running on the node.
+        """
         pulumi.set(__self__, "mode", mode)

     @property
     @pulumi.getter
     def mode(self) -> pulumi.Input[str]:
+        """
+        Mode is the configuration for how to expose metadata to workloads running on the node.
+        """
         return pulumi.get(self, "mode")

     @mode.setter