pulumi-gcp 7.22.0a1715345822__py3-none-any.whl → 7.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65):
  1. pulumi_gcp/__init__.py +35 -0
  2. pulumi_gcp/alloydb/_inputs.py +139 -0
  3. pulumi_gcp/alloydb/cluster.py +54 -0
  4. pulumi_gcp/alloydb/outputs.py +145 -0
  5. pulumi_gcp/applicationintegration/auth_config.py +2 -6
  6. pulumi_gcp/applicationintegration/client.py +133 -18
  7. pulumi_gcp/bigquery/dataset.py +2 -2
  8. pulumi_gcp/bigquery/job.py +16 -20
  9. pulumi_gcp/bigquery/table.py +47 -0
  10. pulumi_gcp/bigtable/__init__.py +1 -0
  11. pulumi_gcp/bigtable/_inputs.py +101 -0
  12. pulumi_gcp/bigtable/authorized_view.py +440 -0
  13. pulumi_gcp/bigtable/outputs.py +119 -0
  14. pulumi_gcp/certificateauthority/certificate_template.py +70 -0
  15. pulumi_gcp/cloudbuildv2/repository.py +2 -2
  16. pulumi_gcp/clouddeploy/_inputs.py +96 -0
  17. pulumi_gcp/clouddeploy/custom_target_type.py +46 -0
  18. pulumi_gcp/clouddeploy/delivery_pipeline.py +7 -7
  19. pulumi_gcp/clouddeploy/outputs.py +96 -1
  20. pulumi_gcp/clouddeploy/target.py +54 -7
  21. pulumi_gcp/cloudrunv2/job.py +2 -4
  22. pulumi_gcp/cloudrunv2/service.py +2 -4
  23. pulumi_gcp/compute/_inputs.py +693 -0
  24. pulumi_gcp/compute/firewall_policy_rule.py +125 -10
  25. pulumi_gcp/compute/network_firewall_policy_rule.py +125 -10
  26. pulumi_gcp/compute/outputs.py +688 -0
  27. pulumi_gcp/compute/region_network_firewall_policy_rule.py +125 -10
  28. pulumi_gcp/compute/region_security_policy_rule.py +230 -1
  29. pulumi_gcp/compute/router_peer.py +54 -14
  30. pulumi_gcp/config/__init__.pyi +2 -0
  31. pulumi_gcp/config/vars.py +4 -0
  32. pulumi_gcp/container/_inputs.py +236 -3
  33. pulumi_gcp/container/outputs.py +365 -4
  34. pulumi_gcp/dataflow/flex_template_job.py +28 -28
  35. pulumi_gcp/dataflow/job.py +28 -14
  36. pulumi_gcp/essentialcontacts/document_ai_warehouse_document_schema.py +0 -528
  37. pulumi_gcp/firebaserules/release.py +2 -2
  38. pulumi_gcp/iam/_inputs.py +191 -2
  39. pulumi_gcp/iam/outputs.py +197 -2
  40. pulumi_gcp/iam/workforce_pool_provider.py +245 -0
  41. pulumi_gcp/integrationconnectors/__init__.py +1 -0
  42. pulumi_gcp/integrationconnectors/managed_zone.py +753 -0
  43. pulumi_gcp/networkconnectivity/__init__.py +1 -0
  44. pulumi_gcp/networkconnectivity/regional_endpoint.py +946 -0
  45. pulumi_gcp/networksecurity/firewall_endpoint.py +34 -0
  46. pulumi_gcp/networksecurity/firewall_endpoint_association.py +24 -0
  47. pulumi_gcp/networksecurity/security_profile.py +16 -0
  48. pulumi_gcp/networksecurity/security_profile_group.py +18 -0
  49. pulumi_gcp/networksecurity/tls_inspection_policy.py +16 -0
  50. pulumi_gcp/orgpolicy/policy.py +2 -2
  51. pulumi_gcp/privilegedaccessmanager/__init__.py +10 -0
  52. pulumi_gcp/privilegedaccessmanager/_inputs.py +420 -0
  53. pulumi_gcp/privilegedaccessmanager/entitlement.py +852 -0
  54. pulumi_gcp/privilegedaccessmanager/outputs.py +491 -0
  55. pulumi_gcp/provider.py +20 -0
  56. pulumi_gcp/pubsub/subscription.py +4 -4
  57. pulumi_gcp/pulumi-plugin.json +2 -1
  58. pulumi_gcp/redis/cluster.py +69 -2
  59. pulumi_gcp/storage/__init__.py +1 -0
  60. pulumi_gcp/storage/get_buckets.py +138 -0
  61. pulumi_gcp/storage/outputs.py +63 -0
  62. {pulumi_gcp-7.22.0a1715345822.dist-info → pulumi_gcp-7.23.0.dist-info}/METADATA +1 -1
  63. {pulumi_gcp-7.22.0a1715345822.dist-info → pulumi_gcp-7.23.0.dist-info}/RECORD +65 -57
  64. {pulumi_gcp-7.22.0a1715345822.dist-info → pulumi_gcp-7.23.0.dist-info}/WHEEL +0 -0
  65. {pulumi_gcp-7.22.0a1715345822.dist-info → pulumi_gcp-7.23.0.dist-info}/top_level.txt +0 -0
@@ -149,6 +149,7 @@ __all__ = [
149
149
  'ClusterNodeConfigLocalNvmeSsdBlockConfig',
150
150
  'ClusterNodeConfigReservationAffinity',
151
151
  'ClusterNodeConfigSandboxConfig',
152
+ 'ClusterNodeConfigSecondaryBootDisk',
152
153
  'ClusterNodeConfigShieldedInstanceConfig',
153
154
  'ClusterNodeConfigSoleTenantConfig',
154
155
  'ClusterNodeConfigSoleTenantConfigNodeAffinity',
@@ -185,6 +186,7 @@ __all__ = [
185
186
  'ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfig',
186
187
  'ClusterNodePoolNodeConfigReservationAffinity',
187
188
  'ClusterNodePoolNodeConfigSandboxConfig',
189
+ 'ClusterNodePoolNodeConfigSecondaryBootDisk',
188
190
  'ClusterNodePoolNodeConfigShieldedInstanceConfig',
189
191
  'ClusterNodePoolNodeConfigSoleTenantConfig',
190
192
  'ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity',
@@ -237,6 +239,7 @@ __all__ = [
237
239
  'NodePoolNodeConfigLocalNvmeSsdBlockConfig',
238
240
  'NodePoolNodeConfigReservationAffinity',
239
241
  'NodePoolNodeConfigSandboxConfig',
242
+ 'NodePoolNodeConfigSecondaryBootDisk',
240
243
  'NodePoolNodeConfigShieldedInstanceConfig',
241
244
  'NodePoolNodeConfigSoleTenantConfig',
242
245
  'NodePoolNodeConfigSoleTenantConfigNodeAffinity',
@@ -318,6 +321,7 @@ __all__ = [
318
321
  'GetClusterNodeConfigLocalNvmeSsdBlockConfigResult',
319
322
  'GetClusterNodeConfigReservationAffinityResult',
320
323
  'GetClusterNodeConfigSandboxConfigResult',
324
+ 'GetClusterNodeConfigSecondaryBootDiskResult',
321
325
  'GetClusterNodeConfigShieldedInstanceConfigResult',
322
326
  'GetClusterNodeConfigSoleTenantConfigResult',
323
327
  'GetClusterNodeConfigSoleTenantConfigNodeAffinityResult',
@@ -354,6 +358,7 @@ __all__ = [
354
358
  'GetClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigResult',
355
359
  'GetClusterNodePoolNodeConfigReservationAffinityResult',
356
360
  'GetClusterNodePoolNodeConfigSandboxConfigResult',
361
+ 'GetClusterNodePoolNodeConfigSecondaryBootDiskResult',
357
362
  'GetClusterNodePoolNodeConfigShieldedInstanceConfigResult',
358
363
  'GetClusterNodePoolNodeConfigSoleTenantConfigResult',
359
364
  'GetClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityResult',
@@ -5117,7 +5122,9 @@ class ClusterDnsConfig(dict):
5117
5122
  @staticmethod
5118
5123
  def __key_warning(key: str):
5119
5124
  suggest = None
5120
- if key == "clusterDns":
5125
+ if key == "additiveVpcScopeDnsDomain":
5126
+ suggest = "additive_vpc_scope_dns_domain"
5127
+ elif key == "clusterDns":
5121
5128
  suggest = "cluster_dns"
5122
5129
  elif key == "clusterDnsDomain":
5123
5130
  suggest = "cluster_dns_domain"
@@ -5136,14 +5143,18 @@ class ClusterDnsConfig(dict):
5136
5143
  return super().get(key, default)
5137
5144
 
5138
5145
  def __init__(__self__, *,
5146
+ additive_vpc_scope_dns_domain: Optional[str] = None,
5139
5147
  cluster_dns: Optional[str] = None,
5140
5148
  cluster_dns_domain: Optional[str] = None,
5141
5149
  cluster_dns_scope: Optional[str] = None):
5142
5150
  """
5151
+ :param str additive_vpc_scope_dns_domain: This will enable Cloud DNS additive VPC scope. Must provide a domain name that is unique within the VPC. For this to work `cluster_dns = "CLOUD_DNS"` and `cluster_dns_scope = "CLUSTER_SCOPE"` must both be set as well.
5143
5152
  :param str cluster_dns: Which in-cluster DNS provider should be used. `PROVIDER_UNSPECIFIED` (default) or `PLATFORM_DEFAULT` or `CLOUD_DNS`.
5144
5153
  :param str cluster_dns_domain: The suffix used for all cluster service records.
5145
5154
  :param str cluster_dns_scope: The scope of access to cluster DNS records. `DNS_SCOPE_UNSPECIFIED` (default) or `CLUSTER_SCOPE` or `VPC_SCOPE`.
5146
5155
  """
5156
+ if additive_vpc_scope_dns_domain is not None:
5157
+ pulumi.set(__self__, "additive_vpc_scope_dns_domain", additive_vpc_scope_dns_domain)
5147
5158
  if cluster_dns is not None:
5148
5159
  pulumi.set(__self__, "cluster_dns", cluster_dns)
5149
5160
  if cluster_dns_domain is not None:
@@ -5151,6 +5162,14 @@ class ClusterDnsConfig(dict):
5151
5162
  if cluster_dns_scope is not None:
5152
5163
  pulumi.set(__self__, "cluster_dns_scope", cluster_dns_scope)
5153
5164
 
5165
+ @property
5166
+ @pulumi.getter(name="additiveVpcScopeDnsDomain")
5167
+ def additive_vpc_scope_dns_domain(self) -> Optional[str]:
5168
+ """
5169
+ This will enable Cloud DNS additive VPC scope. Must provide a domain name that is unique within the VPC. For this to work `cluster_dns = "CLOUD_DNS"` and `cluster_dns_scope = "CLUSTER_SCOPE"` must both be set as well.
5170
+ """
5171
+ return pulumi.get(self, "additive_vpc_scope_dns_domain")
5172
+
5154
5173
  @property
5155
5174
  @pulumi.getter(name="clusterDns")
5156
5175
  def cluster_dns(self) -> Optional[str]:
@@ -6442,6 +6461,8 @@ class ClusterNodeConfig(dict):
6442
6461
  suggest = "resource_manager_tags"
6443
6462
  elif key == "sandboxConfig":
6444
6463
  suggest = "sandbox_config"
6464
+ elif key == "secondaryBootDisks":
6465
+ suggest = "secondary_boot_disks"
6445
6466
  elif key == "serviceAccount":
6446
6467
  suggest = "service_account"
6447
6468
  elif key == "shieldedInstanceConfig":
@@ -6494,6 +6515,7 @@ class ClusterNodeConfig(dict):
6494
6515
  resource_labels: Optional[Mapping[str, str]] = None,
6495
6516
  resource_manager_tags: Optional[Mapping[str, Any]] = None,
6496
6517
  sandbox_config: Optional['outputs.ClusterNodeConfigSandboxConfig'] = None,
6518
+ secondary_boot_disks: Optional[Sequence['outputs.ClusterNodeConfigSecondaryBootDisk']] = None,
6497
6519
  service_account: Optional[str] = None,
6498
6520
  shielded_instance_config: Optional['outputs.ClusterNodeConfigShieldedInstanceConfig'] = None,
6499
6521
  sole_tenant_config: Optional['outputs.ClusterNodeConfigSoleTenantConfig'] = None,
@@ -6579,6 +6601,7 @@ class ClusterNodeConfig(dict):
6579
6601
  for how these labels are applied to clusters, node pools and nodes.
6580
6602
  :param Mapping[str, Any] resource_manager_tags: A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`.
6581
6603
  :param 'ClusterNodeConfigSandboxConfigArgs' sandbox_config: Sandbox configuration for this node.
6604
+ :param Sequence['ClusterNodeConfigSecondaryBootDiskArgs'] secondary_boot_disks: Parameters for secondary boot disks to preload container images and data on new nodes. Structure is documented below. `gcfs_config` must be `enabled=true` for this feature to work. `min_master_version` must also be set to use GKE 1.28.3-gke.106700 or later versions.
6582
6605
  :param str service_account: The service account to be used by the Node VMs.
6583
6606
  If not specified, the "default" service account is used.
6584
6607
  :param 'ClusterNodeConfigShieldedInstanceConfigArgs' shielded_instance_config: Shielded Instance options. Structure is documented below.
@@ -6661,6 +6684,8 @@ class ClusterNodeConfig(dict):
6661
6684
  pulumi.set(__self__, "resource_manager_tags", resource_manager_tags)
6662
6685
  if sandbox_config is not None:
6663
6686
  pulumi.set(__self__, "sandbox_config", sandbox_config)
6687
+ if secondary_boot_disks is not None:
6688
+ pulumi.set(__self__, "secondary_boot_disks", secondary_boot_disks)
6664
6689
  if service_account is not None:
6665
6690
  pulumi.set(__self__, "service_account", service_account)
6666
6691
  if shielded_instance_config is not None:
@@ -6970,6 +6995,14 @@ class ClusterNodeConfig(dict):
6970
6995
  """
6971
6996
  return pulumi.get(self, "sandbox_config")
6972
6997
 
6998
+ @property
6999
+ @pulumi.getter(name="secondaryBootDisks")
7000
+ def secondary_boot_disks(self) -> Optional[Sequence['outputs.ClusterNodeConfigSecondaryBootDisk']]:
7001
+ """
7002
+ Parameters for secondary boot disks to preload container images and data on new nodes. Structure is documented below. `gcfs_config` must be `enabled=true` for this feature to work. `min_master_version` must also be set to use GKE 1.28.3-gke.106700 or later versions.
7003
+ """
7004
+ return pulumi.get(self, "secondary_boot_disks")
7005
+
6973
7006
  @property
6974
7007
  @pulumi.getter(name="serviceAccount")
6975
7008
  def service_account(self) -> Optional[str]:
@@ -7046,6 +7079,8 @@ class ClusterNodeConfigAdvancedMachineFeatures(dict):
7046
7079
  suggest = None
7047
7080
  if key == "threadsPerCore":
7048
7081
  suggest = "threads_per_core"
7082
+ elif key == "enableNestedVirtualization":
7083
+ suggest = "enable_nested_virtualization"
7049
7084
 
7050
7085
  if suggest:
7051
7086
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigAdvancedMachineFeatures. Access the value via the '{suggest}' property getter instead.")
@@ -7059,11 +7094,15 @@ class ClusterNodeConfigAdvancedMachineFeatures(dict):
7059
7094
  return super().get(key, default)
7060
7095
 
7061
7096
  def __init__(__self__, *,
7062
- threads_per_core: int):
7097
+ threads_per_core: int,
7098
+ enable_nested_virtualization: Optional[bool] = None):
7063
7099
  """
7064
7100
  :param int threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
7101
+ :param bool enable_nested_virtualization: Defines whether the instance should have nested virtualization enabled. Defaults to false.
7065
7102
  """
7066
7103
  pulumi.set(__self__, "threads_per_core", threads_per_core)
7104
+ if enable_nested_virtualization is not None:
7105
+ pulumi.set(__self__, "enable_nested_virtualization", enable_nested_virtualization)
7067
7106
 
7068
7107
  @property
7069
7108
  @pulumi.getter(name="threadsPerCore")
@@ -7073,6 +7112,14 @@ class ClusterNodeConfigAdvancedMachineFeatures(dict):
7073
7112
  """
7074
7113
  return pulumi.get(self, "threads_per_core")
7075
7114
 
7115
+ @property
7116
+ @pulumi.getter(name="enableNestedVirtualization")
7117
+ def enable_nested_virtualization(self) -> Optional[bool]:
7118
+ """
7119
+ Defines whether the instance should have nested virtualization enabled. Defaults to false.
7120
+ """
7121
+ return pulumi.get(self, "enable_nested_virtualization")
7122
+
7076
7123
 
7077
7124
  @pulumi.output_type
7078
7125
  class ClusterNodeConfigConfidentialNodes(dict):
@@ -7402,6 +7449,7 @@ class ClusterNodeConfigGuestAcceleratorGpuSharingConfig(dict):
7402
7449
  :param str gpu_sharing_strategy: The type of GPU sharing strategy to enable on the GPU node.
7403
7450
  Accepted values are:
7404
7451
  * `"TIME_SHARING"`: Allow multiple containers to have [time-shared](https://cloud.google.com/kubernetes-engine/docs/concepts/timesharing-gpus) access to a single GPU device.
7452
+ * `"MPS"`: Enable co-operative multi-process CUDA workloads to run concurrently on a single GPU device with [MPS](https://cloud.google.com/kubernetes-engine/docs/how-to/nvidia-mps-gpus)
7405
7453
  :param int max_shared_clients_per_gpu: The maximum number of containers that can share a GPU.
7406
7454
  """
7407
7455
  pulumi.set(__self__, "gpu_sharing_strategy", gpu_sharing_strategy)
@@ -7414,6 +7462,7 @@ class ClusterNodeConfigGuestAcceleratorGpuSharingConfig(dict):
7414
7462
  The type of GPU sharing strategy to enable on the GPU node.
7415
7463
  Accepted values are:
7416
7464
  * `"TIME_SHARING"`: Allow multiple containers to have [time-shared](https://cloud.google.com/kubernetes-engine/docs/concepts/timesharing-gpus) access to a single GPU device.
7465
+ * `"MPS"`: Enable co-operative multi-process CUDA workloads to run concurrently on a single GPU device with [MPS](https://cloud.google.com/kubernetes-engine/docs/how-to/nvidia-mps-gpus)
7417
7466
  """
7418
7467
  return pulumi.get(self, "gpu_sharing_strategy")
7419
7468
 
@@ -7783,6 +7832,53 @@ class ClusterNodeConfigSandboxConfig(dict):
7783
7832
  return pulumi.get(self, "sandbox_type")
7784
7833
 
7785
7834
 
7835
@pulumi.output_type
class ClusterNodeConfigSecondaryBootDisk(dict):
    """Secondary boot disk attached to cluster nodes, used to preload
    container images or data (e.g. via a `CONTAINER_IMAGE_CACHE` mode disk).
    """
    @staticmethod
    def __key_warning(key: str):
        # Only keys whose camelCase wire form differs from the snake_case
        # property need a suggestion; warn when such a key is accessed raw.
        suggest = {"diskImage": "disk_image"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigSecondaryBootDisk. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClusterNodeConfigSecondaryBootDisk.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClusterNodeConfigSecondaryBootDisk.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 disk_image: str,
                 mode: Optional[str] = None):
        """
        :param str disk_image: Path to disk image to create the secondary boot disk from. After using the [gke-disk-image-builder](https://github.com/GoogleCloudPlatform/ai-on-gke/tree/main/tools/gke-disk-image-builder), this argument should be `global/images/DISK_IMAGE_NAME`.
        :param str mode: Mode for how the secondary boot disk is used. An example mode is `CONTAINER_IMAGE_CACHE`.
        """
        pulumi.set(__self__, "disk_image", disk_image)
        # `mode` is optional on the wire; omit it entirely when unset.
        if mode is not None:
            pulumi.set(__self__, "mode", mode)

    @property
    @pulumi.getter(name="diskImage")
    def disk_image(self) -> str:
        """
        Path to disk image to create the secondary boot disk from. After using the [gke-disk-image-builder](https://github.com/GoogleCloudPlatform/ai-on-gke/tree/main/tools/gke-disk-image-builder), this argument should be `global/images/DISK_IMAGE_NAME`.
        """
        return pulumi.get(self, "disk_image")

    @property
    @pulumi.getter
    def mode(self) -> Optional[str]:
        """
        Mode for how the secondary boot disk is used. An example mode is `CONTAINER_IMAGE_CACHE`.
        """
        return pulumi.get(self, "mode")
7786
7882
  @pulumi.output_type
7787
7883
  class ClusterNodeConfigShieldedInstanceConfig(dict):
7788
7884
  @staticmethod
@@ -8932,6 +9028,8 @@ class ClusterNodePoolNodeConfig(dict):
8932
9028
  suggest = "resource_manager_tags"
8933
9029
  elif key == "sandboxConfig":
8934
9030
  suggest = "sandbox_config"
9031
+ elif key == "secondaryBootDisks":
9032
+ suggest = "secondary_boot_disks"
8935
9033
  elif key == "serviceAccount":
8936
9034
  suggest = "service_account"
8937
9035
  elif key == "shieldedInstanceConfig":
@@ -8984,6 +9082,7 @@ class ClusterNodePoolNodeConfig(dict):
8984
9082
  resource_labels: Optional[Mapping[str, str]] = None,
8985
9083
  resource_manager_tags: Optional[Mapping[str, Any]] = None,
8986
9084
  sandbox_config: Optional['outputs.ClusterNodePoolNodeConfigSandboxConfig'] = None,
9085
+ secondary_boot_disks: Optional[Sequence['outputs.ClusterNodePoolNodeConfigSecondaryBootDisk']] = None,
8987
9086
  service_account: Optional[str] = None,
8988
9087
  shielded_instance_config: Optional['outputs.ClusterNodePoolNodeConfigShieldedInstanceConfig'] = None,
8989
9088
  sole_tenant_config: Optional['outputs.ClusterNodePoolNodeConfigSoleTenantConfig'] = None,
@@ -9069,6 +9168,7 @@ class ClusterNodePoolNodeConfig(dict):
9069
9168
  for how these labels are applied to clusters, node pools and nodes.
9070
9169
  :param Mapping[str, Any] resource_manager_tags: A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`.
9071
9170
  :param 'ClusterNodePoolNodeConfigSandboxConfigArgs' sandbox_config: Sandbox configuration for this node.
9171
+ :param Sequence['ClusterNodePoolNodeConfigSecondaryBootDiskArgs'] secondary_boot_disks: Parameters for secondary boot disks to preload container images and data on new nodes. Structure is documented below. `gcfs_config` must be `enabled=true` for this feature to work. `min_master_version` must also be set to use GKE 1.28.3-gke.106700 or later versions.
9072
9172
  :param str service_account: The service account to be used by the Node VMs.
9073
9173
  If not specified, the "default" service account is used.
9074
9174
  :param 'ClusterNodePoolNodeConfigShieldedInstanceConfigArgs' shielded_instance_config: Shielded Instance options. Structure is documented below.
@@ -9151,6 +9251,8 @@ class ClusterNodePoolNodeConfig(dict):
9151
9251
  pulumi.set(__self__, "resource_manager_tags", resource_manager_tags)
9152
9252
  if sandbox_config is not None:
9153
9253
  pulumi.set(__self__, "sandbox_config", sandbox_config)
9254
+ if secondary_boot_disks is not None:
9255
+ pulumi.set(__self__, "secondary_boot_disks", secondary_boot_disks)
9154
9256
  if service_account is not None:
9155
9257
  pulumi.set(__self__, "service_account", service_account)
9156
9258
  if shielded_instance_config is not None:
@@ -9460,6 +9562,14 @@ class ClusterNodePoolNodeConfig(dict):
9460
9562
  """
9461
9563
  return pulumi.get(self, "sandbox_config")
9462
9564
 
9565
+ @property
9566
+ @pulumi.getter(name="secondaryBootDisks")
9567
+ def secondary_boot_disks(self) -> Optional[Sequence['outputs.ClusterNodePoolNodeConfigSecondaryBootDisk']]:
9568
+ """
9569
+ Parameters for secondary boot disks to preload container images and data on new nodes. Structure is documented below. `gcfs_config` must be `enabled=true` for this feature to work. `min_master_version` must also be set to use GKE 1.28.3-gke.106700 or later versions.
9570
+ """
9571
+ return pulumi.get(self, "secondary_boot_disks")
9572
+
9463
9573
  @property
9464
9574
  @pulumi.getter(name="serviceAccount")
9465
9575
  def service_account(self) -> Optional[str]:
@@ -9536,6 +9646,8 @@ class ClusterNodePoolNodeConfigAdvancedMachineFeatures(dict):
9536
9646
  suggest = None
9537
9647
  if key == "threadsPerCore":
9538
9648
  suggest = "threads_per_core"
9649
+ elif key == "enableNestedVirtualization":
9650
+ suggest = "enable_nested_virtualization"
9539
9651
 
9540
9652
  if suggest:
9541
9653
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigAdvancedMachineFeatures. Access the value via the '{suggest}' property getter instead.")
@@ -9549,11 +9661,15 @@ class ClusterNodePoolNodeConfigAdvancedMachineFeatures(dict):
9549
9661
  return super().get(key, default)
9550
9662
 
9551
9663
  def __init__(__self__, *,
9552
- threads_per_core: int):
9664
+ threads_per_core: int,
9665
+ enable_nested_virtualization: Optional[bool] = None):
9553
9666
  """
9554
9667
  :param int threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
9668
+ :param bool enable_nested_virtualization: Defines whether the instance should have nested virtualization enabled. Defaults to false.
9555
9669
  """
9556
9670
  pulumi.set(__self__, "threads_per_core", threads_per_core)
9671
+ if enable_nested_virtualization is not None:
9672
+ pulumi.set(__self__, "enable_nested_virtualization", enable_nested_virtualization)
9557
9673
 
9558
9674
  @property
9559
9675
  @pulumi.getter(name="threadsPerCore")
@@ -9563,6 +9679,14 @@ class ClusterNodePoolNodeConfigAdvancedMachineFeatures(dict):
9563
9679
  """
9564
9680
  return pulumi.get(self, "threads_per_core")
9565
9681
 
9682
+ @property
9683
+ @pulumi.getter(name="enableNestedVirtualization")
9684
+ def enable_nested_virtualization(self) -> Optional[bool]:
9685
+ """
9686
+ Defines whether the instance should have nested virtualization enabled. Defaults to false.
9687
+ """
9688
+ return pulumi.get(self, "enable_nested_virtualization")
9689
+
9566
9690
 
9567
9691
  @pulumi.output_type
9568
9692
  class ClusterNodePoolNodeConfigConfidentialNodes(dict):
@@ -9892,6 +10016,7 @@ class ClusterNodePoolNodeConfigGuestAcceleratorGpuSharingConfig(dict):
9892
10016
  :param str gpu_sharing_strategy: The type of GPU sharing strategy to enable on the GPU node.
9893
10017
  Accepted values are:
9894
10018
  * `"TIME_SHARING"`: Allow multiple containers to have [time-shared](https://cloud.google.com/kubernetes-engine/docs/concepts/timesharing-gpus) access to a single GPU device.
10019
+ * `"MPS"`: Enable co-operative multi-process CUDA workloads to run concurrently on a single GPU device with [MPS](https://cloud.google.com/kubernetes-engine/docs/how-to/nvidia-mps-gpus)
9895
10020
  :param int max_shared_clients_per_gpu: The maximum number of containers that can share a GPU.
9896
10021
  """
9897
10022
  pulumi.set(__self__, "gpu_sharing_strategy", gpu_sharing_strategy)
@@ -9904,6 +10029,7 @@ class ClusterNodePoolNodeConfigGuestAcceleratorGpuSharingConfig(dict):
9904
10029
  The type of GPU sharing strategy to enable on the GPU node.
9905
10030
  Accepted values are:
9906
10031
  * `"TIME_SHARING"`: Allow multiple containers to have [time-shared](https://cloud.google.com/kubernetes-engine/docs/concepts/timesharing-gpus) access to a single GPU device.
10032
+ * `"MPS"`: Enable co-operative multi-process CUDA workloads to run concurrently on a single GPU device with [MPS](https://cloud.google.com/kubernetes-engine/docs/how-to/nvidia-mps-gpus)
9907
10033
  """
9908
10034
  return pulumi.get(self, "gpu_sharing_strategy")
9909
10035
 
@@ -10273,6 +10399,53 @@ class ClusterNodePoolNodeConfigSandboxConfig(dict):
10273
10399
  return pulumi.get(self, "sandbox_type")
10274
10400
 
10275
10401
 
10402
@pulumi.output_type
class ClusterNodePoolNodeConfigSecondaryBootDisk(dict):
    """Secondary boot disk configuration for node-pool nodes, used to preload
    container images or data onto new nodes."""
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when the camelCase wire key is accessed instead of the
        # snake_case property getter; only "diskImage" differs in casing.
        suggest = None
        if key == "diskImage":
            suggest = "disk_image"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigSecondaryBootDisk. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ClusterNodePoolNodeConfigSecondaryBootDisk.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ClusterNodePoolNodeConfigSecondaryBootDisk.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 disk_image: str,
                 mode: Optional[str] = None):
        """
        :param str disk_image: Path to disk image to create the secondary boot disk from. After using the [gke-disk-image-builder](https://github.com/GoogleCloudPlatform/ai-on-gke/tree/main/tools/gke-disk-image-builder), this argument should be `global/images/DISK_IMAGE_NAME`.
        :param str mode: Mode for how the secondary boot disk is used. An example mode is `CONTAINER_IMAGE_CACHE`.
        """
        pulumi.set(__self__, "disk_image", disk_image)
        # Optional field: only serialized when explicitly provided.
        if mode is not None:
            pulumi.set(__self__, "mode", mode)

    @property
    @pulumi.getter(name="diskImage")
    def disk_image(self) -> str:
        """
        Path to disk image to create the secondary boot disk from. After using the [gke-disk-image-builder](https://github.com/GoogleCloudPlatform/ai-on-gke/tree/main/tools/gke-disk-image-builder), this argument should be `global/images/DISK_IMAGE_NAME`.
        """
        return pulumi.get(self, "disk_image")

    @property
    @pulumi.getter
    def mode(self) -> Optional[str]:
        """
        Mode for how the secondary boot disk is used. An example mode is `CONTAINER_IMAGE_CACHE`.
        """
        return pulumi.get(self, "mode")
10276
10449
  @pulumi.output_type
10277
10450
  class ClusterNodePoolNodeConfigShieldedInstanceConfig(dict):
10278
10451
  @staticmethod
@@ -11987,6 +12160,8 @@ class NodePoolNodeConfig(dict):
11987
12160
  suggest = "resource_manager_tags"
11988
12161
  elif key == "sandboxConfig":
11989
12162
  suggest = "sandbox_config"
12163
+ elif key == "secondaryBootDisks":
12164
+ suggest = "secondary_boot_disks"
11990
12165
  elif key == "serviceAccount":
11991
12166
  suggest = "service_account"
11992
12167
  elif key == "shieldedInstanceConfig":
@@ -12039,6 +12214,7 @@ class NodePoolNodeConfig(dict):
12039
12214
  resource_labels: Optional[Mapping[str, str]] = None,
12040
12215
  resource_manager_tags: Optional[Mapping[str, Any]] = None,
12041
12216
  sandbox_config: Optional['outputs.NodePoolNodeConfigSandboxConfig'] = None,
12217
+ secondary_boot_disks: Optional[Sequence['outputs.NodePoolNodeConfigSecondaryBootDisk']] = None,
12042
12218
  service_account: Optional[str] = None,
12043
12219
  shielded_instance_config: Optional['outputs.NodePoolNodeConfigShieldedInstanceConfig'] = None,
12044
12220
  sole_tenant_config: Optional['outputs.NodePoolNodeConfigSoleTenantConfig'] = None,
@@ -12078,6 +12254,7 @@ class NodePoolNodeConfig(dict):
12078
12254
  :param Mapping[str, str] resource_labels: The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
12079
12255
  :param Mapping[str, Any] resource_manager_tags: A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
12080
12256
  :param 'NodePoolNodeConfigSandboxConfigArgs' sandbox_config: Sandbox configuration for this node.
12257
+ :param Sequence['NodePoolNodeConfigSecondaryBootDiskArgs'] secondary_boot_disks: Secondary boot disks for preloading data or container images.
12081
12258
  :param str service_account: The Google Cloud Platform Service Account to be used by the node VMs.
12082
12259
  :param 'NodePoolNodeConfigShieldedInstanceConfigArgs' shielded_instance_config: Shielded Instance options.
12083
12260
  :param 'NodePoolNodeConfigSoleTenantConfigArgs' sole_tenant_config: Node affinity options for sole tenant node pools.
@@ -12148,6 +12325,8 @@ class NodePoolNodeConfig(dict):
12148
12325
  pulumi.set(__self__, "resource_manager_tags", resource_manager_tags)
12149
12326
  if sandbox_config is not None:
12150
12327
  pulumi.set(__self__, "sandbox_config", sandbox_config)
12328
+ if secondary_boot_disks is not None:
12329
+ pulumi.set(__self__, "secondary_boot_disks", secondary_boot_disks)
12151
12330
  if service_account is not None:
12152
12331
  pulumi.set(__self__, "service_account", service_account)
12153
12332
  if shielded_instance_config is not None:
@@ -12411,6 +12590,14 @@ class NodePoolNodeConfig(dict):
12411
12590
  """
12412
12591
  return pulumi.get(self, "sandbox_config")
12413
12592
 
12593
+ @property
12594
+ @pulumi.getter(name="secondaryBootDisks")
12595
+ def secondary_boot_disks(self) -> Optional[Sequence['outputs.NodePoolNodeConfigSecondaryBootDisk']]:
12596
+ """
12597
+ Secondary boot disks for preloading data or container images.
12598
+ """
12599
+ return pulumi.get(self, "secondary_boot_disks")
12600
+
12414
12601
  @property
12415
12602
  @pulumi.getter(name="serviceAccount")
12416
12603
  def service_account(self) -> Optional[str]:
@@ -12475,6 +12662,8 @@ class NodePoolNodeConfigAdvancedMachineFeatures(dict):
12475
12662
  suggest = None
12476
12663
  if key == "threadsPerCore":
12477
12664
  suggest = "threads_per_core"
12665
+ elif key == "enableNestedVirtualization":
12666
+ suggest = "enable_nested_virtualization"
12478
12667
 
12479
12668
  if suggest:
12480
12669
  pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigAdvancedMachineFeatures. Access the value via the '{suggest}' property getter instead.")
@@ -12488,11 +12677,15 @@ class NodePoolNodeConfigAdvancedMachineFeatures(dict):
12488
12677
  return super().get(key, default)
12489
12678
 
12490
12679
  def __init__(__self__, *,
12491
- threads_per_core: int):
12680
+ threads_per_core: int,
12681
+ enable_nested_virtualization: Optional[bool] = None):
12492
12682
  """
12493
12683
  :param int threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
12684
+ :param bool enable_nested_virtualization: Whether the node should have nested virtualization enabled.
12494
12685
  """
12495
12686
  pulumi.set(__self__, "threads_per_core", threads_per_core)
12687
+ if enable_nested_virtualization is not None:
12688
+ pulumi.set(__self__, "enable_nested_virtualization", enable_nested_virtualization)
12496
12689
 
12497
12690
  @property
12498
12691
  @pulumi.getter(name="threadsPerCore")
@@ -12502,6 +12695,14 @@ class NodePoolNodeConfigAdvancedMachineFeatures(dict):
12502
12695
  """
12503
12696
  return pulumi.get(self, "threads_per_core")
12504
12697
 
12698
+ @property
12699
+ @pulumi.getter(name="enableNestedVirtualization")
12700
+ def enable_nested_virtualization(self) -> Optional[bool]:
12701
+ """
12702
+ Whether the node should have nested virtualization enabled.
12703
+ """
12704
+ return pulumi.get(self, "enable_nested_virtualization")
12705
+
12505
12706
 
12506
12707
  @pulumi.output_type
12507
12708
  class NodePoolNodeConfigConfidentialNodes(dict):
@@ -13148,6 +13349,53 @@ class NodePoolNodeConfigSandboxConfig(dict):
13148
13349
  return pulumi.get(self, "sandbox_type")
13149
13350
 
13150
13351
 
13352
@pulumi.output_type
class NodePoolNodeConfigSecondaryBootDisk(dict):
    """
    A secondary boot disk attached to the nodes of a node pool, used for
    preloading data or container images. Instances are plain dicts whose
    wire-format (camelCase) keys are exposed through snake_case properties.
    """

    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase wire key to its snake_case property name so that
        # dict-style access with the raw key produces a guidance warning.
        suggest = None
        if key == "diskImage":
            suggest = "disk_image"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigSecondaryBootDisk. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn when a camelCase key is used, then fall through to normal
        # dict item lookup.
        NodePoolNodeConfigSecondaryBootDisk.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Same warning behavior as __getitem__, but with dict.get semantics
        # (returns ``default`` instead of raising on a missing key).
        NodePoolNodeConfigSecondaryBootDisk.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 disk_image: str,
                 mode: Optional[str] = None):
        """
        :param str disk_image: Disk image to create the secondary boot disk from
        :param str mode: Mode for how the secondary boot disk is used.
        """
        pulumi.set(__self__, "disk_image", disk_image)
        # ``mode`` is optional; only stored when explicitly provided.
        if mode is not None:
            pulumi.set(__self__, "mode", mode)

    @property
    @pulumi.getter(name="diskImage")
    def disk_image(self) -> str:
        """
        Disk image to create the secondary boot disk from
        """
        return pulumi.get(self, "disk_image")

    @property
    @pulumi.getter
    def mode(self) -> Optional[str]:
        """
        Mode for how the secondary boot disk is used.
        """
        return pulumi.get(self, "mode")
13151
13399
  @pulumi.output_type
13152
13400
  class NodePoolNodeConfigShieldedInstanceConfig(dict):
13153
13401
  @staticmethod
@@ -14538,18 +14786,29 @@ class GetClusterDefaultSnatStatusResult(dict):
14538
14786
  @pulumi.output_type
14539
14787
  class GetClusterDnsConfigResult(dict):
14540
14788
  def __init__(__self__, *,
14789
+ additive_vpc_scope_dns_domain: str,
14541
14790
  cluster_dns: str,
14542
14791
  cluster_dns_domain: str,
14543
14792
  cluster_dns_scope: str):
14544
14793
  """
14794
+ :param str additive_vpc_scope_dns_domain: Enable additive VPC scope DNS in a GKE cluster.
14545
14795
  :param str cluster_dns: Which in-cluster DNS provider should be used.
14546
14796
  :param str cluster_dns_domain: The suffix used for all cluster service records.
14547
14797
  :param str cluster_dns_scope: The scope of access to cluster DNS records.
14548
14798
  """
14799
+ pulumi.set(__self__, "additive_vpc_scope_dns_domain", additive_vpc_scope_dns_domain)
14549
14800
  pulumi.set(__self__, "cluster_dns", cluster_dns)
14550
14801
  pulumi.set(__self__, "cluster_dns_domain", cluster_dns_domain)
14551
14802
  pulumi.set(__self__, "cluster_dns_scope", cluster_dns_scope)
14552
14803
 
14804
+ @property
14805
+ @pulumi.getter(name="additiveVpcScopeDnsDomain")
14806
+ def additive_vpc_scope_dns_domain(self) -> str:
14807
+ """
14808
+ Enable additive VPC scope DNS in a GKE cluster.
14809
+ """
14810
+ return pulumi.get(self, "additive_vpc_scope_dns_domain")
14811
+
14553
14812
  @property
14554
14813
  @pulumi.getter(name="clusterDns")
14555
14814
  def cluster_dns(self) -> str:
@@ -15273,6 +15532,7 @@ class GetClusterNodeConfigResult(dict):
15273
15532
  resource_labels: Mapping[str, str],
15274
15533
  resource_manager_tags: Mapping[str, Any],
15275
15534
  sandbox_configs: Sequence['outputs.GetClusterNodeConfigSandboxConfigResult'],
15535
+ secondary_boot_disks: Sequence['outputs.GetClusterNodeConfigSecondaryBootDiskResult'],
15276
15536
  service_account: str,
15277
15537
  shielded_instance_configs: Sequence['outputs.GetClusterNodeConfigShieldedInstanceConfigResult'],
15278
15538
  sole_tenant_configs: Sequence['outputs.GetClusterNodeConfigSoleTenantConfigResult'],
@@ -15312,6 +15572,7 @@ class GetClusterNodeConfigResult(dict):
15312
15572
  :param Mapping[str, str] resource_labels: The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
15313
15573
  :param Mapping[str, Any] resource_manager_tags: A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
15314
15574
  :param Sequence['GetClusterNodeConfigSandboxConfigArgs'] sandbox_configs: Sandbox configuration for this node.
15575
+ :param Sequence['GetClusterNodeConfigSecondaryBootDiskArgs'] secondary_boot_disks: Secondary boot disks for preloading data or container images.
15315
15576
  :param str service_account: The Google Cloud Platform Service Account to be used by the node VMs.
15316
15577
  :param Sequence['GetClusterNodeConfigShieldedInstanceConfigArgs'] shielded_instance_configs: Shielded Instance options.
15317
15578
  :param Sequence['GetClusterNodeConfigSoleTenantConfigArgs'] sole_tenant_configs: Node affinity options for sole tenant node pools.
@@ -15351,6 +15612,7 @@ class GetClusterNodeConfigResult(dict):
15351
15612
  pulumi.set(__self__, "resource_labels", resource_labels)
15352
15613
  pulumi.set(__self__, "resource_manager_tags", resource_manager_tags)
15353
15614
  pulumi.set(__self__, "sandbox_configs", sandbox_configs)
15615
+ pulumi.set(__self__, "secondary_boot_disks", secondary_boot_disks)
15354
15616
  pulumi.set(__self__, "service_account", service_account)
15355
15617
  pulumi.set(__self__, "shielded_instance_configs", shielded_instance_configs)
15356
15618
  pulumi.set(__self__, "sole_tenant_configs", sole_tenant_configs)
@@ -15607,6 +15869,14 @@ class GetClusterNodeConfigResult(dict):
15607
15869
  """
15608
15870
  return pulumi.get(self, "sandbox_configs")
15609
15871
 
15872
+ @property
15873
+ @pulumi.getter(name="secondaryBootDisks")
15874
+ def secondary_boot_disks(self) -> Sequence['outputs.GetClusterNodeConfigSecondaryBootDiskResult']:
15875
+ """
15876
+ Secondary boot disks for preloading data or container images.
15877
+ """
15878
+ return pulumi.get(self, "secondary_boot_disks")
15879
+
15610
15880
  @property
15611
15881
  @pulumi.getter(name="serviceAccount")
15612
15882
  def service_account(self) -> str:
@@ -15667,12 +15937,23 @@ class GetClusterNodeConfigResult(dict):
15667
15937
  @pulumi.output_type
15668
15938
  class GetClusterNodeConfigAdvancedMachineFeatureResult(dict):
15669
15939
  def __init__(__self__, *,
15940
+ enable_nested_virtualization: bool,
15670
15941
  threads_per_core: int):
15671
15942
  """
15943
+ :param bool enable_nested_virtualization: Whether the node should have nested virtualization enabled.
15672
15944
  :param int threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
15673
15945
  """
15946
+ pulumi.set(__self__, "enable_nested_virtualization", enable_nested_virtualization)
15674
15947
  pulumi.set(__self__, "threads_per_core", threads_per_core)
15675
15948
 
15949
+ @property
15950
+ @pulumi.getter(name="enableNestedVirtualization")
15951
+ def enable_nested_virtualization(self) -> bool:
15952
+ """
15953
+ Whether the node should have nested virtualization enabled.
15954
+ """
15955
+ return pulumi.get(self, "enable_nested_virtualization")
15956
+
15676
15957
  @property
15677
15958
  @pulumi.getter(name="threadsPerCore")
15678
15959
  def threads_per_core(self) -> int:
@@ -16113,6 +16394,35 @@ class GetClusterNodeConfigSandboxConfigResult(dict):
16113
16394
  return pulumi.get(self, "sandbox_type")
16114
16395
 
16115
16396
 
16397
@pulumi.output_type
class GetClusterNodeConfigSecondaryBootDiskResult(dict):
    """
    Data-source (get_cluster) view of a node config secondary boot disk used
    for preloading data or container images. Unlike the resource-side shape,
    both fields are required here because the data source always reports them.
    """

    def __init__(__self__, *,
                 disk_image: str,
                 mode: str):
        """
        :param str disk_image: Disk image to create the secondary boot disk from
        :param str mode: Mode for how the secondary boot disk is used.
        """
        pulumi.set(__self__, "disk_image", disk_image)
        pulumi.set(__self__, "mode", mode)

    @property
    @pulumi.getter(name="diskImage")
    def disk_image(self) -> str:
        """
        Disk image to create the secondary boot disk from
        """
        return pulumi.get(self, "disk_image")

    @property
    @pulumi.getter
    def mode(self) -> str:
        """
        Mode for how the secondary boot disk is used.
        """
        return pulumi.get(self, "mode")
16116
16426
  @pulumi.output_type
16117
16427
  class GetClusterNodeConfigShieldedInstanceConfigResult(dict):
16118
16428
  def __init__(__self__, *,
@@ -16868,6 +17178,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
16868
17178
  resource_labels: Mapping[str, str],
16869
17179
  resource_manager_tags: Mapping[str, Any],
16870
17180
  sandbox_configs: Sequence['outputs.GetClusterNodePoolNodeConfigSandboxConfigResult'],
17181
+ secondary_boot_disks: Sequence['outputs.GetClusterNodePoolNodeConfigSecondaryBootDiskResult'],
16871
17182
  service_account: str,
16872
17183
  shielded_instance_configs: Sequence['outputs.GetClusterNodePoolNodeConfigShieldedInstanceConfigResult'],
16873
17184
  sole_tenant_configs: Sequence['outputs.GetClusterNodePoolNodeConfigSoleTenantConfigResult'],
@@ -16907,6 +17218,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
16907
17218
  :param Mapping[str, str] resource_labels: The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
16908
17219
  :param Mapping[str, Any] resource_manager_tags: A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
16909
17220
  :param Sequence['GetClusterNodePoolNodeConfigSandboxConfigArgs'] sandbox_configs: Sandbox configuration for this node.
17221
+ :param Sequence['GetClusterNodePoolNodeConfigSecondaryBootDiskArgs'] secondary_boot_disks: Secondary boot disks for preloading data or container images.
16910
17222
  :param str service_account: The Google Cloud Platform Service Account to be used by the node VMs.
16911
17223
  :param Sequence['GetClusterNodePoolNodeConfigShieldedInstanceConfigArgs'] shielded_instance_configs: Shielded Instance options.
16912
17224
  :param Sequence['GetClusterNodePoolNodeConfigSoleTenantConfigArgs'] sole_tenant_configs: Node affinity options for sole tenant node pools.
@@ -16946,6 +17258,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
16946
17258
  pulumi.set(__self__, "resource_labels", resource_labels)
16947
17259
  pulumi.set(__self__, "resource_manager_tags", resource_manager_tags)
16948
17260
  pulumi.set(__self__, "sandbox_configs", sandbox_configs)
17261
+ pulumi.set(__self__, "secondary_boot_disks", secondary_boot_disks)
16949
17262
  pulumi.set(__self__, "service_account", service_account)
16950
17263
  pulumi.set(__self__, "shielded_instance_configs", shielded_instance_configs)
16951
17264
  pulumi.set(__self__, "sole_tenant_configs", sole_tenant_configs)
@@ -17202,6 +17515,14 @@ class GetClusterNodePoolNodeConfigResult(dict):
17202
17515
  """
17203
17516
  return pulumi.get(self, "sandbox_configs")
17204
17517
 
17518
+ @property
17519
+ @pulumi.getter(name="secondaryBootDisks")
17520
+ def secondary_boot_disks(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigSecondaryBootDiskResult']:
17521
+ """
17522
+ Secondary boot disks for preloading data or container images.
17523
+ """
17524
+ return pulumi.get(self, "secondary_boot_disks")
17525
+
17205
17526
  @property
17206
17527
  @pulumi.getter(name="serviceAccount")
17207
17528
  def service_account(self) -> str:
@@ -17262,12 +17583,23 @@ class GetClusterNodePoolNodeConfigResult(dict):
17262
17583
  @pulumi.output_type
17263
17584
  class GetClusterNodePoolNodeConfigAdvancedMachineFeatureResult(dict):
17264
17585
  def __init__(__self__, *,
17586
+ enable_nested_virtualization: bool,
17265
17587
  threads_per_core: int):
17266
17588
  """
17589
+ :param bool enable_nested_virtualization: Whether the node should have nested virtualization enabled.
17267
17590
  :param int threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
17268
17591
  """
17592
+ pulumi.set(__self__, "enable_nested_virtualization", enable_nested_virtualization)
17269
17593
  pulumi.set(__self__, "threads_per_core", threads_per_core)
17270
17594
 
17595
+ @property
17596
+ @pulumi.getter(name="enableNestedVirtualization")
17597
+ def enable_nested_virtualization(self) -> bool:
17598
+ """
17599
+ Whether the node should have nested virtualization enabled.
17600
+ """
17601
+ return pulumi.get(self, "enable_nested_virtualization")
17602
+
17271
17603
  @property
17272
17604
  @pulumi.getter(name="threadsPerCore")
17273
17605
  def threads_per_core(self) -> int:
@@ -17708,6 +18040,35 @@ class GetClusterNodePoolNodeConfigSandboxConfigResult(dict):
17708
18040
  return pulumi.get(self, "sandbox_type")
17709
18041
 
17710
18042
 
18043
@pulumi.output_type
class GetClusterNodePoolNodeConfigSecondaryBootDiskResult(dict):
    """
    Data-source (get_cluster) view of a node pool node config secondary boot
    disk used for preloading data or container images. Both fields are
    required because the data source always reports them.
    """

    def __init__(__self__, *,
                 disk_image: str,
                 mode: str):
        """
        :param str disk_image: Disk image to create the secondary boot disk from
        :param str mode: Mode for how the secondary boot disk is used.
        """
        pulumi.set(__self__, "disk_image", disk_image)
        pulumi.set(__self__, "mode", mode)

    @property
    @pulumi.getter(name="diskImage")
    def disk_image(self) -> str:
        """
        Disk image to create the secondary boot disk from
        """
        return pulumi.get(self, "disk_image")

    @property
    @pulumi.getter
    def mode(self) -> str:
        """
        Mode for how the secondary boot disk is used.
        """
        return pulumi.get(self, "mode")
17711
18072
  @pulumi.output_type
17712
18073
  class GetClusterNodePoolNodeConfigShieldedInstanceConfigResult(dict):
17713
18074
  def __init__(__self__, *,