pulumi-gcp 7.22.0a1715345822__py3-none-any.whl → 7.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. pulumi_gcp/__init__.py +35 -0
  2. pulumi_gcp/alloydb/_inputs.py +139 -0
  3. pulumi_gcp/alloydb/cluster.py +54 -0
  4. pulumi_gcp/alloydb/outputs.py +145 -0
  5. pulumi_gcp/applicationintegration/auth_config.py +2 -6
  6. pulumi_gcp/applicationintegration/client.py +133 -18
  7. pulumi_gcp/bigquery/dataset.py +2 -2
  8. pulumi_gcp/bigquery/job.py +16 -20
  9. pulumi_gcp/bigquery/table.py +47 -0
  10. pulumi_gcp/bigtable/__init__.py +1 -0
  11. pulumi_gcp/bigtable/_inputs.py +101 -0
  12. pulumi_gcp/bigtable/authorized_view.py +440 -0
  13. pulumi_gcp/bigtable/outputs.py +119 -0
  14. pulumi_gcp/certificateauthority/certificate_template.py +70 -0
  15. pulumi_gcp/cloudbuildv2/repository.py +2 -2
  16. pulumi_gcp/clouddeploy/_inputs.py +96 -0
  17. pulumi_gcp/clouddeploy/custom_target_type.py +46 -0
  18. pulumi_gcp/clouddeploy/delivery_pipeline.py +7 -7
  19. pulumi_gcp/clouddeploy/outputs.py +96 -1
  20. pulumi_gcp/clouddeploy/target.py +54 -7
  21. pulumi_gcp/cloudrunv2/job.py +2 -4
  22. pulumi_gcp/cloudrunv2/service.py +2 -4
  23. pulumi_gcp/compute/_inputs.py +693 -0
  24. pulumi_gcp/compute/firewall_policy_rule.py +125 -10
  25. pulumi_gcp/compute/network_firewall_policy_rule.py +125 -10
  26. pulumi_gcp/compute/outputs.py +688 -0
  27. pulumi_gcp/compute/region_network_firewall_policy_rule.py +125 -10
  28. pulumi_gcp/compute/region_security_policy_rule.py +230 -1
  29. pulumi_gcp/compute/router_peer.py +54 -14
  30. pulumi_gcp/config/__init__.pyi +2 -0
  31. pulumi_gcp/config/vars.py +4 -0
  32. pulumi_gcp/container/_inputs.py +236 -3
  33. pulumi_gcp/container/outputs.py +365 -4
  34. pulumi_gcp/dataflow/flex_template_job.py +28 -28
  35. pulumi_gcp/dataflow/job.py +28 -14
  36. pulumi_gcp/essentialcontacts/document_ai_warehouse_document_schema.py +0 -528
  37. pulumi_gcp/firebaserules/release.py +2 -2
  38. pulumi_gcp/iam/_inputs.py +191 -2
  39. pulumi_gcp/iam/outputs.py +197 -2
  40. pulumi_gcp/iam/workforce_pool_provider.py +245 -0
  41. pulumi_gcp/integrationconnectors/__init__.py +1 -0
  42. pulumi_gcp/integrationconnectors/managed_zone.py +753 -0
  43. pulumi_gcp/networkconnectivity/__init__.py +1 -0
  44. pulumi_gcp/networkconnectivity/regional_endpoint.py +946 -0
  45. pulumi_gcp/networksecurity/firewall_endpoint.py +34 -0
  46. pulumi_gcp/networksecurity/firewall_endpoint_association.py +24 -0
  47. pulumi_gcp/networksecurity/security_profile.py +16 -0
  48. pulumi_gcp/networksecurity/security_profile_group.py +18 -0
  49. pulumi_gcp/networksecurity/tls_inspection_policy.py +16 -0
  50. pulumi_gcp/orgpolicy/policy.py +2 -2
  51. pulumi_gcp/privilegedaccessmanager/__init__.py +10 -0
  52. pulumi_gcp/privilegedaccessmanager/_inputs.py +420 -0
  53. pulumi_gcp/privilegedaccessmanager/entitlement.py +852 -0
  54. pulumi_gcp/privilegedaccessmanager/outputs.py +491 -0
  55. pulumi_gcp/provider.py +20 -0
  56. pulumi_gcp/pubsub/subscription.py +4 -4
  57. pulumi_gcp/pulumi-plugin.json +2 -1
  58. pulumi_gcp/redis/cluster.py +69 -2
  59. pulumi_gcp/storage/__init__.py +1 -0
  60. pulumi_gcp/storage/get_buckets.py +138 -0
  61. pulumi_gcp/storage/outputs.py +63 -0
  62. {pulumi_gcp-7.22.0a1715345822.dist-info → pulumi_gcp-7.23.0.dist-info}/METADATA +1 -1
  63. {pulumi_gcp-7.22.0a1715345822.dist-info → pulumi_gcp-7.23.0.dist-info}/RECORD +65 -57
  64. {pulumi_gcp-7.22.0a1715345822.dist-info → pulumi_gcp-7.23.0.dist-info}/WHEEL +0 -0
  65. {pulumi_gcp-7.22.0a1715345822.dist-info → pulumi_gcp-7.23.0.dist-info}/top_level.txt +0 -0
@@ -148,6 +148,7 @@ __all__ = [
     'ClusterNodeConfigLocalNvmeSsdBlockConfigArgs',
     'ClusterNodeConfigReservationAffinityArgs',
     'ClusterNodeConfigSandboxConfigArgs',
+    'ClusterNodeConfigSecondaryBootDiskArgs',
     'ClusterNodeConfigShieldedInstanceConfigArgs',
     'ClusterNodeConfigSoleTenantConfigArgs',
     'ClusterNodeConfigSoleTenantConfigNodeAffinityArgs',
@@ -184,6 +185,7 @@ __all__ = [
     'ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgs',
     'ClusterNodePoolNodeConfigReservationAffinityArgs',
     'ClusterNodePoolNodeConfigSandboxConfigArgs',
+    'ClusterNodePoolNodeConfigSecondaryBootDiskArgs',
     'ClusterNodePoolNodeConfigShieldedInstanceConfigArgs',
     'ClusterNodePoolNodeConfigSoleTenantConfigArgs',
     'ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs',
@@ -236,6 +238,7 @@ __all__ = [
     'NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs',
     'NodePoolNodeConfigReservationAffinityArgs',
     'NodePoolNodeConfigSandboxConfigArgs',
+    'NodePoolNodeConfigSecondaryBootDiskArgs',
     'NodePoolNodeConfigShieldedInstanceConfigArgs',
     'NodePoolNodeConfigSoleTenantConfigArgs',
     'NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs',
@@ -4548,14 +4551,18 @@ class ClusterDefaultSnatStatusArgs:
 @pulumi.input_type
 class ClusterDnsConfigArgs:
     def __init__(__self__, *,
+                 additive_vpc_scope_dns_domain: Optional[pulumi.Input[str]] = None,
                  cluster_dns: Optional[pulumi.Input[str]] = None,
                  cluster_dns_domain: Optional[pulumi.Input[str]] = None,
                  cluster_dns_scope: Optional[pulumi.Input[str]] = None):
         """
+        :param pulumi.Input[str] additive_vpc_scope_dns_domain: This will enable Cloud DNS additive VPC scope. Must provide a domain name that is unique within the VPC. For this to work `cluster_dns = "CLOUD_DNS"` and `cluster_dns_scope = "CLUSTER_SCOPE"` must both be set as well.
         :param pulumi.Input[str] cluster_dns: Which in-cluster DNS provider should be used. `PROVIDER_UNSPECIFIED` (default) or `PLATFORM_DEFAULT` or `CLOUD_DNS`.
         :param pulumi.Input[str] cluster_dns_domain: The suffix used for all cluster service records.
         :param pulumi.Input[str] cluster_dns_scope: The scope of access to cluster DNS records. `DNS_SCOPE_UNSPECIFIED` (default) or `CLUSTER_SCOPE` or `VPC_SCOPE`.
         """
+        if additive_vpc_scope_dns_domain is not None:
+            pulumi.set(__self__, "additive_vpc_scope_dns_domain", additive_vpc_scope_dns_domain)
         if cluster_dns is not None:
             pulumi.set(__self__, "cluster_dns", cluster_dns)
         if cluster_dns_domain is not None:
@@ -4563,6 +4570,18 @@ class ClusterDnsConfigArgs:
         if cluster_dns_scope is not None:
             pulumi.set(__self__, "cluster_dns_scope", cluster_dns_scope)
 
+    @property
+    @pulumi.getter(name="additiveVpcScopeDnsDomain")
+    def additive_vpc_scope_dns_domain(self) -> Optional[pulumi.Input[str]]:
+        """
+        This will enable Cloud DNS additive VPC scope. Must provide a domain name that is unique within the VPC. For this to work `cluster_dns = "CLOUD_DNS"` and `cluster_dns_scope = "CLUSTER_SCOPE"` must both be set as well.
+        """
+        return pulumi.get(self, "additive_vpc_scope_dns_domain")
+
+    @additive_vpc_scope_dns_domain.setter
+    def additive_vpc_scope_dns_domain(self, value: Optional[pulumi.Input[str]]):
+        pulumi.set(self, "additive_vpc_scope_dns_domain", value)
+
     @property
     @pulumi.getter(name="clusterDns")
     def cluster_dns(self) -> Optional[pulumi.Input[str]]:
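The new `additive_vpc_scope_dns_domain` field only takes effect alongside specific `cluster_dns` and `cluster_dns_scope` values, per its docstring. A minimal sketch of how it might be wired up; the cluster name, location, and domain are assumptions, not taken from this diff:

```python
import pulumi_gcp as gcp

# Sketch only: additive_vpc_scope_dns_domain requires cluster_dns="CLOUD_DNS"
# and cluster_dns_scope="CLUSTER_SCOPE" to take effect.
cluster = gcp.container.Cluster(
    "example-cluster",
    location="us-central1",
    initial_node_count=1,
    dns_config=gcp.container.ClusterDnsConfigArgs(
        cluster_dns="CLOUD_DNS",
        cluster_dns_scope="CLUSTER_SCOPE",
        additive_vpc_scope_dns_domain="cluster.example.internal",  # must be unique within the VPC
    ),
)
```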
@@ -5726,6 +5745,7 @@ class ClusterNodeConfigArgs:
                  resource_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                  resource_manager_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                  sandbox_config: Optional[pulumi.Input['ClusterNodeConfigSandboxConfigArgs']] = None,
+                 secondary_boot_disks: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSecondaryBootDiskArgs']]]] = None,
                  service_account: Optional[pulumi.Input[str]] = None,
                  shielded_instance_config: Optional[pulumi.Input['ClusterNodeConfigShieldedInstanceConfigArgs']] = None,
                  sole_tenant_config: Optional[pulumi.Input['ClusterNodeConfigSoleTenantConfigArgs']] = None,
@@ -5811,6 +5831,7 @@ class ClusterNodeConfigArgs:
                for how these labels are applied to clusters, node pools and nodes.
         :param pulumi.Input[Mapping[str, Any]] resource_manager_tags: A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`.
         :param pulumi.Input['ClusterNodeConfigSandboxConfigArgs'] sandbox_config: Sandbox configuration for this node.
+        :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSecondaryBootDiskArgs']]] secondary_boot_disks: Parameters for secondary boot disks to preload container images and data on new nodes. Structure is documented below. `gcfs_config` must be `enabled=true` for this feature to work. `min_master_version` must also be set to use GKE 1.28.3-gke.106700 or later versions.
         :param pulumi.Input[str] service_account: The service account to be used by the Node VMs.
                If not specified, the "default" service account is used.
         :param pulumi.Input['ClusterNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options. Structure is documented below.
@@ -5893,6 +5914,8 @@ class ClusterNodeConfigArgs:
             pulumi.set(__self__, "resource_manager_tags", resource_manager_tags)
         if sandbox_config is not None:
             pulumi.set(__self__, "sandbox_config", sandbox_config)
+        if secondary_boot_disks is not None:
+            pulumi.set(__self__, "secondary_boot_disks", secondary_boot_disks)
         if service_account is not None:
             pulumi.set(__self__, "service_account", service_account)
         if shielded_instance_config is not None:
@@ -6326,6 +6349,18 @@ class ClusterNodeConfigArgs:
     def sandbox_config(self, value: Optional[pulumi.Input['ClusterNodeConfigSandboxConfigArgs']]):
         pulumi.set(self, "sandbox_config", value)
 
+    @property
+    @pulumi.getter(name="secondaryBootDisks")
+    def secondary_boot_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSecondaryBootDiskArgs']]]]:
+        """
+        Parameters for secondary boot disks to preload container images and data on new nodes. Structure is documented below. `gcfs_config` must be `enabled=true` for this feature to work. `min_master_version` must also be set to use GKE 1.28.3-gke.106700 or later versions.
+        """
+        return pulumi.get(self, "secondary_boot_disks")
+
+    @secondary_boot_disks.setter
+    def secondary_boot_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSecondaryBootDiskArgs']]]]):
+        pulumi.set(self, "secondary_boot_disks", value)
+
     @property
     @pulumi.getter(name="serviceAccount")
     def service_account(self) -> Optional[pulumi.Input[str]]:
@@ -6426,11 +6461,15 @@ class ClusterNodeConfigArgs:
 @pulumi.input_type
 class ClusterNodeConfigAdvancedMachineFeaturesArgs:
     def __init__(__self__, *,
-                 threads_per_core: pulumi.Input[int]):
+                 threads_per_core: pulumi.Input[int],
+                 enable_nested_virtualization: Optional[pulumi.Input[bool]] = None):
         """
         :param pulumi.Input[int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
+        :param pulumi.Input[bool] enable_nested_virtualization: Defines whether the instance should have nested virtualization enabled. Defaults to false.
         """
         pulumi.set(__self__, "threads_per_core", threads_per_core)
+        if enable_nested_virtualization is not None:
+            pulumi.set(__self__, "enable_nested_virtualization", enable_nested_virtualization)
 
     @property
     @pulumi.getter(name="threadsPerCore")
@@ -6444,6 +6483,18 @@ class ClusterNodeConfigAdvancedMachineFeaturesArgs:
     def threads_per_core(self, value: pulumi.Input[int]):
         pulumi.set(self, "threads_per_core", value)
 
+    @property
+    @pulumi.getter(name="enableNestedVirtualization")
+    def enable_nested_virtualization(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Defines whether the instance should have nested virtualization enabled. Defaults to false.
+        """
+        return pulumi.get(self, "enable_nested_virtualization")
+
+    @enable_nested_virtualization.setter
+    def enable_nested_virtualization(self, value: Optional[pulumi.Input[bool]]):
+        pulumi.set(self, "enable_nested_virtualization", value)
+
 
 @pulumi.input_type
 class ClusterNodeConfigConfidentialNodesArgs:
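The `enable_nested_virtualization` flag is optional and defaults to false, so existing `advanced_machine_features` blocks keep working unchanged. A hedged sketch of the new field in use; the machine type is an assumption:

```python
import pulumi_gcp as gcp

# Sketch only: nested virtualization also depends on the underlying machine
# family supporting it; n2-standard-4 is an assumed example.
node_config = gcp.container.ClusterNodeConfigArgs(
    machine_type="n2-standard-4",
    advanced_machine_features=gcp.container.ClusterNodeConfigAdvancedMachineFeaturesArgs(
        threads_per_core=1,  # disable SMT, as the docstring describes
        enable_nested_virtualization=True,  # new in 7.23.0
    ),
)
```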
@@ -6738,6 +6789,7 @@ class ClusterNodeConfigGuestAcceleratorGpuSharingConfigArgs:
         :param pulumi.Input[str] gpu_sharing_strategy: The type of GPU sharing strategy to enable on the GPU node.
                Accepted values are:
                * `"TIME_SHARING"`: Allow multiple containers to have [time-shared](https://cloud.google.com/kubernetes-engine/docs/concepts/timesharing-gpus) access to a single GPU device.
+               * `"MPS"`: Enable co-operative multi-process CUDA workloads to run concurrently on a single GPU device with [MPS](https://cloud.google.com/kubernetes-engine/docs/how-to/nvidia-mps-gpus)
         :param pulumi.Input[int] max_shared_clients_per_gpu: The maximum number of containers that can share a GPU.
         """
         pulumi.set(__self__, "gpu_sharing_strategy", gpu_sharing_strategy)
@@ -6750,6 +6802,7 @@ class ClusterNodeConfigGuestAcceleratorGpuSharingConfigArgs:
         The type of GPU sharing strategy to enable on the GPU node.
         Accepted values are:
         * `"TIME_SHARING"`: Allow multiple containers to have [time-shared](https://cloud.google.com/kubernetes-engine/docs/concepts/timesharing-gpus) access to a single GPU device.
+        * `"MPS"`: Enable co-operative multi-process CUDA workloads to run concurrently on a single GPU device with [MPS](https://cloud.google.com/kubernetes-engine/docs/how-to/nvidia-mps-gpus)
         """
         return pulumi.get(self, "gpu_sharing_strategy")
 
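`"MPS"` joins `"TIME_SHARING"` as an accepted `gpu_sharing_strategy` value. A sketch of what an MPS-sharing accelerator block might look like; the GPU type and client count are assumptions:

```python
import pulumi_gcp as gcp

# Sketch only: the accelerator type and max_shared_clients_per_gpu are assumed values.
gpu_sharing = gcp.container.ClusterNodeConfigGuestAcceleratorArgs(
    type="nvidia-tesla-t4",
    count=1,
    gpu_sharing_config=gcp.container.ClusterNodeConfigGuestAcceleratorGpuSharingConfigArgs(
        gpu_sharing_strategy="MPS",  # newly documented value in this release
        max_shared_clients_per_gpu=2,
    ),
)
```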
@@ -7071,6 +7124,44 @@ class ClusterNodeConfigSandboxConfigArgs:
         pulumi.set(self, "sandbox_type", value)
 
 
+@pulumi.input_type
+class ClusterNodeConfigSecondaryBootDiskArgs:
+    def __init__(__self__, *,
+                 disk_image: pulumi.Input[str],
+                 mode: Optional[pulumi.Input[str]] = None):
+        """
+        :param pulumi.Input[str] disk_image: Path to disk image to create the secondary boot disk from. After using the [gke-disk-image-builder](https://github.com/GoogleCloudPlatform/ai-on-gke/tree/main/tools/gke-disk-image-builder), this argument should be `global/images/DISK_IMAGE_NAME`.
+        :param pulumi.Input[str] mode: Mode for how the secondary boot disk is used. An example mode is `CONTAINER_IMAGE_CACHE`.
+        """
+        pulumi.set(__self__, "disk_image", disk_image)
+        if mode is not None:
+            pulumi.set(__self__, "mode", mode)
+
+    @property
+    @pulumi.getter(name="diskImage")
+    def disk_image(self) -> pulumi.Input[str]:
+        """
+        Path to disk image to create the secondary boot disk from. After using the [gke-disk-image-builder](https://github.com/GoogleCloudPlatform/ai-on-gke/tree/main/tools/gke-disk-image-builder), this argument should be `global/images/DISK_IMAGE_NAME`.
+        """
+        return pulumi.get(self, "disk_image")
+
+    @disk_image.setter
+    def disk_image(self, value: pulumi.Input[str]):
+        pulumi.set(self, "disk_image", value)
+
+    @property
+    @pulumi.getter
+    def mode(self) -> Optional[pulumi.Input[str]]:
+        """
+        Mode for how the secondary boot disk is used. An example mode is `CONTAINER_IMAGE_CACHE`.
+        """
+        return pulumi.get(self, "mode")
+
+    @mode.setter
+    def mode(self, value: Optional[pulumi.Input[str]]):
+        pulumi.set(self, "mode", value)
+
+
 @pulumi.input_type
 class ClusterNodeConfigShieldedInstanceConfigArgs:
     def __init__(__self__, *,
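Per the docstrings above, secondary boot disks require `gcfs_config` to be enabled and a sufficiently new `min_master_version`. A minimal sketch; the disk image path is the docstring's placeholder, built with gke-disk-image-builder:

```python
import pulumi_gcp as gcp

# Sketch only: gcfs_config must be enabled for secondary boot disks to work,
# and the cluster's min_master_version must be 1.28.3-gke.106700 or later.
node_config = gcp.container.ClusterNodeConfigArgs(
    gcfs_config=gcp.container.ClusterNodeConfigGcfsConfigArgs(enabled=True),
    secondary_boot_disks=[
        gcp.container.ClusterNodeConfigSecondaryBootDiskArgs(
            disk_image="global/images/DISK_IMAGE_NAME",  # placeholder image path
            mode="CONTAINER_IMAGE_CACHE",
        ),
    ],
)
```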
@@ -8174,6 +8265,7 @@ class ClusterNodePoolNodeConfigArgs:
                  resource_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                  resource_manager_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                  sandbox_config: Optional[pulumi.Input['ClusterNodePoolNodeConfigSandboxConfigArgs']] = None,
+                 secondary_boot_disks: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSecondaryBootDiskArgs']]]] = None,
                  service_account: Optional[pulumi.Input[str]] = None,
                  shielded_instance_config: Optional[pulumi.Input['ClusterNodePoolNodeConfigShieldedInstanceConfigArgs']] = None,
                  sole_tenant_config: Optional[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigArgs']] = None,
@@ -8259,6 +8351,7 @@ class ClusterNodePoolNodeConfigArgs:
                for how these labels are applied to clusters, node pools and nodes.
         :param pulumi.Input[Mapping[str, Any]] resource_manager_tags: A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`.
         :param pulumi.Input['ClusterNodePoolNodeConfigSandboxConfigArgs'] sandbox_config: Sandbox configuration for this node.
+        :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSecondaryBootDiskArgs']]] secondary_boot_disks: Parameters for secondary boot disks to preload container images and data on new nodes. Structure is documented below. `gcfs_config` must be `enabled=true` for this feature to work. `min_master_version` must also be set to use GKE 1.28.3-gke.106700 or later versions.
         :param pulumi.Input[str] service_account: The service account to be used by the Node VMs.
                If not specified, the "default" service account is used.
         :param pulumi.Input['ClusterNodePoolNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options. Structure is documented below.
@@ -8341,6 +8434,8 @@ class ClusterNodePoolNodeConfigArgs:
             pulumi.set(__self__, "resource_manager_tags", resource_manager_tags)
         if sandbox_config is not None:
             pulumi.set(__self__, "sandbox_config", sandbox_config)
+        if secondary_boot_disks is not None:
+            pulumi.set(__self__, "secondary_boot_disks", secondary_boot_disks)
         if service_account is not None:
             pulumi.set(__self__, "service_account", service_account)
         if shielded_instance_config is not None:
@@ -8774,6 +8869,18 @@ class ClusterNodePoolNodeConfigArgs:
     def sandbox_config(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigSandboxConfigArgs']]):
         pulumi.set(self, "sandbox_config", value)
 
+    @property
+    @pulumi.getter(name="secondaryBootDisks")
+    def secondary_boot_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSecondaryBootDiskArgs']]]]:
+        """
+        Parameters for secondary boot disks to preload container images and data on new nodes. Structure is documented below. `gcfs_config` must be `enabled=true` for this feature to work. `min_master_version` must also be set to use GKE 1.28.3-gke.106700 or later versions.
+        """
+        return pulumi.get(self, "secondary_boot_disks")
+
+    @secondary_boot_disks.setter
+    def secondary_boot_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSecondaryBootDiskArgs']]]]):
+        pulumi.set(self, "secondary_boot_disks", value)
+
     @property
     @pulumi.getter(name="serviceAccount")
     def service_account(self) -> Optional[pulumi.Input[str]]:
@@ -8874,11 +8981,15 @@ class ClusterNodePoolNodeConfigArgs:
 @pulumi.input_type
 class ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs:
     def __init__(__self__, *,
-                 threads_per_core: pulumi.Input[int]):
+                 threads_per_core: pulumi.Input[int],
+                 enable_nested_virtualization: Optional[pulumi.Input[bool]] = None):
         """
         :param pulumi.Input[int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
+        :param pulumi.Input[bool] enable_nested_virtualization: Defines whether the instance should have nested virtualization enabled. Defaults to false.
         """
         pulumi.set(__self__, "threads_per_core", threads_per_core)
+        if enable_nested_virtualization is not None:
+            pulumi.set(__self__, "enable_nested_virtualization", enable_nested_virtualization)
 
     @property
     @pulumi.getter(name="threadsPerCore")
@@ -8892,6 +9003,18 @@ class ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs:
     def threads_per_core(self, value: pulumi.Input[int]):
         pulumi.set(self, "threads_per_core", value)
 
+    @property
+    @pulumi.getter(name="enableNestedVirtualization")
+    def enable_nested_virtualization(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Defines whether the instance should have nested virtualization enabled. Defaults to false.
+        """
+        return pulumi.get(self, "enable_nested_virtualization")
+
+    @enable_nested_virtualization.setter
+    def enable_nested_virtualization(self, value: Optional[pulumi.Input[bool]]):
+        pulumi.set(self, "enable_nested_virtualization", value)
+
 
 @pulumi.input_type
 class ClusterNodePoolNodeConfigConfidentialNodesArgs:
@@ -9186,6 +9309,7 @@ class ClusterNodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs:
         :param pulumi.Input[str] gpu_sharing_strategy: The type of GPU sharing strategy to enable on the GPU node.
                Accepted values are:
                * `"TIME_SHARING"`: Allow multiple containers to have [time-shared](https://cloud.google.com/kubernetes-engine/docs/concepts/timesharing-gpus) access to a single GPU device.
+               * `"MPS"`: Enable co-operative multi-process CUDA workloads to run concurrently on a single GPU device with [MPS](https://cloud.google.com/kubernetes-engine/docs/how-to/nvidia-mps-gpus)
         :param pulumi.Input[int] max_shared_clients_per_gpu: The maximum number of containers that can share a GPU.
         """
         pulumi.set(__self__, "gpu_sharing_strategy", gpu_sharing_strategy)
@@ -9198,6 +9322,7 @@ class ClusterNodePoolNodeConfigGuestAcceleratorGpuSharingConfigArgs:
         The type of GPU sharing strategy to enable on the GPU node.
         Accepted values are:
         * `"TIME_SHARING"`: Allow multiple containers to have [time-shared](https://cloud.google.com/kubernetes-engine/docs/concepts/timesharing-gpus) access to a single GPU device.
+        * `"MPS"`: Enable co-operative multi-process CUDA workloads to run concurrently on a single GPU device with [MPS](https://cloud.google.com/kubernetes-engine/docs/how-to/nvidia-mps-gpus)
         """
         return pulumi.get(self, "gpu_sharing_strategy")
 
@@ -9519,6 +9644,44 @@ class ClusterNodePoolNodeConfigSandboxConfigArgs:
         pulumi.set(self, "sandbox_type", value)
 
 
+@pulumi.input_type
+class ClusterNodePoolNodeConfigSecondaryBootDiskArgs:
+    def __init__(__self__, *,
+                 disk_image: pulumi.Input[str],
+                 mode: Optional[pulumi.Input[str]] = None):
+        """
+        :param pulumi.Input[str] disk_image: Path to disk image to create the secondary boot disk from. After using the [gke-disk-image-builder](https://github.com/GoogleCloudPlatform/ai-on-gke/tree/main/tools/gke-disk-image-builder), this argument should be `global/images/DISK_IMAGE_NAME`.
+        :param pulumi.Input[str] mode: Mode for how the secondary boot disk is used. An example mode is `CONTAINER_IMAGE_CACHE`.
+        """
+        pulumi.set(__self__, "disk_image", disk_image)
+        if mode is not None:
+            pulumi.set(__self__, "mode", mode)
+
+    @property
+    @pulumi.getter(name="diskImage")
+    def disk_image(self) -> pulumi.Input[str]:
+        """
+        Path to disk image to create the secondary boot disk from. After using the [gke-disk-image-builder](https://github.com/GoogleCloudPlatform/ai-on-gke/tree/main/tools/gke-disk-image-builder), this argument should be `global/images/DISK_IMAGE_NAME`.
+        """
+        return pulumi.get(self, "disk_image")
+
+    @disk_image.setter
+    def disk_image(self, value: pulumi.Input[str]):
+        pulumi.set(self, "disk_image", value)
+
+    @property
+    @pulumi.getter
+    def mode(self) -> Optional[pulumi.Input[str]]:
+        """
+        Mode for how the secondary boot disk is used. An example mode is `CONTAINER_IMAGE_CACHE`.
+        """
+        return pulumi.get(self, "mode")
+
+    @mode.setter
+    def mode(self, value: Optional[pulumi.Input[str]]):
+        pulumi.set(self, "mode", value)
+
+
 @pulumi.input_type
 class ClusterNodePoolNodeConfigShieldedInstanceConfigArgs:
     def __init__(__self__, *,
@@ -11097,6 +11260,7 @@ class NodePoolNodeConfigArgs:
                  resource_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                  resource_manager_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                  sandbox_config: Optional[pulumi.Input['NodePoolNodeConfigSandboxConfigArgs']] = None,
+                 secondary_boot_disks: Optional[pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSecondaryBootDiskArgs']]]] = None,
                  service_account: Optional[pulumi.Input[str]] = None,
                  shielded_instance_config: Optional[pulumi.Input['NodePoolNodeConfigShieldedInstanceConfigArgs']] = None,
                  sole_tenant_config: Optional[pulumi.Input['NodePoolNodeConfigSoleTenantConfigArgs']] = None,
@@ -11136,6 +11300,7 @@ class NodePoolNodeConfigArgs:
         :param pulumi.Input[Mapping[str, pulumi.Input[str]]] resource_labels: The GCE resource labels (a map of key/value pairs) to be applied to the node pool.
         :param pulumi.Input[Mapping[str, Any]] resource_manager_tags: A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
         :param pulumi.Input['NodePoolNodeConfigSandboxConfigArgs'] sandbox_config: Sandbox configuration for this node.
+        :param pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSecondaryBootDiskArgs']]] secondary_boot_disks: Secondary boot disks for preloading data or container images.
         :param pulumi.Input[str] service_account: The Google Cloud Platform Service Account to be used by the node VMs.
         :param pulumi.Input['NodePoolNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options.
         :param pulumi.Input['NodePoolNodeConfigSoleTenantConfigArgs'] sole_tenant_config: Node affinity options for sole tenant node pools.
@@ -11206,6 +11371,8 @@ class NodePoolNodeConfigArgs:
             pulumi.set(__self__, "resource_manager_tags", resource_manager_tags)
         if sandbox_config is not None:
             pulumi.set(__self__, "sandbox_config", sandbox_config)
+        if secondary_boot_disks is not None:
+            pulumi.set(__self__, "secondary_boot_disks", secondary_boot_disks)
         if service_account is not None:
             pulumi.set(__self__, "service_account", service_account)
         if shielded_instance_config is not None:
@@ -11593,6 +11760,18 @@ class NodePoolNodeConfigArgs:
     def sandbox_config(self, value: Optional[pulumi.Input['NodePoolNodeConfigSandboxConfigArgs']]):
         pulumi.set(self, "sandbox_config", value)
 
+    @property
+    @pulumi.getter(name="secondaryBootDisks")
+    def secondary_boot_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSecondaryBootDiskArgs']]]]:
+        """
+        Secondary boot disks for preloading data or container images.
+        """
+        return pulumi.get(self, "secondary_boot_disks")
+
+    @secondary_boot_disks.setter
+    def secondary_boot_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSecondaryBootDiskArgs']]]]):
+        pulumi.set(self, "secondary_boot_disks", value)
+
     @property
     @pulumi.getter(name="serviceAccount")
     def service_account(self) -> Optional[pulumi.Input[str]]:
@@ -11681,11 +11860,15 @@ class NodePoolNodeConfigArgs:
 @pulumi.input_type
 class NodePoolNodeConfigAdvancedMachineFeaturesArgs:
     def __init__(__self__, *,
-                 threads_per_core: pulumi.Input[int]):
+                 threads_per_core: pulumi.Input[int],
+                 enable_nested_virtualization: Optional[pulumi.Input[bool]] = None):
         """
         :param pulumi.Input[int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
+        :param pulumi.Input[bool] enable_nested_virtualization: Whether the node should have nested virtualization enabled.
         """
         pulumi.set(__self__, "threads_per_core", threads_per_core)
+        if enable_nested_virtualization is not None:
+            pulumi.set(__self__, "enable_nested_virtualization", enable_nested_virtualization)
 
     @property
     @pulumi.getter(name="threadsPerCore")
@@ -11699,6 +11882,18 @@ class NodePoolNodeConfigAdvancedMachineFeaturesArgs:
     def threads_per_core(self, value: pulumi.Input[int]):
         pulumi.set(self, "threads_per_core", value)
 
+    @property
+    @pulumi.getter(name="enableNestedVirtualization")
+    def enable_nested_virtualization(self) -> Optional[pulumi.Input[bool]]:
+        """
+        Whether the node should have nested virtualization enabled.
+        """
+        return pulumi.get(self, "enable_nested_virtualization")
+
+    @enable_nested_virtualization.setter
+    def enable_nested_virtualization(self, value: Optional[pulumi.Input[bool]]):
+        pulumi.set(self, "enable_nested_virtualization", value)
+
 
 @pulumi.input_type
 class NodePoolNodeConfigConfidentialNodesArgs:
@@ -12262,6 +12457,44 @@ class NodePoolNodeConfigSandboxConfigArgs:
         pulumi.set(self, "sandbox_type", value)
 
 
+@pulumi.input_type
+class NodePoolNodeConfigSecondaryBootDiskArgs:
+    def __init__(__self__, *,
+                 disk_image: pulumi.Input[str],
+                 mode: Optional[pulumi.Input[str]] = None):
+        """
+        :param pulumi.Input[str] disk_image: Disk image to create the secondary boot disk from
+        :param pulumi.Input[str] mode: Mode for how the secondary boot disk is used.
+        """
+        pulumi.set(__self__, "disk_image", disk_image)
+        if mode is not None:
+            pulumi.set(__self__, "mode", mode)
+
+    @property
+    @pulumi.getter(name="diskImage")
+    def disk_image(self) -> pulumi.Input[str]:
+        """
+        Disk image to create the secondary boot disk from
+        """
+        return pulumi.get(self, "disk_image")
+
+    @disk_image.setter
+    def disk_image(self, value: pulumi.Input[str]):
+        pulumi.set(self, "disk_image", value)
+
+    @property
+    @pulumi.getter
+    def mode(self) -> Optional[pulumi.Input[str]]:
+        """
+        Mode for how the secondary boot disk is used.
+        """
+        return pulumi.get(self, "mode")
+
+    @mode.setter
+    def mode(self, value: Optional[pulumi.Input[str]]):
+        pulumi.set(self, "mode", value)
+
+
 @pulumi.input_type
 class NodePoolNodeConfigShieldedInstanceConfigArgs:
     def __init__(__self__, *,
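The same secondary-boot-disk shape is mirrored on standalone node pools via the `NodePoolNodeConfig*` types above. A hedged end-to-end sketch; the cluster name, location, and image path are assumptions:

```python
import pulumi_gcp as gcp

# Sketch only: mirrors the cluster-level example using the NodePool variants.
pool = gcp.container.NodePool(
    "example-pool",
    cluster="example-cluster",
    location="us-central1",
    node_count=1,
    node_config=gcp.container.NodePoolNodeConfigArgs(
        gcfs_config=gcp.container.NodePoolNodeConfigGcfsConfigArgs(enabled=True),
        secondary_boot_disks=[
            gcp.container.NodePoolNodeConfigSecondaryBootDiskArgs(
                disk_image="global/images/DISK_IMAGE_NAME",  # placeholder image path
                mode="CONTAINER_IMAGE_CACHE",
            ),
        ],
    ),
)
```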