pulumi-gcp 8.10.2__py3-none-any.whl → 8.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (142)
  1. pulumi_gcp/__init__.py +91 -0
  2. pulumi_gcp/accesscontextmanager/_inputs.py +29 -0
  3. pulumi_gcp/accesscontextmanager/outputs.py +18 -0
  4. pulumi_gcp/accesscontextmanager/service_perimeter.py +36 -0
  5. pulumi_gcp/accesscontextmanager/service_perimeter_dry_run_egress_policy.py +6 -0
  6. pulumi_gcp/accesscontextmanager/service_perimeter_dry_run_ingress_policy.py +6 -0
  7. pulumi_gcp/accesscontextmanager/service_perimeter_egress_policy.py +6 -0
  8. pulumi_gcp/accesscontextmanager/service_perimeter_ingress_policy.py +6 -0
  9. pulumi_gcp/apigee/app_group.py +7 -7
  10. pulumi_gcp/applicationintegration/client.py +8 -6
  11. pulumi_gcp/artifactregistry/_inputs.py +24 -15
  12. pulumi_gcp/artifactregistry/outputs.py +32 -20
  13. pulumi_gcp/artifactregistry/repository.py +128 -0
  14. pulumi_gcp/assuredworkloads/workload.py +7 -7
  15. pulumi_gcp/backupdisasterrecovery/__init__.py +2 -0
  16. pulumi_gcp/backupdisasterrecovery/backup_plan.py +2 -2
  17. pulumi_gcp/backupdisasterrecovery/backup_vault.py +12 -8
  18. pulumi_gcp/backupdisasterrecovery/get_backup.py +153 -0
  19. pulumi_gcp/backupdisasterrecovery/get_backup_vault.py +415 -0
  20. pulumi_gcp/backupdisasterrecovery/outputs.py +63 -0
  21. pulumi_gcp/bigquery/app_profile.py +75 -0
  22. pulumi_gcp/billing/_inputs.py +6 -6
  23. pulumi_gcp/billing/outputs.py +4 -4
  24. pulumi_gcp/certificateauthority/_inputs.py +9 -9
  25. pulumi_gcp/certificateauthority/outputs.py +8 -8
  26. pulumi_gcp/cloudbuild/_inputs.py +53 -0
  27. pulumi_gcp/cloudbuild/outputs.py +50 -0
  28. pulumi_gcp/cloudbuild/worker_pool.py +47 -0
  29. pulumi_gcp/clouddeploy/_inputs.py +254 -0
  30. pulumi_gcp/clouddeploy/outputs.py +211 -0
  31. pulumi_gcp/clouddeploy/target.py +47 -0
  32. pulumi_gcp/cloudfunctionsv2/_inputs.py +6 -6
  33. pulumi_gcp/cloudfunctionsv2/outputs.py +8 -8
  34. pulumi_gcp/cloudrunv2/job.py +4 -4
  35. pulumi_gcp/cloudrunv2/service.py +4 -4
  36. pulumi_gcp/composer/get_user_workloads_config_map.py +4 -0
  37. pulumi_gcp/composer/get_user_workloads_secret.py +4 -0
  38. pulumi_gcp/composer/user_workloads_config_map.py +14 -0
  39. pulumi_gcp/composer/user_workloads_secret.py +6 -0
  40. pulumi_gcp/compute/_inputs.py +566 -25
  41. pulumi_gcp/compute/disk.py +21 -7
  42. pulumi_gcp/compute/firewall_policy_rule.py +12 -0
  43. pulumi_gcp/compute/get_forwarding_rules.py +2 -2
  44. pulumi_gcp/compute/get_global_forwarding_rule.py +12 -1
  45. pulumi_gcp/compute/get_instance_group_manager.py +12 -1
  46. pulumi_gcp/compute/get_network.py +35 -1
  47. pulumi_gcp/compute/get_region_instance_group_manager.py +12 -1
  48. pulumi_gcp/compute/get_region_network_endpoint_group.py +12 -1
  49. pulumi_gcp/compute/global_forwarding_rule.py +28 -0
  50. pulumi_gcp/compute/instance_group_manager.py +28 -0
  51. pulumi_gcp/compute/network.py +75 -0
  52. pulumi_gcp/compute/outputs.py +655 -26
  53. pulumi_gcp/compute/region_health_check.py +28 -0
  54. pulumi_gcp/compute/region_instance_group_manager.py +28 -0
  55. pulumi_gcp/compute/region_network_endpoint.py +28 -0
  56. pulumi_gcp/compute/region_network_endpoint_group.py +70 -2
  57. pulumi_gcp/compute/subnetwork.py +30 -2
  58. pulumi_gcp/compute/url_map.py +7 -7
  59. pulumi_gcp/config/__init__.pyi +4 -0
  60. pulumi_gcp/config/vars.py +8 -0
  61. pulumi_gcp/container/_inputs.py +201 -3
  62. pulumi_gcp/container/cluster.py +68 -14
  63. pulumi_gcp/container/get_cluster.py +12 -1
  64. pulumi_gcp/container/outputs.py +249 -3
  65. pulumi_gcp/dataproc/_inputs.py +209 -1
  66. pulumi_gcp/dataproc/batch.py +76 -0
  67. pulumi_gcp/dataproc/outputs.py +169 -3
  68. pulumi_gcp/diagflow/_inputs.py +3 -3
  69. pulumi_gcp/diagflow/outputs.py +2 -2
  70. pulumi_gcp/discoveryengine/search_engine.py +7 -7
  71. pulumi_gcp/firebaserules/release.py +76 -0
  72. pulumi_gcp/firestore/field.py +4 -4
  73. pulumi_gcp/gemini/__init__.py +15 -0
  74. pulumi_gcp/gemini/_inputs.py +183 -0
  75. pulumi_gcp/gemini/code_repository_index.py +659 -0
  76. pulumi_gcp/gemini/get_repository_group_iam_policy.py +171 -0
  77. pulumi_gcp/gemini/outputs.py +130 -0
  78. pulumi_gcp/gemini/repository_group.py +586 -0
  79. pulumi_gcp/gemini/repository_group_iam_binding.py +604 -0
  80. pulumi_gcp/gemini/repository_group_iam_member.py +604 -0
  81. pulumi_gcp/gemini/repository_group_iam_policy.py +443 -0
  82. pulumi_gcp/gkehub/_inputs.py +30 -10
  83. pulumi_gcp/gkehub/membership_binding.py +6 -6
  84. pulumi_gcp/gkehub/membership_rbac_role_binding.py +4 -4
  85. pulumi_gcp/gkehub/namespace.py +4 -4
  86. pulumi_gcp/gkehub/outputs.py +21 -7
  87. pulumi_gcp/gkehub/scope_rbac_role_binding.py +4 -4
  88. pulumi_gcp/iam/__init__.py +1 -0
  89. pulumi_gcp/iam/_inputs.py +137 -0
  90. pulumi_gcp/iam/folders_policy_binding.py +16 -0
  91. pulumi_gcp/iam/organizations_policy_binding.py +16 -0
  92. pulumi_gcp/iam/outputs.py +99 -0
  93. pulumi_gcp/iam/principal_access_boundary_policy.py +16 -0
  94. pulumi_gcp/iam/projects_policy_binding.py +917 -0
  95. pulumi_gcp/iap/tunnel_dest_group.py +2 -2
  96. pulumi_gcp/integrationconnectors/_inputs.py +15 -15
  97. pulumi_gcp/integrationconnectors/managed_zone.py +8 -8
  98. pulumi_gcp/integrationconnectors/outputs.py +10 -10
  99. pulumi_gcp/monitoring/_inputs.py +13 -6
  100. pulumi_gcp/monitoring/outputs.py +10 -4
  101. pulumi_gcp/netapp/_inputs.py +3 -3
  102. pulumi_gcp/netapp/active_directory.py +7 -7
  103. pulumi_gcp/netapp/outputs.py +2 -2
  104. pulumi_gcp/netapp/volume.py +11 -11
  105. pulumi_gcp/networkconnectivity/spoke.py +10 -10
  106. pulumi_gcp/networksecurity/__init__.py +4 -0
  107. pulumi_gcp/networksecurity/_inputs.py +115 -0
  108. pulumi_gcp/networksecurity/mirroring_deployment.py +848 -0
  109. pulumi_gcp/networksecurity/mirroring_deployment_group.py +752 -0
  110. pulumi_gcp/networksecurity/mirroring_endpoint_group.py +737 -0
  111. pulumi_gcp/networksecurity/mirroring_endpoint_group_association.py +840 -0
  112. pulumi_gcp/networksecurity/outputs.py +70 -0
  113. pulumi_gcp/oracledatabase/autonomous_database.py +42 -4
  114. pulumi_gcp/oracledatabase/cloud_exadata_infrastructure.py +42 -4
  115. pulumi_gcp/oracledatabase/cloud_vm_cluster.py +50 -8
  116. pulumi_gcp/oracledatabase/get_autonomous_database.py +12 -1
  117. pulumi_gcp/oracledatabase/get_cloud_exadata_infrastructure.py +12 -1
  118. pulumi_gcp/oracledatabase/get_cloud_vm_cluster.py +12 -1
  119. pulumi_gcp/oracledatabase/outputs.py +21 -0
  120. pulumi_gcp/orgpolicy/policy.py +2 -2
  121. pulumi_gcp/parallelstore/instance.py +4 -0
  122. pulumi_gcp/provider.py +20 -0
  123. pulumi_gcp/pubsub/subscription.py +6 -6
  124. pulumi_gcp/pulumi-plugin.json +1 -1
  125. pulumi_gcp/redis/_inputs.py +435 -3
  126. pulumi_gcp/redis/cluster.py +287 -16
  127. pulumi_gcp/redis/outputs.py +304 -2
  128. pulumi_gcp/serviceaccount/get_account_id_token.py +2 -2
  129. pulumi_gcp/serviceaccount/get_account_key.py +2 -2
  130. pulumi_gcp/storage/get_bucket.py +2 -2
  131. pulumi_gcp/storage/get_bucket_object_content.py +2 -2
  132. pulumi_gcp/tpu/__init__.py +1 -0
  133. pulumi_gcp/tpu/_inputs.py +188 -6
  134. pulumi_gcp/tpu/outputs.py +164 -4
  135. pulumi_gcp/tpu/v2_queued_resource.py +434 -0
  136. pulumi_gcp/vertex/ai_endpoint.py +4 -4
  137. pulumi_gcp/vertex/ai_feature_online_store_featureview.py +4 -4
  138. pulumi_gcp/vertex/ai_index_endpoint.py +2 -2
  139. {pulumi_gcp-8.10.2.dist-info → pulumi_gcp-8.11.0.dist-info}/METADATA +3 -3
  140. {pulumi_gcp-8.10.2.dist-info → pulumi_gcp-8.11.0.dist-info}/RECORD +142 -125
  141. {pulumi_gcp-8.10.2.dist-info → pulumi_gcp-8.11.0.dist-info}/WHEEL +0 -0
  142. {pulumi_gcp-8.10.2.dist-info → pulumi_gcp-8.11.0.dist-info}/top_level.txt +0 -0
pulumi_gcp/container/outputs.py
@@ -123,6 +123,7 @@ __all__ = [
  'ClusterDefaultSnatStatus',
  'ClusterDnsConfig',
  'ClusterEnableK8sBetaApis',
+ 'ClusterEnterpriseConfig',
  'ClusterFleet',
  'ClusterGatewayApiConfig',
  'ClusterIdentityServiceConfig',
@@ -175,6 +176,7 @@ __all__ = [
  'ClusterNodeConfigWorkloadMetadataConfig',
  'ClusterNodePool',
  'ClusterNodePoolAutoConfig',
+ 'ClusterNodePoolAutoConfigLinuxNodeConfig',
  'ClusterNodePoolAutoConfigNetworkTags',
  'ClusterNodePoolAutoConfigNodeKubeletConfig',
  'ClusterNodePoolAutoscaling',
@@ -323,6 +325,7 @@ __all__ = [
  'GetClusterDefaultSnatStatusResult',
  'GetClusterDnsConfigResult',
  'GetClusterEnableK8sBetaApiResult',
+ 'GetClusterEnterpriseConfigResult',
  'GetClusterFleetResult',
  'GetClusterGatewayApiConfigResult',
  'GetClusterIdentityServiceConfigResult',
@@ -375,6 +378,7 @@ __all__ = [
  'GetClusterNodeConfigWorkloadMetadataConfigResult',
  'GetClusterNodePoolResult',
  'GetClusterNodePoolAutoConfigResult',
+ 'GetClusterNodePoolAutoConfigLinuxNodeConfigResult',
  'GetClusterNodePoolAutoConfigNetworkTagResult',
  'GetClusterNodePoolAutoConfigNodeKubeletConfigResult',
  'GetClusterNodePoolAutoscalingResult',
@@ -4780,7 +4784,7 @@ class ClusterClusterAutoscalingAutoProvisioningDefaults(dict):
  """
  :param str boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: https://cloud.google.com/compute/docs/disks/customer-managed-encryption
  :param int disk_size: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. Defaults to `100`
- :param str disk_type: Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced'). Defaults to `pd-standard`
+ :param str disk_type: Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd', 'pd-balanced', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`.
  :param str image_type: The default image type used by NAP once a new node pool is being created. Please note that according to the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning#default-image-type) the value must be one of the [COS_CONTAINERD, COS, UBUNTU_CONTAINERD, UBUNTU]. __NOTE__ : COS AND UBUNTU are deprecated as of `GKE 1.24`
  :param 'ClusterClusterAutoscalingAutoProvisioningDefaultsManagementArgs' management: NodeManagement configuration for this NodePool. Structure is documented below.
  :param str min_cpu_platform: Minimum CPU platform to be used for NAP created node pools. The instance may be scheduled on the
@@ -4834,7 +4838,7 @@ class ClusterClusterAutoscalingAutoProvisioningDefaults(dict):
  @pulumi.getter(name="diskType")
  def disk_type(self) -> Optional[str]:
  """
- Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or 'pd-balanced'). Defaults to `pd-standard`
+ Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd', 'pd-balanced', or 'hyperdisk-balanced'). Defaults to `hyperdisk-balanced` if `hyperdisk-balanced` is supported and `pd-balanced` is not supported for the machine type; otherwise defaults to `pd-balanced`.
  """
  return pulumi.get(self, "disk_type")
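The two hunks above change the documented default for `disk_type` under node auto-provisioning: new NAP node pools may get `hyperdisk-balanced` or `pd-balanced` boot disks instead of `pd-standard`. A minimal Pulumi Python sketch of pinning the disk type explicitly so the new default cannot change disks under an existing stack; the cluster name, location, and resource limits are placeholders, not taken from this diff:

```python
import pulumi_gcp as gcp

# Pin the NAP boot disk type instead of relying on the new default
# ("hyperdisk-balanced" / "pd-balanced"). All names, limits, and the
# location below are illustrative.
cluster = gcp.container.Cluster(
    "example-cluster",
    location="us-central1",
    initial_node_count=1,
    cluster_autoscaling={
        "enabled": True,
        "resource_limits": [
            {"resource_type": "cpu", "minimum": 1, "maximum": 8},
            {"resource_type": "memory", "minimum": 2, "maximum": 32},
        ],
        "auto_provisioning_defaults": {
            "disk_type": "pd-balanced",  # explicit value, immune to default changes
            "disk_size": 100,
        },
    },
)
```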
@@ -5654,6 +5658,56 @@ class ClusterEnableK8sBetaApis(dict):
  return pulumi.get(self, "enabled_apis")


+ @pulumi.output_type
+ class ClusterEnterpriseConfig(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "clusterTier":
+ suggest = "cluster_tier"
+ elif key == "desiredTier":
+ suggest = "desired_tier"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in ClusterEnterpriseConfig. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ ClusterEnterpriseConfig.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ ClusterEnterpriseConfig.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ cluster_tier: Optional[str] = None,
+ desired_tier: Optional[str] = None):
+ """
+ :param str cluster_tier: The effective tier of the cluster.
+ :param str desired_tier: Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`.
+ """
+ if cluster_tier is not None:
+ pulumi.set(__self__, "cluster_tier", cluster_tier)
+ if desired_tier is not None:
+ pulumi.set(__self__, "desired_tier", desired_tier)
+
+ @property
+ @pulumi.getter(name="clusterTier")
+ def cluster_tier(self) -> Optional[str]:
+ """
+ The effective tier of the cluster.
+ """
+ return pulumi.get(self, "cluster_tier")
+
+ @property
+ @pulumi.getter(name="desiredTier")
+ def desired_tier(self) -> Optional[str]:
+ """
+ Sets the tier of the cluster. Available options include `STANDARD` and `ENTERPRISE`.
+ """
+ return pulumi.get(self, "desired_tier")
+
+
  @pulumi.output_type
  class ClusterFleet(dict):
  @staticmethod
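The new `ClusterEnterpriseConfig` output type surfaces the GKE tier. Assuming the matching `enterprise_config` input on `gcp.container.Cluster` (the resource-side counterpart of this output), a hedged sketch of requesting the Enterprise tier and reading back the effective one; names and location are illustrative:

```python
import pulumi
import pulumi_gcp as gcp

# Request the Enterprise tier via the (assumed) enterprise_config input.
cluster = gcp.container.Cluster(
    "enterprise-cluster",
    location="us-central1",
    initial_node_count=1,
    enterprise_config={
        "desired_tier": "ENTERPRISE",
    },
)

# cluster_tier is output-only: the effective tier the cluster ended up on.
pulumi.export(
    "clusterTier",
    cluster.enterprise_config.apply(lambda ec: ec.cluster_tier if ec else None),
)
```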
@@ -6865,6 +6919,8 @@ class ClusterNodeConfig(dict):
  suggest = "local_nvme_ssd_block_config"
  elif key == "localSsdCount":
  suggest = "local_ssd_count"
+ elif key == "localSsdEncryptionMode":
+ suggest = "local_ssd_encryption_mode"
  elif key == "loggingVariant":
  suggest = "logging_variant"
  elif key == "machineType":
@@ -6929,6 +6985,7 @@ class ClusterNodeConfig(dict):
  linux_node_config: Optional['outputs.ClusterNodeConfigLinuxNodeConfig'] = None,
  local_nvme_ssd_block_config: Optional['outputs.ClusterNodeConfigLocalNvmeSsdBlockConfig'] = None,
  local_ssd_count: Optional[int] = None,
+ local_ssd_encryption_mode: Optional[str] = None,
  logging_variant: Optional[str] = None,
  machine_type: Optional[str] = None,
  metadata: Optional[Mapping[str, str]] = None,
@@ -7000,6 +7057,10 @@ class ClusterNodeConfig(dict):
  :param 'ClusterNodeConfigLocalNvmeSsdBlockConfigArgs' local_nvme_ssd_block_config: Parameters for the local NVMe SSDs. Structure is documented below.
  :param int local_ssd_count: The amount of local SSD disks that will be
  attached to each cluster node. Defaults to 0.
+ :param str local_ssd_encryption_mode: Possible Local SSD encryption modes:
+ Accepted values are:
+ * `STANDARD_ENCRYPTION`: The given node will be encrypted using keys managed by Google infrastructure and the keys wll be deleted when the node is deleted.
+ * `EPHEMERAL_KEY_ENCRYPTION`: The given node will opt-in for using ephemeral key for encrypting Local SSDs. The Local SSDs will not be able to recover data in case of node crash.
  :param str logging_variant: Parameter for specifying the type of logging agent used in a node pool. This will override any cluster-wide default value. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information.
  :param str machine_type: The name of a Google Compute Engine machine type.
  Defaults to `e2-medium`. To create a custom machine type, value should be set as specified
@@ -7092,6 +7153,8 @@ class ClusterNodeConfig(dict):
  pulumi.set(__self__, "local_nvme_ssd_block_config", local_nvme_ssd_block_config)
  if local_ssd_count is not None:
  pulumi.set(__self__, "local_ssd_count", local_ssd_count)
+ if local_ssd_encryption_mode is not None:
+ pulumi.set(__self__, "local_ssd_encryption_mode", local_ssd_encryption_mode)
  if logging_variant is not None:
  pulumi.set(__self__, "logging_variant", logging_variant)
  if machine_type is not None:
@@ -7330,6 +7393,17 @@ class ClusterNodeConfig(dict):
  """
  return pulumi.get(self, "local_ssd_count")

+ @property
+ @pulumi.getter(name="localSsdEncryptionMode")
+ def local_ssd_encryption_mode(self) -> Optional[str]:
+ """
+ Possible Local SSD encryption modes:
+ Accepted values are:
+ * `STANDARD_ENCRYPTION`: The given node will be encrypted using keys managed by Google infrastructure and the keys wll be deleted when the node is deleted.
+ * `EPHEMERAL_KEY_ENCRYPTION`: The given node will opt-in for using ephemeral key for encrypting Local SSDs. The Local SSDs will not be able to recover data in case of node crash.
+ """
+ return pulumi.get(self, "local_ssd_encryption_mode")
+
  @property
  @pulumi.getter(name="loggingVariant")
  def logging_variant(self) -> Optional[str]:
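`ClusterNodeConfig` gains `local_ssd_encryption_mode`. A sketch of setting it on the cluster's default node pool, assuming the matching `node_config` input; the machine type, SSD count, and names are placeholders and must map to a shape that actually exposes local NVMe SSDs:

```python
import pulumi_gcp as gcp

# Opt the default node pool's local SSDs into ephemeral-key encryption.
cluster = gcp.container.Cluster(
    "ssd-cluster",
    location="us-central1-a",
    initial_node_count=1,
    node_config={
        "machine_type": "n2-standard-8",
        "local_ssd_count": 1,
        "local_ssd_encryption_mode": "EPHEMERAL_KEY_ENCRYPTION",
    },
)
```

With `EPHEMERAL_KEY_ENCRYPTION`, data on the local SSDs is unrecoverable after a node crash, as the docstring above notes.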
@@ -9054,7 +9128,9 @@ class ClusterNodePoolAutoConfig(dict):
  @staticmethod
  def __key_warning(key: str):
  suggest = None
- if key == "networkTags":
+ if key == "linuxNodeConfig":
+ suggest = "linux_node_config"
+ elif key == "networkTags":
  suggest = "network_tags"
  elif key == "nodeKubeletConfig":
  suggest = "node_kubelet_config"
@@ -9073,15 +9149,19 @@ class ClusterNodePoolAutoConfig(dict):
  return super().get(key, default)

  def __init__(__self__, *,
+ linux_node_config: Optional['outputs.ClusterNodePoolAutoConfigLinuxNodeConfig'] = None,
  network_tags: Optional['outputs.ClusterNodePoolAutoConfigNetworkTags'] = None,
  node_kubelet_config: Optional['outputs.ClusterNodePoolAutoConfigNodeKubeletConfig'] = None,
  resource_manager_tags: Optional[Mapping[str, str]] = None):
  """
+ :param 'ClusterNodePoolAutoConfigLinuxNodeConfigArgs' linux_node_config: Linux system configuration for the cluster's automatically provisioned node pools. Only `cgroup_mode` field is supported in `node_pool_auto_config`. Structure is documented below.
  :param 'ClusterNodePoolAutoConfigNetworkTagsArgs' network_tags: The network tag config for the cluster's automatically provisioned node pools. Structure is documented below.
  :param 'ClusterNodePoolAutoConfigNodeKubeletConfigArgs' node_kubelet_config: Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here.
  Structure is documented below.
  :param Mapping[str, str] resource_manager_tags: A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`.
  """
+ if linux_node_config is not None:
+ pulumi.set(__self__, "linux_node_config", linux_node_config)
  if network_tags is not None:
  pulumi.set(__self__, "network_tags", network_tags)
  if node_kubelet_config is not None:
@@ -9089,6 +9169,14 @@ class ClusterNodePoolAutoConfig(dict):
  if resource_manager_tags is not None:
  pulumi.set(__self__, "resource_manager_tags", resource_manager_tags)

+ @property
+ @pulumi.getter(name="linuxNodeConfig")
+ def linux_node_config(self) -> Optional['outputs.ClusterNodePoolAutoConfigLinuxNodeConfig']:
+ """
+ Linux system configuration for the cluster's automatically provisioned node pools. Only `cgroup_mode` field is supported in `node_pool_auto_config`. Structure is documented below.
+ """
+ return pulumi.get(self, "linux_node_config")
+
  @property
  @pulumi.getter(name="networkTags")
  def network_tags(self) -> Optional['outputs.ClusterNodePoolAutoConfigNetworkTags']:
@@ -9115,6 +9203,50 @@ class ClusterNodePoolAutoConfig(dict):
  return pulumi.get(self, "resource_manager_tags")


+ @pulumi.output_type
+ class ClusterNodePoolAutoConfigLinuxNodeConfig(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "cgroupMode":
+ suggest = "cgroup_mode"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolAutoConfigLinuxNodeConfig. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ ClusterNodePoolAutoConfigLinuxNodeConfig.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ ClusterNodePoolAutoConfigLinuxNodeConfig.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ cgroup_mode: Optional[str] = None):
+ """
+ :param str cgroup_mode: Possible cgroup modes that can be used.
+ Accepted values are:
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
+ """
+ if cgroup_mode is not None:
+ pulumi.set(__self__, "cgroup_mode", cgroup_mode)
+
+ @property
+ @pulumi.getter(name="cgroupMode")
+ def cgroup_mode(self) -> Optional[str]:
+ """
+ Possible cgroup modes that can be used.
+ Accepted values are:
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
+ """
+ return pulumi.get(self, "cgroup_mode")
+
+
  @pulumi.output_type
  class ClusterNodePoolAutoConfigNetworkTags(dict):
  def __init__(__self__, *,
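The new `ClusterNodePoolAutoConfigLinuxNodeConfig` type carries only `cgroup_mode`. A sketch of setting it through the cluster's `node_pool_auto_config` block, assuming the matching input shape; the Autopilot flag, names, and location are placeholders:

```python
import pulumi_gcp as gcp

# Ask GKE to run automatically provisioned node pools on cgroup v2.
# Only cgroup_mode is supported under node_pool_auto_config, per the
# docstring above.
cluster = gcp.container.Cluster(
    "autopilot-cluster",
    location="us-central1",
    enable_autopilot=True,
    node_pool_auto_config={
        "linux_node_config": {
            "cgroup_mode": "CGROUP_MODE_V2",
        },
    },
)
```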
@@ -9947,6 +10079,8 @@ class ClusterNodePoolNodeConfig(dict):
  suggest = "local_nvme_ssd_block_config"
  elif key == "localSsdCount":
  suggest = "local_ssd_count"
+ elif key == "localSsdEncryptionMode":
+ suggest = "local_ssd_encryption_mode"
  elif key == "loggingVariant":
  suggest = "logging_variant"
  elif key == "machineType":
@@ -10011,6 +10145,7 @@ class ClusterNodePoolNodeConfig(dict):
  linux_node_config: Optional['outputs.ClusterNodePoolNodeConfigLinuxNodeConfig'] = None,
  local_nvme_ssd_block_config: Optional['outputs.ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfig'] = None,
  local_ssd_count: Optional[int] = None,
+ local_ssd_encryption_mode: Optional[str] = None,
  logging_variant: Optional[str] = None,
  machine_type: Optional[str] = None,
  metadata: Optional[Mapping[str, str]] = None,
@@ -10082,6 +10217,10 @@ class ClusterNodePoolNodeConfig(dict):
  :param 'ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgs' local_nvme_ssd_block_config: Parameters for the local NVMe SSDs. Structure is documented below.
  :param int local_ssd_count: The amount of local SSD disks that will be
  attached to each cluster node. Defaults to 0.
+ :param str local_ssd_encryption_mode: Possible Local SSD encryption modes:
+ Accepted values are:
+ * `STANDARD_ENCRYPTION`: The given node will be encrypted using keys managed by Google infrastructure and the keys wll be deleted when the node is deleted.
+ * `EPHEMERAL_KEY_ENCRYPTION`: The given node will opt-in for using ephemeral key for encrypting Local SSDs. The Local SSDs will not be able to recover data in case of node crash.
  :param str logging_variant: Parameter for specifying the type of logging agent used in a node pool. This will override any cluster-wide default value. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information.
  :param str machine_type: The name of a Google Compute Engine machine type.
  Defaults to `e2-medium`. To create a custom machine type, value should be set as specified
@@ -10174,6 +10313,8 @@ class ClusterNodePoolNodeConfig(dict):
  pulumi.set(__self__, "local_nvme_ssd_block_config", local_nvme_ssd_block_config)
  if local_ssd_count is not None:
  pulumi.set(__self__, "local_ssd_count", local_ssd_count)
+ if local_ssd_encryption_mode is not None:
+ pulumi.set(__self__, "local_ssd_encryption_mode", local_ssd_encryption_mode)
  if logging_variant is not None:
  pulumi.set(__self__, "logging_variant", logging_variant)
  if machine_type is not None:
@@ -10412,6 +10553,17 @@ class ClusterNodePoolNodeConfig(dict):
  """
  return pulumi.get(self, "local_ssd_count")

+ @property
+ @pulumi.getter(name="localSsdEncryptionMode")
+ def local_ssd_encryption_mode(self) -> Optional[str]:
+ """
+ Possible Local SSD encryption modes:
+ Accepted values are:
+ * `STANDARD_ENCRYPTION`: The given node will be encrypted using keys managed by Google infrastructure and the keys wll be deleted when the node is deleted.
+ * `EPHEMERAL_KEY_ENCRYPTION`: The given node will opt-in for using ephemeral key for encrypting Local SSDs. The Local SSDs will not be able to recover data in case of node crash.
+ """
+ return pulumi.get(self, "local_ssd_encryption_mode")
+
  @property
  @pulumi.getter(name="loggingVariant")
  def logging_variant(self) -> Optional[str]:
@@ -13510,6 +13662,8 @@ class NodePoolNodeConfig(dict):
  suggest = "local_nvme_ssd_block_config"
  elif key == "localSsdCount":
  suggest = "local_ssd_count"
+ elif key == "localSsdEncryptionMode":
+ suggest = "local_ssd_encryption_mode"
  elif key == "loggingVariant":
  suggest = "logging_variant"
  elif key == "machineType":
@@ -13574,6 +13728,7 @@ class NodePoolNodeConfig(dict):
  linux_node_config: Optional['outputs.NodePoolNodeConfigLinuxNodeConfig'] = None,
  local_nvme_ssd_block_config: Optional['outputs.NodePoolNodeConfigLocalNvmeSsdBlockConfig'] = None,
  local_ssd_count: Optional[int] = None,
+ local_ssd_encryption_mode: Optional[str] = None,
  logging_variant: Optional[str] = None,
  machine_type: Optional[str] = None,
  metadata: Optional[Mapping[str, str]] = None,
@@ -13616,6 +13771,7 @@ class NodePoolNodeConfig(dict):
  :param 'NodePoolNodeConfigLinuxNodeConfigArgs' linux_node_config: Parameters that can be configured on Linux nodes.
  :param 'NodePoolNodeConfigLocalNvmeSsdBlockConfigArgs' local_nvme_ssd_block_config: Parameters for raw-block local NVMe SSDs.
  :param int local_ssd_count: The number of local SSD disks to be attached to the node.
+ :param str local_ssd_encryption_mode: LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.
  :param str logging_variant: Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
  :param str machine_type: The name of a Google Compute Engine machine type.
  :param Mapping[str, str] metadata: The metadata key/value pairs assigned to instances in the cluster.
@@ -13682,6 +13838,8 @@ class NodePoolNodeConfig(dict):
  pulumi.set(__self__, "local_nvme_ssd_block_config", local_nvme_ssd_block_config)
  if local_ssd_count is not None:
  pulumi.set(__self__, "local_ssd_count", local_ssd_count)
+ if local_ssd_encryption_mode is not None:
+ pulumi.set(__self__, "local_ssd_encryption_mode", local_ssd_encryption_mode)
  if logging_variant is not None:
  pulumi.set(__self__, "logging_variant", logging_variant)
  if machine_type is not None:
@@ -13891,6 +14049,14 @@ class NodePoolNodeConfig(dict):
  """
  return pulumi.get(self, "local_ssd_count")

+ @property
+ @pulumi.getter(name="localSsdEncryptionMode")
+ def local_ssd_encryption_mode(self) -> Optional[str]:
+ """
+ LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.
+ """
+ return pulumi.get(self, "local_ssd_encryption_mode")
+
  @property
  @pulumi.getter(name="loggingVariant")
  def logging_variant(self) -> Optional[str]:
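`NodePoolNodeConfig` gains the same `local_ssd_encryption_mode` property, so the field should also be settable on a standalone `gcp.container.NodePool`. A sketch under that assumption; the cluster name, location, and machine shape are placeholders:

```python
import pulumi_gcp as gcp

# The same knob on a standalone node pool resource.
pool = gcp.container.NodePool(
    "encrypted-ssd-pool",
    cluster="my-existing-cluster",
    location="us-central1-a",
    node_count=1,
    node_config={
        "machine_type": "n2-standard-8",
        "local_ssd_count": 1,
        "local_ssd_encryption_mode": "STANDARD_ENCRYPTION",
    },
)
```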
@@ -16657,6 +16823,35 @@ class GetClusterEnableK8sBetaApiResult(dict):
  return pulumi.get(self, "enabled_apis")


+ @pulumi.output_type
+ class GetClusterEnterpriseConfigResult(dict):
+ def __init__(__self__, *,
+ cluster_tier: str,
+ desired_tier: str):
+ """
+ :param str cluster_tier: Indicates the effective cluster tier. Available options include STANDARD and ENTERPRISE.
+ :param str desired_tier: Indicates the desired cluster tier. Available options include STANDARD and ENTERPRISE.
+ """
+ pulumi.set(__self__, "cluster_tier", cluster_tier)
+ pulumi.set(__self__, "desired_tier", desired_tier)
+
+ @property
+ @pulumi.getter(name="clusterTier")
+ def cluster_tier(self) -> str:
+ """
+ Indicates the effective cluster tier. Available options include STANDARD and ENTERPRISE.
+ """
+ return pulumi.get(self, "cluster_tier")
+
+ @property
+ @pulumi.getter(name="desiredTier")
+ def desired_tier(self) -> str:
+ """
+ Indicates the desired cluster tier. Available options include STANDARD and ENTERPRISE.
+ """
+ return pulumi.get(self, "desired_tier")
+
+
  @pulumi.output_type
  class GetClusterFleetResult(dict):
  def __init__(__self__, *,
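`GetClusterEnterpriseConfigResult` is the data-source counterpart. Assuming `gcp.container.get_cluster` now returns it through an `enterprise_configs` list (the usual pluralized wrapper, not shown in this excerpt), a sketch of reading the effective tier; the cluster name and location are placeholders:

```python
import pulumi
import pulumi_gcp as gcp

# Read the effective tier back through the get_cluster data source.
info = gcp.container.get_cluster(
    name="enterprise-cluster",
    location="us-central1",
)
pulumi.export("effectiveTier", info.enterprise_configs[0].cluster_tier)
```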
@@ -17327,6 +17522,7 @@ class GetClusterNodeConfigResult(dict):
  linux_node_configs: Sequence['outputs.GetClusterNodeConfigLinuxNodeConfigResult'],
  local_nvme_ssd_block_configs: Sequence['outputs.GetClusterNodeConfigLocalNvmeSsdBlockConfigResult'],
  local_ssd_count: int,
+ local_ssd_encryption_mode: str,
  logging_variant: str,
  machine_type: str,
  metadata: Mapping[str, str],
@@ -17369,6 +17565,7 @@ class GetClusterNodeConfigResult(dict):
  :param Sequence['GetClusterNodeConfigLinuxNodeConfigArgs'] linux_node_configs: Parameters that can be configured on Linux nodes.
  :param Sequence['GetClusterNodeConfigLocalNvmeSsdBlockConfigArgs'] local_nvme_ssd_block_configs: Parameters for raw-block local NVMe SSDs.
  :param int local_ssd_count: The number of local SSD disks to be attached to the node.
+ :param str local_ssd_encryption_mode: LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.
  :param str logging_variant: Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
  :param str machine_type: The name of a Google Compute Engine machine type.
  :param Mapping[str, str] metadata: The metadata key/value pairs assigned to instances in the cluster.
@@ -17411,6 +17608,7 @@ class GetClusterNodeConfigResult(dict):
  pulumi.set(__self__, "linux_node_configs", linux_node_configs)
  pulumi.set(__self__, "local_nvme_ssd_block_configs", local_nvme_ssd_block_configs)
  pulumi.set(__self__, "local_ssd_count", local_ssd_count)
+ pulumi.set(__self__, "local_ssd_encryption_mode", local_ssd_encryption_mode)
  pulumi.set(__self__, "logging_variant", logging_variant)
  pulumi.set(__self__, "machine_type", machine_type)
  pulumi.set(__self__, "metadata", metadata)
@@ -17600,6 +17798,14 @@ class GetClusterNodeConfigResult(dict):
  """
  return pulumi.get(self, "local_ssd_count")

+ @property
+ @pulumi.getter(name="localSsdEncryptionMode")
+ def local_ssd_encryption_mode(self) -> str:
+ """
+ LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.
+ """
+ return pulumi.get(self, "local_ssd_encryption_mode")
+
  @property
  @pulumi.getter(name="loggingVariant")
  def logging_variant(self) -> str:
@@ -18722,18 +18928,29 @@ class GetClusterNodePoolResult(dict):
  @pulumi.output_type
  class GetClusterNodePoolAutoConfigResult(dict):
  def __init__(__self__, *,
+ linux_node_configs: Sequence['outputs.GetClusterNodePoolAutoConfigLinuxNodeConfigResult'],
  network_tags: Sequence['outputs.GetClusterNodePoolAutoConfigNetworkTagResult'],
  node_kubelet_configs: Sequence['outputs.GetClusterNodePoolAutoConfigNodeKubeletConfigResult'],
  resource_manager_tags: Mapping[str, str]):
  """
+ :param Sequence['GetClusterNodePoolAutoConfigLinuxNodeConfigArgs'] linux_node_configs: Linux node configuration options.
  :param Sequence['GetClusterNodePoolAutoConfigNetworkTagArgs'] network_tags: Collection of Compute Engine network tags that can be applied to a node's underlying VM instance.
  :param Sequence['GetClusterNodePoolAutoConfigNodeKubeletConfigArgs'] node_kubelet_configs: Node kubelet configs.
  :param Mapping[str, str] resource_manager_tags: A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
  """
+ pulumi.set(__self__, "linux_node_configs", linux_node_configs)
  pulumi.set(__self__, "network_tags", network_tags)
  pulumi.set(__self__, "node_kubelet_configs", node_kubelet_configs)
  pulumi.set(__self__, "resource_manager_tags", resource_manager_tags)

+ @property
+ @pulumi.getter(name="linuxNodeConfigs")
+ def linux_node_configs(self) -> Sequence['outputs.GetClusterNodePoolAutoConfigLinuxNodeConfigResult']:
+ """
+ Linux node configuration options.
+ """
+ return pulumi.get(self, "linux_node_configs")
+
  @property
  @pulumi.getter(name="networkTags")
  def network_tags(self) -> Sequence['outputs.GetClusterNodePoolAutoConfigNetworkTagResult']:
@@ -18759,6 +18976,24 @@ class GetClusterNodePoolAutoConfigResult(dict):
  return pulumi.get(self, "resource_manager_tags")


+ @pulumi.output_type
+ class GetClusterNodePoolAutoConfigLinuxNodeConfigResult(dict):
+ def __init__(__self__, *,
+ cgroup_mode: str):
+ """
+ :param str cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
+ """
+ pulumi.set(__self__, "cgroup_mode", cgroup_mode)
+
+ @property
+ @pulumi.getter(name="cgroupMode")
+ def cgroup_mode(self) -> str:
+ """
+ cgroupMode specifies the cgroup mode to be used on the node.
+ """
+ return pulumi.get(self, "cgroup_mode")
+
+
  @pulumi.output_type
  class GetClusterNodePoolAutoConfigNetworkTagResult(dict):
  def __init__(__self__, *,
@@ -19285,6 +19520,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
  linux_node_configs: Sequence['outputs.GetClusterNodePoolNodeConfigLinuxNodeConfigResult'],
  local_nvme_ssd_block_configs: Sequence['outputs.GetClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigResult'],
  local_ssd_count: int,
+ local_ssd_encryption_mode: str,
  logging_variant: str,
  machine_type: str,
  metadata: Mapping[str, str],
@@ -19327,6 +19563,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
  :param Sequence['GetClusterNodePoolNodeConfigLinuxNodeConfigArgs'] linux_node_configs: Parameters that can be configured on Linux nodes.
  :param Sequence['GetClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgs'] local_nvme_ssd_block_configs: Parameters for raw-block local NVMe SSDs.
  :param int local_ssd_count: The number of local SSD disks to be attached to the node.
+ :param str local_ssd_encryption_mode: LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.
  :param str logging_variant: Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
  :param str machine_type: The name of a Google Compute Engine machine type.
  :param Mapping[str, str] metadata: The metadata key/value pairs assigned to instances in the cluster.
@@ -19369,6 +19606,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
  pulumi.set(__self__, "linux_node_configs", linux_node_configs)
  pulumi.set(__self__, "local_nvme_ssd_block_configs", local_nvme_ssd_block_configs)
  pulumi.set(__self__, "local_ssd_count", local_ssd_count)
+ pulumi.set(__self__, "local_ssd_encryption_mode", local_ssd_encryption_mode)
  pulumi.set(__self__, "logging_variant", logging_variant)
  pulumi.set(__self__, "machine_type", machine_type)
  pulumi.set(__self__, "metadata", metadata)
@@ -19558,6 +19796,14 @@ class GetClusterNodePoolNodeConfigResult(dict):
  """
  return pulumi.get(self, "local_ssd_count")

+ @property
+ @pulumi.getter(name="localSsdEncryptionMode")
+ def local_ssd_encryption_mode(self) -> str:
+ """
+ LocalSsdEncryptionMode specified the method used for encrypting the local SSDs attached to the node.
+ """
+ return pulumi.get(self, "local_ssd_encryption_mode")
+
  @property
  @pulumi.getter(name="loggingVariant")
  def logging_variant(self) -> str: