pulumi-gcp 8.1.0a1726293903__py3-none-any.whl → 8.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128)
  1. pulumi_gcp/__init__.py +62 -0
  2. pulumi_gcp/alloydb/_inputs.py +94 -0
  3. pulumi_gcp/alloydb/cluster.py +94 -1
  4. pulumi_gcp/alloydb/outputs.py +79 -0
  5. pulumi_gcp/assuredworkloads/workload.py +135 -16
  6. pulumi_gcp/backupdisasterrecovery/__init__.py +1 -0
  7. pulumi_gcp/backupdisasterrecovery/backup_vault.py +1203 -0
  8. pulumi_gcp/bigquery/__init__.py +1 -0
  9. pulumi_gcp/bigquery/_inputs.py +33 -0
  10. pulumi_gcp/bigquery/data_transfer_config.py +134 -6
  11. pulumi_gcp/bigquery/get_tables.py +143 -0
  12. pulumi_gcp/bigquery/outputs.py +66 -0
  13. pulumi_gcp/bigqueryanalyticshub/_inputs.py +169 -7
  14. pulumi_gcp/bigqueryanalyticshub/data_exchange.py +96 -3
  15. pulumi_gcp/bigqueryanalyticshub/listing.py +114 -0
  16. pulumi_gcp/bigqueryanalyticshub/outputs.py +138 -5
  17. pulumi_gcp/bigtable/_inputs.py +21 -1
  18. pulumi_gcp/bigtable/outputs.py +13 -1
  19. pulumi_gcp/bigtable/table.py +34 -0
  20. pulumi_gcp/certificateauthority/authority.py +14 -7
  21. pulumi_gcp/certificatemanager/__init__.py +1 -0
  22. pulumi_gcp/certificatemanager/certificate.py +28 -0
  23. pulumi_gcp/certificatemanager/get_certificates.py +150 -0
  24. pulumi_gcp/certificatemanager/outputs.py +322 -0
  25. pulumi_gcp/cloudbuild/_inputs.py +6 -6
  26. pulumi_gcp/cloudbuild/outputs.py +4 -4
  27. pulumi_gcp/cloudrun/_inputs.py +6 -12
  28. pulumi_gcp/cloudrun/outputs.py +8 -16
  29. pulumi_gcp/cloudrunv2/_inputs.py +72 -15
  30. pulumi_gcp/cloudrunv2/outputs.py +82 -16
  31. pulumi_gcp/cloudrunv2/service.py +50 -4
  32. pulumi_gcp/cloudtasks/_inputs.py +630 -0
  33. pulumi_gcp/cloudtasks/outputs.py +479 -0
  34. pulumi_gcp/cloudtasks/queue.py +238 -0
  35. pulumi_gcp/compute/__init__.py +1 -0
  36. pulumi_gcp/compute/_inputs.py +2300 -257
  37. pulumi_gcp/compute/get_instance.py +4 -1
  38. pulumi_gcp/compute/health_check.py +114 -0
  39. pulumi_gcp/compute/instance.py +86 -4
  40. pulumi_gcp/compute/instance_template.py +66 -0
  41. pulumi_gcp/compute/interconnect.py +28 -21
  42. pulumi_gcp/compute/network_firewall_policy_with_rules.py +826 -0
  43. pulumi_gcp/compute/node_template.py +93 -0
  44. pulumi_gcp/compute/outputs.py +1491 -6
  45. pulumi_gcp/compute/region_target_http_proxy.py +159 -0
  46. pulumi_gcp/compute/region_target_https_proxy.py +175 -0
  47. pulumi_gcp/compute/service_attachment.py +75 -0
  48. pulumi_gcp/compute/target_http_proxy.py +49 -28
  49. pulumi_gcp/compute/target_https_proxy.py +77 -28
  50. pulumi_gcp/config/__init__.pyi +4 -0
  51. pulumi_gcp/config/vars.py +8 -0
  52. pulumi_gcp/container/_inputs.py +236 -3
  53. pulumi_gcp/container/attached_cluster.py +61 -8
  54. pulumi_gcp/container/outputs.py +276 -2
  55. pulumi_gcp/databasemigrationservice/_inputs.py +176 -148
  56. pulumi_gcp/databasemigrationservice/connection_profile.py +206 -0
  57. pulumi_gcp/databasemigrationservice/outputs.py +109 -87
  58. pulumi_gcp/dataloss/_inputs.py +353 -1
  59. pulumi_gcp/dataloss/outputs.py +274 -3
  60. pulumi_gcp/dataproc/_inputs.py +27 -27
  61. pulumi_gcp/dataproc/metastore_federation.py +8 -8
  62. pulumi_gcp/dataproc/metastore_service.py +2 -0
  63. pulumi_gcp/dataproc/outputs.py +18 -18
  64. pulumi_gcp/datastream/_inputs.py +69 -1
  65. pulumi_gcp/datastream/outputs.py +44 -2
  66. pulumi_gcp/datastream/stream.py +201 -7
  67. pulumi_gcp/developerconnect/__init__.py +11 -0
  68. pulumi_gcp/developerconnect/_inputs.py +301 -0
  69. pulumi_gcp/developerconnect/connection.py +1034 -0
  70. pulumi_gcp/developerconnect/git_repository_link.py +873 -0
  71. pulumi_gcp/developerconnect/outputs.py +247 -0
  72. pulumi_gcp/discoveryengine/_inputs.py +188 -0
  73. pulumi_gcp/discoveryengine/data_store.py +14 -14
  74. pulumi_gcp/discoveryengine/outputs.py +153 -1
  75. pulumi_gcp/firebase/database_instance.py +7 -7
  76. pulumi_gcp/gkehub/_inputs.py +25 -1
  77. pulumi_gcp/gkehub/feature_membership.py +12 -6
  78. pulumi_gcp/gkehub/outputs.py +17 -1
  79. pulumi_gcp/gkeonprem/_inputs.py +3 -3
  80. pulumi_gcp/gkeonprem/outputs.py +2 -2
  81. pulumi_gcp/iam/_inputs.py +196 -0
  82. pulumi_gcp/iam/get_workload_identity_pool_provider.py +13 -3
  83. pulumi_gcp/iam/outputs.py +295 -0
  84. pulumi_gcp/iam/workload_identity_pool_provider.py +164 -3
  85. pulumi_gcp/kms/__init__.py +2 -0
  86. pulumi_gcp/kms/autokey_config.py +10 -2
  87. pulumi_gcp/kms/get_crypto_key_latest_version.py +222 -0
  88. pulumi_gcp/kms/get_crypto_key_versions.py +175 -0
  89. pulumi_gcp/kms/outputs.py +164 -0
  90. pulumi_gcp/memorystore/__init__.py +10 -0
  91. pulumi_gcp/memorystore/_inputs.py +731 -0
  92. pulumi_gcp/memorystore/instance.py +1663 -0
  93. pulumi_gcp/memorystore/outputs.py +598 -0
  94. pulumi_gcp/netapp/active_directory.py +6 -6
  95. pulumi_gcp/netapp/backup.py +6 -6
  96. pulumi_gcp/netapp/backup_policy.py +6 -6
  97. pulumi_gcp/netapp/backup_vault.py +6 -6
  98. pulumi_gcp/netapp/storage_pool.py +4 -4
  99. pulumi_gcp/netapp/volume.py +108 -0
  100. pulumi_gcp/networkconnectivity/_inputs.py +21 -1
  101. pulumi_gcp/networkconnectivity/outputs.py +15 -1
  102. pulumi_gcp/networkconnectivity/spoke.py +8 -0
  103. pulumi_gcp/networksecurity/client_tls_policy.py +24 -22
  104. pulumi_gcp/networksecurity/server_tls_policy.py +20 -32
  105. pulumi_gcp/organizations/folder.py +52 -33
  106. pulumi_gcp/organizations/get_project.py +13 -3
  107. pulumi_gcp/organizations/project.py +88 -3
  108. pulumi_gcp/parallelstore/instance.py +121 -121
  109. pulumi_gcp/projects/iam_member_remove.py +26 -0
  110. pulumi_gcp/projects/usage_export_bucket.py +38 -0
  111. pulumi_gcp/provider.py +40 -0
  112. pulumi_gcp/pubsub/_inputs.py +40 -0
  113. pulumi_gcp/pubsub/outputs.py +51 -1
  114. pulumi_gcp/pubsub/subscription.py +6 -0
  115. pulumi_gcp/pulumi-plugin.json +1 -1
  116. pulumi_gcp/redis/_inputs.py +419 -0
  117. pulumi_gcp/redis/cluster.py +123 -0
  118. pulumi_gcp/redis/outputs.py +315 -0
  119. pulumi_gcp/securitycenter/__init__.py +2 -0
  120. pulumi_gcp/securitycenter/v2_folder_scc_big_query_export.py +857 -0
  121. pulumi_gcp/securitycenter/v2_organization_scc_big_query_exports.py +4 -4
  122. pulumi_gcp/securitycenter/v2_project_mute_config.py +2 -2
  123. pulumi_gcp/securitycenter/v2_project_scc_big_query_export.py +796 -0
  124. pulumi_gcp/vpcaccess/connector.py +21 -28
  125. {pulumi_gcp-8.1.0a1726293903.dist-info → pulumi_gcp-8.2.0.dist-info}/METADATA +1 -1
  126. {pulumi_gcp-8.1.0a1726293903.dist-info → pulumi_gcp-8.2.0.dist-info}/RECORD +128 -111
  127. {pulumi_gcp-8.1.0a1726293903.dist-info → pulumi_gcp-8.2.0.dist-info}/WHEEL +1 -1
  128. {pulumi_gcp-8.1.0a1726293903.dist-info → pulumi_gcp-8.2.0.dist-info}/top_level.txt +0 -0
pulumi_gcp/container/outputs.py

@@ -27,6 +27,7 @@ __all__ = [
  'AttachedClusterOidcConfig',
  'AttachedClusterProxyConfig',
  'AttachedClusterProxyConfigKubernetesSecret',
+ 'AttachedClusterSecurityPostureConfig',
  'AttachedClusterWorkloadIdentityConfig',
  'AwsClusterAuthorization',
  'AwsClusterAuthorizationAdminGroup',
@@ -170,6 +171,7 @@ __all__ = [
  'ClusterNodePool',
  'ClusterNodePoolAutoConfig',
  'ClusterNodePoolAutoConfigNetworkTags',
+ 'ClusterNodePoolAutoConfigNodeKubeletConfig',
  'ClusterNodePoolAutoscaling',
  'ClusterNodePoolDefaults',
  'ClusterNodePoolDefaultsNodeConfigDefaults',
@@ -362,6 +364,7 @@ __all__ = [
  'GetClusterNodePoolResult',
  'GetClusterNodePoolAutoConfigResult',
  'GetClusterNodePoolAutoConfigNetworkTagResult',
+ 'GetClusterNodePoolAutoConfigNodeKubeletConfigResult',
  'GetClusterNodePoolAutoscalingResult',
  'GetClusterNodePoolDefaultResult',
  'GetClusterNodePoolDefaultNodeConfigDefaultResult',
@@ -837,6 +840,43 @@ class AttachedClusterProxyConfigKubernetesSecret(dict):
  return pulumi.get(self, "namespace")


+ @pulumi.output_type
+ class AttachedClusterSecurityPostureConfig(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "vulnerabilityMode":
+ suggest = "vulnerability_mode"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in AttachedClusterSecurityPostureConfig. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ AttachedClusterSecurityPostureConfig.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ AttachedClusterSecurityPostureConfig.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ vulnerability_mode: str):
+ """
+ :param str vulnerability_mode: Sets the mode of the Kubernetes security posture API's workload vulnerability scanning.
+ Possible values are: `VULNERABILITY_DISABLED`, `VULNERABILITY_ENTERPRISE`.
+ """
+ pulumi.set(__self__, "vulnerability_mode", vulnerability_mode)
+
+ @property
+ @pulumi.getter(name="vulnerabilityMode")
+ def vulnerability_mode(self) -> str:
+ """
+ Sets the mode of the Kubernetes security posture API's workload vulnerability scanning.
+ Possible values are: `VULNERABILITY_DISABLED`, `VULNERABILITY_ENTERPRISE`.
+ """
+ return pulumi.get(self, "vulnerability_mode")
+
+
  @pulumi.output_type
  class AttachedClusterWorkloadIdentityConfig(dict):
  @staticmethod
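The new AttachedClusterSecurityPostureConfig output type mirrors an input block on gcp.container.AttachedCluster (attached_cluster.py changes in the same release). A minimal sketch of setting it at create time, assuming the resource exposes a security_posture_config argument; every other value below is a placeholder:

    import pulumi_gcp as gcp

    # Assumed usage: enable enterprise vulnerability scanning on an attached cluster.
    attached = gcp.container.AttachedCluster(
        "example-attached-cluster",
        location="us-west1",
        project="my-project",                    # placeholder project
        distribution="eks",                      # placeholder distribution
        platform_version="1.30.0-gke.1",         # placeholder platform version
        fleet=gcp.container.AttachedClusterFleetArgs(project="projects/my-project"),
        oidc_config=gcp.container.AttachedClusterOidcConfigArgs(issuer_url="https://example.com/issuer"),
        security_posture_config=gcp.container.AttachedClusterSecurityPostureConfigArgs(
            vulnerability_mode="VULNERABILITY_ENTERPRISE",
        ),
    )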
@@ -6629,6 +6669,8 @@ class ClusterNodeConfig(dict):
  suggest = "shielded_instance_config"
  elif key == "soleTenantConfig":
  suggest = "sole_tenant_config"
+ elif key == "storagePools":
+ suggest = "storage_pools"
  elif key == "workloadMetadataConfig":
  suggest = "workload_metadata_config"

@@ -6681,6 +6723,7 @@ class ClusterNodeConfig(dict):
  shielded_instance_config: Optional['outputs.ClusterNodeConfigShieldedInstanceConfig'] = None,
  sole_tenant_config: Optional['outputs.ClusterNodeConfigSoleTenantConfig'] = None,
  spot: Optional[bool] = None,
+ storage_pools: Optional[Sequence[str]] = None,
  tags: Optional[Sequence[str]] = None,
  taints: Optional[Sequence['outputs.ClusterNodeConfigTaint']] = None,
  workload_metadata_config: Optional['outputs.ClusterNodeConfigWorkloadMetadataConfig'] = None):
@@ -6771,6 +6814,7 @@ class ClusterNodeConfig(dict):
  :param bool spot: A boolean that represents whether the underlying node VMs are spot.
  See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms)
  for more information. Defaults to false.
+ :param Sequence[str] storage_pools: The list of Storage Pools where boot disks are provisioned.
  :param Sequence[str] tags: The list of instance tags applied to all nodes. Tags are used to identify
  valid sources or targets for network firewalls.
  :param Sequence['ClusterNodeConfigTaintArgs'] taints: A list of [Kubernetes taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
@@ -6858,6 +6902,8 @@ class ClusterNodeConfig(dict):
  pulumi.set(__self__, "sole_tenant_config", sole_tenant_config)
  if spot is not None:
  pulumi.set(__self__, "spot", spot)
+ if storage_pools is not None:
+ pulumi.set(__self__, "storage_pools", storage_pools)
  if tags is not None:
  pulumi.set(__self__, "tags", tags)
  if taints is not None:
@@ -7210,6 +7256,14 @@ class ClusterNodeConfig(dict):
  """
  return pulumi.get(self, "spot")

+ @property
+ @pulumi.getter(name="storagePools")
+ def storage_pools(self) -> Optional[Sequence[str]]:
+ """
+ The list of Storage Pools where boot disks are provisioned.
+ """
+ return pulumi.get(self, "storage_pools")
+
  @property
  @pulumi.getter
  def tags(self) -> Optional[Sequence[str]]:
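The storage_pools field added to ClusterNodeConfig pairs with the cluster's node_config input. A minimal sketch, assuming the input side accepts a list of Compute Engine Storage Pool self-links and that a Hyperdisk boot disk type is used with them; the pool URL and machine type are placeholders:

    import pulumi_gcp as gcp

    # Assumed usage: provision node boot disks from an existing Hyperdisk Storage Pool.
    cluster = gcp.container.Cluster(
        "example-cluster",
        location="us-central1-a",
        initial_node_count=1,
        node_config=gcp.container.ClusterNodeConfigArgs(
            machine_type="c3-standard-4",
            disk_type="hyperdisk-balanced",
            storage_pools=[
                "projects/my-project/zones/us-central1-a/storagePools/my-storage-pool",
            ],
        ),
    )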
@@ -7875,6 +7929,8 @@ class ClusterNodeConfigKubeletConfig(dict):
  suggest = "cpu_cfs_quota"
  elif key == "cpuCfsQuotaPeriod":
  suggest = "cpu_cfs_quota_period"
+ elif key == "insecureKubeletReadonlyPortEnabled":
+ suggest = "insecure_kubelet_readonly_port_enabled"
  elif key == "podPidsLimit":
  suggest = "pod_pids_limit"

@@ -7893,6 +7949,7 @@ class ClusterNodeConfigKubeletConfig(dict):
  cpu_manager_policy: str,
  cpu_cfs_quota: Optional[bool] = None,
  cpu_cfs_quota_period: Optional[str] = None,
+ insecure_kubelet_readonly_port_enabled: Optional[str] = None,
  pod_pids_limit: Optional[int] = None):
  """
  :param str cpu_manager_policy: The CPU management policy on the node. See
@@ -7909,6 +7966,7 @@ class ClusterNodeConfigKubeletConfig(dict):
  value and accepts an invalid `default` value instead. While this remains true,
  not specifying the `kubelet_config` block should be the equivalent of specifying
  `none`.
+ :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
  :param int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
  """
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
@@ -7916,6 +7974,8 @@ class ClusterNodeConfigKubeletConfig(dict):
  pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
  if cpu_cfs_quota_period is not None:
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
+ if insecure_kubelet_readonly_port_enabled is not None:
+ pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
  if pod_pids_limit is not None:
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)

@@ -7954,6 +8014,14 @@ class ClusterNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "cpu_cfs_quota_period")

+ @property
+ @pulumi.getter(name="insecureKubeletReadonlyPortEnabled")
+ def insecure_kubelet_readonly_port_enabled(self) -> Optional[str]:
+ """
+ Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ """
+ return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
+
  @property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> Optional[int]:
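insecure_kubelet_readonly_port_enabled lands in ClusterNodeConfigKubeletConfig as a TRUE/FALSE string rather than a bool. A minimal sketch of disabling the read-only port on the default node pool, assuming the matching input block in _inputs.py uses the same field name:

    import pulumi_gcp as gcp

    # Assumed usage: turn off the kubelet read-only port (10255) for default-pool nodes.
    cluster = gcp.container.Cluster(
        "example-hardened-cluster",
        location="us-central1-a",
        initial_node_count=1,
        node_config=gcp.container.ClusterNodeConfigArgs(
            kubelet_config=gcp.container.ClusterNodeConfigKubeletConfigArgs(
                cpu_manager_policy="none",
                insecure_kubelet_readonly_port_enabled="FALSE",  # string enum, not a bool
            ),
        ),
    )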
@@ -8707,6 +8775,8 @@ class ClusterNodePoolAutoConfig(dict):
  suggest = None
  if key == "networkTags":
  suggest = "network_tags"
+ elif key == "nodeKubeletConfig":
+ suggest = "node_kubelet_config"
  elif key == "resourceManagerTags":
  suggest = "resource_manager_tags"

@@ -8723,13 +8793,18 @@ class ClusterNodePoolAutoConfig(dict):

  def __init__(__self__, *,
  network_tags: Optional['outputs.ClusterNodePoolAutoConfigNetworkTags'] = None,
+ node_kubelet_config: Optional['outputs.ClusterNodePoolAutoConfigNodeKubeletConfig'] = None,
  resource_manager_tags: Optional[Mapping[str, str]] = None):
  """
- :param 'ClusterNodePoolAutoConfigNetworkTagsArgs' network_tags: The network tag config for the cluster's automatically provisioned node pools.
+ :param 'ClusterNodePoolAutoConfigNetworkTagsArgs' network_tags: The network tag config for the cluster's automatically provisioned node pools. Structure is documented below.
+ :param 'ClusterNodePoolAutoConfigNodeKubeletConfigArgs' node_kubelet_config: Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here.
+ Structure is documented below.
  :param Mapping[str, str] resource_manager_tags: A map of resource manager tag keys and values to be attached to the nodes for managing Compute Engine firewalls using Network Firewall Policies. Tags must be according to specifications found [here](https://cloud.google.com/vpc/docs/tags-firewalls-overview#specifications). A maximum of 5 tag key-value pairs can be specified. Existing tags will be replaced with new values. Tags must be in one of the following formats ([KEY]=[VALUE]) 1. `tagKeys/{tag_key_id}=tagValues/{tag_value_id}` 2. `{org_id}/{tag_key_name}={tag_value_name}` 3. `{project_id}/{tag_key_name}={tag_value_name}`.
  """
  if network_tags is not None:
  pulumi.set(__self__, "network_tags", network_tags)
+ if node_kubelet_config is not None:
+ pulumi.set(__self__, "node_kubelet_config", node_kubelet_config)
  if resource_manager_tags is not None:
  pulumi.set(__self__, "resource_manager_tags", resource_manager_tags)

@@ -8737,10 +8812,19 @@ class ClusterNodePoolAutoConfig(dict):
  @pulumi.getter(name="networkTags")
  def network_tags(self) -> Optional['outputs.ClusterNodePoolAutoConfigNetworkTags']:
  """
- The network tag config for the cluster's automatically provisioned node pools.
+ The network tag config for the cluster's automatically provisioned node pools. Structure is documented below.
  """
  return pulumi.get(self, "network_tags")

+ @property
+ @pulumi.getter(name="nodeKubeletConfig")
+ def node_kubelet_config(self) -> Optional['outputs.ClusterNodePoolAutoConfigNodeKubeletConfig']:
+ """
+ Kubelet configuration for Autopilot clusters. Currently, only `insecure_kubelet_readonly_port_enabled` is supported here.
+ Structure is documented below.
+ """
+ return pulumi.get(self, "node_kubelet_config")
+
  @property
  @pulumi.getter(name="resourceManagerTags")
  def resource_manager_tags(self) -> Optional[Mapping[str, str]]:
@@ -8769,6 +8853,42 @@ class ClusterNodePoolAutoConfigNetworkTags(dict):
  return pulumi.get(self, "tags")


+ @pulumi.output_type
+ class ClusterNodePoolAutoConfigNodeKubeletConfig(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "insecureKubeletReadonlyPortEnabled":
+ suggest = "insecure_kubelet_readonly_port_enabled"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolAutoConfigNodeKubeletConfig. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ ClusterNodePoolAutoConfigNodeKubeletConfig.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ ClusterNodePoolAutoConfigNodeKubeletConfig.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ insecure_kubelet_readonly_port_enabled: Optional[str] = None):
+ """
+ :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ """
+ if insecure_kubelet_readonly_port_enabled is not None:
+ pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
+
+ @property
+ @pulumi.getter(name="insecureKubeletReadonlyPortEnabled")
+ def insecure_kubelet_readonly_port_enabled(self) -> Optional[str]:
+ """
+ Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ """
+ return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
+
+
  @pulumi.output_type
  class ClusterNodePoolAutoscaling(dict):
  @staticmethod
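ClusterNodePoolAutoConfigNodeKubeletConfig exposes the same toggle for Autopilot-managed node pools via node_pool_auto_config. A minimal sketch, assuming the corresponding input block carries the new node_kubelet_config field:

    import pulumi_gcp as gcp

    # Assumed usage: disable the read-only port on nodes that Autopilot provisions.
    autopilot = gcp.container.Cluster(
        "example-autopilot-cluster",
        location="us-central1",
        enable_autopilot=True,
        node_pool_auto_config=gcp.container.ClusterNodePoolAutoConfigArgs(
            node_kubelet_config=gcp.container.ClusterNodePoolAutoConfigNodeKubeletConfigArgs(
                insecure_kubelet_readonly_port_enabled="FALSE",
            ),
        ),
    )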
@@ -8906,6 +9026,8 @@ class ClusterNodePoolDefaultsNodeConfigDefaults(dict):
  suggest = "containerd_config"
  elif key == "gcfsConfig":
  suggest = "gcfs_config"
+ elif key == "insecureKubeletReadonlyPortEnabled":
+ suggest = "insecure_kubelet_readonly_port_enabled"
  elif key == "loggingVariant":
  suggest = "logging_variant"

@@ -8923,16 +9045,20 @@ class ClusterNodePoolDefaultsNodeConfigDefaults(dict):
  def __init__(__self__, *,
  containerd_config: Optional['outputs.ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfig'] = None,
  gcfs_config: Optional['outputs.ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfig'] = None,
+ insecure_kubelet_readonly_port_enabled: Optional[str] = None,
  logging_variant: Optional[str] = None):
  """
  :param 'ClusterNodePoolDefaultsNodeConfigDefaultsContainerdConfigArgs' containerd_config: Parameters for containerd configuration.
  :param 'ClusterNodePoolDefaultsNodeConfigDefaultsGcfsConfigArgs' gcfs_config: The default Google Container Filesystem (GCFS) configuration at the cluster level. e.g. enable [image streaming](https://cloud.google.com/kubernetes-engine/docs/how-to/image-streaming) across all the node pools within the cluster. Structure is documented below.
+ :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
  :param str logging_variant: The type of logging agent that is deployed by default for newly created node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT. See [Increasing logging agent throughput](https://cloud.google.com/stackdriver/docs/solutions/gke/managing-logs#throughput) for more information.
  """
  if containerd_config is not None:
  pulumi.set(__self__, "containerd_config", containerd_config)
  if gcfs_config is not None:
  pulumi.set(__self__, "gcfs_config", gcfs_config)
+ if insecure_kubelet_readonly_port_enabled is not None:
+ pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
  if logging_variant is not None:
  pulumi.set(__self__, "logging_variant", logging_variant)

@@ -8952,6 +9078,14 @@ class ClusterNodePoolDefaultsNodeConfigDefaults(dict):
  """
  return pulumi.get(self, "gcfs_config")

+ @property
+ @pulumi.getter(name="insecureKubeletReadonlyPortEnabled")
+ def insecure_kubelet_readonly_port_enabled(self) -> Optional[str]:
+ """
+ Controls whether the kubelet read-only port is enabled for newly created node pools in the cluster. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ """
+ return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
+
  @property
  @pulumi.getter(name="loggingVariant")
  def logging_variant(self) -> Optional[str]:
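The same field on ClusterNodePoolDefaultsNodeConfigDefaults sets the cluster-wide default for newly created node pools. A minimal sketch, assuming the input block under node_pool_defaults.node_config_defaults accepts it directly:

    import pulumi_gcp as gcp

    # Assumed usage: make the read-only port default to off for every new node pool.
    cluster = gcp.container.Cluster(
        "example-cluster-defaults",
        location="us-central1",
        initial_node_count=1,
        node_pool_defaults=gcp.container.ClusterNodePoolDefaultsArgs(
            node_config_defaults=gcp.container.ClusterNodePoolDefaultsNodeConfigDefaultsArgs(
                insecure_kubelet_readonly_port_enabled="FALSE",
            ),
        ),
    )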
@@ -9558,6 +9692,8 @@ class ClusterNodePoolNodeConfig(dict):
  suggest = "shielded_instance_config"
  elif key == "soleTenantConfig":
  suggest = "sole_tenant_config"
+ elif key == "storagePools":
+ suggest = "storage_pools"
  elif key == "workloadMetadataConfig":
  suggest = "workload_metadata_config"

@@ -9610,6 +9746,7 @@ class ClusterNodePoolNodeConfig(dict):
  shielded_instance_config: Optional['outputs.ClusterNodePoolNodeConfigShieldedInstanceConfig'] = None,
  sole_tenant_config: Optional['outputs.ClusterNodePoolNodeConfigSoleTenantConfig'] = None,
  spot: Optional[bool] = None,
+ storage_pools: Optional[Sequence[str]] = None,
  tags: Optional[Sequence[str]] = None,
  taints: Optional[Sequence['outputs.ClusterNodePoolNodeConfigTaint']] = None,
  workload_metadata_config: Optional['outputs.ClusterNodePoolNodeConfigWorkloadMetadataConfig'] = None):
@@ -9700,6 +9837,7 @@ class ClusterNodePoolNodeConfig(dict):
  :param bool spot: A boolean that represents whether the underlying node VMs are spot.
  See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms)
  for more information. Defaults to false.
+ :param Sequence[str] storage_pools: The list of Storage Pools where boot disks are provisioned.
  :param Sequence[str] tags: The list of instance tags applied to all nodes. Tags are used to identify
  valid sources or targets for network firewalls.
  :param Sequence['ClusterNodePoolNodeConfigTaintArgs'] taints: A list of [Kubernetes taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
@@ -9787,6 +9925,8 @@ class ClusterNodePoolNodeConfig(dict):
  pulumi.set(__self__, "sole_tenant_config", sole_tenant_config)
  if spot is not None:
  pulumi.set(__self__, "spot", spot)
+ if storage_pools is not None:
+ pulumi.set(__self__, "storage_pools", storage_pools)
  if tags is not None:
  pulumi.set(__self__, "tags", tags)
  if taints is not None:
@@ -10139,6 +10279,14 @@ class ClusterNodePoolNodeConfig(dict):
  """
  return pulumi.get(self, "spot")

+ @property
+ @pulumi.getter(name="storagePools")
+ def storage_pools(self) -> Optional[Sequence[str]]:
+ """
+ The list of Storage Pools where boot disks are provisioned.
+ """
+ return pulumi.get(self, "storage_pools")
+
  @property
  @pulumi.getter
  def tags(self) -> Optional[Sequence[str]]:
@@ -10804,6 +10952,8 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  suggest = "cpu_cfs_quota"
  elif key == "cpuCfsQuotaPeriod":
  suggest = "cpu_cfs_quota_period"
+ elif key == "insecureKubeletReadonlyPortEnabled":
+ suggest = "insecure_kubelet_readonly_port_enabled"
  elif key == "podPidsLimit":
  suggest = "pod_pids_limit"

@@ -10822,6 +10972,7 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  cpu_manager_policy: str,
  cpu_cfs_quota: Optional[bool] = None,
  cpu_cfs_quota_period: Optional[str] = None,
+ insecure_kubelet_readonly_port_enabled: Optional[str] = None,
  pod_pids_limit: Optional[int] = None):
  """
  :param str cpu_manager_policy: The CPU management policy on the node. See
@@ -10838,6 +10989,7 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  value and accepts an invalid `default` value instead. While this remains true,
  not specifying the `kubelet_config` block should be the equivalent of specifying
  `none`.
+ :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
  :param int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
  """
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
@@ -10845,6 +10997,8 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
  if cpu_cfs_quota_period is not None:
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
+ if insecure_kubelet_readonly_port_enabled is not None:
+ pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
  if pod_pids_limit is not None:
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)

@@ -10883,6 +11037,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "cpu_cfs_quota_period")

+ @property
+ @pulumi.getter(name="insecureKubeletReadonlyPortEnabled")
+ def insecure_kubelet_readonly_port_enabled(self) -> Optional[str]:
+ """
+ Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ """
+ return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
+
  @property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> Optional[int]:
@@ -12900,6 +13062,8 @@ class NodePoolNodeConfig(dict):
  suggest = "shielded_instance_config"
  elif key == "soleTenantConfig":
  suggest = "sole_tenant_config"
+ elif key == "storagePools":
+ suggest = "storage_pools"
  elif key == "workloadMetadataConfig":
  suggest = "workload_metadata_config"

@@ -12952,6 +13116,7 @@ class NodePoolNodeConfig(dict):
  shielded_instance_config: Optional['outputs.NodePoolNodeConfigShieldedInstanceConfig'] = None,
  sole_tenant_config: Optional['outputs.NodePoolNodeConfigSoleTenantConfig'] = None,
  spot: Optional[bool] = None,
+ storage_pools: Optional[Sequence[str]] = None,
  tags: Optional[Sequence[str]] = None,
  taints: Optional[Sequence['outputs.NodePoolNodeConfigTaint']] = None,
  workload_metadata_config: Optional['outputs.NodePoolNodeConfigWorkloadMetadataConfig'] = None):
@@ -12996,6 +13161,7 @@ class NodePoolNodeConfig(dict):
  :param 'NodePoolNodeConfigShieldedInstanceConfigArgs' shielded_instance_config: Shielded Instance options.
  :param 'NodePoolNodeConfigSoleTenantConfigArgs' sole_tenant_config: Node affinity options for sole tenant node pools.
  :param bool spot: Whether the nodes are created as spot VM instances.
+ :param Sequence[str] storage_pools: The list of Storage Pools where boot disks are provisioned.
  :param Sequence[str] tags: The list of instance tags applied to all nodes.
  :param Sequence['NodePoolNodeConfigTaintArgs'] taints: List of Kubernetes taints to be applied to each node.
  :param 'NodePoolNodeConfigWorkloadMetadataConfigArgs' workload_metadata_config: The workload metadata configuration for this node.
@@ -13074,6 +13240,8 @@ class NodePoolNodeConfig(dict):
  pulumi.set(__self__, "sole_tenant_config", sole_tenant_config)
  if spot is not None:
  pulumi.set(__self__, "spot", spot)
+ if storage_pools is not None:
+ pulumi.set(__self__, "storage_pools", storage_pools)
  if tags is not None:
  pulumi.set(__self__, "tags", tags)
  if taints is not None:
@@ -13380,6 +13548,14 @@ class NodePoolNodeConfig(dict):
  """
  return pulumi.get(self, "spot")

+ @property
+ @pulumi.getter(name="storagePools")
+ def storage_pools(self) -> Optional[Sequence[str]]:
+ """
+ The list of Storage Pools where boot disks are provisioned.
+ """
+ return pulumi.get(self, "storage_pools")
+
  @property
  @pulumi.getter
  def tags(self) -> Optional[Sequence[str]]:
@@ -14018,6 +14194,8 @@ class NodePoolNodeConfigKubeletConfig(dict):
  suggest = "cpu_cfs_quota"
  elif key == "cpuCfsQuotaPeriod":
  suggest = "cpu_cfs_quota_period"
+ elif key == "insecureKubeletReadonlyPortEnabled":
+ suggest = "insecure_kubelet_readonly_port_enabled"
  elif key == "podPidsLimit":
  suggest = "pod_pids_limit"

@@ -14036,11 +14214,13 @@ class NodePoolNodeConfigKubeletConfig(dict):
  cpu_manager_policy: str,
  cpu_cfs_quota: Optional[bool] = None,
  cpu_cfs_quota_period: Optional[str] = None,
+ insecure_kubelet_readonly_port_enabled: Optional[str] = None,
  pod_pids_limit: Optional[int] = None):
  """
  :param str cpu_manager_policy: Control the CPU management policy on the node.
  :param bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
  :param str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
+ :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
  :param int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
  """
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
@@ -14048,6 +14228,8 @@ class NodePoolNodeConfigKubeletConfig(dict):
  pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
  if cpu_cfs_quota_period is not None:
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
+ if insecure_kubelet_readonly_port_enabled is not None:
+ pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
  if pod_pids_limit is not None:
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)

@@ -14075,6 +14257,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "cpu_cfs_quota_period")

+ @property
+ @pulumi.getter(name="insecureKubeletReadonlyPortEnabled")
+ def insecure_kubelet_readonly_port_enabled(self) -> Optional[str]:
+ """
+ Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ """
+ return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
+
  @property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> Optional[int]:
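NodePoolNodeConfig and NodePoolNodeConfigKubeletConfig pick up the same two additions for standalone gcp.container.NodePool resources. A minimal sketch combining both, with placeholder cluster and storage-pool names:

    import pulumi_gcp as gcp

    # Assumed usage: a separately managed node pool with a storage pool and the read-only port off.
    pool = gcp.container.NodePool(
        "example-node-pool",
        cluster="example-cluster",               # placeholder cluster name
        location="us-central1-a",
        node_count=1,
        node_config=gcp.container.NodePoolNodeConfigArgs(
            disk_type="hyperdisk-balanced",
            storage_pools=[
                "projects/my-project/zones/us-central1-a/storagePools/my-storage-pool",
            ],
            kubelet_config=gcp.container.NodePoolNodeConfigKubeletConfigArgs(
                cpu_manager_policy="none",
                insecure_kubelet_readonly_port_enabled="FALSE",
            ),
        ),
    )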
@@ -16533,6 +16723,7 @@ class GetClusterNodeConfigResult(dict):
  shielded_instance_configs: Sequence['outputs.GetClusterNodeConfigShieldedInstanceConfigResult'],
  sole_tenant_configs: Sequence['outputs.GetClusterNodeConfigSoleTenantConfigResult'],
  spot: bool,
+ storage_pools: Sequence[str],
  tags: Sequence[str],
  taints: Sequence['outputs.GetClusterNodeConfigTaintResult'],
  workload_metadata_configs: Sequence['outputs.GetClusterNodeConfigWorkloadMetadataConfigResult']):
@@ -16574,6 +16765,7 @@ class GetClusterNodeConfigResult(dict):
  :param Sequence['GetClusterNodeConfigShieldedInstanceConfigArgs'] shielded_instance_configs: Shielded Instance options.
  :param Sequence['GetClusterNodeConfigSoleTenantConfigArgs'] sole_tenant_configs: Node affinity options for sole tenant node pools.
  :param bool spot: Whether the nodes are created as spot VM instances.
+ :param Sequence[str] storage_pools: The list of Storage Pools where boot disks are provisioned.
  :param Sequence[str] tags: The list of instance tags applied to all nodes.
  :param Sequence['GetClusterNodeConfigTaintArgs'] taints: List of Kubernetes taints to be applied to each node.
  :param Sequence['GetClusterNodeConfigWorkloadMetadataConfigArgs'] workload_metadata_configs: The workload metadata configuration for this node.
@@ -16615,6 +16807,7 @@ class GetClusterNodeConfigResult(dict):
  pulumi.set(__self__, "shielded_instance_configs", shielded_instance_configs)
  pulumi.set(__self__, "sole_tenant_configs", sole_tenant_configs)
  pulumi.set(__self__, "spot", spot)
+ pulumi.set(__self__, "storage_pools", storage_pools)
  pulumi.set(__self__, "tags", tags)
  pulumi.set(__self__, "taints", taints)
  pulumi.set(__self__, "workload_metadata_configs", workload_metadata_configs)
@@ -16915,6 +17108,14 @@ class GetClusterNodeConfigResult(dict):
  """
  return pulumi.get(self, "spot")

+ @property
+ @pulumi.getter(name="storagePools")
+ def storage_pools(self) -> Sequence[str]:
+ """
+ The list of Storage Pools where boot disks are provisioned.
+ """
+ return pulumi.get(self, "storage_pools")
+
  @property
  @pulumi.getter
  def tags(self) -> Sequence[str]:
@@ -17344,16 +17545,19 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  cpu_cfs_quota: bool,
  cpu_cfs_quota_period: str,
  cpu_manager_policy: str,
+ insecure_kubelet_readonly_port_enabled: str,
  pod_pids_limit: int):
  """
  :param bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
  :param str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
  :param str cpu_manager_policy: Control the CPU management policy on the node.
+ :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
  :param int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
  """
  pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
+ pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)

  @property
@@ -17380,6 +17584,14 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  """
  return pulumi.get(self, "cpu_manager_policy")

+ @property
+ @pulumi.getter(name="insecureKubeletReadonlyPortEnabled")
+ def insecure_kubelet_readonly_port_enabled(self) -> str:
+ """
+ Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ """
+ return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
+
  @property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> int:
@@ -17851,12 +18063,15 @@ class GetClusterNodePoolResult(dict):
  class GetClusterNodePoolAutoConfigResult(dict):
  def __init__(__self__, *,
  network_tags: Sequence['outputs.GetClusterNodePoolAutoConfigNetworkTagResult'],
+ node_kubelet_configs: Sequence['outputs.GetClusterNodePoolAutoConfigNodeKubeletConfigResult'],
  resource_manager_tags: Mapping[str, str]):
  """
  :param Sequence['GetClusterNodePoolAutoConfigNetworkTagArgs'] network_tags: Collection of Compute Engine network tags that can be applied to a node's underlying VM instance.
+ :param Sequence['GetClusterNodePoolAutoConfigNodeKubeletConfigArgs'] node_kubelet_configs: Node kubelet configs.
  :param Mapping[str, str] resource_manager_tags: A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.
  """
  pulumi.set(__self__, "network_tags", network_tags)
+ pulumi.set(__self__, "node_kubelet_configs", node_kubelet_configs)
  pulumi.set(__self__, "resource_manager_tags", resource_manager_tags)

  @property
@@ -17867,6 +18082,14 @@ class GetClusterNodePoolAutoConfigResult(dict):
  """
  return pulumi.get(self, "network_tags")

+ @property
+ @pulumi.getter(name="nodeKubeletConfigs")
+ def node_kubelet_configs(self) -> Sequence['outputs.GetClusterNodePoolAutoConfigNodeKubeletConfigResult']:
+ """
+ Node kubelet configs.
+ """
+ return pulumi.get(self, "node_kubelet_configs")
+
  @property
  @pulumi.getter(name="resourceManagerTags")
  def resource_manager_tags(self) -> Mapping[str, str]:
@@ -17894,6 +18117,24 @@ class GetClusterNodePoolAutoConfigNetworkTagResult(dict):
  return pulumi.get(self, "tags")


+ @pulumi.output_type
+ class GetClusterNodePoolAutoConfigNodeKubeletConfigResult(dict):
+ def __init__(__self__, *,
+ insecure_kubelet_readonly_port_enabled: str):
+ """
+ :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ """
+ pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
+
+ @property
+ @pulumi.getter(name="insecureKubeletReadonlyPortEnabled")
+ def insecure_kubelet_readonly_port_enabled(self) -> str:
+ """
+ Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ """
+ return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
+
+
  @pulumi.output_type
  class GetClusterNodePoolAutoscalingResult(dict):
  def __init__(__self__, *,
@@ -17979,14 +18220,17 @@ class GetClusterNodePoolDefaultNodeConfigDefaultResult(dict):
  def __init__(__self__, *,
  containerd_configs: Sequence['outputs.GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigResult'],
  gcfs_configs: Sequence['outputs.GetClusterNodePoolDefaultNodeConfigDefaultGcfsConfigResult'],
+ insecure_kubelet_readonly_port_enabled: str,
  logging_variant: str):
  """
  :param Sequence['GetClusterNodePoolDefaultNodeConfigDefaultContainerdConfigArgs'] containerd_configs: Parameters for containerd configuration.
  :param Sequence['GetClusterNodePoolDefaultNodeConfigDefaultGcfsConfigArgs'] gcfs_configs: GCFS configuration for this node.
+ :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
  :param str logging_variant: Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.
  """
  pulumi.set(__self__, "containerd_configs", containerd_configs)
  pulumi.set(__self__, "gcfs_configs", gcfs_configs)
+ pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
  pulumi.set(__self__, "logging_variant", logging_variant)

  @property
@@ -18005,6 +18249,14 @@ class GetClusterNodePoolDefaultNodeConfigDefaultResult(dict):
  """
  return pulumi.get(self, "gcfs_configs")

+ @property
+ @pulumi.getter(name="insecureKubeletReadonlyPortEnabled")
+ def insecure_kubelet_readonly_port_enabled(self) -> str:
+ """
+ Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ """
+ return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
+
  @property
  @pulumi.getter(name="loggingVariant")
  def logging_variant(self) -> str:
@@ -18389,6 +18641,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
  shielded_instance_configs: Sequence['outputs.GetClusterNodePoolNodeConfigShieldedInstanceConfigResult'],
  sole_tenant_configs: Sequence['outputs.GetClusterNodePoolNodeConfigSoleTenantConfigResult'],
  spot: bool,
+ storage_pools: Sequence[str],
  tags: Sequence[str],
  taints: Sequence['outputs.GetClusterNodePoolNodeConfigTaintResult'],
  workload_metadata_configs: Sequence['outputs.GetClusterNodePoolNodeConfigWorkloadMetadataConfigResult']):
@@ -18430,6 +18683,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
  :param Sequence['GetClusterNodePoolNodeConfigShieldedInstanceConfigArgs'] shielded_instance_configs: Shielded Instance options.
  :param Sequence['GetClusterNodePoolNodeConfigSoleTenantConfigArgs'] sole_tenant_configs: Node affinity options for sole tenant node pools.
  :param bool spot: Whether the nodes are created as spot VM instances.
+ :param Sequence[str] storage_pools: The list of Storage Pools where boot disks are provisioned.
  :param Sequence[str] tags: The list of instance tags applied to all nodes.
  :param Sequence['GetClusterNodePoolNodeConfigTaintArgs'] taints: List of Kubernetes taints to be applied to each node.
  :param Sequence['GetClusterNodePoolNodeConfigWorkloadMetadataConfigArgs'] workload_metadata_configs: The workload metadata configuration for this node.
@@ -18471,6 +18725,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
  pulumi.set(__self__, "shielded_instance_configs", shielded_instance_configs)
  pulumi.set(__self__, "sole_tenant_configs", sole_tenant_configs)
  pulumi.set(__self__, "spot", spot)
+ pulumi.set(__self__, "storage_pools", storage_pools)
  pulumi.set(__self__, "tags", tags)
  pulumi.set(__self__, "taints", taints)
  pulumi.set(__self__, "workload_metadata_configs", workload_metadata_configs)
@@ -18771,6 +19026,14 @@ class GetClusterNodePoolNodeConfigResult(dict):
  """
  return pulumi.get(self, "spot")

+ @property
+ @pulumi.getter(name="storagePools")
+ def storage_pools(self) -> Sequence[str]:
+ """
+ The list of Storage Pools where boot disks are provisioned.
+ """
+ return pulumi.get(self, "storage_pools")
+
  @property
  @pulumi.getter
  def tags(self) -> Sequence[str]:
@@ -19200,16 +19463,19 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  cpu_cfs_quota: bool,
  cpu_cfs_quota_period: str,
  cpu_manager_policy: str,
+ insecure_kubelet_readonly_port_enabled: str,
  pod_pids_limit: int):
  """
  :param bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
  :param str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
  :param str cpu_manager_policy: Control the CPU management policy on the node.
+ :param str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
  :param int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
  """
  pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
+ pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)

  @property
@@ -19236,6 +19502,14 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  """
  return pulumi.get(self, "cpu_manager_policy")

+ @property
+ @pulumi.getter(name="insecureKubeletReadonlyPortEnabled")
+ def insecure_kubelet_readonly_port_enabled(self) -> str:
+ """
+ Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ """
+ return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
+
  @property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> int:
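The Get* result types above surface the new fields through the gcp.container.get_cluster data source, where nested objects come back as sequences. A minimal sketch of reading them, assuming the top-level result exposes node_configs and node_pool_auto_configs lists and that the cluster named below already exists:

    import pulumi
    import pulumi_gcp as gcp

    # Assumed usage: inspect the new fields on an existing cluster.
    info = gcp.container.get_cluster(name="example-cluster", location="us-central1")

    # Nested results are plural sequences in the generated SDK, hence the [0] indexing.
    pulumi.export("default_storage_pools", info.node_configs[0].storage_pools)
    pulumi.export("autopilot_kubelet_readonly_port",
                  info.node_pool_auto_configs[0].node_kubelet_configs[0].insecure_kubelet_readonly_port_enabled)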