pulumi-gcp 8.41.0a1755297349__py3-none-any.whl → 8.42.0a1756095712__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (343)
  1. pulumi_gcp/__init__.py +24 -0
  2. pulumi_gcp/accesscontextmanager/access_policy_iam_binding.py +2 -0
  3. pulumi_gcp/accesscontextmanager/access_policy_iam_member.py +2 -0
  4. pulumi_gcp/accesscontextmanager/access_policy_iam_policy.py +2 -0
  5. pulumi_gcp/apigateway/api_config_iam_binding.py +2 -0
  6. pulumi_gcp/apigateway/api_config_iam_member.py +2 -0
  7. pulumi_gcp/apigateway/api_config_iam_policy.py +2 -0
  8. pulumi_gcp/apigateway/api_iam_binding.py +2 -0
  9. pulumi_gcp/apigateway/api_iam_member.py +2 -0
  10. pulumi_gcp/apigateway/api_iam_policy.py +2 -0
  11. pulumi_gcp/apigateway/gateway_iam_binding.py +2 -0
  12. pulumi_gcp/apigateway/gateway_iam_member.py +2 -0
  13. pulumi_gcp/apigateway/gateway_iam_policy.py +2 -0
  14. pulumi_gcp/apigee/environment_iam_binding.py +2 -0
  15. pulumi_gcp/apigee/environment_iam_member.py +2 -0
  16. pulumi_gcp/apigee/environment_iam_policy.py +2 -0
  17. pulumi_gcp/artifactregistry/__init__.py +5 -0
  18. pulumi_gcp/artifactregistry/get_package.py +220 -0
  19. pulumi_gcp/artifactregistry/get_repositories.py +160 -0
  20. pulumi_gcp/artifactregistry/get_tag.py +187 -0
  21. pulumi_gcp/artifactregistry/get_tags.py +200 -0
  22. pulumi_gcp/artifactregistry/get_version.py +261 -0
  23. pulumi_gcp/artifactregistry/outputs.py +130 -0
  24. pulumi_gcp/artifactregistry/repository_iam_binding.py +2 -0
  25. pulumi_gcp/artifactregistry/repository_iam_member.py +2 -0
  26. pulumi_gcp/artifactregistry/repository_iam_policy.py +2 -0
  27. pulumi_gcp/backupdisasterrecovery/backup_plan.py +114 -7
  28. pulumi_gcp/backupdisasterrecovery/get_backup_plan.py +12 -1
  29. pulumi_gcp/beyondcorp/application_iam_binding.py +8 -0
  30. pulumi_gcp/beyondcorp/application_iam_member.py +8 -0
  31. pulumi_gcp/beyondcorp/application_iam_policy.py +8 -0
  32. pulumi_gcp/beyondcorp/get_application_iam_policy.py +4 -0
  33. pulumi_gcp/beyondcorp/security_gateway_application_iam_binding.py +2 -0
  34. pulumi_gcp/beyondcorp/security_gateway_application_iam_member.py +2 -0
  35. pulumi_gcp/beyondcorp/security_gateway_application_iam_policy.py +2 -0
  36. pulumi_gcp/beyondcorp/security_gateway_iam_binding.py +2 -0
  37. pulumi_gcp/beyondcorp/security_gateway_iam_member.py +2 -0
  38. pulumi_gcp/beyondcorp/security_gateway_iam_policy.py +2 -0
  39. pulumi_gcp/bigquery/connection_iam_binding.py +2 -0
  40. pulumi_gcp/bigquery/connection_iam_member.py +2 -0
  41. pulumi_gcp/bigquery/connection_iam_policy.py +2 -0
  42. pulumi_gcp/bigquery/data_transfer_config.py +2 -0
  43. pulumi_gcp/bigquery/dataset.py +2 -2
  44. pulumi_gcp/bigquery/iam_binding.py +2 -0
  45. pulumi_gcp/bigquery/iam_member.py +2 -0
  46. pulumi_gcp/bigquery/iam_policy.py +2 -0
  47. pulumi_gcp/bigquery/reservation.py +535 -0
  48. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_binding.py +2 -0
  49. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_member.py +2 -0
  50. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_policy.py +2 -0
  51. pulumi_gcp/bigqueryanalyticshub/listing_iam_binding.py +2 -0
  52. pulumi_gcp/bigqueryanalyticshub/listing_iam_member.py +2 -0
  53. pulumi_gcp/bigqueryanalyticshub/listing_iam_policy.py +2 -0
  54. pulumi_gcp/bigquerydatapolicy/data_policy_iam_binding.py +2 -0
  55. pulumi_gcp/bigquerydatapolicy/data_policy_iam_member.py +2 -0
  56. pulumi_gcp/bigquerydatapolicy/data_policy_iam_policy.py +2 -0
  57. pulumi_gcp/binaryauthorization/attestor_iam_binding.py +2 -0
  58. pulumi_gcp/binaryauthorization/attestor_iam_member.py +2 -0
  59. pulumi_gcp/binaryauthorization/attestor_iam_policy.py +2 -0
  60. pulumi_gcp/certificateauthority/ca_pool_iam_binding.py +2 -0
  61. pulumi_gcp/certificateauthority/ca_pool_iam_member.py +2 -0
  62. pulumi_gcp/certificateauthority/ca_pool_iam_policy.py +2 -0
  63. pulumi_gcp/certificateauthority/certificate_template_iam_binding.py +2 -0
  64. pulumi_gcp/certificateauthority/certificate_template_iam_member.py +2 -0
  65. pulumi_gcp/certificateauthority/certificate_template_iam_policy.py +2 -0
  66. pulumi_gcp/cloudbuildv2/connection_iam_binding.py +2 -0
  67. pulumi_gcp/cloudbuildv2/connection_iam_member.py +2 -0
  68. pulumi_gcp/cloudbuildv2/connection_iam_policy.py +2 -0
  69. pulumi_gcp/clouddeploy/_inputs.py +48 -48
  70. pulumi_gcp/clouddeploy/deploy_policy.py +54 -74
  71. pulumi_gcp/clouddeploy/outputs.py +32 -32
  72. pulumi_gcp/cloudfunctions/function_iam_binding.py +2 -0
  73. pulumi_gcp/cloudfunctions/function_iam_member.py +2 -0
  74. pulumi_gcp/cloudfunctions/function_iam_policy.py +2 -0
  75. pulumi_gcp/cloudfunctionsv2/function_iam_binding.py +2 -0
  76. pulumi_gcp/cloudfunctionsv2/function_iam_member.py +2 -0
  77. pulumi_gcp/cloudfunctionsv2/function_iam_policy.py +2 -0
  78. pulumi_gcp/cloudrun/iam_binding.py +2 -0
  79. pulumi_gcp/cloudrun/iam_member.py +2 -0
  80. pulumi_gcp/cloudrun/iam_policy.py +2 -0
  81. pulumi_gcp/cloudrunv2/job_iam_binding.py +2 -0
  82. pulumi_gcp/cloudrunv2/job_iam_member.py +2 -0
  83. pulumi_gcp/cloudrunv2/job_iam_policy.py +2 -0
  84. pulumi_gcp/cloudrunv2/service_iam_binding.py +2 -0
  85. pulumi_gcp/cloudrunv2/service_iam_member.py +2 -0
  86. pulumi_gcp/cloudrunv2/service_iam_policy.py +2 -0
  87. pulumi_gcp/cloudrunv2/worker_pool_iam_binding.py +2 -0
  88. pulumi_gcp/cloudrunv2/worker_pool_iam_member.py +2 -0
  89. pulumi_gcp/cloudrunv2/worker_pool_iam_policy.py +2 -0
  90. pulumi_gcp/cloudtasks/queue_iam_binding.py +2 -0
  91. pulumi_gcp/cloudtasks/queue_iam_member.py +2 -0
  92. pulumi_gcp/cloudtasks/queue_iam_policy.py +2 -0
  93. pulumi_gcp/colab/runtime_template_iam_binding.py +2 -0
  94. pulumi_gcp/colab/runtime_template_iam_member.py +2 -0
  95. pulumi_gcp/colab/runtime_template_iam_policy.py +2 -0
  96. pulumi_gcp/composer/user_workloads_config_map.py +26 -2
  97. pulumi_gcp/compute/_inputs.py +355 -0
  98. pulumi_gcp/compute/disk_iam_binding.py +2 -0
  99. pulumi_gcp/compute/disk_iam_member.py +2 -0
  100. pulumi_gcp/compute/disk_iam_policy.py +2 -0
  101. pulumi_gcp/compute/get_region_backend_service.py +12 -1
  102. pulumi_gcp/compute/image_iam_binding.py +2 -0
  103. pulumi_gcp/compute/image_iam_member.py +2 -0
  104. pulumi_gcp/compute/image_iam_policy.py +2 -0
  105. pulumi_gcp/compute/instance_iam_binding.py +2 -0
  106. pulumi_gcp/compute/instance_iam_member.py +2 -0
  107. pulumi_gcp/compute/instance_iam_policy.py +2 -0
  108. pulumi_gcp/compute/instance_template_iam_binding.py +2 -0
  109. pulumi_gcp/compute/instance_template_iam_member.py +2 -0
  110. pulumi_gcp/compute/instance_template_iam_policy.py +2 -0
  111. pulumi_gcp/compute/instant_snapshot_iam_binding.py +2 -0
  112. pulumi_gcp/compute/instant_snapshot_iam_member.py +2 -0
  113. pulumi_gcp/compute/instant_snapshot_iam_policy.py +2 -0
  114. pulumi_gcp/compute/machine_image_iam_binding.py +2 -0
  115. pulumi_gcp/compute/machine_image_iam_member.py +2 -0
  116. pulumi_gcp/compute/machine_image_iam_policy.py +2 -0
  117. pulumi_gcp/compute/outputs.py +404 -0
  118. pulumi_gcp/compute/region_backend_service.py +257 -0
  119. pulumi_gcp/compute/region_disk_iam_binding.py +2 -0
  120. pulumi_gcp/compute/region_disk_iam_member.py +2 -0
  121. pulumi_gcp/compute/region_disk_iam_policy.py +2 -0
  122. pulumi_gcp/compute/region_security_policy.py +54 -0
  123. pulumi_gcp/compute/service_attachment.py +126 -0
  124. pulumi_gcp/compute/snapshot_iam_binding.py +2 -0
  125. pulumi_gcp/compute/snapshot_iam_member.py +2 -0
  126. pulumi_gcp/compute/snapshot_iam_policy.py +2 -0
  127. pulumi_gcp/compute/storage_pool_iam_binding.py +2 -0
  128. pulumi_gcp/compute/storage_pool_iam_member.py +2 -0
  129. pulumi_gcp/compute/storage_pool_iam_policy.py +2 -0
  130. pulumi_gcp/compute/subnetwork_iam_binding.py +2 -0
  131. pulumi_gcp/compute/subnetwork_iam_member.py +2 -0
  132. pulumi_gcp/compute/subnetwork_iam_policy.py +2 -0
  133. pulumi_gcp/config/__init__.pyi +0 -4
  134. pulumi_gcp/config/vars.py +0 -8
  135. pulumi_gcp/container/_inputs.py +2373 -267
  136. pulumi_gcp/container/outputs.py +2481 -81
  137. pulumi_gcp/containeranalysis/note_iam_binding.py +2 -0
  138. pulumi_gcp/containeranalysis/note_iam_member.py +2 -0
  139. pulumi_gcp/containeranalysis/note_iam_policy.py +2 -0
  140. pulumi_gcp/datacatalog/entry_group_iam_binding.py +2 -0
  141. pulumi_gcp/datacatalog/entry_group_iam_member.py +2 -0
  142. pulumi_gcp/datacatalog/entry_group_iam_policy.py +2 -0
  143. pulumi_gcp/datacatalog/policy_tag_iam_binding.py +2 -0
  144. pulumi_gcp/datacatalog/policy_tag_iam_member.py +2 -0
  145. pulumi_gcp/datacatalog/policy_tag_iam_policy.py +2 -0
  146. pulumi_gcp/datacatalog/tag_template_iam_binding.py +2 -0
  147. pulumi_gcp/datacatalog/tag_template_iam_member.py +2 -0
  148. pulumi_gcp/datacatalog/tag_template_iam_policy.py +2 -0
  149. pulumi_gcp/datacatalog/taxonomy_iam_binding.py +2 -0
  150. pulumi_gcp/datacatalog/taxonomy_iam_member.py +2 -0
  151. pulumi_gcp/datacatalog/taxonomy_iam_policy.py +2 -0
  152. pulumi_gcp/datafusion/instance.py +18 -4
  153. pulumi_gcp/dataplex/aspect_type_iam_binding.py +2 -0
  154. pulumi_gcp/dataplex/aspect_type_iam_member.py +2 -0
  155. pulumi_gcp/dataplex/aspect_type_iam_policy.py +2 -0
  156. pulumi_gcp/dataplex/asset_iam_binding.py +2 -0
  157. pulumi_gcp/dataplex/asset_iam_member.py +2 -0
  158. pulumi_gcp/dataplex/asset_iam_policy.py +2 -0
  159. pulumi_gcp/dataplex/datascan_iam_binding.py +2 -0
  160. pulumi_gcp/dataplex/datascan_iam_member.py +2 -0
  161. pulumi_gcp/dataplex/datascan_iam_policy.py +2 -0
  162. pulumi_gcp/dataplex/entry_group_iam_binding.py +2 -0
  163. pulumi_gcp/dataplex/entry_group_iam_member.py +2 -0
  164. pulumi_gcp/dataplex/entry_group_iam_policy.py +2 -0
  165. pulumi_gcp/dataplex/entry_type_iam_binding.py +2 -0
  166. pulumi_gcp/dataplex/entry_type_iam_member.py +2 -0
  167. pulumi_gcp/dataplex/entry_type_iam_policy.py +2 -0
  168. pulumi_gcp/dataplex/glossary_iam_binding.py +2 -0
  169. pulumi_gcp/dataplex/glossary_iam_member.py +2 -0
  170. pulumi_gcp/dataplex/glossary_iam_policy.py +2 -0
  171. pulumi_gcp/dataplex/lake_iam_binding.py +2 -0
  172. pulumi_gcp/dataplex/lake_iam_member.py +2 -0
  173. pulumi_gcp/dataplex/lake_iam_policy.py +2 -0
  174. pulumi_gcp/dataplex/task_iam_binding.py +2 -0
  175. pulumi_gcp/dataplex/task_iam_member.py +2 -0
  176. pulumi_gcp/dataplex/task_iam_policy.py +2 -0
  177. pulumi_gcp/dataplex/zone_iam_binding.py +2 -0
  178. pulumi_gcp/dataplex/zone_iam_member.py +2 -0
  179. pulumi_gcp/dataplex/zone_iam_policy.py +2 -0
  180. pulumi_gcp/dataproc/autoscaling_policy_iam_binding.py +2 -0
  181. pulumi_gcp/dataproc/autoscaling_policy_iam_member.py +2 -0
  182. pulumi_gcp/dataproc/autoscaling_policy_iam_policy.py +2 -0
  183. pulumi_gcp/dataproc/metastore_database_iam_binding.py +2 -0
  184. pulumi_gcp/dataproc/metastore_database_iam_member.py +2 -0
  185. pulumi_gcp/dataproc/metastore_database_iam_policy.py +2 -0
  186. pulumi_gcp/dataproc/metastore_federation_iam_binding.py +2 -0
  187. pulumi_gcp/dataproc/metastore_federation_iam_member.py +2 -0
  188. pulumi_gcp/dataproc/metastore_federation_iam_policy.py +2 -0
  189. pulumi_gcp/dataproc/metastore_service_iam_binding.py +2 -0
  190. pulumi_gcp/dataproc/metastore_service_iam_member.py +2 -0
  191. pulumi_gcp/dataproc/metastore_service_iam_policy.py +2 -0
  192. pulumi_gcp/dataproc/metastore_table_iam_binding.py +2 -0
  193. pulumi_gcp/dataproc/metastore_table_iam_member.py +2 -0
  194. pulumi_gcp/dataproc/metastore_table_iam_policy.py +2 -0
  195. pulumi_gcp/diagflow/__init__.py +2 -0
  196. pulumi_gcp/diagflow/_inputs.py +2829 -0
  197. pulumi_gcp/diagflow/conversation_profile.py +959 -0
  198. pulumi_gcp/diagflow/cx_playbook.py +967 -0
  199. pulumi_gcp/diagflow/outputs.py +2330 -0
  200. pulumi_gcp/dns/dns_managed_zone_iam_binding.py +2 -0
  201. pulumi_gcp/dns/dns_managed_zone_iam_member.py +2 -0
  202. pulumi_gcp/dns/dns_managed_zone_iam_policy.py +2 -0
  203. pulumi_gcp/endpoints/service_iam_binding.py +2 -0
  204. pulumi_gcp/endpoints/service_iam_member.py +2 -0
  205. pulumi_gcp/endpoints/service_iam_policy.py +2 -0
  206. pulumi_gcp/gemini/repository_group_iam_binding.py +2 -0
  207. pulumi_gcp/gemini/repository_group_iam_member.py +2 -0
  208. pulumi_gcp/gemini/repository_group_iam_policy.py +2 -0
  209. pulumi_gcp/gkebackup/backup_plan_iam_binding.py +2 -0
  210. pulumi_gcp/gkebackup/backup_plan_iam_member.py +2 -0
  211. pulumi_gcp/gkebackup/backup_plan_iam_policy.py +2 -0
  212. pulumi_gcp/gkebackup/restore_plan_iam_binding.py +2 -0
  213. pulumi_gcp/gkebackup/restore_plan_iam_member.py +2 -0
  214. pulumi_gcp/gkebackup/restore_plan_iam_policy.py +2 -0
  215. pulumi_gcp/gkehub/feature_iam_binding.py +2 -0
  216. pulumi_gcp/gkehub/feature_iam_member.py +2 -0
  217. pulumi_gcp/gkehub/feature_iam_policy.py +2 -0
  218. pulumi_gcp/gkehub/membership_iam_binding.py +2 -0
  219. pulumi_gcp/gkehub/membership_iam_member.py +2 -0
  220. pulumi_gcp/gkehub/membership_iam_policy.py +2 -0
  221. pulumi_gcp/gkehub/scope_iam_binding.py +2 -0
  222. pulumi_gcp/gkehub/scope_iam_member.py +2 -0
  223. pulumi_gcp/gkehub/scope_iam_policy.py +2 -0
  224. pulumi_gcp/gkeonprem/vmware_admin_cluster.py +24 -3
  225. pulumi_gcp/healthcare/consent_store_iam_binding.py +2 -0
  226. pulumi_gcp/healthcare/consent_store_iam_member.py +2 -0
  227. pulumi_gcp/healthcare/consent_store_iam_policy.py +2 -0
  228. pulumi_gcp/iam/workforce_pool_iam_binding.py +2 -0
  229. pulumi_gcp/iam/workforce_pool_iam_member.py +2 -0
  230. pulumi_gcp/iam/workforce_pool_iam_policy.py +2 -0
  231. pulumi_gcp/iap/app_engine_service_iam_binding.py +2 -0
  232. pulumi_gcp/iap/app_engine_service_iam_member.py +2 -0
  233. pulumi_gcp/iap/app_engine_service_iam_policy.py +2 -0
  234. pulumi_gcp/iap/app_engine_version_iam_binding.py +2 -0
  235. pulumi_gcp/iap/app_engine_version_iam_member.py +2 -0
  236. pulumi_gcp/iap/app_engine_version_iam_policy.py +2 -0
  237. pulumi_gcp/iap/tunnel_dest_group_iam_binding.py +2 -0
  238. pulumi_gcp/iap/tunnel_dest_group_iam_member.py +2 -0
  239. pulumi_gcp/iap/tunnel_dest_group_iam_policy.py +2 -0
  240. pulumi_gcp/iap/tunnel_iam_binding.py +2 -0
  241. pulumi_gcp/iap/tunnel_iam_member.py +2 -0
  242. pulumi_gcp/iap/tunnel_iam_policy.py +2 -0
  243. pulumi_gcp/iap/tunnel_instance_iam_binding.py +2 -0
  244. pulumi_gcp/iap/tunnel_instance_iam_member.py +2 -0
  245. pulumi_gcp/iap/tunnel_instance_iam_policy.py +2 -0
  246. pulumi_gcp/iap/web_backend_service_iam_binding.py +2 -0
  247. pulumi_gcp/iap/web_backend_service_iam_member.py +2 -0
  248. pulumi_gcp/iap/web_backend_service_iam_policy.py +2 -0
  249. pulumi_gcp/iap/web_cloud_run_service_iam_binding.py +2 -0
  250. pulumi_gcp/iap/web_cloud_run_service_iam_member.py +2 -0
  251. pulumi_gcp/iap/web_cloud_run_service_iam_policy.py +2 -0
  252. pulumi_gcp/iap/web_iam_binding.py +2 -0
  253. pulumi_gcp/iap/web_iam_member.py +2 -0
  254. pulumi_gcp/iap/web_iam_policy.py +2 -0
  255. pulumi_gcp/iap/web_region_backend_service_iam_binding.py +2 -0
  256. pulumi_gcp/iap/web_region_backend_service_iam_member.py +2 -0
  257. pulumi_gcp/iap/web_region_backend_service_iam_policy.py +2 -0
  258. pulumi_gcp/iap/web_type_app_enging_iam_binding.py +2 -0
  259. pulumi_gcp/iap/web_type_app_enging_iam_member.py +2 -0
  260. pulumi_gcp/iap/web_type_app_enging_iam_policy.py +2 -0
  261. pulumi_gcp/iap/web_type_compute_iam_binding.py +2 -0
  262. pulumi_gcp/iap/web_type_compute_iam_member.py +2 -0
  263. pulumi_gcp/iap/web_type_compute_iam_policy.py +2 -0
  264. pulumi_gcp/kms/crypto_key.py +7 -0
  265. pulumi_gcp/kms/ekm_connection_iam_binding.py +2 -0
  266. pulumi_gcp/kms/ekm_connection_iam_member.py +2 -0
  267. pulumi_gcp/kms/ekm_connection_iam_policy.py +2 -0
  268. pulumi_gcp/kms/outputs.py +2 -0
  269. pulumi_gcp/logging/log_view_iam_binding.py +2 -0
  270. pulumi_gcp/logging/log_view_iam_member.py +2 -0
  271. pulumi_gcp/logging/log_view_iam_policy.py +2 -0
  272. pulumi_gcp/memorystore/get_instance.py +12 -1
  273. pulumi_gcp/memorystore/instance.py +70 -0
  274. pulumi_gcp/monitoring/_inputs.py +3 -3
  275. pulumi_gcp/monitoring/outputs.py +2 -2
  276. pulumi_gcp/networkmanagement/vpc_flow_logs_config.py +213 -168
  277. pulumi_gcp/notebooks/instance.py +18 -18
  278. pulumi_gcp/notebooks/instance_iam_binding.py +2 -0
  279. pulumi_gcp/notebooks/instance_iam_member.py +2 -0
  280. pulumi_gcp/notebooks/instance_iam_policy.py +2 -0
  281. pulumi_gcp/notebooks/runtime_iam_binding.py +2 -0
  282. pulumi_gcp/notebooks/runtime_iam_member.py +2 -0
  283. pulumi_gcp/notebooks/runtime_iam_policy.py +2 -0
  284. pulumi_gcp/organizations/folder.py +56 -0
  285. pulumi_gcp/organizations/get_folder.py +29 -1
  286. pulumi_gcp/projects/api_key.py +88 -1
  287. pulumi_gcp/provider.py +0 -40
  288. pulumi_gcp/pubsub/schema_iam_binding.py +2 -0
  289. pulumi_gcp/pubsub/schema_iam_member.py +2 -0
  290. pulumi_gcp/pubsub/schema_iam_policy.py +2 -0
  291. pulumi_gcp/pubsub/subscription.py +84 -0
  292. pulumi_gcp/pubsub/topic.py +80 -0
  293. pulumi_gcp/pubsub/topic_iam_binding.py +2 -0
  294. pulumi_gcp/pubsub/topic_iam_member.py +2 -0
  295. pulumi_gcp/pubsub/topic_iam_policy.py +2 -0
  296. pulumi_gcp/pulumi-plugin.json +1 -1
  297. pulumi_gcp/secretmanager/regional_secret_iam_binding.py +2 -0
  298. pulumi_gcp/secretmanager/regional_secret_iam_member.py +2 -0
  299. pulumi_gcp/secretmanager/regional_secret_iam_policy.py +2 -0
  300. pulumi_gcp/secretmanager/secret_iam_binding.py +2 -0
  301. pulumi_gcp/secretmanager/secret_iam_member.py +2 -0
  302. pulumi_gcp/secretmanager/secret_iam_policy.py +2 -0
  303. pulumi_gcp/secretmanager/secret_version.py +1 -48
  304. pulumi_gcp/securesourcemanager/repository_iam_binding.py +2 -0
  305. pulumi_gcp/securesourcemanager/repository_iam_member.py +2 -0
  306. pulumi_gcp/securesourcemanager/repository_iam_policy.py +2 -0
  307. pulumi_gcp/securitycenter/instance_iam_binding.py +18 -4
  308. pulumi_gcp/securitycenter/instance_iam_member.py +18 -4
  309. pulumi_gcp/securitycenter/instance_iam_policy.py +18 -4
  310. pulumi_gcp/securitycenter/v2_organization_source_iam_binding.py +2 -0
  311. pulumi_gcp/securitycenter/v2_organization_source_iam_member.py +2 -0
  312. pulumi_gcp/securitycenter/v2_organization_source_iam_policy.py +2 -0
  313. pulumi_gcp/servicedirectory/namespace_iam_binding.py +2 -0
  314. pulumi_gcp/servicedirectory/namespace_iam_member.py +2 -0
  315. pulumi_gcp/servicedirectory/namespace_iam_policy.py +2 -0
  316. pulumi_gcp/servicedirectory/service_iam_binding.py +2 -0
  317. pulumi_gcp/servicedirectory/service_iam_member.py +2 -0
  318. pulumi_gcp/servicedirectory/service_iam_policy.py +2 -0
  319. pulumi_gcp/sourcerepo/repository_iam_binding.py +2 -0
  320. pulumi_gcp/sourcerepo/repository_iam_member.py +2 -0
  321. pulumi_gcp/sourcerepo/repository_iam_policy.py +2 -0
  322. pulumi_gcp/sql/_inputs.py +82 -4
  323. pulumi_gcp/sql/database_instance.py +108 -7
  324. pulumi_gcp/sql/get_database_instance.py +12 -1
  325. pulumi_gcp/sql/outputs.py +154 -7
  326. pulumi_gcp/storage/_inputs.py +104 -12
  327. pulumi_gcp/storage/outputs.py +84 -7
  328. pulumi_gcp/tags/tag_key_iam_binding.py +2 -0
  329. pulumi_gcp/tags/tag_key_iam_member.py +2 -0
  330. pulumi_gcp/tags/tag_key_iam_policy.py +2 -0
  331. pulumi_gcp/tags/tag_value_iam_binding.py +2 -0
  332. pulumi_gcp/tags/tag_value_iam_member.py +2 -0
  333. pulumi_gcp/tags/tag_value_iam_policy.py +2 -0
  334. pulumi_gcp/tpu/get_tensorflow_versions.py +10 -0
  335. pulumi_gcp/vertex/__init__.py +1 -0
  336. pulumi_gcp/vertex/_inputs.py +122 -0
  337. pulumi_gcp/vertex/ai_index.py +21 -7
  338. pulumi_gcp/vertex/ai_rag_engine_config.py +354 -0
  339. pulumi_gcp/vertex/outputs.py +69 -0
  340. {pulumi_gcp-8.41.0a1755297349.dist-info → pulumi_gcp-8.42.0a1756095712.dist-info}/METADATA +1 -1
  341. {pulumi_gcp-8.41.0a1755297349.dist-info → pulumi_gcp-8.42.0a1756095712.dist-info}/RECORD +343 -335
  342. {pulumi_gcp-8.41.0a1755297349.dist-info → pulumi_gcp-8.42.0a1756095712.dist-info}/WHEEL +0 -0
  343. {pulumi_gcp-8.41.0a1755297349.dist-info → pulumi_gcp-8.42.0a1756095712.dist-info}/top_level.txt +0 -0
pulumi_gcp/container/outputs.py

@@ -154,6 +154,7 @@ __all__ = [
  'ClusterNetworkPolicy',
  'ClusterNodeConfig',
  'ClusterNodeConfigAdvancedMachineFeatures',
+ 'ClusterNodeConfigBootDisk',
  'ClusterNodeConfigConfidentialNodes',
  'ClusterNodeConfigContainerdConfig',
  'ClusterNodeConfigContainerdConfigPrivateRegistryAccessConfig',
@@ -170,6 +171,9 @@ __all__ = [
  'ClusterNodeConfigGvnic',
  'ClusterNodeConfigHostMaintenancePolicy',
  'ClusterNodeConfigKubeletConfig',
+ 'ClusterNodeConfigKubeletConfigEvictionMinimumReclaim',
+ 'ClusterNodeConfigKubeletConfigEvictionSoft',
+ 'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod',
  'ClusterNodeConfigLinuxNodeConfig',
  'ClusterNodeConfigLinuxNodeConfigHugepagesConfig',
  'ClusterNodeConfigLocalNvmeSsdBlockConfig',
@@ -203,6 +207,7 @@ __all__ = [
  'ClusterNodePoolNetworkConfigPodCidrOverprovisionConfig',
  'ClusterNodePoolNodeConfig',
  'ClusterNodePoolNodeConfigAdvancedMachineFeatures',
+ 'ClusterNodePoolNodeConfigBootDisk',
  'ClusterNodePoolNodeConfigConfidentialNodes',
  'ClusterNodePoolNodeConfigContainerdConfig',
  'ClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig',
@@ -219,6 +224,9 @@ __all__ = [
  'ClusterNodePoolNodeConfigGvnic',
  'ClusterNodePoolNodeConfigHostMaintenancePolicy',
  'ClusterNodePoolNodeConfigKubeletConfig',
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim',
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoft',
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod',
  'ClusterNodePoolNodeConfigLinuxNodeConfig',
  'ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig',
  'ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfig',
@@ -267,6 +275,7 @@ __all__ = [
  'NodePoolNetworkConfigPodCidrOverprovisionConfig',
  'NodePoolNodeConfig',
  'NodePoolNodeConfigAdvancedMachineFeatures',
+ 'NodePoolNodeConfigBootDisk',
  'NodePoolNodeConfigConfidentialNodes',
  'NodePoolNodeConfigContainerdConfig',
  'NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig',
@@ -283,6 +292,9 @@ __all__ = [
  'NodePoolNodeConfigGvnic',
  'NodePoolNodeConfigHostMaintenancePolicy',
  'NodePoolNodeConfigKubeletConfig',
+ 'NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim',
+ 'NodePoolNodeConfigKubeletConfigEvictionSoft',
+ 'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod',
  'NodePoolNodeConfigLinuxNodeConfig',
  'NodePoolNodeConfigLinuxNodeConfigHugepagesConfig',
  'NodePoolNodeConfigLocalNvmeSsdBlockConfig',
@@ -369,6 +381,7 @@ __all__ = [
  'GetClusterNetworkPolicyResult',
  'GetClusterNodeConfigResult',
  'GetClusterNodeConfigAdvancedMachineFeatureResult',
+ 'GetClusterNodeConfigBootDiskResult',
  'GetClusterNodeConfigConfidentialNodeResult',
  'GetClusterNodeConfigContainerdConfigResult',
  'GetClusterNodeConfigContainerdConfigPrivateRegistryAccessConfigResult',
@@ -385,6 +398,9 @@ __all__ = [
  'GetClusterNodeConfigGvnicResult',
  'GetClusterNodeConfigHostMaintenancePolicyResult',
  'GetClusterNodeConfigKubeletConfigResult',
+ 'GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult',
+ 'GetClusterNodeConfigKubeletConfigEvictionSoftResult',
+ 'GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult',
  'GetClusterNodeConfigLinuxNodeConfigResult',
  'GetClusterNodeConfigLinuxNodeConfigHugepagesConfigResult',
  'GetClusterNodeConfigLocalNvmeSsdBlockConfigResult',
@@ -418,6 +434,7 @@ __all__ = [
  'GetClusterNodePoolNetworkConfigPodCidrOverprovisionConfigResult',
  'GetClusterNodePoolNodeConfigResult',
  'GetClusterNodePoolNodeConfigAdvancedMachineFeatureResult',
+ 'GetClusterNodePoolNodeConfigBootDiskResult',
  'GetClusterNodePoolNodeConfigConfidentialNodeResult',
  'GetClusterNodePoolNodeConfigContainerdConfigResult',
  'GetClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigResult',
@@ -434,6 +451,9 @@ __all__ = [
  'GetClusterNodePoolNodeConfigGvnicResult',
  'GetClusterNodePoolNodeConfigHostMaintenancePolicyResult',
  'GetClusterNodePoolNodeConfigKubeletConfigResult',
+ 'GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult',
+ 'GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult',
+ 'GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult',
  'GetClusterNodePoolNodeConfigLinuxNodeConfigResult',
  'GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigResult',
  'GetClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigResult',
@@ -7248,6 +7268,8 @@ class ClusterNodeConfig(dict):
  suggest = None
  if key == "advancedMachineFeatures":
  suggest = "advanced_machine_features"
+ elif key == "bootDisk":
+ suggest = "boot_disk"
  elif key == "bootDiskKmsKey":
  suggest = "boot_disk_kms_key"
  elif key == "confidentialNodes":
@@ -7336,6 +7358,7 @@ class ClusterNodeConfig(dict):

  def __init__(__self__, *,
  advanced_machine_features: Optional['outputs.ClusterNodeConfigAdvancedMachineFeatures'] = None,
+ boot_disk: Optional['outputs.ClusterNodeConfigBootDisk'] = None,
  boot_disk_kms_key: Optional[_builtins.str] = None,
  confidential_nodes: Optional['outputs.ClusterNodeConfigConfidentialNodes'] = None,
  containerd_config: Optional['outputs.ClusterNodeConfigContainerdConfig'] = None,
@@ -7383,13 +7406,15 @@ class ClusterNodeConfig(dict):
  """
  :param 'ClusterNodeConfigAdvancedMachineFeaturesArgs' advanced_machine_features: Specifies options for controlling
  advanced machine features. Structure is documented below.
+ :param 'ClusterNodeConfigBootDiskArgs' boot_disk: Configuration of the node pool boot disk. Structure is documented below
  :param _builtins.str boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
  :param 'ClusterNodeConfigConfidentialNodesArgs' confidential_nodes: Configuration for Confidential Nodes feature. Structure is documented below.
  :param 'ClusterNodeConfigContainerdConfigArgs' containerd_config: Parameters to customize containerd runtime. Structure is documented below.
  :param _builtins.int disk_size_gb: Size of the disk attached to each node, specified
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
+ Prefer configuring `boot_disk`.
  :param _builtins.str disk_type: Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  :param Sequence['ClusterNodeConfigEffectiveTaintArgs'] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
  :param _builtins.bool enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
  :param 'ClusterNodeConfigEphemeralStorageConfigArgs' ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -7469,7 +7494,7 @@ class ClusterNodeConfig(dict):
  :param _builtins.str service_account: The service account to be used by the Node VMs.
  If not specified, the "default" service account is used.
  :param 'ClusterNodeConfigShieldedInstanceConfigArgs' shielded_instance_config: Shielded Instance options. Structure is documented below.
- :param 'ClusterNodeConfigSoleTenantConfigArgs' sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
+ :param 'ClusterNodeConfigSoleTenantConfigArgs' sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
  :param _builtins.bool spot: A boolean that represents whether the underlying node VMs are spot.
  See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms)
  for more information. Defaults to false.
@@ -7490,6 +7515,8 @@ class ClusterNodeConfig(dict):
  """
  if advanced_machine_features is not None:
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
+ if boot_disk is not None:
+ pulumi.set(__self__, "boot_disk", boot_disk)
  if boot_disk_kms_key is not None:
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
  if confidential_nodes is not None:
@@ -7588,6 +7615,14 @@ class ClusterNodeConfig(dict):
  """
  return pulumi.get(self, "advanced_machine_features")

+ @_builtins.property
+ @pulumi.getter(name="bootDisk")
+ def boot_disk(self) -> Optional['outputs.ClusterNodeConfigBootDisk']:
+ """
+ Configuration of the node pool boot disk. Structure is documented below
+ """
+ return pulumi.get(self, "boot_disk")
+
  @_builtins.property
  @pulumi.getter(name="bootDiskKmsKey")
  def boot_disk_kms_key(self) -> Optional[_builtins.str]:
@@ -7617,7 +7652,8 @@ class ClusterNodeConfig(dict):
  def disk_size_gb(self) -> Optional[_builtins.int]:
  """
  Size of the disk attached to each node, specified
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
+ Prefer configuring `boot_disk`.
  """
  return pulumi.get(self, "disk_size_gb")

@@ -7626,7 +7662,7 @@ class ClusterNodeConfig(dict):
  def disk_type(self) -> Optional[_builtins.str]:
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  """
  return pulumi.get(self, "disk_type")

@@ -7937,7 +7973,7 @@ class ClusterNodeConfig(dict):
  @pulumi.getter(name="soleTenantConfig")
  def sole_tenant_config(self) -> Optional['outputs.ClusterNodeConfigSoleTenantConfig']:
  """
- Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
+ Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
  """
  return pulumi.get(self, "sole_tenant_config")

@@ -8064,6 +8100,88 @@ class ClusterNodeConfigAdvancedMachineFeatures(dict):
  return pulumi.get(self, "performance_monitoring_unit")


+ @pulumi.output_type
+ class ClusterNodeConfigBootDisk(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "diskType":
+ suggest = "disk_type"
+ elif key == "provisionedIops":
+ suggest = "provisioned_iops"
+ elif key == "provisionedThroughput":
+ suggest = "provisioned_throughput"
+ elif key == "sizeGb":
+ suggest = "size_gb"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigBootDisk. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ ClusterNodeConfigBootDisk.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ ClusterNodeConfigBootDisk.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ disk_type: Optional[_builtins.str] = None,
+ provisioned_iops: Optional[_builtins.int] = None,
+ provisioned_throughput: Optional[_builtins.int] = None,
+ size_gb: Optional[_builtins.int] = None):
+ """
+ :param _builtins.str disk_type: Type of the disk attached to each node
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ :param _builtins.int provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
+ :param _builtins.int provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
+ :param _builtins.int size_gb: Size of the disk attached to each node, specified
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
+ """
+ if disk_type is not None:
+ pulumi.set(__self__, "disk_type", disk_type)
+ if provisioned_iops is not None:
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
+ if provisioned_throughput is not None:
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
+ if size_gb is not None:
+ pulumi.set(__self__, "size_gb", size_gb)
+
+ @_builtins.property
+ @pulumi.getter(name="diskType")
+ def disk_type(self) -> Optional[_builtins.str]:
+ """
+ Type of the disk attached to each node
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ """
+ return pulumi.get(self, "disk_type")
+
+ @_builtins.property
+ @pulumi.getter(name="provisionedIops")
+ def provisioned_iops(self) -> Optional[_builtins.int]:
+ """
+ Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
+ """
+ return pulumi.get(self, "provisioned_iops")
+
+ @_builtins.property
+ @pulumi.getter(name="provisionedThroughput")
+ def provisioned_throughput(self) -> Optional[_builtins.int]:
+ """
+ Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
+ """
+ return pulumi.get(self, "provisioned_throughput")
+
+ @_builtins.property
+ @pulumi.getter(name="sizeGb")
+ def size_gb(self) -> Optional[_builtins.int]:
+ """
+ Size of the disk attached to each node, specified
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
+ """
+ return pulumi.get(self, "size_gb")
+
+
  @pulumi.output_type
  class ClusterNodeConfigConfidentialNodes(dict):
  @staticmethod
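
To show where the new `ClusterNodeConfigBootDisk` type surfaces in user code, here is a hedged sketch of a cluster whose `node_config` sets the new `boot_disk` block (resource name and values are illustrative, not taken from this diff; per the docstrings above, `provisioned_iops` and `provisioned_throughput` are only valid with 'hyperdisk-balanced'):

import pulumi_gcp as gcp

cluster = gcp.container.Cluster(
    "example-cluster",                      # hypothetical resource name
    location="us-central1",
    initial_node_count=1,
    node_config={
        "machine_type": "e2-standard-4",
        # Prefer boot_disk over the legacy disk_size_gb/disk_type fields;
        # if both are set, the docs above require the values to match.
        "boot_disk": {
            "disk_type": "hyperdisk-balanced",
            "size_gb": 100,
            "provisioned_iops": 3000,       # hyperdisk-balanced only
            "provisioned_throughput": 140,  # hyperdisk-balanced only
        },
    },
)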
@@ -8697,6 +8815,14 @@ class ClusterNodeConfigKubeletConfig(dict):
  suggest = "cpu_cfs_quota_period"
  elif key == "cpuManagerPolicy":
  suggest = "cpu_manager_policy"
+ elif key == "evictionMaxPodGracePeriodSeconds":
+ suggest = "eviction_max_pod_grace_period_seconds"
+ elif key == "evictionMinimumReclaim":
+ suggest = "eviction_minimum_reclaim"
+ elif key == "evictionSoft":
+ suggest = "eviction_soft"
+ elif key == "evictionSoftGracePeriod":
+ suggest = "eviction_soft_grace_period"
  elif key == "imageGcHighThresholdPercent":
  suggest = "image_gc_high_threshold_percent"
  elif key == "imageGcLowThresholdPercent":
@@ -8707,8 +8833,12 @@ class ClusterNodeConfigKubeletConfig(dict):
  suggest = "image_minimum_gc_age"
  elif key == "insecureKubeletReadonlyPortEnabled":
  suggest = "insecure_kubelet_readonly_port_enabled"
+ elif key == "maxParallelImagePulls":
+ suggest = "max_parallel_image_pulls"
  elif key == "podPidsLimit":
  suggest = "pod_pids_limit"
+ elif key == "singleProcessOomKill":
+ suggest = "single_process_oom_kill"

  if suggest:
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfig. Access the value via the '{suggest}' property getter instead.")
@@ -8728,12 +8858,18 @@ class ClusterNodeConfigKubeletConfig(dict):
  cpu_cfs_quota: Optional[_builtins.bool] = None,
  cpu_cfs_quota_period: Optional[_builtins.str] = None,
  cpu_manager_policy: Optional[_builtins.str] = None,
+ eviction_max_pod_grace_period_seconds: Optional[_builtins.int] = None,
+ eviction_minimum_reclaim: Optional['outputs.ClusterNodeConfigKubeletConfigEvictionMinimumReclaim'] = None,
+ eviction_soft: Optional['outputs.ClusterNodeConfigKubeletConfigEvictionSoft'] = None,
+ eviction_soft_grace_period: Optional['outputs.ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod'] = None,
  image_gc_high_threshold_percent: Optional[_builtins.int] = None,
  image_gc_low_threshold_percent: Optional[_builtins.int] = None,
  image_maximum_gc_age: Optional[_builtins.str] = None,
  image_minimum_gc_age: Optional[_builtins.str] = None,
  insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
- pod_pids_limit: Optional[_builtins.int] = None):
+ max_parallel_image_pulls: Optional[_builtins.int] = None,
+ pod_pids_limit: Optional[_builtins.int] = None,
+ single_process_oom_kill: Optional[_builtins.bool] = None):
  """
  :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
  :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -8753,12 +8889,18 @@ class ClusterNodeConfigKubeletConfig(dict):
  One of `"none"` or `"static"`. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
  Prior to the 6.4.0 this field was marked as required. The workaround for the required field
  is setting the empty string `""`, which will function identically to not setting this field.
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
+ :param 'ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs' eviction_minimum_reclaim: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
+ :param 'ClusterNodeConfigKubeletConfigEvictionSoftArgs' eviction_soft: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
+ :param 'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs' eviction_soft_grace_period: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive.
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`, and `"2h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration.
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
+ :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
  """
  if allowed_unsafe_sysctls is not None:
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -8772,6 +8914,14 @@ class ClusterNodeConfigKubeletConfig(dict):
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
  if cpu_manager_policy is not None:
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
+ if eviction_max_pod_grace_period_seconds is not None:
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
+ if eviction_minimum_reclaim is not None:
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
+ if eviction_soft is not None:
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
+ if eviction_soft_grace_period is not None:
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
  if image_gc_high_threshold_percent is not None:
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
  if image_gc_low_threshold_percent is not None:
@@ -8782,8 +8932,12 @@ class ClusterNodeConfigKubeletConfig(dict):
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
  if insecure_kubelet_readonly_port_enabled is not None:
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
+ if max_parallel_image_pulls is not None:
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
  if pod_pids_limit is not None:
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
+ if single_process_oom_kill is not None:
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)

  @_builtins.property
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -8845,6 +8999,38 @@ class ClusterNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "cpu_manager_policy")

+ @_builtins.property
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[_builtins.int]:
+ """
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
+ """
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionMinimumReclaim")
+ def eviction_minimum_reclaim(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigEvictionMinimumReclaim']:
+ """
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
+ """
+ return pulumi.get(self, "eviction_minimum_reclaim")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionSoft")
+ def eviction_soft(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigEvictionSoft']:
+ """
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
+ """
+ return pulumi.get(self, "eviction_soft")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionSoftGracePeriod")
+ def eviction_soft_grace_period(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod']:
+ """
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
+ """
+ return pulumi.get(self, "eviction_soft_grace_period")
+
  @_builtins.property
  @pulumi.getter(name="imageGcHighThresholdPercent")
  def image_gc_high_threshold_percent(self) -> Optional[_builtins.int]:
@@ -8885,6 +9071,14 @@ class ClusterNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")

+ @_builtins.property
+ @pulumi.getter(name="maxParallelImagePulls")
+ def max_parallel_image_pulls(self) -> Optional[_builtins.int]:
+ """
+ Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
+ """
+ return pulumi.get(self, "max_parallel_image_pulls")
+
  @_builtins.property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> Optional[_builtins.int]:
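
Pulling the new kubelet fields together, a minimal sketch of a node pool that sets the soft-eviction knobs added here (resource names and threshold values are hypothetical, chosen to sit inside the documented bounds: reclaim percentages at most "10%", `max_parallel_image_pulls` between 2 and 5, pod grace period at most 300 seconds):

import pulumi_gcp as gcp

pool = gcp.container.NodePool(
    "example-pool",                 # hypothetical resource name
    cluster="my-cluster",           # assumed existing GKE cluster
    location="us-central1",
    node_count=1,
    node_config={
        "kubelet_config": {
            "eviction_soft": {
                "memory_available": "200Mi",   # >= the 100Mi hard-eviction default
                "nodefs_available": "20%",     # within the 10%-50% range
            },
            # Each soft threshold must have a corresponding grace period.
            "eviction_soft_grace_period": {
                "memory_available": "30s",
                "nodefs_available": "30s",
            },
            "eviction_minimum_reclaim": {
                "memory_available": "5%",      # no more than 10%
            },
            "eviction_max_pod_grace_period_seconds": 60,
            "max_parallel_image_pulls": 3,
            "single_process_oom_kill": True,
        },
    },
)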
@@ -8893,6 +9087,332 @@ class ClusterNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "pod_pids_limit")

+ @_builtins.property
+ @pulumi.getter(name="singleProcessOomKill")
+ def single_process_oom_kill(self) -> Optional[_builtins.bool]:
+ """
+ Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
+ """
+ return pulumi.get(self, "single_process_oom_kill")
+
+
+ @pulumi.output_type
+ class ClusterNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "imagefsAvailable":
+ suggest = "imagefs_available"
+ elif key == "imagefsInodesFree":
+ suggest = "imagefs_inodes_free"
+ elif key == "memoryAvailable":
+ suggest = "memory_available"
+ elif key == "nodefsAvailable":
+ suggest = "nodefs_available"
+ elif key == "nodefsInodesFree":
+ suggest = "nodefs_inodes_free"
+ elif key == "pidAvailable":
+ suggest = "pid_available"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfigEvictionMinimumReclaim. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ ClusterNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ ClusterNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ imagefs_available: Optional[_builtins.str] = None,
+ imagefs_inodes_free: Optional[_builtins.str] = None,
+ memory_available: Optional[_builtins.str] = None,
+ nodefs_available: Optional[_builtins.str] = None,
+ nodefs_inodes_free: Optional[_builtins.str] = None,
+ pid_available: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ if imagefs_available is not None:
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ if imagefs_inodes_free is not None:
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ if memory_available is not None:
+ pulumi.set(__self__, "memory_available", memory_available)
+ if nodefs_available is not None:
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ if nodefs_inodes_free is not None:
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ if pid_available is not None:
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ return pulumi.get(self, "pid_available")
+
+
+ @pulumi.output_type
+ class ClusterNodeConfigKubeletConfigEvictionSoft(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "imagefsAvailable":
+ suggest = "imagefs_available"
+ elif key == "imagefsInodesFree":
+ suggest = "imagefs_inodes_free"
+ elif key == "memoryAvailable":
+ suggest = "memory_available"
+ elif key == "nodefsAvailable":
+ suggest = "nodefs_available"
+ elif key == "nodefsInodesFree":
+ suggest = "nodefs_inodes_free"
+ elif key == "pidAvailable":
+ suggest = "pid_available"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfigEvictionSoft. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ ClusterNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ ClusterNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ imagefs_available: Optional[_builtins.str] = None,
+ imagefs_inodes_free: Optional[_builtins.str] = None,
+ memory_available: Optional[_builtins.str] = None,
+ nodefs_available: Optional[_builtins.str] = None,
+ nodefs_inodes_free: Optional[_builtins.str] = None,
+ pid_available: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
+ """
+ if imagefs_available is not None:
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ if imagefs_inodes_free is not None:
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ if memory_available is not None:
+ pulumi.set(__self__, "memory_available", memory_available)
+ if nodefs_available is not None:
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ if nodefs_inodes_free is not None:
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ if pid_available is not None:
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> Optional[_builtins.str]:
+ """
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
9291
+ """
9292
+ return pulumi.get(self, "nodefs_available")
9293
+
9294
+ @_builtins.property
9295
+ @pulumi.getter(name="nodefsInodesFree")
9296
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
9297
+ """
9298
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
9299
+ """
9300
+ return pulumi.get(self, "nodefs_inodes_free")
9301
+
9302
+ @_builtins.property
9303
+ @pulumi.getter(name="pidAvailable")
9304
+ def pid_available(self) -> Optional[_builtins.str]:
9305
+ """
9306
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
9307
+ """
9308
+ return pulumi.get(self, "pid_available")
9309
+
9310
+
9311
+ @pulumi.output_type
9312
+ class ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
9313
+ @staticmethod
9314
+ def __key_warning(key: str):
9315
+ suggest = None
9316
+ if key == "imagefsAvailable":
9317
+ suggest = "imagefs_available"
9318
+ elif key == "imagefsInodesFree":
9319
+ suggest = "imagefs_inodes_free"
9320
+ elif key == "memoryAvailable":
9321
+ suggest = "memory_available"
9322
+ elif key == "nodefsAvailable":
9323
+ suggest = "nodefs_available"
9324
+ elif key == "nodefsInodesFree":
9325
+ suggest = "nodefs_inodes_free"
9326
+ elif key == "pidAvailable":
9327
+ suggest = "pid_available"
9328
+
9329
+ if suggest:
9330
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod. Access the value via the '{suggest}' property getter instead.")
9331
+
9332
+ def __getitem__(self, key: str) -> Any:
9333
+ ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
9334
+ return super().__getitem__(key)
9335
+
9336
+ def get(self, key: str, default = None) -> Any:
9337
+ ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
9338
+ return super().get(key, default)
9339
+
9340
+ def __init__(__self__, *,
9341
+ imagefs_available: Optional[_builtins.str] = None,
9342
+ imagefs_inodes_free: Optional[_builtins.str] = None,
9343
+ memory_available: Optional[_builtins.str] = None,
9344
+ nodefs_available: Optional[_builtins.str] = None,
9345
+ nodefs_inodes_free: Optional[_builtins.str] = None,
9346
+ pid_available: Optional[_builtins.str] = None):
9347
+ """
9348
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9349
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9350
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
9351
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9352
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9353
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9354
+ """
9355
+ if imagefs_available is not None:
9356
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
9357
+ if imagefs_inodes_free is not None:
9358
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
9359
+ if memory_available is not None:
9360
+ pulumi.set(__self__, "memory_available", memory_available)
9361
+ if nodefs_available is not None:
9362
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
9363
+ if nodefs_inodes_free is not None:
9364
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
9365
+ if pid_available is not None:
9366
+ pulumi.set(__self__, "pid_available", pid_available)
9367
+
9368
+ @_builtins.property
9369
+ @pulumi.getter(name="imagefsAvailable")
9370
+ def imagefs_available(self) -> Optional[_builtins.str]:
9371
+ """
9372
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9373
+ """
9374
+ return pulumi.get(self, "imagefs_available")
9375
+
9376
+ @_builtins.property
9377
+ @pulumi.getter(name="imagefsInodesFree")
9378
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
9379
+ """
9380
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9381
+ """
9382
+ return pulumi.get(self, "imagefs_inodes_free")
9383
+
9384
+ @_builtins.property
9385
+ @pulumi.getter(name="memoryAvailable")
9386
+ def memory_available(self) -> Optional[_builtins.str]:
9387
+ """
9388
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
9389
+ """
9390
+ return pulumi.get(self, "memory_available")
9391
+
9392
+ @_builtins.property
9393
+ @pulumi.getter(name="nodefsAvailable")
9394
+ def nodefs_available(self) -> Optional[_builtins.str]:
9395
+ """
9396
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9397
+ """
9398
+ return pulumi.get(self, "nodefs_available")
9399
+
9400
+ @_builtins.property
9401
+ @pulumi.getter(name="nodefsInodesFree")
9402
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
9403
+ """
9404
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9405
+ """
9406
+ return pulumi.get(self, "nodefs_inodes_free")
9407
+
9408
+ @_builtins.property
9409
+ @pulumi.getter(name="pidAvailable")
9410
+ def pid_available(self) -> Optional[_builtins.str]:
9411
+ """
9412
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9413
+ """
9414
+ return pulumi.get(self, "pid_available")
9415
+
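Usage sketch for the new eviction blocks above. This is illustrative only: it assumes the cluster-level `kubelet_config` input accepts these fields as plain dicts in this release, and the cluster name, location, and threshold values are placeholders kept inside the documented ranges (soft thresholds 10%-50%, minimum reclaim at most 10%, grace periods at most `"5m"`).

```python
import pulumi_gcp as gcp

# Illustrative sketch: names/values are placeholders; the nested dicts
# mirror the eviction_* fields added in this release.
cluster = gcp.container.Cluster(
    "eviction-tuned",
    location="us-central1-a",
    initial_node_count=1,
    node_config={
        "kubelet_config": {
            "eviction_soft": {
                "memory_available": "200Mi",   # >= 100Mi hard threshold
                "nodefs_available": "20%",     # within 10%-50%
            },
            "eviction_soft_grace_period": {
                "memory_available": "30s",     # <= "5m"
                "nodefs_available": "1m30s",
            },
            "eviction_minimum_reclaim": {
                "memory_available": "5%",      # <= "10%"
            },
            "eviction_max_pod_grace_period_seconds": 60,  # <= 300
        },
    },
)
```

Each `eviction_soft` entry should have a matching `eviction_soft_grace_period` entry, as the docstrings above require.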
 
 @pulumi.output_type
 class ClusterNodeConfigLinuxNodeConfig(dict):
@@ -8903,6 +9423,10 @@ class ClusterNodeConfigLinuxNodeConfig(dict):
             suggest = "cgroup_mode"
         elif key == "hugepagesConfig":
             suggest = "hugepages_config"
+        elif key == "transparentHugepageDefrag":
+            suggest = "transparent_hugepage_defrag"
+        elif key == "transparentHugepageEnabled":
+            suggest = "transparent_hugepage_enabled"
 
         if suggest:
             pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigLinuxNodeConfig. Access the value via the '{suggest}' property getter instead.")
@@ -8918,7 +9442,9 @@ class ClusterNodeConfigLinuxNodeConfig(dict):
     def __init__(__self__, *,
                  cgroup_mode: Optional[_builtins.str] = None,
                  hugepages_config: Optional['outputs.ClusterNodeConfigLinuxNodeConfigHugepagesConfig'] = None,
-                 sysctls: Optional[Mapping[str, _builtins.str]] = None):
+                 sysctls: Optional[Mapping[str, _builtins.str]] = None,
+                 transparent_hugepage_defrag: Optional[_builtins.str] = None,
+                 transparent_hugepage_enabled: Optional[_builtins.str] = None):
         """
         :param _builtins.str cgroup_mode: Possible cgroup modes that can be used.
                Accepted values are:
@@ -8930,6 +9456,8 @@ class ClusterNodeConfigLinuxNodeConfig(dict):
                and all pods running on the nodes. Specified as a map from the key, such as
                `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
                Note that validations happen all server side. All attributes are optional.
+        :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
+        :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
         """
         if cgroup_mode is not None:
             pulumi.set(__self__, "cgroup_mode", cgroup_mode)
@@ -8937,6 +9465,10 @@ class ClusterNodeConfigLinuxNodeConfig(dict):
             pulumi.set(__self__, "hugepages_config", hugepages_config)
         if sysctls is not None:
             pulumi.set(__self__, "sysctls", sysctls)
+        if transparent_hugepage_defrag is not None:
+            pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
+        if transparent_hugepage_enabled is not None:
+            pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
 
     @_builtins.property
     @pulumi.getter(name="cgroupMode")
@@ -8969,6 +9501,22 @@ class ClusterNodeConfigLinuxNodeConfig(dict):
         """
         return pulumi.get(self, "sysctls")
 
+    @_builtins.property
+    @pulumi.getter(name="transparentHugepageDefrag")
+    def transparent_hugepage_defrag(self) -> Optional[_builtins.str]:
+        """
+        The Linux kernel transparent hugepage defrag setting.
+        """
+        return pulumi.get(self, "transparent_hugepage_defrag")
+
+    @_builtins.property
+    @pulumi.getter(name="transparentHugepageEnabled")
+    def transparent_hugepage_enabled(self) -> Optional[_builtins.str]:
+        """
+        The Linux kernel transparent hugepage setting.
+        """
+        return pulumi.get(self, "transparent_hugepage_enabled")
+
 
 @pulumi.output_type
 class ClusterNodeConfigLinuxNodeConfigHugepagesConfig(dict):
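The two new `linux_node_config` strings pass through to the kernel's transparent-hugepage (THP) settings on each node. A minimal sketch of setting them; note that the `TRANSPARENT_HUGEPAGE_*` enum spellings below are an assumption based on the GKE API's naming style and are not defined in this diff, so check the provider docs for the accepted values.

```python
import pulumi_gcp as gcp

# Sketch only: the enum values are assumed, not taken from this diff.
cluster = gcp.container.Cluster(
    "thp-tuned",
    location="us-central1-a",
    initial_node_count=1,
    node_config={
        "linux_node_config": {
            # Assumed spellings; verify against the provider documentation.
            "transparent_hugepage_enabled": "TRANSPARENT_HUGEPAGE_ENABLED_MADVISE",
            "transparent_hugepage_defrag": "TRANSPARENT_HUGEPAGE_DEFRAG_DEFER",
        },
    },
)
```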
@@ -9281,6 +9829,8 @@ class ClusterNodeConfigSoleTenantConfig(dict):
         suggest = None
         if key == "nodeAffinities":
             suggest = "node_affinities"
+        elif key == "minNodeCpus":
+            suggest = "min_node_cpus"
 
         if suggest:
             pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigSoleTenantConfig. Access the value via the '{suggest}' property getter instead.")
@@ -9294,20 +9844,32 @@ class ClusterNodeConfigSoleTenantConfig(dict):
         return super().get(key, default)
 
     def __init__(__self__, *,
-                 node_affinities: Sequence['outputs.ClusterNodeConfigSoleTenantConfigNodeAffinity']):
+                 node_affinities: Sequence['outputs.ClusterNodeConfigSoleTenantConfigNodeAffinity'],
+                 min_node_cpus: Optional[_builtins.int] = None):
         """
-        :param Sequence['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
+        :param Sequence['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: The node affinity settings for the sole tenant node pool. Structure is documented below.
+        :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
         """
         pulumi.set(__self__, "node_affinities", node_affinities)
+        if min_node_cpus is not None:
+            pulumi.set(__self__, "min_node_cpus", min_node_cpus)
 
     @_builtins.property
     @pulumi.getter(name="nodeAffinities")
     def node_affinities(self) -> Sequence['outputs.ClusterNodeConfigSoleTenantConfigNodeAffinity']:
         """
-        .
+        The node affinity settings for the sole tenant node pool. Structure is documented below.
         """
         return pulumi.get(self, "node_affinities")
 
+    @_builtins.property
+    @pulumi.getter(name="minNodeCpus")
+    def min_node_cpus(self) -> Optional[_builtins.int]:
+        """
+        Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
+        """
+        return pulumi.get(self, "min_node_cpus")
+
 
 @pulumi.output_type
 class ClusterNodeConfigSoleTenantConfigNodeAffinity(dict):
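`min_node_cpus` opts a sole-tenant node pool into CPU overcommit, under the documented constraint of at least half the machine type's vCPU count. A sketch; the node group name is a placeholder, and the affinity structure follows the `key`/`operator`/`values` shape of `ClusterNodeConfigSoleTenantConfigNodeAffinity`.

```python
import pulumi_gcp as gcp

# Placeholder node group; n1-standard-8 has 8 vCPUs, so min_node_cpus=4
# satisfies the "at least half the machine type's CPU count" rule.
cluster = gcp.container.Cluster(
    "sole-tenant",
    location="us-central1-a",
    initial_node_count=1,
    node_config={
        "machine_type": "n1-standard-8",
        "sole_tenant_config": {
            "min_node_cpus": 4,
            "node_affinities": [{
                "key": "compute.googleapis.com/node-group-name",
                "operator": "IN",
                "values": ["my-node-group"],  # hypothetical group name
            }],
        },
    },
)
```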
@@ -10648,6 +11210,8 @@ class ClusterNodePoolNodeConfig(dict):
         suggest = None
         if key == "advancedMachineFeatures":
             suggest = "advanced_machine_features"
+        elif key == "bootDisk":
+            suggest = "boot_disk"
         elif key == "bootDiskKmsKey":
             suggest = "boot_disk_kms_key"
         elif key == "confidentialNodes":
@@ -10736,6 +11300,7 @@ class ClusterNodePoolNodeConfig(dict):
 
     def __init__(__self__, *,
                  advanced_machine_features: Optional['outputs.ClusterNodePoolNodeConfigAdvancedMachineFeatures'] = None,
+                 boot_disk: Optional['outputs.ClusterNodePoolNodeConfigBootDisk'] = None,
                  boot_disk_kms_key: Optional[_builtins.str] = None,
                  confidential_nodes: Optional['outputs.ClusterNodePoolNodeConfigConfidentialNodes'] = None,
                  containerd_config: Optional['outputs.ClusterNodePoolNodeConfigContainerdConfig'] = None,
@@ -10783,13 +11348,15 @@ class ClusterNodePoolNodeConfig(dict):
         """
         :param 'ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs' advanced_machine_features: Specifies options for controlling
                advanced machine features. Structure is documented below.
+        :param 'ClusterNodePoolNodeConfigBootDiskArgs' boot_disk: Configuration of the node pool boot disk. Structure is documented below.
         :param _builtins.str boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
         :param 'ClusterNodePoolNodeConfigConfidentialNodesArgs' confidential_nodes: Configuration for Confidential Nodes feature. Structure is documented below.
         :param 'ClusterNodePoolNodeConfigContainerdConfigArgs' containerd_config: Parameters to customize containerd runtime. Structure is documented below.
         :param _builtins.int disk_size_gb: Size of the disk attached to each node, specified
-               in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
+               in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
+               Prefer configuring `boot_disk`.
         :param _builtins.str disk_type: Type of the disk attached to each node
-               (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
+               (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
         :param Sequence['ClusterNodePoolNodeConfigEffectiveTaintArgs'] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
         :param _builtins.bool enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
         :param 'ClusterNodePoolNodeConfigEphemeralStorageConfigArgs' ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -10869,7 +11436,7 @@ class ClusterNodePoolNodeConfig(dict):
         :param _builtins.str service_account: The service account to be used by the Node VMs.
                If not specified, the "default" service account is used.
         :param 'ClusterNodePoolNodeConfigShieldedInstanceConfigArgs' shielded_instance_config: Shielded Instance options. Structure is documented below.
-        :param 'ClusterNodePoolNodeConfigSoleTenantConfigArgs' sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
+        :param 'ClusterNodePoolNodeConfigSoleTenantConfigArgs' sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
         :param _builtins.bool spot: A boolean that represents whether the underlying node VMs are spot.
                See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms)
                for more information. Defaults to false.
@@ -10890,6 +11457,8 @@ class ClusterNodePoolNodeConfig(dict):
         """
         if advanced_machine_features is not None:
             pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
+        if boot_disk is not None:
+            pulumi.set(__self__, "boot_disk", boot_disk)
         if boot_disk_kms_key is not None:
             pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
         if confidential_nodes is not None:
@@ -10988,6 +11557,14 @@ class ClusterNodePoolNodeConfig(dict):
         """
         return pulumi.get(self, "advanced_machine_features")
 
+    @_builtins.property
+    @pulumi.getter(name="bootDisk")
+    def boot_disk(self) -> Optional['outputs.ClusterNodePoolNodeConfigBootDisk']:
+        """
+        Configuration of the node pool boot disk. Structure is documented below.
+        """
+        return pulumi.get(self, "boot_disk")
+
     @_builtins.property
     @pulumi.getter(name="bootDiskKmsKey")
     def boot_disk_kms_key(self) -> Optional[_builtins.str]:
@@ -11017,7 +11594,8 @@ class ClusterNodePoolNodeConfig(dict):
     def disk_size_gb(self) -> Optional[_builtins.int]:
         """
         Size of the disk attached to each node, specified
-        in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
+        in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
+        Prefer configuring `boot_disk`.
         """
         return pulumi.get(self, "disk_size_gb")
 
@@ -11026,7 +11604,7 @@ class ClusterNodePoolNodeConfig(dict):
     def disk_type(self) -> Optional[_builtins.str]:
         """
         Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
+        (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
         """
         return pulumi.get(self, "disk_type")
 
@@ -11337,7 +11915,7 @@ class ClusterNodePoolNodeConfig(dict):
     @pulumi.getter(name="soleTenantConfig")
    def sole_tenant_config(self) -> Optional['outputs.ClusterNodePoolNodeConfigSoleTenantConfig']:
         """
-        Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
+        Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
         """
         return pulumi.get(self, "sole_tenant_config")
 
@@ -11465,59 +12043,141 @@ class ClusterNodePoolNodeConfigAdvancedMachineFeatures(dict):
 
 
 @pulumi.output_type
-class ClusterNodePoolNodeConfigConfidentialNodes(dict):
+class ClusterNodePoolNodeConfigBootDisk(dict):
     @staticmethod
     def __key_warning(key: str):
         suggest = None
-        if key == "confidentialInstanceType":
-            suggest = "confidential_instance_type"
+        if key == "diskType":
+            suggest = "disk_type"
+        elif key == "provisionedIops":
+            suggest = "provisioned_iops"
+        elif key == "provisionedThroughput":
+            suggest = "provisioned_throughput"
+        elif key == "sizeGb":
+            suggest = "size_gb"
 
         if suggest:
-            pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigConfidentialNodes. Access the value via the '{suggest}' property getter instead.")
+            pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigBootDisk. Access the value via the '{suggest}' property getter instead.")
 
     def __getitem__(self, key: str) -> Any:
-        ClusterNodePoolNodeConfigConfidentialNodes.__key_warning(key)
+        ClusterNodePoolNodeConfigBootDisk.__key_warning(key)
         return super().__getitem__(key)
 
     def get(self, key: str, default = None) -> Any:
-        ClusterNodePoolNodeConfigConfidentialNodes.__key_warning(key)
+        ClusterNodePoolNodeConfigBootDisk.__key_warning(key)
         return super().get(key, default)
 
     def __init__(__self__, *,
-                 enabled: _builtins.bool,
-                 confidential_instance_type: Optional[_builtins.str] = None):
+                 disk_type: Optional[_builtins.str] = None,
+                 provisioned_iops: Optional[_builtins.int] = None,
+                 provisioned_throughput: Optional[_builtins.int] = None,
+                 size_gb: Optional[_builtins.int] = None):
         """
-        :param _builtins.bool enabled: Enable Confidential GKE Nodes for this cluster, to
-               enforce encryption of data in-use.
-        :param _builtins.str confidential_instance_type: Defines the type of technology used
-               by the confidential node.
+        :param _builtins.str disk_type: Type of the disk attached to each node
+               (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced'. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+        :param _builtins.int provisioned_iops: Configure disk IOPS. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See the [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
+        :param _builtins.int provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See the [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
+        :param _builtins.int size_gb: Size of the disk attached to each node, specified
+               in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
         """
-        pulumi.set(__self__, "enabled", enabled)
-        if confidential_instance_type is not None:
-            pulumi.set(__self__, "confidential_instance_type", confidential_instance_type)
+        if disk_type is not None:
+            pulumi.set(__self__, "disk_type", disk_type)
+        if provisioned_iops is not None:
+            pulumi.set(__self__, "provisioned_iops", provisioned_iops)
+        if provisioned_throughput is not None:
+            pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
+        if size_gb is not None:
+            pulumi.set(__self__, "size_gb", size_gb)
 
     @_builtins.property
-    @pulumi.getter
-    def enabled(self) -> _builtins.bool:
+    @pulumi.getter(name="diskType")
+    def disk_type(self) -> Optional[_builtins.str]:
         """
-        Enable Confidential GKE Nodes for this cluster, to
-        enforce encryption of data in-use.
+        Type of the disk attached to each node
+        (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced'. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
         """
-        return pulumi.get(self, "enabled")
+        return pulumi.get(self, "disk_type")
 
     @_builtins.property
-    @pulumi.getter(name="confidentialInstanceType")
-    def confidential_instance_type(self) -> Optional[_builtins.str]:
+    @pulumi.getter(name="provisionedIops")
+    def provisioned_iops(self) -> Optional[_builtins.int]:
         """
-        Defines the type of technology used
-        by the confidential node.
+        Configure disk IOPS. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See the [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
         """
-        return pulumi.get(self, "confidential_instance_type")
+        return pulumi.get(self, "provisioned_iops")
 
+    @_builtins.property
+    @pulumi.getter(name="provisionedThroughput")
+    def provisioned_throughput(self) -> Optional[_builtins.int]:
+        """
+        Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See the [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
+        """
+        return pulumi.get(self, "provisioned_throughput")
 
-@pulumi.output_type
-class ClusterNodePoolNodeConfigContainerdConfig(dict):
-    @staticmethod
+    @_builtins.property
+    @pulumi.getter(name="sizeGb")
+    def size_gb(self) -> Optional[_builtins.int]:
+        """
+        Size of the disk attached to each node, specified
+        in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
+        """
+        return pulumi.get(self, "size_gb")
+
+
+@pulumi.output_type
+class ClusterNodePoolNodeConfigConfidentialNodes(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "confidentialInstanceType":
+            suggest = "confidential_instance_type"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigConfidentialNodes. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ClusterNodePoolNodeConfigConfidentialNodes.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ClusterNodePoolNodeConfigConfidentialNodes.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 enabled: _builtins.bool,
+                 confidential_instance_type: Optional[_builtins.str] = None):
+        """
+        :param _builtins.bool enabled: Enable Confidential GKE Nodes for this cluster, to
+               enforce encryption of data in-use.
+        :param _builtins.str confidential_instance_type: Defines the type of technology used
+               by the confidential node.
+        """
+        pulumi.set(__self__, "enabled", enabled)
+        if confidential_instance_type is not None:
+            pulumi.set(__self__, "confidential_instance_type", confidential_instance_type)
+
+    @_builtins.property
+    @pulumi.getter
+    def enabled(self) -> _builtins.bool:
+        """
+        Enable Confidential GKE Nodes for this cluster, to
+        enforce encryption of data in-use.
+        """
+        return pulumi.get(self, "enabled")
+
+    @_builtins.property
+    @pulumi.getter(name="confidentialInstanceType")
+    def confidential_instance_type(self) -> Optional[_builtins.str]:
+        """
+        Defines the type of technology used
+        by the confidential node.
+        """
+        return pulumi.get(self, "confidential_instance_type")
+
+
+@pulumi.output_type
+class ClusterNodePoolNodeConfigContainerdConfig(dict):
+    @staticmethod
     def __key_warning(key: str):
         suggest = None
         if key == "privateRegistryAccessConfig":
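The `boot_disk` block above is where the hyperdisk provisioning knobs live, and it supersedes the top-level `disk_size_gb`/`disk_type` fields it is migrating (the pairs must agree when both are set). A sketch using an inline node pool; the machine type and the IOPS/throughput figures are illustrative assumptions and must respect the hyperdisk performance limits linked in the docstrings.

```python
import pulumi_gcp as gcp

# Illustrative values: provisioned_iops/provisioned_throughput are only
# valid with disk_type "hyperdisk-balanced"; check the hyperdisk
# performance limits for the chosen machine type before copying these.
cluster = gcp.container.Cluster(
    "hyperdisk-boot",
    location="us-central1-a",
    node_pools=[{
        "name": "hyperdisk-pool",
        "node_count": 1,
        "node_config": {
            "machine_type": "c3-standard-4",   # assumed hyperdisk-capable type
            "boot_disk": {
                "disk_type": "hyperdisk-balanced",
                "size_gb": 100,
                "provisioned_iops": 3600,        # placeholder figure
                "provisioned_throughput": 290,   # placeholder figure
            },
        },
    }],
)
```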
@@ -12097,6 +12757,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
             suggest = "cpu_cfs_quota_period"
         elif key == "cpuManagerPolicy":
             suggest = "cpu_manager_policy"
+        elif key == "evictionMaxPodGracePeriodSeconds":
+            suggest = "eviction_max_pod_grace_period_seconds"
+        elif key == "evictionMinimumReclaim":
+            suggest = "eviction_minimum_reclaim"
+        elif key == "evictionSoft":
+            suggest = "eviction_soft"
+        elif key == "evictionSoftGracePeriod":
+            suggest = "eviction_soft_grace_period"
         elif key == "imageGcHighThresholdPercent":
             suggest = "image_gc_high_threshold_percent"
         elif key == "imageGcLowThresholdPercent":
@@ -12107,8 +12775,12 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
             suggest = "image_minimum_gc_age"
         elif key == "insecureKubeletReadonlyPortEnabled":
             suggest = "insecure_kubelet_readonly_port_enabled"
+        elif key == "maxParallelImagePulls":
+            suggest = "max_parallel_image_pulls"
         elif key == "podPidsLimit":
             suggest = "pod_pids_limit"
+        elif key == "singleProcessOomKill":
+            suggest = "single_process_oom_kill"
 
         if suggest:
             pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfig. Access the value via the '{suggest}' property getter instead.")
@@ -12128,12 +12800,18 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
                 cpu_cfs_quota: Optional[_builtins.bool] = None,
                 cpu_cfs_quota_period: Optional[_builtins.str] = None,
                 cpu_manager_policy: Optional[_builtins.str] = None,
+                eviction_max_pod_grace_period_seconds: Optional[_builtins.int] = None,
+                eviction_minimum_reclaim: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim'] = None,
+                eviction_soft: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionSoft'] = None,
+                eviction_soft_grace_period: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod'] = None,
                 image_gc_high_threshold_percent: Optional[_builtins.int] = None,
                 image_gc_low_threshold_percent: Optional[_builtins.int] = None,
                 image_maximum_gc_age: Optional[_builtins.str] = None,
                 image_minimum_gc_age: Optional[_builtins.str] = None,
                 insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
-                pod_pids_limit: Optional[_builtins.int] = None):
+                max_parallel_image_pulls: Optional[_builtins.int] = None,
+                pod_pids_limit: Optional[_builtins.int] = None,
+                single_process_oom_kill: Optional[_builtins.bool] = None):
         """
         :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
         :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -12153,12 +12831,18 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
               One of `"none"` or `"static"`. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
               Prior to the 6.4.0 release, this field was marked as required. The workaround for the required field
               is setting the empty string `""`, which will function identically to not setting this field.
+        :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
+        :param 'ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs' eviction_minimum_reclaim: Defines a map of signal names to percentages that define minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
+        :param 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs' eviction_soft: Defines a map of signal names to quantities or percentages that define soft eviction thresholds. Structure is documented below.
+        :param 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs' eviction_soft_grace_period: Defines a map of signal names to durations that define grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
         :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
         :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive.
         :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`, and `"2h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration.
         :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
         :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+        :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
         :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
+        :param _builtins.bool single_process_oom_kill: Defines whether to enable the single-process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
         """
         if allowed_unsafe_sysctls is not None:
             pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -12172,6 +12856,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
             pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
         if cpu_manager_policy is not None:
             pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
+        if eviction_max_pod_grace_period_seconds is not None:
+            pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
+        if eviction_minimum_reclaim is not None:
+            pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
+        if eviction_soft is not None:
+            pulumi.set(__self__, "eviction_soft", eviction_soft)
+        if eviction_soft_grace_period is not None:
+            pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
         if image_gc_high_threshold_percent is not None:
             pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
         if image_gc_low_threshold_percent is not None:
@@ -12182,8 +12874,12 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
             pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
         if insecure_kubelet_readonly_port_enabled is not None:
             pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
+        if max_parallel_image_pulls is not None:
+            pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
         if pod_pids_limit is not None:
             pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
+        if single_process_oom_kill is not None:
+            pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
 
     @_builtins.property
     @pulumi.getter(name="allowedUnsafeSysctls")
@@ -12245,6 +12941,38 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
         """
         return pulumi.get(self, "cpu_manager_policy")
 
+    @_builtins.property
+    @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
+    def eviction_max_pod_grace_period_seconds(self) -> Optional[_builtins.int]:
+        """
+        Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
+        """
+        return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
+
+    @_builtins.property
+    @pulumi.getter(name="evictionMinimumReclaim")
+    def eviction_minimum_reclaim(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim']:
+        """
+        Defines a map of signal names to percentages that define minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
+        """
+        return pulumi.get(self, "eviction_minimum_reclaim")
+
+    @_builtins.property
+    @pulumi.getter(name="evictionSoft")
+    def eviction_soft(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionSoft']:
+        """
+        Defines a map of signal names to quantities or percentages that define soft eviction thresholds. Structure is documented below.
+        """
+        return pulumi.get(self, "eviction_soft")
+
+    @_builtins.property
+    @pulumi.getter(name="evictionSoftGracePeriod")
+    def eviction_soft_grace_period(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod']:
+        """
+        Defines a map of signal names to durations that define grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
+        """
+        return pulumi.get(self, "eviction_soft_grace_period")
+
     @_builtins.property
     @pulumi.getter(name="imageGcHighThresholdPercent")
     def image_gc_high_threshold_percent(self) -> Optional[_builtins.int]:
@@ -12285,6 +13013,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
         """
         return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
 
+    @_builtins.property
+    @pulumi.getter(name="maxParallelImagePulls")
+    def max_parallel_image_pulls(self) -> Optional[_builtins.int]:
+        """
+        Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
+        """
+        return pulumi.get(self, "max_parallel_image_pulls")
+
     @_builtins.property
     @pulumi.getter(name="podPidsLimit")
     def pod_pids_limit(self) -> Optional[_builtins.int]:
@@ -12293,6 +13029,332 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
         """
         return pulumi.get(self, "pod_pids_limit")
 
+    @_builtins.property
+    @pulumi.getter(name="singleProcessOomKill")
+    def single_process_oom_kill(self) -> Optional[_builtins.bool]:
+        """
+        Defines whether to enable the single-process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
+        """
+        return pulumi.get(self, "single_process_oom_kill")
+
+
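Beyond the eviction blocks, the per-node-pool kubelet config gains three scalar knobs: `max_parallel_image_pulls` (2-5, inclusive), `single_process_oom_kill`, and `eviction_max_pod_grace_period_seconds` (at most 300). A sketch on an inline node pool, with values inside those documented ranges; the cluster and pool names are placeholders.

```python
import pulumi_gcp as gcp

# single_process_oom_kill=True switches OOM kills from per-cgroup to
# per-process, per the docstring above; other values sit inside the
# documented ranges (image pulls 2-5, pod grace period <= 300 s).
cluster = gcp.container.Cluster(
    "kubelet-tuned-pool",
    location="us-central1-a",
    node_pools=[{
        "name": "tuned",
        "node_count": 1,
        "node_config": {
            "kubelet_config": {
                "max_parallel_image_pulls": 3,
                "single_process_oom_kill": True,
                "eviction_max_pod_grace_period_seconds": 120,
            },
        },
    }],
)
```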
+@pulumi.output_type
+class ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "imagefsAvailable":
+            suggest = "imagefs_available"
+        elif key == "imagefsInodesFree":
+            suggest = "imagefs_inodes_free"
+        elif key == "memoryAvailable":
+            suggest = "memory_available"
+        elif key == "nodefsAvailable":
+            suggest = "nodefs_available"
+        elif key == "nodefsInodesFree":
+            suggest = "nodefs_inodes_free"
+        elif key == "pidAvailable":
+            suggest = "pid_available"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 imagefs_available: Optional[_builtins.str] = None,
+                 imagefs_inodes_free: Optional[_builtins.str] = None,
+                 memory_available: Optional[_builtins.str] = None,
+                 nodefs_available: Optional[_builtins.str] = None,
+                 nodefs_inodes_free: Optional[_builtins.str] = None,
+                 pid_available: Optional[_builtins.str] = None):
+        """
+        :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+        :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+        :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+        :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+        :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+        :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+        """
+        if imagefs_available is not None:
+            pulumi.set(__self__, "imagefs_available", imagefs_available)
+        if imagefs_inodes_free is not None:
+            pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+        if memory_available is not None:
+            pulumi.set(__self__, "memory_available", memory_available)
+        if nodefs_available is not None:
+            pulumi.set(__self__, "nodefs_available", nodefs_available)
+        if nodefs_inodes_free is not None:
+            pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+        if pid_available is not None:
+            pulumi.set(__self__, "pid_available", pid_available)
+
+    @_builtins.property
+    @pulumi.getter(name="imagefsAvailable")
+    def imagefs_available(self) -> Optional[_builtins.str]:
+        """
+        Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+        """
+        return pulumi.get(self, "imagefs_available")
+
+    @_builtins.property
+    @pulumi.getter(name="imagefsInodesFree")
+    def imagefs_inodes_free(self) -> Optional[_builtins.str]:
+        """
+        Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+        """
+        return pulumi.get(self, "imagefs_inodes_free")
+
+    @_builtins.property
+    @pulumi.getter(name="memoryAvailable")
+    def memory_available(self) -> Optional[_builtins.str]:
+        """
+        Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+        """
+        return pulumi.get(self, "memory_available")
+
+    @_builtins.property
+    @pulumi.getter(name="nodefsAvailable")
+    def nodefs_available(self) -> Optional[_builtins.str]:
+        """
+        Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+        """
+        return pulumi.get(self, "nodefs_available")
+
+    @_builtins.property
+    @pulumi.getter(name="nodefsInodesFree")
+    def nodefs_inodes_free(self) -> Optional[_builtins.str]:
+        """
+        Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+        """
+        return pulumi.get(self, "nodefs_inodes_free")
+
+    @_builtins.property
+    @pulumi.getter(name="pidAvailable")
+    def pid_available(self) -> Optional[_builtins.str]:
+        """
+        Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+        """
+        return pulumi.get(self, "pid_available")
+
+
+@pulumi.output_type
+class ClusterNodePoolNodeConfigKubeletConfigEvictionSoft(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "imagefsAvailable":
+            suggest = "imagefs_available"
+        elif key == "imagefsInodesFree":
+            suggest = "imagefs_inodes_free"
+        elif key == "memoryAvailable":
+            suggest = "memory_available"
+        elif key == "nodefsAvailable":
+            suggest = "nodefs_available"
+        elif key == "nodefsInodesFree":
+            suggest = "nodefs_inodes_free"
+        elif key == "pidAvailable":
+            suggest = "pid_available"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfigEvictionSoft. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ClusterNodePoolNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ClusterNodePoolNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 imagefs_available: Optional[_builtins.str] = None,
+                 imagefs_inodes_free: Optional[_builtins.str] = None,
+                 memory_available: Optional[_builtins.str] = None,
+                 nodefs_available: Optional[_builtins.str] = None,
+                 nodefs_inodes_free: Optional[_builtins.str] = None,
+                 pid_available: Optional[_builtins.str] = None):
+        """
+        :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
+        :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
+        :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
+        :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
+        :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
+        :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
+        """
+        if imagefs_available is not None:
+            pulumi.set(__self__, "imagefs_available", imagefs_available)
+        if imagefs_inodes_free is not None:
+            pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+        if memory_available is not None:
+            pulumi.set(__self__, "memory_available", memory_available)
+        if nodefs_available is not None:
+            pulumi.set(__self__, "nodefs_available", nodefs_available)
+        if nodefs_inodes_free is not None:
+            pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+        if pid_available is not None:
+            pulumi.set(__self__, "pid_available", pid_available)
+
+    @_builtins.property
+    @pulumi.getter(name="imagefsAvailable")
+    def imagefs_available(self) -> Optional[_builtins.str]:
+        """
+        Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
+        """
+        return pulumi.get(self, "imagefs_available")
+
+    @_builtins.property
+    @pulumi.getter(name="imagefsInodesFree")
+    def imagefs_inodes_free(self) -> Optional[_builtins.str]:
+        """
+        Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
+        """
+        return pulumi.get(self, "imagefs_inodes_free")
+
+    @_builtins.property
+    @pulumi.getter(name="memoryAvailable")
+    def memory_available(self) -> Optional[_builtins.str]:
+        """
+        Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
+        """
+        return pulumi.get(self, "memory_available")
+
+    @_builtins.property
+    @pulumi.getter(name="nodefsAvailable")
+    def nodefs_available(self) -> Optional[_builtins.str]:
+        """
+        Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
+        """
+        return pulumi.get(self, "nodefs_available")
+
+    @_builtins.property
+    @pulumi.getter(name="nodefsInodesFree")
+    def nodefs_inodes_free(self) -> Optional[_builtins.str]:
+        """
+        Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
+        """
+        return pulumi.get(self, "nodefs_inodes_free")
+
+    @_builtins.property
+    @pulumi.getter(name="pidAvailable")
+    def pid_available(self) -> Optional[_builtins.str]:
+        """
+        Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
+        """
+        return pulumi.get(self, "pid_available")
+
+
+@pulumi.output_type
+class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "imagefsAvailable":
+            suggest = "imagefs_available"
+        elif key == "imagefsInodesFree":
+            suggest = "imagefs_inodes_free"
+        elif key == "memoryAvailable":
+            suggest = "memory_available"
+        elif key == "nodefsAvailable":
+            suggest = "nodefs_available"
+        elif key == "nodefsInodesFree":
+            suggest = "nodefs_inodes_free"
+        elif key == "pidAvailable":
+            suggest = "pid_available"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 imagefs_available: Optional[_builtins.str] = None,
+                 imagefs_inodes_free: Optional[_builtins.str] = None,
+                 memory_available: Optional[_builtins.str] = None,
+                 nodefs_available: Optional[_builtins.str] = None,
+                 nodefs_inodes_free: Optional[_builtins.str] = None,
+                 pid_available: Optional[_builtins.str] = None):
+        """
+        :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+        :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+        :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+        :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+        :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+        :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+        """
+        if imagefs_available is not None:
+            pulumi.set(__self__, "imagefs_available", imagefs_available)
+        if imagefs_inodes_free is not None:
+            pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+        if memory_available is not None:
+            pulumi.set(__self__, "memory_available", memory_available)
+        if nodefs_available is not None:
+            pulumi.set(__self__, "nodefs_available", nodefs_available)
+        if nodefs_inodes_free is not None:
+            pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+        if pid_available is not None:
+            pulumi.set(__self__, "pid_available", pid_available)
13309
+
13310
+ @_builtins.property
13311
+ @pulumi.getter(name="imagefsAvailable")
13312
+ def imagefs_available(self) -> Optional[_builtins.str]:
13313
+ """
13314
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13315
+ """
13316
+ return pulumi.get(self, "imagefs_available")
13317
+
13318
+ @_builtins.property
13319
+ @pulumi.getter(name="imagefsInodesFree")
13320
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
13321
+ """
13322
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13323
+ """
13324
+ return pulumi.get(self, "imagefs_inodes_free")
13325
+
13326
+ @_builtins.property
13327
+ @pulumi.getter(name="memoryAvailable")
13328
+ def memory_available(self) -> Optional[_builtins.str]:
13329
+ """
13330
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
13331
+ """
13332
+ return pulumi.get(self, "memory_available")
13333
+
13334
+ @_builtins.property
13335
+ @pulumi.getter(name="nodefsAvailable")
13336
+ def nodefs_available(self) -> Optional[_builtins.str]:
13337
+ """
13338
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13339
+ """
13340
+ return pulumi.get(self, "nodefs_available")
13341
+
13342
+ @_builtins.property
13343
+ @pulumi.getter(name="nodefsInodesFree")
13344
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
13345
+ """
13346
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13347
+ """
13348
+ return pulumi.get(self, "nodefs_inodes_free")
13349
+
13350
+ @_builtins.property
13351
+ @pulumi.getter(name="pidAvailable")
13352
+ def pid_available(self) -> Optional[_builtins.str]:
13353
+ """
13354
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13355
+ """
13356
+ return pulumi.get(self, "pid_available")
13357
+
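The four eviction output types above mirror new inputs on the cluster's `kubelet_config` block. A minimal sketch of how the soft thresholds and their grace periods might be wired together from the input side (resource name, location, and threshold values are illustrative, not taken from this diff):

```python
import pulumi_gcp as gcp

# Sketch: soft eviction thresholds with matching grace periods.
# Every signal set under eviction_soft needs a counterpart under
# eviction_soft_grace_period, per the docstrings above.
cluster = gcp.container.Cluster(
    "example-cluster",          # illustrative name
    location="us-central1",
    initial_node_count=1,
    node_config={
        "kubelet_config": {
            "eviction_soft": {
                "memory_available": "200Mi",  # quantity >= the 100Mi default hard threshold
                "nodefs_available": "20%",    # percentage between 10% and 50%
            },
            "eviction_soft_grace_period": {
                "memory_available": "30s",    # positive duration, at most 5m
                "nodefs_available": "1m30s",
            },
        },
    },
)
```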
12296
13358
 
12297
13359
  @pulumi.output_type
12298
13360
  class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
@@ -12303,6 +13365,10 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
12303
13365
  suggest = "cgroup_mode"
12304
13366
  elif key == "hugepagesConfig":
12305
13367
  suggest = "hugepages_config"
13368
+ elif key == "transparentHugepageDefrag":
13369
+ suggest = "transparent_hugepage_defrag"
13370
+ elif key == "transparentHugepageEnabled":
13371
+ suggest = "transparent_hugepage_enabled"
12306
13372
 
12307
13373
  if suggest:
12308
13374
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigLinuxNodeConfig. Access the value via the '{suggest}' property getter instead.")
@@ -12318,7 +13384,9 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
12318
13384
  def __init__(__self__, *,
12319
13385
  cgroup_mode: Optional[_builtins.str] = None,
12320
13386
  hugepages_config: Optional['outputs.ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig'] = None,
12321
- sysctls: Optional[Mapping[str, _builtins.str]] = None):
13387
+ sysctls: Optional[Mapping[str, _builtins.str]] = None,
13388
+ transparent_hugepage_defrag: Optional[_builtins.str] = None,
13389
+ transparent_hugepage_enabled: Optional[_builtins.str] = None):
12322
13390
  """
12323
13391
  :param _builtins.str cgroup_mode: Possible cgroup modes that can be used.
12324
13392
  Accepted values are:
@@ -12330,6 +13398,8 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
12330
13398
  and all pods running on the nodes. Specified as a map from the key, such as
12331
13399
  `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
12332
13400
  Note that validations happen all server side. All attributes are optional.
13401
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
13402
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
12333
13403
  """
12334
13404
  if cgroup_mode is not None:
12335
13405
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
@@ -12337,6 +13407,10 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
12337
13407
  pulumi.set(__self__, "hugepages_config", hugepages_config)
12338
13408
  if sysctls is not None:
12339
13409
  pulumi.set(__self__, "sysctls", sysctls)
13410
+ if transparent_hugepage_defrag is not None:
13411
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
13412
+ if transparent_hugepage_enabled is not None:
13413
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
12340
13414
 
12341
13415
  @_builtins.property
12342
13416
  @pulumi.getter(name="cgroupMode")
@@ -12369,6 +13443,22 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
12369
13443
  """
12370
13444
  return pulumi.get(self, "sysctls")
12371
13445
 
13446
+ @_builtins.property
13447
+ @pulumi.getter(name="transparentHugepageDefrag")
13448
+ def transparent_hugepage_defrag(self) -> Optional[_builtins.str]:
13449
+ """
13450
+ The Linux kernel transparent hugepage defrag setting.
13451
+ """
13452
+ return pulumi.get(self, "transparent_hugepage_defrag")
13453
+
13454
+ @_builtins.property
13455
+ @pulumi.getter(name="transparentHugepageEnabled")
13456
+ def transparent_hugepage_enabled(self) -> Optional[_builtins.str]:
13457
+ """
13458
+ The Linux kernel transparent hugepage setting.
13459
+ """
13460
+ return pulumi.get(self, "transparent_hugepage_enabled")
13461
+
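The two new Linux fields expose the kernel's transparent hugepage knobs through `linux_node_config`. A hedged sketch follows; this diff does not enumerate the accepted strings, so the enum-style values below are assumptions modeled on the underlying GKE API:

```python
import pulumi_gcp as gcp

# Sketch: transparent hugepage tuning on a node pool's Linux config.
# The two enum values are assumed, not confirmed by this diff; check
# the provider docs for the authoritative list.
pool = gcp.container.NodePool(
    "thp-pool",                # illustrative name
    cluster="my-cluster",      # illustrative cluster
    node_config={
        "linux_node_config": {
            "transparent_hugepage_enabled": "TRANSPARENT_HUGEPAGE_ENABLED_MADVISE",
            "transparent_hugepage_defrag": "TRANSPARENT_HUGEPAGE_DEFRAG_DEFER",
        },
    },
)
```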
12372
13462
 
12373
13463
  @pulumi.output_type
12374
13464
  class ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig(dict):
@@ -12681,6 +13771,8 @@ class ClusterNodePoolNodeConfigSoleTenantConfig(dict):
12681
13771
  suggest = None
12682
13772
  if key == "nodeAffinities":
12683
13773
  suggest = "node_affinities"
13774
+ elif key == "minNodeCpus":
13775
+ suggest = "min_node_cpus"
12684
13776
 
12685
13777
  if suggest:
12686
13778
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigSoleTenantConfig. Access the value via the '{suggest}' property getter instead.")
@@ -12694,20 +13786,32 @@ class ClusterNodePoolNodeConfigSoleTenantConfig(dict):
12694
13786
  return super().get(key, default)
12695
13787
 
12696
13788
  def __init__(__self__, *,
12697
- node_affinities: Sequence['outputs.ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity']):
13789
+ node_affinities: Sequence['outputs.ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity'],
13790
+ min_node_cpus: Optional[_builtins.int] = None):
12698
13791
  """
12699
- :param Sequence['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
13792
+ :param Sequence['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: The node affinity settings for the sole tenant node pool. Structure is documented below.
13793
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
12700
13794
  """
12701
13795
  pulumi.set(__self__, "node_affinities", node_affinities)
13796
+ if min_node_cpus is not None:
13797
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
12702
13798
 
12703
13799
  @_builtins.property
12704
13800
  @pulumi.getter(name="nodeAffinities")
12705
13801
  def node_affinities(self) -> Sequence['outputs.ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity']:
12706
13802
  """
12707
- .
13803
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
12708
13804
  """
12709
13805
  return pulumi.get(self, "node_affinities")
12710
13806
 
13807
+ @_builtins.property
13808
+ @pulumi.getter(name="minNodeCpus")
13809
+ def min_node_cpus(self) -> Optional[_builtins.int]:
13810
+ """
13811
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
13812
+ """
13813
+ return pulumi.get(self, "min_node_cpus")
13814
+
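`min_node_cpus` turns on CPU overcommit for sole-tenant nodes and, per the docstring above, should be at least half the machine type's vCPU count. A sketch of it alongside the required `node_affinities` (cluster, machine type, and node group names are illustrative):

```python
import pulumi_gcp as gcp

# Sketch: sole-tenant node pool with CPU overcommit. For a 32-vCPU
# machine type, min_node_cpus must be >= 16 per the docstring above.
pool = gcp.container.NodePool(
    "sole-tenant-pool",
    cluster="my-cluster",                  # illustrative
    node_config={
        "machine_type": "n2-standard-32",  # illustrative
        "sole_tenant_config": {
            "node_affinities": [{
                "key": "compute.googleapis.com/node-group-name",
                "operator": "IN",
                "values": ["my-node-group"],   # illustrative node group
            }],
            "min_node_cpus": 16,
        },
    },
)
```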
12711
13815
 
12712
13816
  @pulumi.output_type
12713
13817
  class ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity(dict):
@@ -14638,6 +15742,8 @@ class NodePoolNodeConfig(dict):
14638
15742
  suggest = None
14639
15743
  if key == "advancedMachineFeatures":
14640
15744
  suggest = "advanced_machine_features"
15745
+ elif key == "bootDisk":
15746
+ suggest = "boot_disk"
14641
15747
  elif key == "bootDiskKmsKey":
14642
15748
  suggest = "boot_disk_kms_key"
14643
15749
  elif key == "confidentialNodes":
@@ -14726,6 +15832,7 @@ class NodePoolNodeConfig(dict):
14726
15832
 
14727
15833
  def __init__(__self__, *,
14728
15834
  advanced_machine_features: Optional['outputs.NodePoolNodeConfigAdvancedMachineFeatures'] = None,
15835
+ boot_disk: Optional['outputs.NodePoolNodeConfigBootDisk'] = None,
14729
15836
  boot_disk_kms_key: Optional[_builtins.str] = None,
14730
15837
  confidential_nodes: Optional['outputs.NodePoolNodeConfigConfidentialNodes'] = None,
14731
15838
  containerd_config: Optional['outputs.NodePoolNodeConfigContainerdConfig'] = None,
@@ -14772,6 +15879,7 @@ class NodePoolNodeConfig(dict):
14772
15879
  workload_metadata_config: Optional['outputs.NodePoolNodeConfigWorkloadMetadataConfig'] = None):
14773
15880
  """
14774
15881
  :param 'NodePoolNodeConfigAdvancedMachineFeaturesArgs' advanced_machine_features: Specifies options for controlling advanced machine features.
15882
+ :param 'NodePoolNodeConfigBootDiskArgs' boot_disk: Boot disk configuration for the node pool's nodes.
14775
15883
  :param _builtins.str boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
14776
15884
  :param 'NodePoolNodeConfigConfidentialNodesArgs' confidential_nodes: Configuration for the confidential nodes feature, which makes nodes run on confidential VMs.
14777
15885
  :param 'NodePoolNodeConfigContainerdConfigArgs' containerd_config: Parameters for containerd configuration.
@@ -14819,6 +15927,8 @@ class NodePoolNodeConfig(dict):
14819
15927
  """
14820
15928
  if advanced_machine_features is not None:
14821
15929
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
15930
+ if boot_disk is not None:
15931
+ pulumi.set(__self__, "boot_disk", boot_disk)
14822
15932
  if boot_disk_kms_key is not None:
14823
15933
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
14824
15934
  if confidential_nodes is not None:
@@ -14916,6 +16026,14 @@ class NodePoolNodeConfig(dict):
14916
16026
  """
14917
16027
  return pulumi.get(self, "advanced_machine_features")
14918
16028
 
16029
+ @_builtins.property
16030
+ @pulumi.getter(name="bootDisk")
16031
+ def boot_disk(self) -> Optional['outputs.NodePoolNodeConfigBootDisk']:
16032
+ """
16033
+ Boot disk configuration for the node pool's nodes.
16034
+ """
16035
+ return pulumi.get(self, "boot_disk")
16036
+
14919
16037
  @_builtins.property
14920
16038
  @pulumi.getter(name="bootDiskKmsKey")
14921
16039
  def boot_disk_kms_key(self) -> Optional[_builtins.str]:
@@ -15333,46 +16451,124 @@ class NodePoolNodeConfigAdvancedMachineFeatures(dict):
15333
16451
 
15334
16452
 
15335
16453
  @pulumi.output_type
15336
- class NodePoolNodeConfigConfidentialNodes(dict):
16454
+ class NodePoolNodeConfigBootDisk(dict):
15337
16455
  @staticmethod
15338
16456
  def __key_warning(key: str):
15339
16457
  suggest = None
15340
- if key == "confidentialInstanceType":
15341
- suggest = "confidential_instance_type"
16458
+ if key == "diskType":
16459
+ suggest = "disk_type"
16460
+ elif key == "provisionedIops":
16461
+ suggest = "provisioned_iops"
16462
+ elif key == "provisionedThroughput":
16463
+ suggest = "provisioned_throughput"
16464
+ elif key == "sizeGb":
16465
+ suggest = "size_gb"
15342
16466
 
15343
16467
  if suggest:
15344
- pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigConfidentialNodes. Access the value via the '{suggest}' property getter instead.")
16468
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigBootDisk. Access the value via the '{suggest}' property getter instead.")
15345
16469
 
15346
16470
  def __getitem__(self, key: str) -> Any:
15347
- NodePoolNodeConfigConfidentialNodes.__key_warning(key)
16471
+ NodePoolNodeConfigBootDisk.__key_warning(key)
15348
16472
  return super().__getitem__(key)
15349
16473
 
15350
16474
  def get(self, key: str, default = None) -> Any:
15351
- NodePoolNodeConfigConfidentialNodes.__key_warning(key)
16475
+ NodePoolNodeConfigBootDisk.__key_warning(key)
15352
16476
  return super().get(key, default)
15353
16477
 
15354
16478
  def __init__(__self__, *,
15355
- enabled: _builtins.bool,
15356
- confidential_instance_type: Optional[_builtins.str] = None):
16479
+ disk_type: Optional[_builtins.str] = None,
16480
+ provisioned_iops: Optional[_builtins.int] = None,
16481
+ provisioned_throughput: Optional[_builtins.int] = None,
16482
+ size_gb: Optional[_builtins.int] = None):
15357
16483
  """
15358
- :param _builtins.bool enabled: Whether Confidential Nodes feature is enabled for all nodes in this pool.
15359
- :param _builtins.str confidential_instance_type: Defines the type of technology used by the confidential node.
16484
+ :param _builtins.str disk_type: Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
16485
+ :param _builtins.int provisioned_iops: Configured IOPS provisioning. Only valid with disk type hyperdisk-balanced.
16486
+ :param _builtins.int provisioned_throughput: Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
16487
+ :param _builtins.int size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
15360
16488
  """
15361
- pulumi.set(__self__, "enabled", enabled)
15362
- if confidential_instance_type is not None:
15363
- pulumi.set(__self__, "confidential_instance_type", confidential_instance_type)
16489
+ if disk_type is not None:
16490
+ pulumi.set(__self__, "disk_type", disk_type)
16491
+ if provisioned_iops is not None:
16492
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
16493
+ if provisioned_throughput is not None:
16494
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
16495
+ if size_gb is not None:
16496
+ pulumi.set(__self__, "size_gb", size_gb)
15364
16497
 
15365
16498
  @_builtins.property
15366
- @pulumi.getter
15367
- def enabled(self) -> _builtins.bool:
16499
+ @pulumi.getter(name="diskType")
16500
+ def disk_type(self) -> Optional[_builtins.str]:
15368
16501
  """
15369
- Whether Confidential Nodes feature is enabled for all nodes in this pool.
16502
+ Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
15370
16503
  """
15371
- return pulumi.get(self, "enabled")
16504
+ return pulumi.get(self, "disk_type")
15372
16505
 
15373
16506
  @_builtins.property
15374
- @pulumi.getter(name="confidentialInstanceType")
15375
- def confidential_instance_type(self) -> Optional[_builtins.str]:
16507
+ @pulumi.getter(name="provisionedIops")
16508
+ def provisioned_iops(self) -> Optional[_builtins.int]:
16509
+ """
16510
+ Configured IOPS provisioning. Only valid with disk type hyperdisk-balanced.
16511
+ """
16512
+ return pulumi.get(self, "provisioned_iops")
16513
+
16514
+ @_builtins.property
16515
+ @pulumi.getter(name="provisionedThroughput")
16516
+ def provisioned_throughput(self) -> Optional[_builtins.int]:
16517
+ """
16518
+ Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
16519
+ """
16520
+ return pulumi.get(self, "provisioned_throughput")
16521
+
16522
+ @_builtins.property
16523
+ @pulumi.getter(name="sizeGb")
16524
+ def size_gb(self) -> Optional[_builtins.int]:
16525
+ """
16526
+ Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
16527
+ """
16528
+ return pulumi.get(self, "size_gb")
16529
+
16530
+
16531
+ @pulumi.output_type
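`boot_disk` consolidates the boot volume settings and adds the two Hyperdisk provisioning fields, which the docstrings restrict to `hyperdisk-balanced`. A sketch under those constraints (cluster name, machine type, and all figures are illustrative):

```python
import pulumi_gcp as gcp

# Sketch: node pool booting from a provisioned hyperdisk-balanced
# volume. provisioned_iops / provisioned_throughput are only valid
# with that disk type, per the docstrings above.
pool = gcp.container.NodePool(
    "hyperdisk-pool",
    cluster="my-cluster",                  # illustrative
    node_config={
        "machine_type": "c3-standard-4",   # illustrative Hyperdisk-capable type
        "boot_disk": {
            "disk_type": "hyperdisk-balanced",
            "size_gb": 100,                # smallest allowed size is 10 GB
            "provisioned_iops": 3000,      # illustrative
            "provisioned_throughput": 140, # illustrative
        },
    },
)
```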
16532
+ class NodePoolNodeConfigConfidentialNodes(dict):
16533
+ @staticmethod
16534
+ def __key_warning(key: str):
16535
+ suggest = None
16536
+ if key == "confidentialInstanceType":
16537
+ suggest = "confidential_instance_type"
16538
+
16539
+ if suggest:
16540
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigConfidentialNodes. Access the value via the '{suggest}' property getter instead.")
16541
+
16542
+ def __getitem__(self, key: str) -> Any:
16543
+ NodePoolNodeConfigConfidentialNodes.__key_warning(key)
16544
+ return super().__getitem__(key)
16545
+
16546
+ def get(self, key: str, default = None) -> Any:
16547
+ NodePoolNodeConfigConfidentialNodes.__key_warning(key)
16548
+ return super().get(key, default)
16549
+
16550
+ def __init__(__self__, *,
16551
+ enabled: _builtins.bool,
16552
+ confidential_instance_type: Optional[_builtins.str] = None):
16553
+ """
16554
+ :param _builtins.bool enabled: Whether Confidential Nodes feature is enabled for all nodes in this pool.
16555
+ :param _builtins.str confidential_instance_type: Defines the type of technology used by the confidential node.
16556
+ """
16557
+ pulumi.set(__self__, "enabled", enabled)
16558
+ if confidential_instance_type is not None:
16559
+ pulumi.set(__self__, "confidential_instance_type", confidential_instance_type)
16560
+
16561
+ @_builtins.property
16562
+ @pulumi.getter
16563
+ def enabled(self) -> _builtins.bool:
16564
+ """
16565
+ Whether Confidential Nodes feature is enabled for all nodes in this pool.
16566
+ """
16567
+ return pulumi.get(self, "enabled")
16568
+
16569
+ @_builtins.property
16570
+ @pulumi.getter(name="confidentialInstanceType")
16571
+ def confidential_instance_type(self) -> Optional[_builtins.str]:
15376
16572
  """
15377
16573
  Defines the type of technology used by the confidential node.
15378
16574
  """
@@ -15945,6 +17141,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
15945
17141
  suggest = "cpu_cfs_quota_period"
15946
17142
  elif key == "cpuManagerPolicy":
15947
17143
  suggest = "cpu_manager_policy"
17144
+ elif key == "evictionMaxPodGracePeriodSeconds":
17145
+ suggest = "eviction_max_pod_grace_period_seconds"
17146
+ elif key == "evictionMinimumReclaim":
17147
+ suggest = "eviction_minimum_reclaim"
17148
+ elif key == "evictionSoft":
17149
+ suggest = "eviction_soft"
17150
+ elif key == "evictionSoftGracePeriod":
17151
+ suggest = "eviction_soft_grace_period"
15948
17152
  elif key == "imageGcHighThresholdPercent":
15949
17153
  suggest = "image_gc_high_threshold_percent"
15950
17154
  elif key == "imageGcLowThresholdPercent":
@@ -15955,8 +17159,12 @@ class NodePoolNodeConfigKubeletConfig(dict):
15955
17159
  suggest = "image_minimum_gc_age"
15956
17160
  elif key == "insecureKubeletReadonlyPortEnabled":
15957
17161
  suggest = "insecure_kubelet_readonly_port_enabled"
17162
+ elif key == "maxParallelImagePulls":
17163
+ suggest = "max_parallel_image_pulls"
15958
17164
  elif key == "podPidsLimit":
15959
17165
  suggest = "pod_pids_limit"
17166
+ elif key == "singleProcessOomKill":
17167
+ suggest = "single_process_oom_kill"
15960
17168
 
15961
17169
  if suggest:
15962
17170
  pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfig. Access the value via the '{suggest}' property getter instead.")
@@ -15976,12 +17184,18 @@ class NodePoolNodeConfigKubeletConfig(dict):
15976
17184
  cpu_cfs_quota: Optional[_builtins.bool] = None,
15977
17185
  cpu_cfs_quota_period: Optional[_builtins.str] = None,
15978
17186
  cpu_manager_policy: Optional[_builtins.str] = None,
17187
+ eviction_max_pod_grace_period_seconds: Optional[_builtins.int] = None,
17188
+ eviction_minimum_reclaim: Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim'] = None,
17189
+ eviction_soft: Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionSoft'] = None,
17190
+ eviction_soft_grace_period: Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod'] = None,
15979
17191
  image_gc_high_threshold_percent: Optional[_builtins.int] = None,
15980
17192
  image_gc_low_threshold_percent: Optional[_builtins.int] = None,
15981
17193
  image_maximum_gc_age: Optional[_builtins.str] = None,
15982
17194
  image_minimum_gc_age: Optional[_builtins.str] = None,
15983
17195
  insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
15984
- pod_pids_limit: Optional[_builtins.int] = None):
17196
+ max_parallel_image_pulls: Optional[_builtins.int] = None,
17197
+ pod_pids_limit: Optional[_builtins.int] = None,
17198
+ single_process_oom_kill: Optional[_builtins.bool] = None):
15985
17199
  """
15986
17200
  :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
15987
17201
  :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -15989,12 +17203,18 @@ class NodePoolNodeConfigKubeletConfig(dict):
15989
17203
  :param _builtins.bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
15990
17204
  :param _builtins.str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
15991
17205
  :param _builtins.str cpu_manager_policy: Control the CPU management policy on the node.
17206
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
17207
+ :param 'NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs' eviction_minimum_reclaim: Defines a map of signal names to percentages that define minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
17208
+ :param 'NodePoolNodeConfigKubeletConfigEvictionSoftArgs' eviction_soft: Defines a map of signal names to quantities or percentages that define soft eviction thresholds.
17209
+ :param 'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs' eviction_soft_grace_period: Defines a map of signal names to durations that define grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
15992
17210
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run.
15993
17211
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
15994
17212
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected.
15995
17213
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
15996
17214
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
17215
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
15997
17216
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
17217
+ :param _builtins.bool single_process_oom_kill: Defines whether to enable the single-process OOM killer.
15998
17218
  """
15999
17219
  if allowed_unsafe_sysctls is not None:
16000
17220
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -16008,6 +17228,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
16008
17228
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
16009
17229
  if cpu_manager_policy is not None:
16010
17230
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
17231
+ if eviction_max_pod_grace_period_seconds is not None:
17232
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
17233
+ if eviction_minimum_reclaim is not None:
17234
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
17235
+ if eviction_soft is not None:
17236
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
17237
+ if eviction_soft_grace_period is not None:
17238
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
16011
17239
  if image_gc_high_threshold_percent is not None:
16012
17240
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
16013
17241
  if image_gc_low_threshold_percent is not None:
@@ -16018,8 +17246,12 @@ class NodePoolNodeConfigKubeletConfig(dict):
16018
17246
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
16019
17247
  if insecure_kubelet_readonly_port_enabled is not None:
16020
17248
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
17249
+ if max_parallel_image_pulls is not None:
17250
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
16021
17251
  if pod_pids_limit is not None:
16022
17252
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
17253
+ if single_process_oom_kill is not None:
17254
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
16023
17255
 
16024
17256
  @_builtins.property
16025
17257
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -16069,6 +17301,38 @@ class NodePoolNodeConfigKubeletConfig(dict):
16069
17301
  """
16070
17302
  return pulumi.get(self, "cpu_manager_policy")
16071
17303
 
17304
+ @_builtins.property
17305
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
17306
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[_builtins.int]:
17307
+ """
17308
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
17309
+ """
17310
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
17311
+
17312
+ @_builtins.property
17313
+ @pulumi.getter(name="evictionMinimumReclaim")
17314
+ def eviction_minimum_reclaim(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim']:
17315
+ """
17316
+ Defines a map of signal names to percentages that define minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
17317
+ """
17318
+ return pulumi.get(self, "eviction_minimum_reclaim")
17319
+
17320
+ @_builtins.property
17321
+ @pulumi.getter(name="evictionSoft")
17322
+ def eviction_soft(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionSoft']:
17323
+ """
17324
+ Defines a map of signal names to quantities or percentages that define soft eviction thresholds.
17325
+ """
17326
+ return pulumi.get(self, "eviction_soft")
17327
+
17328
+ @_builtins.property
17329
+ @pulumi.getter(name="evictionSoftGracePeriod")
17330
+ def eviction_soft_grace_period(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod']:
17331
+ """
17332
+ Defines a map of signal names to durations that define grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
17333
+ """
17334
+ return pulumi.get(self, "eviction_soft_grace_period")
17335
+
16072
17336
  @_builtins.property
16073
17337
  @pulumi.getter(name="imageGcHighThresholdPercent")
16074
17338
  def image_gc_high_threshold_percent(self) -> Optional[_builtins.int]:
@@ -16109,6 +17373,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
16109
17373
  """
16110
17374
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
16111
17375
 
17376
+ @_builtins.property
17377
+ @pulumi.getter(name="maxParallelImagePulls")
17378
+ def max_parallel_image_pulls(self) -> Optional[_builtins.int]:
17379
+ """
17380
+ Set the maximum number of image pulls in parallel.
17381
+ """
17382
+ return pulumi.get(self, "max_parallel_image_pulls")
17383
+
16112
17384
  @_builtins.property
16113
17385
  @pulumi.getter(name="podPidsLimit")
16114
17386
  def pod_pids_limit(self) -> Optional[_builtins.int]:
@@ -16117,6 +17389,332 @@ class NodePoolNodeConfigKubeletConfig(dict):
16117
17389
  """
16118
17390
  return pulumi.get(self, "pod_pids_limit")
16119
17391
 
17392
+ @_builtins.property
17393
+ @pulumi.getter(name="singleProcessOomKill")
17394
+ def single_process_oom_kill(self) -> Optional[_builtins.bool]:
17395
+ """
17396
+ Defines whether to enable the single-process OOM killer.
17397
+ """
17398
+ return pulumi.get(self, "single_process_oom_kill")
17399
+
17400
+
17401
+ @pulumi.output_type
17402
+ class NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
17403
+ @staticmethod
17404
+ def __key_warning(key: str):
17405
+ suggest = None
17406
+ if key == "imagefsAvailable":
17407
+ suggest = "imagefs_available"
17408
+ elif key == "imagefsInodesFree":
17409
+ suggest = "imagefs_inodes_free"
17410
+ elif key == "memoryAvailable":
17411
+ suggest = "memory_available"
17412
+ elif key == "nodefsAvailable":
17413
+ suggest = "nodefs_available"
17414
+ elif key == "nodefsInodesFree":
17415
+ suggest = "nodefs_inodes_free"
17416
+ elif key == "pidAvailable":
17417
+ suggest = "pid_available"
17418
+
17419
+ if suggest:
17420
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim. Access the value via the '{suggest}' property getter instead.")
17421
+
17422
+ def __getitem__(self, key: str) -> Any:
17423
+ NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
17424
+ return super().__getitem__(key)
17425
+
17426
+ def get(self, key: str, default = None) -> Any:
17427
+ NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
17428
+ return super().get(key, default)
17429
+
17430
+ def __init__(__self__, *,
17431
+ imagefs_available: Optional[_builtins.str] = None,
17432
+ imagefs_inodes_free: Optional[_builtins.str] = None,
17433
+ memory_available: Optional[_builtins.str] = None,
17434
+ nodefs_available: Optional[_builtins.str] = None,
17435
+ nodefs_inodes_free: Optional[_builtins.str] = None,
17436
+ pid_available: Optional[_builtins.str] = None):
17437
+ """
17438
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available.
17439
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree.
17440
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available.
17441
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available.
17442
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree.
17443
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available.
17444
+ """
17445
+ if imagefs_available is not None:
17446
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
17447
+ if imagefs_inodes_free is not None:
17448
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
17449
+ if memory_available is not None:
17450
+ pulumi.set(__self__, "memory_available", memory_available)
17451
+ if nodefs_available is not None:
17452
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
17453
+ if nodefs_inodes_free is not None:
17454
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
17455
+ if pid_available is not None:
17456
+ pulumi.set(__self__, "pid_available", pid_available)
17457
+
17458
+ @_builtins.property
17459
+ @pulumi.getter(name="imagefsAvailable")
17460
+ def imagefs_available(self) -> Optional[_builtins.str]:
17461
+ """
17462
+ Defines percentage of minimum reclaim for imagefs.available.
17463
+ """
17464
+ return pulumi.get(self, "imagefs_available")
17465
+
17466
+ @_builtins.property
17467
+ @pulumi.getter(name="imagefsInodesFree")
17468
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
17469
+ """
17470
+ Defines percentage of minimum reclaim for imagefs.inodesFree.
17471
+ """
17472
+ return pulumi.get(self, "imagefs_inodes_free")
17473
+
17474
+ @_builtins.property
17475
+ @pulumi.getter(name="memoryAvailable")
17476
+ def memory_available(self) -> Optional[_builtins.str]:
17477
+ """
17478
+ Defines percentage of minimum reclaim for memory.available.
17479
+ """
17480
+ return pulumi.get(self, "memory_available")
17481
+
17482
+ @_builtins.property
17483
+ @pulumi.getter(name="nodefsAvailable")
17484
+ def nodefs_available(self) -> Optional[_builtins.str]:
17485
+ """
17486
+ Defines percentage of minimum reclaim for nodefs.available.
17487
+ """
17488
+ return pulumi.get(self, "nodefs_available")
17489
+
17490
+ @_builtins.property
17491
+ @pulumi.getter(name="nodefsInodesFree")
17492
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
17493
+ """
17494
+ Defines percentage of minimum reclaim for nodefs.inodesFree.
17495
+ """
17496
+ return pulumi.get(self, "nodefs_inodes_free")
17497
+
17498
+ @_builtins.property
17499
+ @pulumi.getter(name="pidAvailable")
17500
+ def pid_available(self) -> Optional[_builtins.str]:
17501
+ """
17502
+ Defines percentage of minimum reclaim for pid.available.
17503
+ """
17504
+ return pulumi.get(self, "pid_available")
17505
+
17506
+
17507
+ @pulumi.output_type
17508
+ class NodePoolNodeConfigKubeletConfigEvictionSoft(dict):
17509
+ @staticmethod
17510
+ def __key_warning(key: str):
17511
+ suggest = None
17512
+ if key == "imagefsAvailable":
17513
+ suggest = "imagefs_available"
17514
+ elif key == "imagefsInodesFree":
17515
+ suggest = "imagefs_inodes_free"
17516
+ elif key == "memoryAvailable":
17517
+ suggest = "memory_available"
17518
+ elif key == "nodefsAvailable":
17519
+ suggest = "nodefs_available"
17520
+ elif key == "nodefsInodesFree":
17521
+ suggest = "nodefs_inodes_free"
17522
+ elif key == "pidAvailable":
17523
+ suggest = "pid_available"
17524
+
17525
+ if suggest:
17526
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfigEvictionSoft. Access the value via the '{suggest}' property getter instead.")
17527
+
17528
+ def __getitem__(self, key: str) -> Any:
17529
+ NodePoolNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
17530
+ return super().__getitem__(key)
17531
+
17532
+ def get(self, key: str, default = None) -> Any:
17533
+ NodePoolNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
17534
+ return super().get(key, default)
17535
+
17536
+ def __init__(__self__, *,
17537
+ imagefs_available: Optional[_builtins.str] = None,
17538
+ imagefs_inodes_free: Optional[_builtins.str] = None,
17539
+ memory_available: Optional[_builtins.str] = None,
17540
+ nodefs_available: Optional[_builtins.str] = None,
17541
+ nodefs_inodes_free: Optional[_builtins.str] = None,
17542
+ pid_available: Optional[_builtins.str] = None):
17543
+ """
17544
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available.
17545
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree.
17546
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available.
17547
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available.
17548
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree.
17549
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available.
17550
+ """
17551
+ if imagefs_available is not None:
17552
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
17553
+ if imagefs_inodes_free is not None:
17554
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
17555
+ if memory_available is not None:
17556
+ pulumi.set(__self__, "memory_available", memory_available)
17557
+ if nodefs_available is not None:
17558
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
17559
+ if nodefs_inodes_free is not None:
17560
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
17561
+ if pid_available is not None:
17562
+ pulumi.set(__self__, "pid_available", pid_available)
17563
+
17564
+ @_builtins.property
17565
+ @pulumi.getter(name="imagefsAvailable")
17566
+ def imagefs_available(self) -> Optional[_builtins.str]:
17567
+ """
17568
+ Defines percentage of soft eviction threshold for imagefs.available.
17569
+ """
17570
+ return pulumi.get(self, "imagefs_available")
17571
+
17572
+ @_builtins.property
17573
+ @pulumi.getter(name="imagefsInodesFree")
17574
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
17575
+ """
17576
+ Defines percentage of soft eviction threshold for imagefs.inodesFree.
17577
+ """
17578
+ return pulumi.get(self, "imagefs_inodes_free")
17579
+
17580
+ @_builtins.property
17581
+ @pulumi.getter(name="memoryAvailable")
17582
+ def memory_available(self) -> Optional[_builtins.str]:
17583
+ """
17584
+ Defines quantity of soft eviction threshold for memory.available.
17585
+ """
17586
+ return pulumi.get(self, "memory_available")
17587
+
17588
+ @_builtins.property
17589
+ @pulumi.getter(name="nodefsAvailable")
17590
+ def nodefs_available(self) -> Optional[_builtins.str]:
17591
+ """
17592
+ Defines percentage of soft eviction threshold for nodefs.available.
17593
+ """
17594
+ return pulumi.get(self, "nodefs_available")
17595
+
17596
+ @_builtins.property
17597
+ @pulumi.getter(name="nodefsInodesFree")
17598
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
17599
+ """
17600
+ Defines percentage of soft eviction threshold for nodefs.inodesFree.
17601
+ """
17602
+ return pulumi.get(self, "nodefs_inodes_free")
17603
+
17604
+ @_builtins.property
17605
+ @pulumi.getter(name="pidAvailable")
17606
+ def pid_available(self) -> Optional[_builtins.str]:
17607
+ """
17608
+ Defines percentage of soft eviction threshold for pid.available.
17609
+ """
17610
+ return pulumi.get(self, "pid_available")
17611
+
17612
+
17613
+ @pulumi.output_type
17614
+ class NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
17615
+ @staticmethod
17616
+ def __key_warning(key: str):
17617
+ suggest = None
17618
+ if key == "imagefsAvailable":
17619
+ suggest = "imagefs_available"
17620
+ elif key == "imagefsInodesFree":
17621
+ suggest = "imagefs_inodes_free"
17622
+ elif key == "memoryAvailable":
17623
+ suggest = "memory_available"
17624
+ elif key == "nodefsAvailable":
17625
+ suggest = "nodefs_available"
17626
+ elif key == "nodefsInodesFree":
17627
+ suggest = "nodefs_inodes_free"
17628
+ elif key == "pidAvailable":
17629
+ suggest = "pid_available"
17630
+
17631
+ if suggest:
17632
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod. Access the value via the '{suggest}' property getter instead.")
17633
+
17634
+ def __getitem__(self, key: str) -> Any:
17635
+ NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
17636
+ return super().__getitem__(key)
17637
+
17638
+ def get(self, key: str, default = None) -> Any:
17639
+ NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
17640
+ return super().get(key, default)
17641
+
17642
+ def __init__(__self__, *,
17643
+ imagefs_available: Optional[_builtins.str] = None,
17644
+ imagefs_inodes_free: Optional[_builtins.str] = None,
17645
+ memory_available: Optional[_builtins.str] = None,
17646
+ nodefs_available: Optional[_builtins.str] = None,
17647
+ nodefs_inodes_free: Optional[_builtins.str] = None,
17648
+ pid_available: Optional[_builtins.str] = None):
17649
+ """
17650
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold.
17651
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold.
17652
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold.
17653
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold.
17654
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold.
17655
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold.
17656
+ """
17657
+ if imagefs_available is not None:
17658
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
17659
+ if imagefs_inodes_free is not None:
17660
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
17661
+ if memory_available is not None:
17662
+ pulumi.set(__self__, "memory_available", memory_available)
17663
+ if nodefs_available is not None:
17664
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
17665
+ if nodefs_inodes_free is not None:
17666
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
17667
+ if pid_available is not None:
17668
+ pulumi.set(__self__, "pid_available", pid_available)
17669
+
17670
+ @_builtins.property
17671
+ @pulumi.getter(name="imagefsAvailable")
17672
+ def imagefs_available(self) -> Optional[_builtins.str]:
17673
+ """
17674
+ Defines grace period for the imagefs.available soft eviction threshold.
17675
+ """
17676
+ return pulumi.get(self, "imagefs_available")
17677
+
17678
+ @_builtins.property
17679
+ @pulumi.getter(name="imagefsInodesFree")
17680
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
17681
+ """
17682
+ Defines grace period for the imagefs.inodesFree soft eviction threshold.
17683
+ """
17684
+ return pulumi.get(self, "imagefs_inodes_free")
17685
+
17686
+ @_builtins.property
17687
+ @pulumi.getter(name="memoryAvailable")
17688
+ def memory_available(self) -> Optional[_builtins.str]:
17689
+ """
17690
+ Defines grace period for the memory.available soft eviction threshold.
17691
+ """
17692
+ return pulumi.get(self, "memory_available")
17693
+
17694
+ @_builtins.property
17695
+ @pulumi.getter(name="nodefsAvailable")
17696
+ def nodefs_available(self) -> Optional[_builtins.str]:
17697
+ """
17698
+ Defines grace period for the nodefs.available soft eviction threshold.
17699
+ """
17700
+ return pulumi.get(self, "nodefs_available")
17701
+
17702
+ @_builtins.property
17703
+ @pulumi.getter(name="nodefsInodesFree")
17704
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
17705
+ """
17706
+ Defines grace period for the nodefs.inodesFree soft eviction threshold.
17707
+ """
17708
+ return pulumi.get(self, "nodefs_inodes_free")
17709
+
17710
+ @_builtins.property
17711
+ @pulumi.getter(name="pidAvailable")
17712
+ def pid_available(self) -> Optional[_builtins.str]:
17713
+ """
17714
+ Defines grace period for the pid.available soft eviction threshold.
17715
+ """
17716
+ return pulumi.get(self, "pid_available")
17717
+
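The standalone `NodePool` resource gains the same eviction family plus `max_parallel_image_pulls` and `single_process_oom_kill`. A combined sketch (cluster name and all values illustrative):

```python
import pulumi_gcp as gcp

# Sketch: the new kubelet knobs on a standalone node pool.
# eviction_minimum_reclaim sets how much of a signal the kubelet
# claws back once an eviction fires, so small percentages are typical.
pool = gcp.container.NodePool(
    "eviction-tuned-pool",
    cluster="my-cluster",                          # illustrative
    node_config={
        "kubelet_config": {
            "eviction_soft": {"imagefs_available": "20%"},
            "eviction_soft_grace_period": {"imagefs_available": "1m"},
            "eviction_minimum_reclaim": {"imagefs_available": "5%"},
            "eviction_max_pod_grace_period_seconds": 60,
            "max_parallel_image_pulls": 3,
            "single_process_oom_kill": True,
        },
    },
)
```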
16120
17718
 
16121
17719
  @pulumi.output_type
16122
17720
  class NodePoolNodeConfigLinuxNodeConfig(dict):
@@ -16127,6 +17725,10 @@ class NodePoolNodeConfigLinuxNodeConfig(dict):
16127
17725
  suggest = "cgroup_mode"
16128
17726
  elif key == "hugepagesConfig":
16129
17727
  suggest = "hugepages_config"
17728
+ elif key == "transparentHugepageDefrag":
17729
+ suggest = "transparent_hugepage_defrag"
17730
+ elif key == "transparentHugepageEnabled":
17731
+ suggest = "transparent_hugepage_enabled"
16130
17732
 
16131
17733
  if suggest:
16132
17734
  pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigLinuxNodeConfig. Access the value via the '{suggest}' property getter instead.")
@@ -16142,11 +17744,15 @@ class NodePoolNodeConfigLinuxNodeConfig(dict):
16142
17744
  def __init__(__self__, *,
16143
17745
  cgroup_mode: Optional[_builtins.str] = None,
16144
17746
  hugepages_config: Optional['outputs.NodePoolNodeConfigLinuxNodeConfigHugepagesConfig'] = None,
16145
- sysctls: Optional[Mapping[str, _builtins.str]] = None):
17747
+ sysctls: Optional[Mapping[str, _builtins.str]] = None,
17748
+ transparent_hugepage_defrag: Optional[_builtins.str] = None,
17749
+ transparent_hugepage_enabled: Optional[_builtins.str] = None):
16146
17750
  """
16147
17751
  :param _builtins.str cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
16148
17752
  :param 'NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs' hugepages_config: Amounts for 2M and 1G hugepages.
16149
17753
  :param Mapping[str, _builtins.str] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
17754
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
17755
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
16150
17756
  """
16151
17757
  if cgroup_mode is not None:
16152
17758
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
@@ -16154,6 +17760,10 @@ class NodePoolNodeConfigLinuxNodeConfig(dict):
16154
17760
  pulumi.set(__self__, "hugepages_config", hugepages_config)
16155
17761
  if sysctls is not None:
16156
17762
  pulumi.set(__self__, "sysctls", sysctls)
17763
+ if transparent_hugepage_defrag is not None:
17764
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
17765
+ if transparent_hugepage_enabled is not None:
17766
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
16157
17767
 
16158
17768
  @_builtins.property
16159
17769
  @pulumi.getter(name="cgroupMode")
@@ -16179,6 +17789,22 @@ class NodePoolNodeConfigLinuxNodeConfig(dict):
16179
17789
  """
16180
17790
  return pulumi.get(self, "sysctls")
16181
17791
 
17792
+ @_builtins.property
17793
+ @pulumi.getter(name="transparentHugepageDefrag")
17794
+ def transparent_hugepage_defrag(self) -> Optional[_builtins.str]:
17795
+ """
17796
+ The Linux kernel transparent hugepage defrag setting.
17797
+ """
17798
+ return pulumi.get(self, "transparent_hugepage_defrag")
17799
+
17800
+ @_builtins.property
17801
+ @pulumi.getter(name="transparentHugepageEnabled")
17802
+ def transparent_hugepage_enabled(self) -> Optional[_builtins.str]:
17803
+ """
17804
+ The Linux kernel transparent hugepage setting.
17805
+ """
17806
+ return pulumi.get(self, "transparent_hugepage_enabled")
17807
+
16182
17808
 
16183
17809
  @pulumi.output_type
16184
17810
  class NodePoolNodeConfigLinuxNodeConfigHugepagesConfig(dict):
@@ -16475,6 +18101,8 @@ class NodePoolNodeConfigSoleTenantConfig(dict):
16475
18101
  suggest = None
16476
18102
  if key == "nodeAffinities":
16477
18103
  suggest = "node_affinities"
18104
+ elif key == "minNodeCpus":
18105
+ suggest = "min_node_cpus"
16478
18106
 
16479
18107
  if suggest:
16480
18108
  pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigSoleTenantConfig. Access the value via the '{suggest}' property getter instead.")
@@ -16488,11 +18116,15 @@ class NodePoolNodeConfigSoleTenantConfig(dict):
16488
18116
  return super().get(key, default)
16489
18117
 
16490
18118
  def __init__(__self__, *,
16491
- node_affinities: Sequence['outputs.NodePoolNodeConfigSoleTenantConfigNodeAffinity']):
18119
+ node_affinities: Sequence['outputs.NodePoolNodeConfigSoleTenantConfigNodeAffinity'],
18120
+ min_node_cpus: Optional[_builtins.int] = None):
16492
18121
  """
16493
18122
  :param Sequence['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: The node affinity settings for the sole tenant node pool. Structure is documented below.
18123
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
16494
18124
  """
16495
18125
  pulumi.set(__self__, "node_affinities", node_affinities)
18126
+ if min_node_cpus is not None:
18127
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
16496
18128
 
16497
18129
  @_builtins.property
16498
18130
  @pulumi.getter(name="nodeAffinities")
@@ -16502,6 +18134,14 @@ class NodePoolNodeConfigSoleTenantConfig(dict):
16502
18134
  """
16503
18135
  return pulumi.get(self, "node_affinities")
16504
18136
 
18137
+ @_builtins.property
18138
+ @pulumi.getter(name="minNodeCpus")
18139
+ def min_node_cpus(self) -> Optional[_builtins.int]:
18140
+ """
18141
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
18142
+ """
18143
+ return pulumi.get(self, "min_node_cpus")
18144
+
16505
18145
 
16506
18146
  @pulumi.output_type
16507
18147
  class NodePoolNodeConfigSoleTenantConfigNodeAffinity(dict):
@@ -18938,6 +20578,7 @@ class GetClusterNodeConfigResult(dict):
18938
20578
  def __init__(__self__, *,
18939
20579
  advanced_machine_features: Sequence['outputs.GetClusterNodeConfigAdvancedMachineFeatureResult'],
18940
20580
  boot_disk_kms_key: _builtins.str,
20581
+ boot_disks: Sequence['outputs.GetClusterNodeConfigBootDiskResult'],
18941
20582
  confidential_nodes: Sequence['outputs.GetClusterNodeConfigConfidentialNodeResult'],
18942
20583
  containerd_configs: Sequence['outputs.GetClusterNodeConfigContainerdConfigResult'],
18943
20584
  disk_size_gb: _builtins.int,
@@ -18984,6 +20625,7 @@ class GetClusterNodeConfigResult(dict):
18984
20625
  """
18985
20626
  :param Sequence['GetClusterNodeConfigAdvancedMachineFeatureArgs'] advanced_machine_features: Specifies options for controlling advanced machine features.
18986
20627
  :param _builtins.str boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
20628
+ :param Sequence['GetClusterNodeConfigBootDiskArgs'] boot_disks: Boot disk configuration for the node pool's nodes.
18987
20629
  :param Sequence['GetClusterNodeConfigConfidentialNodeArgs'] confidential_nodes: Configuration for the confidential nodes feature, which makes nodes run on confidential VMs.
18988
20630
  :param Sequence['GetClusterNodeConfigContainerdConfigArgs'] containerd_configs: Parameters for containerd configuration.
18989
20631
  :param _builtins.int disk_size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
@@ -19030,6 +20672,7 @@ class GetClusterNodeConfigResult(dict):
19030
20672
  """
19031
20673
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
19032
20674
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
20675
+ pulumi.set(__self__, "boot_disks", boot_disks)
19033
20676
  pulumi.set(__self__, "confidential_nodes", confidential_nodes)
19034
20677
  pulumi.set(__self__, "containerd_configs", containerd_configs)
19035
20678
  pulumi.set(__self__, "disk_size_gb", disk_size_gb)
@@ -19090,6 +20733,14 @@ class GetClusterNodeConfigResult(dict):
19090
20733
  """
19091
20734
  return pulumi.get(self, "boot_disk_kms_key")
19092
20735
 
20736
+ @_builtins.property
20737
+ @pulumi.getter(name="bootDisks")
20738
+ def boot_disks(self) -> Sequence['outputs.GetClusterNodeConfigBootDiskResult']:
20739
+ """
20740
+ Boot disk configuration for the node pool's nodes.
20741
+ """
20742
+ return pulumi.get(self, "boot_disks")
20743
+
19093
20744
  @_builtins.property
19094
20745
  @pulumi.getter(name="confidentialNodes")
19095
20746
  def confidential_nodes(self) -> Sequence['outputs.GetClusterNodeConfigConfidentialNodeResult']:
@@ -19475,6 +21126,57 @@ class GetClusterNodeConfigAdvancedMachineFeatureResult(dict):
19475
21126
  return pulumi.get(self, "threads_per_core")
19476
21127
 
19477
21128
 
21129
+ @pulumi.output_type
21130
+ class GetClusterNodeConfigBootDiskResult(dict):
21131
+ def __init__(__self__, *,
21132
+ disk_type: _builtins.str,
21133
+ provisioned_iops: _builtins.int,
21134
+ provisioned_throughput: _builtins.int,
21135
+ size_gb: _builtins.int):
21136
+ """
21137
+ :param _builtins.str disk_type: Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
21138
+ :param _builtins.int provisioned_iops: Configured IOPS provisioning. Only valid with disk type hyperdisk-balanced.
21139
+ :param _builtins.int provisioned_throughput: Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
21140
+ :param _builtins.int size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
21141
+ """
21142
+ pulumi.set(__self__, "disk_type", disk_type)
21143
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
21144
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
21145
+ pulumi.set(__self__, "size_gb", size_gb)
21146
+
21147
+ @_builtins.property
21148
+ @pulumi.getter(name="diskType")
21149
+ def disk_type(self) -> _builtins.str:
21150
+ """
21151
+ Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
21152
+ """
21153
+ return pulumi.get(self, "disk_type")
21154
+
21155
+ @_builtins.property
21156
+ @pulumi.getter(name="provisionedIops")
21157
+ def provisioned_iops(self) -> _builtins.int:
21158
+ """
21159
+ Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
21160
+ """
21161
+ return pulumi.get(self, "provisioned_iops")
21162
+
21163
+ @_builtins.property
21164
+ @pulumi.getter(name="provisionedThroughput")
21165
+ def provisioned_throughput(self) -> _builtins.int:
21166
+ """
21167
+ Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
21168
+ """
21169
+ return pulumi.get(self, "provisioned_throughput")
21170
+
21171
+ @_builtins.property
21172
+ @pulumi.getter(name="sizeGb")
21173
+ def size_gb(self) -> _builtins.int:
21174
+ """
21175
+ Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
21176
+ """
21177
+ return pulumi.get(self, "size_gb")
21178
+
21179
+
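Note: the new `boot_disks` output above can be read straight off the cluster data source. A minimal sketch, assuming this 8.42.0 build of pulumi-gcp; the cluster name and location are placeholders, not values from this diff:

    import pulumi
    import pulumi_gcp as gcp

    # "my-cluster" / "us-central1" are hypothetical; substitute your own.
    cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

    # Each GetClusterNodeConfigBootDiskResult carries disk_type, size_gb, and the
    # hyperdisk-balanced-only provisioned_iops / provisioned_throughput values.
    for node_config in cluster.node_configs:
        for disk in node_config.boot_disks:
            pulumi.log.info(
                f"boot disk: type={disk.disk_type} size={disk.size_gb}GB "
                f"iops={disk.provisioned_iops} throughput={disk.provisioned_throughput}"
            )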
  @pulumi.output_type
  class GetClusterNodeConfigConfidentialNodeResult(dict):
  def __init__(__self__, *,
@@ -19875,12 +21577,18 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  cpu_cfs_quota: _builtins.bool,
  cpu_cfs_quota_period: _builtins.str,
  cpu_manager_policy: _builtins.str,
+ eviction_max_pod_grace_period_seconds: _builtins.int,
+ eviction_minimum_reclaims: Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult'],
+ eviction_soft_grace_periods: Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult'],
+ eviction_softs: Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionSoftResult'],
  image_gc_high_threshold_percent: _builtins.int,
  image_gc_low_threshold_percent: _builtins.int,
  image_maximum_gc_age: _builtins.str,
  image_minimum_gc_age: _builtins.str,
  insecure_kubelet_readonly_port_enabled: _builtins.str,
- pod_pids_limit: _builtins.int):
+ max_parallel_image_pulls: _builtins.int,
+ pod_pids_limit: _builtins.int,
+ single_process_oom_kill: _builtins.bool):
  """
  :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
  :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -19888,12 +21596,18 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  :param _builtins.bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
  :param _builtins.str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
  :param _builtins.str cpu_manager_policy: Control the CPU management policy on the node.
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ :param Sequence['GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs'] eviction_minimum_reclaims: Defines a map of signal names to percentages that define minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
+ :param Sequence['GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs'] eviction_soft_grace_periods: Defines a map of signal names to durations that define grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
+ :param Sequence['GetClusterNodeConfigKubeletConfigEvictionSoftArgs'] eviction_softs: Defines a map of signal names to quantities or percentages that define soft eviction thresholds.
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run.
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected.
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
+ :param _builtins.bool single_process_oom_kill: Defines whether to enable the single-process OOM killer.
  """
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
  pulumi.set(__self__, "container_log_max_files", container_log_max_files)
@@ -19901,12 +21615,18 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
+ pulumi.set(__self__, "eviction_minimum_reclaims", eviction_minimum_reclaims)
+ pulumi.set(__self__, "eviction_soft_grace_periods", eviction_soft_grace_periods)
+ pulumi.set(__self__, "eviction_softs", eviction_softs)
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
  pulumi.set(__self__, "image_gc_low_threshold_percent", image_gc_low_threshold_percent)
  pulumi.set(__self__, "image_maximum_gc_age", image_maximum_gc_age)
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)

  @_builtins.property
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -19957,15 +21677,47 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  return pulumi.get(self, "cpu_manager_policy")

  @_builtins.property
- @pulumi.getter(name="imageGcHighThresholdPercent")
- def image_gc_high_threshold_percent(self) -> _builtins.int:
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
+ def eviction_max_pod_grace_period_seconds(self) -> _builtins.int:
  """
- Defines the percent of disk usage after which image garbage collection is always run.
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
  """
- return pulumi.get(self, "image_gc_high_threshold_percent")
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")

  @_builtins.property
- @pulumi.getter(name="imageGcLowThresholdPercent")
+ @pulumi.getter(name="evictionMinimumReclaims")
+ def eviction_minimum_reclaims(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult']:
+ """
+ Defines a map of signal names to percentages that define minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
+ """
+ return pulumi.get(self, "eviction_minimum_reclaims")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionSoftGracePeriods")
+ def eviction_soft_grace_periods(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult']:
+ """
+ Defines a map of signal names to durations that define grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
+ """
+ return pulumi.get(self, "eviction_soft_grace_periods")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionSofts")
+ def eviction_softs(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionSoftResult']:
+ """
+ Defines a map of signal names to quantities or percentages that define soft eviction thresholds.
+ """
+ return pulumi.get(self, "eviction_softs")
+
+ @_builtins.property
+ @pulumi.getter(name="imageGcHighThresholdPercent")
+ def image_gc_high_threshold_percent(self) -> _builtins.int:
+ """
+ Defines the percent of disk usage after which image garbage collection is always run.
+ """
+ return pulumi.get(self, "image_gc_high_threshold_percent")
+
+ @_builtins.property
+ @pulumi.getter(name="imageGcLowThresholdPercent")
  def image_gc_low_threshold_percent(self) -> _builtins.int:
  """
  Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
@@ -19996,6 +21748,14 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  """
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")

+ @_builtins.property
+ @pulumi.getter(name="maxParallelImagePulls")
+ def max_parallel_image_pulls(self) -> _builtins.int:
+ """
+ Set the maximum number of image pulls in parallel.
+ """
+ return pulumi.get(self, "max_parallel_image_pulls")
+
  @_builtins.property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> _builtins.int:
@@ -20004,21 +21764,254 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  """
  return pulumi.get(self, "pod_pids_limit")

+ @_builtins.property
+ @pulumi.getter(name="singleProcessOomKill")
+ def single_process_oom_kill(self) -> _builtins.bool:
+ """
+ Defines whether to enable the single-process OOM killer.
+ """
+ return pulumi.get(self, "single_process_oom_kill")
+
+
+ @pulumi.output_type
+ class GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult(dict):
+ def __init__(__self__, *,
+ imagefs_available: _builtins.str,
+ imagefs_inodes_free: _builtins.str,
+ memory_available: _builtins.str,
+ nodefs_available: _builtins.str,
+ nodefs_inodes_free: _builtins.str,
+ pid_available: _builtins.str):
+ """
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available.
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree.
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available.
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available.
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree.
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available.
+ """
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ pulumi.set(__self__, "memory_available", memory_available)
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for imagefs.available.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for imagefs.inodesFree.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for memory.available.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for nodefs.available.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for nodefs.inodesFree.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for pid.available.
+ """
+ return pulumi.get(self, "pid_available")
+
+
+ @pulumi.output_type
+ class GetClusterNodeConfigKubeletConfigEvictionSoftResult(dict):
+ def __init__(__self__, *,
+ imagefs_available: _builtins.str,
+ imagefs_inodes_free: _builtins.str,
+ memory_available: _builtins.str,
+ nodefs_available: _builtins.str,
+ nodefs_inodes_free: _builtins.str,
+ pid_available: _builtins.str):
+ """
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available.
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree.
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available.
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available.
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree.
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available.
+ """
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ pulumi.set(__self__, "memory_available", memory_available)
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for imagefs.available.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for imagefs.inodesFree.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> _builtins.str:
+ """
+ Defines quantity of soft eviction threshold for memory.available.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for nodefs.available.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for nodefs.inodesFree.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for pid.available.
+ """
+ return pulumi.get(self, "pid_available")
+
+
+ @pulumi.output_type
+ class GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult(dict):
+ def __init__(__self__, *,
+ imagefs_available: _builtins.str,
+ imagefs_inodes_free: _builtins.str,
+ memory_available: _builtins.str,
+ nodefs_available: _builtins.str,
+ nodefs_inodes_free: _builtins.str,
+ pid_available: _builtins.str):
+ """
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold.
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold.
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold.
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold.
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold.
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold.
+ """
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ pulumi.set(__self__, "memory_available", memory_available)
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> _builtins.str:
+ """
+ Defines grace period for the imagefs.available soft eviction threshold.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines grace period for the imagefs.inodesFree soft eviction threshold.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> _builtins.str:
+ """
+ Defines grace period for the memory.available soft eviction threshold.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> _builtins.str:
+ """
+ Defines grace period for the nodefs.available soft eviction threshold.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines grace period for the nodefs.inodesFree soft eviction threshold.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> _builtins.str:
+ """
+ Defines grace period for the pid.available soft eviction threshold.
+ """
+ return pulumi.get(self, "pid_available")
+
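Note: the three eviction shapes above are parallel, keyed by the same eviction signals; each threshold in `eviction_softs` pairs with the same-named duration in `eviction_soft_grace_periods`, while `eviction_minimum_reclaims` bounds how much the kubelet reclaims once a threshold fires. A minimal read-back sketch, under the same placeholder-cluster assumption as the example above:

    import pulumi
    import pulumi_gcp as gcp

    cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

    for node_config in cluster.node_configs:
        for kubelet in node_config.kubelet_configs:
            # eviction_softs and eviction_soft_grace_periods are parallel lists;
            # the memory.available threshold pairs with its grace period by field name.
            for soft, grace in zip(kubelet.eviction_softs, kubelet.eviction_soft_grace_periods):
                pulumi.log.info(
                    f"memory.available: soft threshold={soft.memory_available}, "
                    f"grace period={grace.memory_available}, "
                    f"pod grace cap={kubelet.eviction_max_pod_grace_period_seconds}s"
                )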

  @pulumi.output_type
  class GetClusterNodeConfigLinuxNodeConfigResult(dict):
  def __init__(__self__, *,
  cgroup_mode: _builtins.str,
  hugepages_configs: Sequence['outputs.GetClusterNodeConfigLinuxNodeConfigHugepagesConfigResult'],
- sysctls: Mapping[str, _builtins.str]):
+ sysctls: Mapping[str, _builtins.str],
+ transparent_hugepage_defrag: _builtins.str,
+ transparent_hugepage_enabled: _builtins.str):
  """
  :param _builtins.str cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
  :param Sequence['GetClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_configs: Amounts for 2M and 1G hugepages.
  :param Mapping[str, _builtins.str] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
  """
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
  pulumi.set(__self__, "hugepages_configs", hugepages_configs)
  pulumi.set(__self__, "sysctls", sysctls)
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)

  @_builtins.property
  @pulumi.getter(name="cgroupMode")
@@ -20044,6 +22037,22 @@ class GetClusterNodeConfigLinuxNodeConfigResult(dict):
  """
  return pulumi.get(self, "sysctls")

+ @_builtins.property
+ @pulumi.getter(name="transparentHugepageDefrag")
+ def transparent_hugepage_defrag(self) -> _builtins.str:
+ """
+ The Linux kernel transparent hugepage defrag setting.
+ """
+ return pulumi.get(self, "transparent_hugepage_defrag")
+
+ @_builtins.property
+ @pulumi.getter(name="transparentHugepageEnabled")
+ def transparent_hugepage_enabled(self) -> _builtins.str:
+ """
+ The Linux kernel transparent hugepage setting.
+ """
+ return pulumi.get(self, "transparent_hugepage_enabled")
+
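Note: a short read-back sketch for the transparent hugepage outputs above, under the same placeholder-cluster assumption; the returned strings mirror the kernel THP modes as the GKE API spells them:

    import pulumi
    import pulumi_gcp as gcp

    cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

    for node_config in cluster.node_configs:
        for linux in node_config.linux_node_configs:
            pulumi.log.info(
                f"THP enabled={linux.transparent_hugepage_enabled} "
                f"defrag={linux.transparent_hugepage_defrag} cgroup={linux.cgroup_mode}"
            )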

  @pulumi.output_type
  class GetClusterNodeConfigLinuxNodeConfigHugepagesConfigResult(dict):
@@ -20211,12 +22220,23 @@ class GetClusterNodeConfigShieldedInstanceConfigResult(dict):
  @pulumi.output_type
  class GetClusterNodeConfigSoleTenantConfigResult(dict):
  def __init__(__self__, *,
+ min_node_cpus: _builtins.int,
  node_affinities: Sequence['outputs.GetClusterNodeConfigSoleTenantConfigNodeAffinityResult']):
  """
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
  :param Sequence['GetClusterNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
  """
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
  pulumi.set(__self__, "node_affinities", node_affinities)

+ @_builtins.property
+ @pulumi.getter(name="minNodeCpus")
+ def min_node_cpus(self) -> _builtins.int:
+ """
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
+ """
+ return pulumi.get(self, "min_node_cpus")
+
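Note: `min_node_cpus` reads back the sole-tenant CPU overcommit floor; a zero/unset value means overcommit is disabled. A minimal sketch under the same placeholder-cluster assumption:

    import pulumi
    import pulumi_gcp as gcp

    cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

    for node_config in cluster.node_configs:
        for sole_tenant in node_config.sole_tenant_configs:
            # A falsy min_node_cpus means the CPU overcommit feature is disabled.
            if sole_tenant.min_node_cpus:
                pulumi.log.info(f"CPU overcommit floor: {sole_tenant.min_node_cpus} vCPUs")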
  @_builtins.property
  @pulumi.getter(name="nodeAffinities")
  def node_affinities(self) -> Sequence['outputs.GetClusterNodeConfigSoleTenantConfigNodeAffinityResult']:
@@ -21108,6 +23128,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
  def __init__(__self__, *,
  advanced_machine_features: Sequence['outputs.GetClusterNodePoolNodeConfigAdvancedMachineFeatureResult'],
  boot_disk_kms_key: _builtins.str,
+ boot_disks: Sequence['outputs.GetClusterNodePoolNodeConfigBootDiskResult'],
  confidential_nodes: Sequence['outputs.GetClusterNodePoolNodeConfigConfidentialNodeResult'],
  containerd_configs: Sequence['outputs.GetClusterNodePoolNodeConfigContainerdConfigResult'],
  disk_size_gb: _builtins.int,
@@ -21154,6 +23175,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
  """
  :param Sequence['GetClusterNodePoolNodeConfigAdvancedMachineFeatureArgs'] advanced_machine_features: Specifies options for controlling advanced machine features.
  :param _builtins.str boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
+ :param Sequence['GetClusterNodePoolNodeConfigBootDiskArgs'] boot_disks: Boot disk configuration for node pool nodes.
  :param Sequence['GetClusterNodePoolNodeConfigConfidentialNodeArgs'] confidential_nodes: Configuration for the confidential nodes feature, which makes nodes run on confidential VMs.
  :param Sequence['GetClusterNodePoolNodeConfigContainerdConfigArgs'] containerd_configs: Parameters for containerd configuration.
  :param _builtins.int disk_size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
@@ -21200,6 +23222,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
  """
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
+ pulumi.set(__self__, "boot_disks", boot_disks)
  pulumi.set(__self__, "confidential_nodes", confidential_nodes)
  pulumi.set(__self__, "containerd_configs", containerd_configs)
  pulumi.set(__self__, "disk_size_gb", disk_size_gb)
@@ -21260,6 +23283,14 @@ class GetClusterNodePoolNodeConfigResult(dict):
  """
  return pulumi.get(self, "boot_disk_kms_key")

+ @_builtins.property
+ @pulumi.getter(name="bootDisks")
+ def boot_disks(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigBootDiskResult']:
+ """
+ Boot disk configuration for node pool nodes.
+ """
+ return pulumi.get(self, "boot_disks")
+
  @_builtins.property
  @pulumi.getter(name="confidentialNodes")
  def confidential_nodes(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigConfidentialNodeResult']:
@@ -21645,6 +23676,57 @@ class GetClusterNodePoolNodeConfigAdvancedMachineFeatureResult(dict):
  return pulumi.get(self, "threads_per_core")


+ @pulumi.output_type
+ class GetClusterNodePoolNodeConfigBootDiskResult(dict):
+ def __init__(__self__, *,
+ disk_type: _builtins.str,
+ provisioned_iops: _builtins.int,
+ provisioned_throughput: _builtins.int,
+ size_gb: _builtins.int):
+ """
+ :param _builtins.str disk_type: Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
+ :param _builtins.int provisioned_iops: Configured IOPS provisioning. Only valid with disk type hyperdisk-balanced.
+ :param _builtins.int provisioned_throughput: Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
+ :param _builtins.int size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
+ """
+ pulumi.set(__self__, "disk_type", disk_type)
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
+ pulumi.set(__self__, "size_gb", size_gb)
+
+ @_builtins.property
+ @pulumi.getter(name="diskType")
+ def disk_type(self) -> _builtins.str:
+ """
+ Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
+ """
+ return pulumi.get(self, "disk_type")
+
+ @_builtins.property
+ @pulumi.getter(name="provisionedIops")
+ def provisioned_iops(self) -> _builtins.int:
+ """
+ Configured IOPS provisioning. Only valid with disk type hyperdisk-balanced.
+ """
+ return pulumi.get(self, "provisioned_iops")
+
+ @_builtins.property
+ @pulumi.getter(name="provisionedThroughput")
+ def provisioned_throughput(self) -> _builtins.int:
+ """
+ Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
+ """
+ return pulumi.get(self, "provisioned_throughput")
+
+ @_builtins.property
+ @pulumi.getter(name="sizeGb")
+ def size_gb(self) -> _builtins.int:
+ """
+ Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
+ """
+ return pulumi.get(self, "size_gb")
+
+
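Note: the node-pool variant above is the same shape, reachable per pool. A sketch (placeholder cluster as before; per-pool iteration uses the existing node_pools output):

    import pulumi
    import pulumi_gcp as gcp

    cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

    # boot_disks is repeated under node_pools[*].node_configs[*].
    for pool in cluster.node_pools:
        for node_config in pool.node_configs:
            for disk in node_config.boot_disks:
                pulumi.log.info(f"pool {pool.name}: {disk.disk_type} {disk.size_gb}GB")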
  @pulumi.output_type
  class GetClusterNodePoolNodeConfigConfidentialNodeResult(dict):
  def __init__(__self__, *,
@@ -22045,12 +24127,18 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  cpu_cfs_quota: _builtins.bool,
  cpu_cfs_quota_period: _builtins.str,
  cpu_manager_policy: _builtins.str,
+ eviction_max_pod_grace_period_seconds: _builtins.int,
+ eviction_minimum_reclaims: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult'],
+ eviction_soft_grace_periods: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult'],
+ eviction_softs: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult'],
  image_gc_high_threshold_percent: _builtins.int,
  image_gc_low_threshold_percent: _builtins.int,
  image_maximum_gc_age: _builtins.str,
  image_minimum_gc_age: _builtins.str,
  insecure_kubelet_readonly_port_enabled: _builtins.str,
- pod_pids_limit: _builtins.int):
+ max_parallel_image_pulls: _builtins.int,
+ pod_pids_limit: _builtins.int,
+ single_process_oom_kill: _builtins.bool):
  """
  :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
  :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -22058,12 +24146,18 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  :param _builtins.bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
  :param _builtins.str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
  :param _builtins.str cpu_manager_policy: Control the CPU management policy on the node.
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs'] eviction_minimum_reclaims: Defines a map of signal names to percentages that define minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
+ :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs'] eviction_soft_grace_periods: Defines a map of signal names to durations that define grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
+ :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs'] eviction_softs: Defines a map of signal names to quantities or percentages that define soft eviction thresholds.
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run.
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected.
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
+ :param _builtins.bool single_process_oom_kill: Defines whether to enable the single-process OOM killer.
  """
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
  pulumi.set(__self__, "container_log_max_files", container_log_max_files)
@@ -22071,12 +24165,18 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
+ pulumi.set(__self__, "eviction_minimum_reclaims", eviction_minimum_reclaims)
+ pulumi.set(__self__, "eviction_soft_grace_periods", eviction_soft_grace_periods)
+ pulumi.set(__self__, "eviction_softs", eviction_softs)
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
  pulumi.set(__self__, "image_gc_low_threshold_percent", image_gc_low_threshold_percent)
  pulumi.set(__self__, "image_maximum_gc_age", image_maximum_gc_age)
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)

  @_builtins.property
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -22126,6 +24226,38 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  """
  return pulumi.get(self, "cpu_manager_policy")

+ @_builtins.property
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
+ def eviction_max_pod_grace_period_seconds(self) -> _builtins.int:
+ """
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ """
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionMinimumReclaims")
+ def eviction_minimum_reclaims(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult']:
+ """
+ Defines a map of signal names to percentages that define minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
+ """
+ return pulumi.get(self, "eviction_minimum_reclaims")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionSoftGracePeriods")
+ def eviction_soft_grace_periods(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult']:
+ """
+ Defines a map of signal names to durations that define grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
+ """
+ return pulumi.get(self, "eviction_soft_grace_periods")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionSofts")
+ def eviction_softs(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult']:
+ """
+ Defines a map of signal names to quantities or percentages that define soft eviction thresholds.
+ """
+ return pulumi.get(self, "eviction_softs")
+
  @_builtins.property
  @pulumi.getter(name="imageGcHighThresholdPercent")
  def image_gc_high_threshold_percent(self) -> _builtins.int:
@@ -22166,6 +24298,14 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  """
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")

+ @_builtins.property
+ @pulumi.getter(name="maxParallelImagePulls")
+ def max_parallel_image_pulls(self) -> _builtins.int:
+ """
+ Set the maximum number of image pulls in parallel.
+ """
+ return pulumi.get(self, "max_parallel_image_pulls")
+
  @_builtins.property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> _builtins.int:
@@ -22174,21 +24314,254 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  """
  return pulumi.get(self, "pod_pids_limit")

+ @_builtins.property
+ @pulumi.getter(name="singleProcessOomKill")
+ def single_process_oom_kill(self) -> _builtins.bool:
+ """
+ Defines whether to enable the single-process OOM killer.
+ """
+ return pulumi.get(self, "single_process_oom_kill")
+
+
+ @pulumi.output_type
+ class GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult(dict):
+ def __init__(__self__, *,
+ imagefs_available: _builtins.str,
+ imagefs_inodes_free: _builtins.str,
+ memory_available: _builtins.str,
+ nodefs_available: _builtins.str,
+ nodefs_inodes_free: _builtins.str,
+ pid_available: _builtins.str):
+ """
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available.
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree.
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available.
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available.
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree.
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available.
+ """
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ pulumi.set(__self__, "memory_available", memory_available)
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for imagefs.available.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for imagefs.inodesFree.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for memory.available.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for nodefs.available.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for nodefs.inodesFree.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for pid.available.
+ """
+ return pulumi.get(self, "pid_available")
+
+
+ @pulumi.output_type
+ class GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult(dict):
+ def __init__(__self__, *,
+ imagefs_available: _builtins.str,
+ imagefs_inodes_free: _builtins.str,
+ memory_available: _builtins.str,
+ nodefs_available: _builtins.str,
+ nodefs_inodes_free: _builtins.str,
+ pid_available: _builtins.str):
+ """
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available.
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree.
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available.
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available.
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree.
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available.
+ """
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ pulumi.set(__self__, "memory_available", memory_available)
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for imagefs.available.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for imagefs.inodesFree.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> _builtins.str:
+ """
+ Defines quantity of soft eviction threshold for memory.available.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for nodefs.available.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for nodefs.inodesFree.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for pid.available.
+ """
+ return pulumi.get(self, "pid_available")
+
+
+ @pulumi.output_type
+ class GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult(dict):
+ def __init__(__self__, *,
+ imagefs_available: _builtins.str,
+ imagefs_inodes_free: _builtins.str,
+ memory_available: _builtins.str,
+ nodefs_available: _builtins.str,
+ nodefs_inodes_free: _builtins.str,
+ pid_available: _builtins.str):
+ """
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold.
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold.
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold.
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold.
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold.
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold.
+ """
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ pulumi.set(__self__, "memory_available", memory_available)
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> _builtins.str:
+ """
+ Defines grace period for the imagefs.available soft eviction threshold.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines grace period for the imagefs.inodesFree soft eviction threshold.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> _builtins.str:
+ """
+ Defines grace period for the memory.available soft eviction threshold.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> _builtins.str:
+ """
+ Defines grace period for the nodefs.available soft eviction threshold.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines grace period for the nodefs.inodesFree soft eviction threshold.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> _builtins.str:
+ """
+ Defines grace period for the pid.available soft eviction threshold.
+ """
+ return pulumi.get(self, "pid_available")
+
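Note: since these nested results are plain Python objects, they flatten easily for stack outputs; a hedged sketch (the output name and summary shape are arbitrary, placeholder cluster as above):

    import pulumi
    import pulumi_gcp as gcp

    cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

    # Flatten per-pool kubelet eviction settings for `pulumi stack output`.
    summary = {
        pool.name: [
            {
                "memory_soft": soft.memory_available,
                "max_parallel_image_pulls": kubelet.max_parallel_image_pulls,
                "single_process_oom_kill": kubelet.single_process_oom_kill,
            }
            for node_config in pool.node_configs
            for kubelet in node_config.kubelet_configs
            for soft in kubelet.eviction_softs
        ]
        for pool in cluster.node_pools
    }
    pulumi.export("kubelet_eviction_summary", summary)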

  @pulumi.output_type
  class GetClusterNodePoolNodeConfigLinuxNodeConfigResult(dict):
  def __init__(__self__, *,
  cgroup_mode: _builtins.str,
  hugepages_configs: Sequence['outputs.GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigResult'],
- sysctls: Mapping[str, _builtins.str]):
+ sysctls: Mapping[str, _builtins.str],
+ transparent_hugepage_defrag: _builtins.str,
+ transparent_hugepage_enabled: _builtins.str):
  """
  :param _builtins.str cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
  :param Sequence['GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_configs: Amounts for 2M and 1G hugepages.
  :param Mapping[str, _builtins.str] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
  """
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
  pulumi.set(__self__, "hugepages_configs", hugepages_configs)
  pulumi.set(__self__, "sysctls", sysctls)
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)

  @_builtins.property
  @pulumi.getter(name="cgroupMode")
@@ -22214,6 +24587,22 @@ class GetClusterNodePoolNodeConfigLinuxNodeConfigResult(dict):
  """
  return pulumi.get(self, "sysctls")

+ @_builtins.property
+ @pulumi.getter(name="transparentHugepageDefrag")
+ def transparent_hugepage_defrag(self) -> _builtins.str:
+ """
+ The Linux kernel transparent hugepage defrag setting.
+ """
+ return pulumi.get(self, "transparent_hugepage_defrag")
+
+ @_builtins.property
+ @pulumi.getter(name="transparentHugepageEnabled")
+ def transparent_hugepage_enabled(self) -> _builtins.str:
+ """
+ The Linux kernel transparent hugepage setting.
+ """
+ return pulumi.get(self, "transparent_hugepage_enabled")
+

  @pulumi.output_type
  class GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigResult(dict):
@@ -22381,12 +24770,23 @@ class GetClusterNodePoolNodeConfigShieldedInstanceConfigResult(dict):
  @pulumi.output_type
  class GetClusterNodePoolNodeConfigSoleTenantConfigResult(dict):
  def __init__(__self__, *,
+ min_node_cpus: _builtins.int,
  node_affinities: Sequence['outputs.GetClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityResult']):
  """
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
  :param Sequence['GetClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
  """
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
  pulumi.set(__self__, "node_affinities", node_affinities)

+ @_builtins.property
+ @pulumi.getter(name="minNodeCpus")
+ def min_node_cpus(self) -> _builtins.int:
+ """
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
+ """
+ return pulumi.get(self, "min_node_cpus")
+
  @_builtins.property
  @pulumi.getter(name="nodeAffinities")
  def node_affinities(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityResult']: