pulumi-gcp 8.40.0a1754721948__py3-none-any.whl → 8.41.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (469) hide show
  1. pulumi_gcp/__init__.py +152 -0
  2. pulumi_gcp/accesscontextmanager/_inputs.py +24 -4
  3. pulumi_gcp/accesscontextmanager/access_policy_iam_binding.py +2 -0
  4. pulumi_gcp/accesscontextmanager/access_policy_iam_member.py +2 -0
  5. pulumi_gcp/accesscontextmanager/access_policy_iam_policy.py +2 -0
  6. pulumi_gcp/accesscontextmanager/outputs.py +15 -3
  7. pulumi_gcp/apigateway/api_config_iam_binding.py +2 -0
  8. pulumi_gcp/apigateway/api_config_iam_member.py +2 -0
  9. pulumi_gcp/apigateway/api_config_iam_policy.py +2 -0
  10. pulumi_gcp/apigateway/api_iam_binding.py +2 -0
  11. pulumi_gcp/apigateway/api_iam_member.py +2 -0
  12. pulumi_gcp/apigateway/api_iam_policy.py +2 -0
  13. pulumi_gcp/apigateway/gateway_iam_binding.py +2 -0
  14. pulumi_gcp/apigateway/gateway_iam_member.py +2 -0
  15. pulumi_gcp/apigateway/gateway_iam_policy.py +2 -0
  16. pulumi_gcp/apigee/__init__.py +2 -0
  17. pulumi_gcp/apigee/_inputs.py +1435 -0
  18. pulumi_gcp/apigee/api_product.py +1698 -0
  19. pulumi_gcp/apigee/environment_iam_binding.py +2 -0
  20. pulumi_gcp/apigee/environment_iam_member.py +2 -0
  21. pulumi_gcp/apigee/environment_iam_policy.py +2 -0
  22. pulumi_gcp/apigee/outputs.py +1081 -0
  23. pulumi_gcp/apigee/security_action.py +1010 -0
  24. pulumi_gcp/artifactregistry/__init__.py +6 -0
  25. pulumi_gcp/artifactregistry/get_docker_images.py +164 -0
  26. pulumi_gcp/artifactregistry/get_package.py +220 -0
  27. pulumi_gcp/artifactregistry/get_repositories.py +160 -0
  28. pulumi_gcp/artifactregistry/get_tag.py +187 -0
  29. pulumi_gcp/artifactregistry/get_tags.py +200 -0
  30. pulumi_gcp/artifactregistry/get_version.py +261 -0
  31. pulumi_gcp/artifactregistry/outputs.py +239 -2
  32. pulumi_gcp/artifactregistry/repository.py +6 -6
  33. pulumi_gcp/artifactregistry/repository_iam_binding.py +2 -0
  34. pulumi_gcp/artifactregistry/repository_iam_member.py +2 -0
  35. pulumi_gcp/artifactregistry/repository_iam_policy.py +2 -0
  36. pulumi_gcp/backupdisasterrecovery/backup_plan.py +114 -7
  37. pulumi_gcp/backupdisasterrecovery/backup_vault.py +56 -0
  38. pulumi_gcp/backupdisasterrecovery/get_backup_plan.py +12 -1
  39. pulumi_gcp/backupdisasterrecovery/get_backup_vault.py +12 -1
  40. pulumi_gcp/beyondcorp/application_iam_binding.py +8 -0
  41. pulumi_gcp/beyondcorp/application_iam_member.py +8 -0
  42. pulumi_gcp/beyondcorp/application_iam_policy.py +8 -0
  43. pulumi_gcp/beyondcorp/get_application_iam_policy.py +4 -0
  44. pulumi_gcp/beyondcorp/security_gateway_application_iam_binding.py +2 -0
  45. pulumi_gcp/beyondcorp/security_gateway_application_iam_member.py +2 -0
  46. pulumi_gcp/beyondcorp/security_gateway_application_iam_policy.py +2 -0
  47. pulumi_gcp/beyondcorp/security_gateway_iam_binding.py +2 -0
  48. pulumi_gcp/beyondcorp/security_gateway_iam_member.py +2 -0
  49. pulumi_gcp/beyondcorp/security_gateway_iam_policy.py +2 -0
  50. pulumi_gcp/bigquery/_inputs.py +6 -0
  51. pulumi_gcp/bigquery/connection_iam_binding.py +2 -0
  52. pulumi_gcp/bigquery/connection_iam_member.py +2 -0
  53. pulumi_gcp/bigquery/connection_iam_policy.py +2 -0
  54. pulumi_gcp/bigquery/data_transfer_config.py +2 -0
  55. pulumi_gcp/bigquery/dataset.py +2 -2
  56. pulumi_gcp/bigquery/get_table.py +23 -1
  57. pulumi_gcp/bigquery/iam_binding.py +2 -0
  58. pulumi_gcp/bigquery/iam_member.py +2 -0
  59. pulumi_gcp/bigquery/iam_policy.py +2 -0
  60. pulumi_gcp/bigquery/outputs.py +4 -0
  61. pulumi_gcp/bigquery/reservation.py +535 -0
  62. pulumi_gcp/bigquery/table.py +62 -0
  63. pulumi_gcp/bigqueryanalyticshub/_inputs.py +180 -0
  64. pulumi_gcp/bigqueryanalyticshub/data_exchange.py +80 -0
  65. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_binding.py +2 -0
  66. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_member.py +2 -0
  67. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_policy.py +2 -0
  68. pulumi_gcp/bigqueryanalyticshub/listing.py +322 -2
  69. pulumi_gcp/bigqueryanalyticshub/listing_iam_binding.py +2 -0
  70. pulumi_gcp/bigqueryanalyticshub/listing_iam_member.py +2 -0
  71. pulumi_gcp/bigqueryanalyticshub/listing_iam_policy.py +2 -0
  72. pulumi_gcp/bigqueryanalyticshub/listing_subscription.py +32 -0
  73. pulumi_gcp/bigqueryanalyticshub/outputs.py +159 -0
  74. pulumi_gcp/bigquerydatapolicy/data_policy_iam_binding.py +2 -0
  75. pulumi_gcp/bigquerydatapolicy/data_policy_iam_member.py +2 -0
  76. pulumi_gcp/bigquerydatapolicy/data_policy_iam_policy.py +2 -0
  77. pulumi_gcp/bigtable/__init__.py +1 -0
  78. pulumi_gcp/bigtable/_inputs.py +33 -0
  79. pulumi_gcp/bigtable/outputs.py +36 -0
  80. pulumi_gcp/bigtable/schema_bundle.py +568 -0
  81. pulumi_gcp/binaryauthorization/attestor_iam_binding.py +2 -0
  82. pulumi_gcp/binaryauthorization/attestor_iam_member.py +2 -0
  83. pulumi_gcp/binaryauthorization/attestor_iam_policy.py +2 -0
  84. pulumi_gcp/certificateauthority/ca_pool_iam_binding.py +2 -0
  85. pulumi_gcp/certificateauthority/ca_pool_iam_member.py +2 -0
  86. pulumi_gcp/certificateauthority/ca_pool_iam_policy.py +2 -0
  87. pulumi_gcp/certificateauthority/certificate_template_iam_binding.py +2 -0
  88. pulumi_gcp/certificateauthority/certificate_template_iam_member.py +2 -0
  89. pulumi_gcp/certificateauthority/certificate_template_iam_policy.py +2 -0
  90. pulumi_gcp/cloudbuildv2/connection_iam_binding.py +2 -0
  91. pulumi_gcp/cloudbuildv2/connection_iam_member.py +2 -0
  92. pulumi_gcp/cloudbuildv2/connection_iam_policy.py +2 -0
  93. pulumi_gcp/clouddeploy/_inputs.py +48 -48
  94. pulumi_gcp/clouddeploy/deploy_policy.py +54 -74
  95. pulumi_gcp/clouddeploy/outputs.py +32 -32
  96. pulumi_gcp/cloudfunctions/_inputs.py +48 -0
  97. pulumi_gcp/cloudfunctions/function.py +94 -0
  98. pulumi_gcp/cloudfunctions/function_iam_binding.py +2 -0
  99. pulumi_gcp/cloudfunctions/function_iam_member.py +2 -0
  100. pulumi_gcp/cloudfunctions/function_iam_policy.py +2 -0
  101. pulumi_gcp/cloudfunctions/get_function.py +23 -1
  102. pulumi_gcp/cloudfunctions/outputs.py +70 -0
  103. pulumi_gcp/cloudfunctionsv2/function_iam_binding.py +2 -0
  104. pulumi_gcp/cloudfunctionsv2/function_iam_member.py +2 -0
  105. pulumi_gcp/cloudfunctionsv2/function_iam_policy.py +2 -0
  106. pulumi_gcp/cloudrun/iam_binding.py +2 -0
  107. pulumi_gcp/cloudrun/iam_member.py +2 -0
  108. pulumi_gcp/cloudrun/iam_policy.py +2 -0
  109. pulumi_gcp/cloudrunv2/_inputs.py +20 -0
  110. pulumi_gcp/cloudrunv2/job.py +2 -0
  111. pulumi_gcp/cloudrunv2/job_iam_binding.py +2 -0
  112. pulumi_gcp/cloudrunv2/job_iam_member.py +2 -0
  113. pulumi_gcp/cloudrunv2/job_iam_policy.py +2 -0
  114. pulumi_gcp/cloudrunv2/outputs.py +25 -0
  115. pulumi_gcp/cloudrunv2/service_iam_binding.py +2 -0
  116. pulumi_gcp/cloudrunv2/service_iam_member.py +2 -0
  117. pulumi_gcp/cloudrunv2/service_iam_policy.py +2 -0
  118. pulumi_gcp/cloudrunv2/worker_pool.py +2 -0
  119. pulumi_gcp/cloudrunv2/worker_pool_iam_binding.py +2 -0
  120. pulumi_gcp/cloudrunv2/worker_pool_iam_member.py +2 -0
  121. pulumi_gcp/cloudrunv2/worker_pool_iam_policy.py +2 -0
  122. pulumi_gcp/cloudtasks/queue_iam_binding.py +2 -0
  123. pulumi_gcp/cloudtasks/queue_iam_member.py +2 -0
  124. pulumi_gcp/cloudtasks/queue_iam_policy.py +2 -0
  125. pulumi_gcp/colab/runtime_template_iam_binding.py +2 -0
  126. pulumi_gcp/colab/runtime_template_iam_member.py +2 -0
  127. pulumi_gcp/colab/runtime_template_iam_policy.py +2 -0
  128. pulumi_gcp/composer/user_workloads_config_map.py +26 -2
  129. pulumi_gcp/compute/__init__.py +1 -0
  130. pulumi_gcp/compute/_inputs.py +1068 -22
  131. pulumi_gcp/compute/disk_iam_binding.py +2 -0
  132. pulumi_gcp/compute/disk_iam_member.py +2 -0
  133. pulumi_gcp/compute/disk_iam_policy.py +2 -0
  134. pulumi_gcp/compute/firewall_policy_with_rules.py +66 -0
  135. pulumi_gcp/compute/forwarding_rule.py +0 -21
  136. pulumi_gcp/compute/get_region_backend_service.py +12 -1
  137. pulumi_gcp/compute/get_router.py +12 -1
  138. pulumi_gcp/compute/image_iam_binding.py +2 -0
  139. pulumi_gcp/compute/image_iam_member.py +2 -0
  140. pulumi_gcp/compute/image_iam_policy.py +2 -0
  141. pulumi_gcp/compute/instance_iam_binding.py +2 -0
  142. pulumi_gcp/compute/instance_iam_member.py +2 -0
  143. pulumi_gcp/compute/instance_iam_policy.py +2 -0
  144. pulumi_gcp/compute/instance_template_iam_binding.py +2 -0
  145. pulumi_gcp/compute/instance_template_iam_member.py +2 -0
  146. pulumi_gcp/compute/instance_template_iam_policy.py +2 -0
  147. pulumi_gcp/compute/instant_snapshot_iam_binding.py +2 -0
  148. pulumi_gcp/compute/instant_snapshot_iam_member.py +2 -0
  149. pulumi_gcp/compute/instant_snapshot_iam_policy.py +2 -0
  150. pulumi_gcp/compute/machine_image_iam_binding.py +2 -0
  151. pulumi_gcp/compute/machine_image_iam_member.py +2 -0
  152. pulumi_gcp/compute/machine_image_iam_policy.py +2 -0
  153. pulumi_gcp/compute/outputs.py +966 -22
  154. pulumi_gcp/compute/preview_feature.py +396 -0
  155. pulumi_gcp/compute/region_backend_service.py +257 -0
  156. pulumi_gcp/compute/region_disk_iam_binding.py +2 -0
  157. pulumi_gcp/compute/region_disk_iam_member.py +2 -0
  158. pulumi_gcp/compute/region_disk_iam_policy.py +2 -0
  159. pulumi_gcp/compute/region_security_policy.py +54 -0
  160. pulumi_gcp/compute/region_url_map.py +392 -0
  161. pulumi_gcp/compute/reservation.py +4 -4
  162. pulumi_gcp/compute/router.py +54 -0
  163. pulumi_gcp/compute/service_attachment.py +126 -0
  164. pulumi_gcp/compute/snapshot_iam_binding.py +2 -0
  165. pulumi_gcp/compute/snapshot_iam_member.py +2 -0
  166. pulumi_gcp/compute/snapshot_iam_policy.py +2 -0
  167. pulumi_gcp/compute/storage_pool.py +154 -0
  168. pulumi_gcp/compute/storage_pool_iam_binding.py +2 -0
  169. pulumi_gcp/compute/storage_pool_iam_member.py +2 -0
  170. pulumi_gcp/compute/storage_pool_iam_policy.py +2 -0
  171. pulumi_gcp/compute/subnetwork.py +54 -0
  172. pulumi_gcp/compute/subnetwork_iam_binding.py +2 -0
  173. pulumi_gcp/compute/subnetwork_iam_member.py +2 -0
  174. pulumi_gcp/compute/subnetwork_iam_policy.py +2 -0
  175. pulumi_gcp/config/__init__.pyi +2 -4
  176. pulumi_gcp/config/vars.py +4 -8
  177. pulumi_gcp/container/_inputs.py +2622 -246
  178. pulumi_gcp/container/cluster.py +61 -21
  179. pulumi_gcp/container/get_cluster.py +12 -1
  180. pulumi_gcp/container/outputs.py +2877 -133
  181. pulumi_gcp/containeranalysis/note_iam_binding.py +2 -0
  182. pulumi_gcp/containeranalysis/note_iam_member.py +2 -0
  183. pulumi_gcp/containeranalysis/note_iam_policy.py +2 -0
  184. pulumi_gcp/datacatalog/entry_group_iam_binding.py +2 -0
  185. pulumi_gcp/datacatalog/entry_group_iam_member.py +2 -0
  186. pulumi_gcp/datacatalog/entry_group_iam_policy.py +2 -0
  187. pulumi_gcp/datacatalog/policy_tag_iam_binding.py +2 -0
  188. pulumi_gcp/datacatalog/policy_tag_iam_member.py +2 -0
  189. pulumi_gcp/datacatalog/policy_tag_iam_policy.py +2 -0
  190. pulumi_gcp/datacatalog/tag_template_iam_binding.py +2 -0
  191. pulumi_gcp/datacatalog/tag_template_iam_member.py +2 -0
  192. pulumi_gcp/datacatalog/tag_template_iam_policy.py +2 -0
  193. pulumi_gcp/datacatalog/taxonomy_iam_binding.py +2 -0
  194. pulumi_gcp/datacatalog/taxonomy_iam_member.py +2 -0
  195. pulumi_gcp/datacatalog/taxonomy_iam_policy.py +2 -0
  196. pulumi_gcp/datafusion/instance.py +18 -4
  197. pulumi_gcp/dataplex/aspect_type_iam_binding.py +2 -0
  198. pulumi_gcp/dataplex/aspect_type_iam_member.py +2 -0
  199. pulumi_gcp/dataplex/aspect_type_iam_policy.py +2 -0
  200. pulumi_gcp/dataplex/asset_iam_binding.py +2 -0
  201. pulumi_gcp/dataplex/asset_iam_member.py +2 -0
  202. pulumi_gcp/dataplex/asset_iam_policy.py +2 -0
  203. pulumi_gcp/dataplex/datascan_iam_binding.py +2 -0
  204. pulumi_gcp/dataplex/datascan_iam_member.py +2 -0
  205. pulumi_gcp/dataplex/datascan_iam_policy.py +2 -0
  206. pulumi_gcp/dataplex/entry_group_iam_binding.py +2 -0
  207. pulumi_gcp/dataplex/entry_group_iam_member.py +2 -0
  208. pulumi_gcp/dataplex/entry_group_iam_policy.py +2 -0
  209. pulumi_gcp/dataplex/entry_type_iam_binding.py +2 -0
  210. pulumi_gcp/dataplex/entry_type_iam_member.py +2 -0
  211. pulumi_gcp/dataplex/entry_type_iam_policy.py +2 -0
  212. pulumi_gcp/dataplex/glossary_iam_binding.py +2 -0
  213. pulumi_gcp/dataplex/glossary_iam_member.py +2 -0
  214. pulumi_gcp/dataplex/glossary_iam_policy.py +2 -0
  215. pulumi_gcp/dataplex/lake_iam_binding.py +2 -0
  216. pulumi_gcp/dataplex/lake_iam_member.py +2 -0
  217. pulumi_gcp/dataplex/lake_iam_policy.py +2 -0
  218. pulumi_gcp/dataplex/task_iam_binding.py +2 -0
  219. pulumi_gcp/dataplex/task_iam_member.py +2 -0
  220. pulumi_gcp/dataplex/task_iam_policy.py +2 -0
  221. pulumi_gcp/dataplex/zone_iam_binding.py +2 -0
  222. pulumi_gcp/dataplex/zone_iam_member.py +2 -0
  223. pulumi_gcp/dataplex/zone_iam_policy.py +2 -0
  224. pulumi_gcp/dataproc/_inputs.py +249 -14
  225. pulumi_gcp/dataproc/autoscaling_policy_iam_binding.py +2 -0
  226. pulumi_gcp/dataproc/autoscaling_policy_iam_member.py +2 -0
  227. pulumi_gcp/dataproc/autoscaling_policy_iam_policy.py +2 -0
  228. pulumi_gcp/dataproc/batch.py +6 -0
  229. pulumi_gcp/dataproc/cluster.py +2 -0
  230. pulumi_gcp/dataproc/metastore_database_iam_binding.py +2 -0
  231. pulumi_gcp/dataproc/metastore_database_iam_member.py +2 -0
  232. pulumi_gcp/dataproc/metastore_database_iam_policy.py +2 -0
  233. pulumi_gcp/dataproc/metastore_federation_iam_binding.py +2 -0
  234. pulumi_gcp/dataproc/metastore_federation_iam_member.py +2 -0
  235. pulumi_gcp/dataproc/metastore_federation_iam_policy.py +2 -0
  236. pulumi_gcp/dataproc/metastore_service_iam_binding.py +2 -0
  237. pulumi_gcp/dataproc/metastore_service_iam_member.py +2 -0
  238. pulumi_gcp/dataproc/metastore_service_iam_policy.py +2 -0
  239. pulumi_gcp/dataproc/metastore_table_iam_binding.py +2 -0
  240. pulumi_gcp/dataproc/metastore_table_iam_member.py +2 -0
  241. pulumi_gcp/dataproc/metastore_table_iam_policy.py +2 -0
  242. pulumi_gcp/dataproc/outputs.py +215 -12
  243. pulumi_gcp/dataproc/session_template.py +14 -2
  244. pulumi_gcp/developerconnect/__init__.py +1 -0
  245. pulumi_gcp/developerconnect/_inputs.py +583 -0
  246. pulumi_gcp/developerconnect/insights_config.py +895 -0
  247. pulumi_gcp/developerconnect/outputs.py +442 -0
  248. pulumi_gcp/diagflow/__init__.py +3 -0
  249. pulumi_gcp/diagflow/_inputs.py +11899 -7963
  250. pulumi_gcp/diagflow/conversation_profile.py +959 -0
  251. pulumi_gcp/diagflow/cx_generator.py +636 -0
  252. pulumi_gcp/diagflow/cx_playbook.py +967 -0
  253. pulumi_gcp/diagflow/cx_tool.py +2 -2
  254. pulumi_gcp/diagflow/cx_webhook.py +380 -36
  255. pulumi_gcp/diagflow/outputs.py +9099 -5946
  256. pulumi_gcp/discoveryengine/__init__.py +2 -0
  257. pulumi_gcp/discoveryengine/_inputs.py +465 -0
  258. pulumi_gcp/discoveryengine/cmek_config.py +707 -0
  259. pulumi_gcp/discoveryengine/outputs.py +412 -0
  260. pulumi_gcp/discoveryengine/recommendation_engine.py +813 -0
  261. pulumi_gcp/dns/dns_managed_zone_iam_binding.py +2 -0
  262. pulumi_gcp/dns/dns_managed_zone_iam_member.py +2 -0
  263. pulumi_gcp/dns/dns_managed_zone_iam_policy.py +2 -0
  264. pulumi_gcp/endpoints/service_iam_binding.py +2 -0
  265. pulumi_gcp/endpoints/service_iam_member.py +2 -0
  266. pulumi_gcp/endpoints/service_iam_policy.py +2 -0
  267. pulumi_gcp/firestore/field.py +6 -6
  268. pulumi_gcp/gemini/gemini_gcp_enablement_setting.py +107 -9
  269. pulumi_gcp/gemini/gemini_gcp_enablement_setting_binding.py +2 -2
  270. pulumi_gcp/gemini/repository_group_iam_binding.py +2 -0
  271. pulumi_gcp/gemini/repository_group_iam_member.py +2 -0
  272. pulumi_gcp/gemini/repository_group_iam_policy.py +2 -0
  273. pulumi_gcp/gkebackup/backup_plan_iam_binding.py +2 -0
  274. pulumi_gcp/gkebackup/backup_plan_iam_member.py +2 -0
  275. pulumi_gcp/gkebackup/backup_plan_iam_policy.py +2 -0
  276. pulumi_gcp/gkebackup/restore_plan_iam_binding.py +2 -0
  277. pulumi_gcp/gkebackup/restore_plan_iam_member.py +2 -0
  278. pulumi_gcp/gkebackup/restore_plan_iam_policy.py +2 -0
  279. pulumi_gcp/gkehub/feature_iam_binding.py +2 -0
  280. pulumi_gcp/gkehub/feature_iam_member.py +2 -0
  281. pulumi_gcp/gkehub/feature_iam_policy.py +2 -0
  282. pulumi_gcp/gkehub/membership_binding.py +6 -6
  283. pulumi_gcp/gkehub/membership_iam_binding.py +2 -0
  284. pulumi_gcp/gkehub/membership_iam_member.py +2 -0
  285. pulumi_gcp/gkehub/membership_iam_policy.py +2 -0
  286. pulumi_gcp/gkehub/membership_rbac_role_binding.py +4 -4
  287. pulumi_gcp/gkehub/namespace.py +4 -4
  288. pulumi_gcp/gkehub/scope_iam_binding.py +2 -0
  289. pulumi_gcp/gkehub/scope_iam_member.py +2 -0
  290. pulumi_gcp/gkehub/scope_iam_policy.py +2 -0
  291. pulumi_gcp/gkehub/scope_rbac_role_binding.py +8 -8
  292. pulumi_gcp/gkeonprem/vmware_admin_cluster.py +24 -3
  293. pulumi_gcp/healthcare/consent_store_iam_binding.py +2 -0
  294. pulumi_gcp/healthcare/consent_store_iam_member.py +2 -0
  295. pulumi_gcp/healthcare/consent_store_iam_policy.py +2 -0
  296. pulumi_gcp/iam/__init__.py +4 -0
  297. pulumi_gcp/iam/_inputs.py +98 -0
  298. pulumi_gcp/iam/get_workforce_pool_iam_policy.py +161 -0
  299. pulumi_gcp/iam/outputs.py +56 -0
  300. pulumi_gcp/iam/workforce_pool_iam_binding.py +763 -0
  301. pulumi_gcp/iam/workforce_pool_iam_member.py +763 -0
  302. pulumi_gcp/iam/workforce_pool_iam_policy.py +602 -0
  303. pulumi_gcp/iap/app_engine_service_iam_binding.py +2 -0
  304. pulumi_gcp/iap/app_engine_service_iam_member.py +2 -0
  305. pulumi_gcp/iap/app_engine_service_iam_policy.py +2 -0
  306. pulumi_gcp/iap/app_engine_version_iam_binding.py +2 -0
  307. pulumi_gcp/iap/app_engine_version_iam_member.py +2 -0
  308. pulumi_gcp/iap/app_engine_version_iam_policy.py +2 -0
  309. pulumi_gcp/iap/tunnel_dest_group.py +2 -2
  310. pulumi_gcp/iap/tunnel_dest_group_iam_binding.py +2 -0
  311. pulumi_gcp/iap/tunnel_dest_group_iam_member.py +2 -0
  312. pulumi_gcp/iap/tunnel_dest_group_iam_policy.py +2 -0
  313. pulumi_gcp/iap/tunnel_iam_binding.py +2 -0
  314. pulumi_gcp/iap/tunnel_iam_member.py +2 -0
  315. pulumi_gcp/iap/tunnel_iam_policy.py +2 -0
  316. pulumi_gcp/iap/tunnel_instance_iam_binding.py +2 -0
  317. pulumi_gcp/iap/tunnel_instance_iam_member.py +2 -0
  318. pulumi_gcp/iap/tunnel_instance_iam_policy.py +2 -0
  319. pulumi_gcp/iap/web_backend_service_iam_binding.py +2 -0
  320. pulumi_gcp/iap/web_backend_service_iam_member.py +2 -0
  321. pulumi_gcp/iap/web_backend_service_iam_policy.py +2 -0
  322. pulumi_gcp/iap/web_cloud_run_service_iam_binding.py +2 -0
  323. pulumi_gcp/iap/web_cloud_run_service_iam_member.py +2 -0
  324. pulumi_gcp/iap/web_cloud_run_service_iam_policy.py +2 -0
  325. pulumi_gcp/iap/web_iam_binding.py +2 -0
  326. pulumi_gcp/iap/web_iam_member.py +2 -0
  327. pulumi_gcp/iap/web_iam_policy.py +2 -0
  328. pulumi_gcp/iap/web_region_backend_service_iam_binding.py +2 -0
  329. pulumi_gcp/iap/web_region_backend_service_iam_member.py +2 -0
  330. pulumi_gcp/iap/web_region_backend_service_iam_policy.py +2 -0
  331. pulumi_gcp/iap/web_type_app_enging_iam_binding.py +2 -0
  332. pulumi_gcp/iap/web_type_app_enging_iam_member.py +2 -0
  333. pulumi_gcp/iap/web_type_app_enging_iam_policy.py +2 -0
  334. pulumi_gcp/iap/web_type_compute_iam_binding.py +2 -0
  335. pulumi_gcp/iap/web_type_compute_iam_member.py +2 -0
  336. pulumi_gcp/iap/web_type_compute_iam_policy.py +2 -0
  337. pulumi_gcp/integrationconnectors/managed_zone.py +8 -8
  338. pulumi_gcp/kms/crypto_key.py +7 -0
  339. pulumi_gcp/kms/ekm_connection_iam_binding.py +2 -0
  340. pulumi_gcp/kms/ekm_connection_iam_member.py +2 -0
  341. pulumi_gcp/kms/ekm_connection_iam_policy.py +2 -0
  342. pulumi_gcp/kms/outputs.py +2 -0
  343. pulumi_gcp/logging/log_view_iam_binding.py +2 -0
  344. pulumi_gcp/logging/log_view_iam_member.py +2 -0
  345. pulumi_gcp/logging/log_view_iam_policy.py +2 -0
  346. pulumi_gcp/looker/instance.py +28 -7
  347. pulumi_gcp/managedkafka/_inputs.py +127 -0
  348. pulumi_gcp/managedkafka/cluster.py +131 -1
  349. pulumi_gcp/managedkafka/connect_cluster.py +4 -4
  350. pulumi_gcp/managedkafka/connector.py +4 -4
  351. pulumi_gcp/managedkafka/outputs.py +128 -0
  352. pulumi_gcp/memorystore/get_instance.py +12 -1
  353. pulumi_gcp/memorystore/instance.py +78 -12
  354. pulumi_gcp/modelarmor/__init__.py +1 -0
  355. pulumi_gcp/modelarmor/_inputs.py +683 -0
  356. pulumi_gcp/modelarmor/floorsetting.py +736 -0
  357. pulumi_gcp/modelarmor/outputs.py +618 -0
  358. pulumi_gcp/monitoring/_inputs.py +3 -3
  359. pulumi_gcp/monitoring/outputs.py +2 -2
  360. pulumi_gcp/networkconnectivity/_inputs.py +60 -0
  361. pulumi_gcp/networkconnectivity/internal_range.py +136 -0
  362. pulumi_gcp/networkconnectivity/outputs.py +55 -0
  363. pulumi_gcp/networkconnectivity/spoke.py +14 -14
  364. pulumi_gcp/networkmanagement/vpc_flow_logs_config.py +213 -168
  365. pulumi_gcp/notebooks/instance.py +18 -18
  366. pulumi_gcp/notebooks/instance_iam_binding.py +2 -0
  367. pulumi_gcp/notebooks/instance_iam_member.py +2 -0
  368. pulumi_gcp/notebooks/instance_iam_policy.py +2 -0
  369. pulumi_gcp/notebooks/runtime_iam_binding.py +2 -0
  370. pulumi_gcp/notebooks/runtime_iam_member.py +2 -0
  371. pulumi_gcp/notebooks/runtime_iam_policy.py +2 -0
  372. pulumi_gcp/oracledatabase/__init__.py +2 -0
  373. pulumi_gcp/oracledatabase/autonomous_database.py +262 -38
  374. pulumi_gcp/oracledatabase/cloud_vm_cluster.py +314 -50
  375. pulumi_gcp/oracledatabase/get_autonomous_database.py +23 -1
  376. pulumi_gcp/oracledatabase/get_cloud_vm_cluster.py +34 -1
  377. pulumi_gcp/oracledatabase/odb_network.py +721 -0
  378. pulumi_gcp/oracledatabase/odb_subnet.py +803 -0
  379. pulumi_gcp/oracledatabase/outputs.py +83 -0
  380. pulumi_gcp/organizations/folder.py +56 -0
  381. pulumi_gcp/organizations/get_folder.py +29 -1
  382. pulumi_gcp/orgpolicy/policy.py +2 -2
  383. pulumi_gcp/parametermanager/parameter_version.py +62 -0
  384. pulumi_gcp/parametermanager/regional_parameter_version.py +64 -0
  385. pulumi_gcp/projects/api_key.py +88 -1
  386. pulumi_gcp/provider.py +20 -40
  387. pulumi_gcp/pubsub/schema_iam_binding.py +2 -0
  388. pulumi_gcp/pubsub/schema_iam_member.py +2 -0
  389. pulumi_gcp/pubsub/schema_iam_policy.py +2 -0
  390. pulumi_gcp/pubsub/subscription.py +130 -6
  391. pulumi_gcp/pubsub/topic.py +116 -0
  392. pulumi_gcp/pubsub/topic_iam_binding.py +2 -0
  393. pulumi_gcp/pubsub/topic_iam_member.py +2 -0
  394. pulumi_gcp/pubsub/topic_iam_policy.py +2 -0
  395. pulumi_gcp/pulumi-plugin.json +1 -1
  396. pulumi_gcp/redis/cluster.py +70 -0
  397. pulumi_gcp/redis/get_cluster.py +12 -1
  398. pulumi_gcp/redis/instance.py +8 -12
  399. pulumi_gcp/secretmanager/get_regional_secret.py +12 -1
  400. pulumi_gcp/secretmanager/get_secret.py +12 -1
  401. pulumi_gcp/secretmanager/outputs.py +30 -0
  402. pulumi_gcp/secretmanager/regional_secret.py +61 -0
  403. pulumi_gcp/secretmanager/regional_secret_iam_binding.py +2 -0
  404. pulumi_gcp/secretmanager/regional_secret_iam_member.py +2 -0
  405. pulumi_gcp/secretmanager/regional_secret_iam_policy.py +2 -0
  406. pulumi_gcp/secretmanager/secret.py +61 -0
  407. pulumi_gcp/secretmanager/secret_iam_binding.py +2 -0
  408. pulumi_gcp/secretmanager/secret_iam_member.py +2 -0
  409. pulumi_gcp/secretmanager/secret_iam_policy.py +2 -0
  410. pulumi_gcp/secretmanager/secret_version.py +1 -48
  411. pulumi_gcp/securesourcemanager/branch_rule.py +16 -8
  412. pulumi_gcp/securesourcemanager/instance.py +112 -4
  413. pulumi_gcp/securesourcemanager/repository.py +112 -8
  414. pulumi_gcp/securesourcemanager/repository_iam_binding.py +2 -0
  415. pulumi_gcp/securesourcemanager/repository_iam_member.py +2 -0
  416. pulumi_gcp/securesourcemanager/repository_iam_policy.py +2 -0
  417. pulumi_gcp/securitycenter/instance_iam_binding.py +18 -4
  418. pulumi_gcp/securitycenter/instance_iam_member.py +18 -4
  419. pulumi_gcp/securitycenter/instance_iam_policy.py +18 -4
  420. pulumi_gcp/securitycenter/v2_organization_source_iam_binding.py +2 -0
  421. pulumi_gcp/securitycenter/v2_organization_source_iam_member.py +2 -0
  422. pulumi_gcp/securitycenter/v2_organization_source_iam_policy.py +2 -0
  423. pulumi_gcp/serviceaccount/get_account_key.py +1 -0
  424. pulumi_gcp/servicedirectory/namespace_iam_binding.py +2 -0
  425. pulumi_gcp/servicedirectory/namespace_iam_member.py +2 -0
  426. pulumi_gcp/servicedirectory/namespace_iam_policy.py +2 -0
  427. pulumi_gcp/servicedirectory/service_iam_binding.py +2 -0
  428. pulumi_gcp/servicedirectory/service_iam_member.py +2 -0
  429. pulumi_gcp/servicedirectory/service_iam_policy.py +2 -0
  430. pulumi_gcp/sourcerepo/repository_iam_binding.py +2 -0
  431. pulumi_gcp/sourcerepo/repository_iam_member.py +2 -0
  432. pulumi_gcp/sourcerepo/repository_iam_policy.py +2 -0
  433. pulumi_gcp/sql/_inputs.py +88 -10
  434. pulumi_gcp/sql/database.py +0 -12
  435. pulumi_gcp/sql/database_instance.py +108 -7
  436. pulumi_gcp/sql/get_database_instance.py +12 -1
  437. pulumi_gcp/sql/outputs.py +158 -11
  438. pulumi_gcp/storage/__init__.py +2 -0
  439. pulumi_gcp/storage/_inputs.py +555 -12
  440. pulumi_gcp/storage/bucket.py +7 -7
  441. pulumi_gcp/storage/bucket_object.py +34 -0
  442. pulumi_gcp/storage/get_bucket_object.py +12 -1
  443. pulumi_gcp/storage/get_bucket_object_content.py +12 -1
  444. pulumi_gcp/storage/get_insights_dataset_config.py +363 -0
  445. pulumi_gcp/storage/insights_dataset_config.py +1280 -0
  446. pulumi_gcp/storage/outputs.py +703 -7
  447. pulumi_gcp/tags/tag_key_iam_binding.py +2 -0
  448. pulumi_gcp/tags/tag_key_iam_member.py +2 -0
  449. pulumi_gcp/tags/tag_key_iam_policy.py +2 -0
  450. pulumi_gcp/tags/tag_value_iam_binding.py +2 -0
  451. pulumi_gcp/tags/tag_value_iam_member.py +2 -0
  452. pulumi_gcp/tags/tag_value_iam_policy.py +2 -0
  453. pulumi_gcp/tpu/get_tensorflow_versions.py +10 -0
  454. pulumi_gcp/vertex/__init__.py +2 -0
  455. pulumi_gcp/vertex/_inputs.py +3768 -3
  456. pulumi_gcp/vertex/ai_endpoint.py +4 -4
  457. pulumi_gcp/vertex/ai_endpoint_with_model_garden_deployment.py +940 -0
  458. pulumi_gcp/vertex/ai_feature_online_store_featureview.py +4 -4
  459. pulumi_gcp/vertex/ai_index.py +21 -7
  460. pulumi_gcp/vertex/ai_rag_engine_config.py +354 -0
  461. pulumi_gcp/vertex/outputs.py +2678 -2
  462. pulumi_gcp/vmwareengine/network_peering.py +7 -7
  463. pulumi_gcp/workbench/_inputs.py +118 -0
  464. pulumi_gcp/workbench/instance.py +171 -2
  465. pulumi_gcp/workbench/outputs.py +91 -0
  466. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.41.0.dist-info}/METADATA +1 -1
  467. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.41.0.dist-info}/RECORD +469 -442
  468. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.41.0.dist-info}/WHEEL +0 -0
  469. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.41.0.dist-info}/top_level.txt +0 -0
@@ -177,6 +177,8 @@ __all__ = [
177
177
  'ClusterAddonsConfigIstioConfigArgsDict',
178
178
  'ClusterAddonsConfigKalmConfigArgs',
179
179
  'ClusterAddonsConfigKalmConfigArgsDict',
180
+ 'ClusterAddonsConfigLustreCsiDriverConfigArgs',
181
+ 'ClusterAddonsConfigLustreCsiDriverConfigArgsDict',
180
182
  'ClusterAddonsConfigNetworkPolicyConfigArgs',
181
183
  'ClusterAddonsConfigNetworkPolicyConfigArgsDict',
182
184
  'ClusterAddonsConfigParallelstoreCsiDriverConfigArgs',
@@ -245,6 +247,8 @@ __all__ = [
245
247
  'ClusterIdentityServiceConfigArgsDict',
246
248
  'ClusterIpAllocationPolicyArgs',
247
249
  'ClusterIpAllocationPolicyArgsDict',
250
+ 'ClusterIpAllocationPolicyAdditionalIpRangesConfigArgs',
251
+ 'ClusterIpAllocationPolicyAdditionalIpRangesConfigArgsDict',
248
252
  'ClusterIpAllocationPolicyAdditionalPodRangesConfigArgs',
249
253
  'ClusterIpAllocationPolicyAdditionalPodRangesConfigArgsDict',
250
254
  'ClusterIpAllocationPolicyPodCidrOverprovisionConfigArgs',
@@ -287,6 +291,8 @@ __all__ = [
287
291
  'ClusterNodeConfigArgsDict',
288
292
  'ClusterNodeConfigAdvancedMachineFeaturesArgs',
289
293
  'ClusterNodeConfigAdvancedMachineFeaturesArgsDict',
294
+ 'ClusterNodeConfigBootDiskArgs',
295
+ 'ClusterNodeConfigBootDiskArgsDict',
290
296
  'ClusterNodeConfigConfidentialNodesArgs',
291
297
  'ClusterNodeConfigConfidentialNodesArgsDict',
292
298
  'ClusterNodeConfigContainerdConfigArgs',
@@ -319,6 +325,12 @@ __all__ = [
319
325
  'ClusterNodeConfigHostMaintenancePolicyArgsDict',
320
326
  'ClusterNodeConfigKubeletConfigArgs',
321
327
  'ClusterNodeConfigKubeletConfigArgsDict',
328
+ 'ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs',
329
+ 'ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict',
330
+ 'ClusterNodeConfigKubeletConfigEvictionSoftArgs',
331
+ 'ClusterNodeConfigKubeletConfigEvictionSoftArgsDict',
332
+ 'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs',
333
+ 'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict',
322
334
  'ClusterNodeConfigLinuxNodeConfigArgs',
323
335
  'ClusterNodeConfigLinuxNodeConfigArgsDict',
324
336
  'ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs',
@@ -385,6 +397,8 @@ __all__ = [
385
397
  'ClusterNodePoolNodeConfigArgsDict',
386
398
  'ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs',
387
399
  'ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgsDict',
400
+ 'ClusterNodePoolNodeConfigBootDiskArgs',
401
+ 'ClusterNodePoolNodeConfigBootDiskArgsDict',
388
402
  'ClusterNodePoolNodeConfigConfidentialNodesArgs',
389
403
  'ClusterNodePoolNodeConfigConfidentialNodesArgsDict',
390
404
  'ClusterNodePoolNodeConfigContainerdConfigArgs',
@@ -417,6 +431,12 @@ __all__ = [
417
431
  'ClusterNodePoolNodeConfigHostMaintenancePolicyArgsDict',
418
432
  'ClusterNodePoolNodeConfigKubeletConfigArgs',
419
433
  'ClusterNodePoolNodeConfigKubeletConfigArgsDict',
434
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs',
435
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict',
436
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs',
437
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgsDict',
438
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs',
439
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict',
420
440
  'ClusterNodePoolNodeConfigLinuxNodeConfigArgs',
421
441
  'ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict',
422
442
  'ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs',
@@ -469,6 +489,8 @@ __all__ = [
469
489
  'ClusterProtectConfigArgsDict',
470
490
  'ClusterProtectConfigWorkloadConfigArgs',
471
491
  'ClusterProtectConfigWorkloadConfigArgsDict',
492
+ 'ClusterRbacBindingConfigArgs',
493
+ 'ClusterRbacBindingConfigArgsDict',
472
494
  'ClusterReleaseChannelArgs',
473
495
  'ClusterReleaseChannelArgsDict',
474
496
  'ClusterResourceUsageExportConfigArgs',
@@ -511,6 +533,8 @@ __all__ = [
511
533
  'NodePoolNodeConfigArgsDict',
512
534
  'NodePoolNodeConfigAdvancedMachineFeaturesArgs',
513
535
  'NodePoolNodeConfigAdvancedMachineFeaturesArgsDict',
536
+ 'NodePoolNodeConfigBootDiskArgs',
537
+ 'NodePoolNodeConfigBootDiskArgsDict',
514
538
  'NodePoolNodeConfigConfidentialNodesArgs',
515
539
  'NodePoolNodeConfigConfidentialNodesArgsDict',
516
540
  'NodePoolNodeConfigContainerdConfigArgs',
@@ -543,6 +567,12 @@ __all__ = [
543
567
  'NodePoolNodeConfigHostMaintenancePolicyArgsDict',
544
568
  'NodePoolNodeConfigKubeletConfigArgs',
545
569
  'NodePoolNodeConfigKubeletConfigArgsDict',
570
+ 'NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs',
571
+ 'NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict',
572
+ 'NodePoolNodeConfigKubeletConfigEvictionSoftArgs',
573
+ 'NodePoolNodeConfigKubeletConfigEvictionSoftArgsDict',
574
+ 'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs',
575
+ 'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict',
546
576
  'NodePoolNodeConfigLinuxNodeConfigArgs',
547
577
  'NodePoolNodeConfigLinuxNodeConfigArgsDict',
548
578
  'NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs',
@@ -4636,6 +4666,19 @@ if not MYPY:
4636
4666
  .
4637
4667
  Configuration for the KALM addon, which manages the lifecycle of k8s. It is disabled by default; Set `enabled = true` to enable.
4638
4668
  """
4669
+ lustre_csi_driver_config: NotRequired[pulumi.Input['ClusterAddonsConfigLustreCsiDriverConfigArgsDict']]
4670
+ """
4671
+ The status of the Lustre CSI driver addon,
4672
+ which allows the usage of a Lustre instances as volumes.
4673
+ It is disabled by default for Standard clusters; set `enabled = true` to enable.
4674
+ It is disabled by default for Autopilot clusters; set `enabled = true` to enable.
4675
+ Lustre CSI Driver Config has optional subfield
4676
+ `enable_legacy_lustre_port` which allows the Lustre CSI driver to initialize LNet (the virtual networklayer for Lustre kernel module) using port 6988.
4677
+ This flag is required to workaround a port conflict with the gke-metadata-server on GKE nodes.
4678
+ See [Enable Lustre CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/lustre-csi-driver-new-volume) for more information.
4679
+
4680
+ This example `addons_config` disables two addons:
4681
+ """
4639
4682
  network_policy_config: NotRequired[pulumi.Input['ClusterAddonsConfigNetworkPolicyConfigArgsDict']]
4640
4683
  """
4641
4684
  Whether we should enable the network policy addon
@@ -4652,8 +4695,6 @@ if not MYPY:
4652
4695
  It is disabled by default for Standard clusters; set `enabled = true` to enable.
4653
4696
  It is enabled by default for Autopilot clusters with version 1.29 or later; set `enabled = true` to enable it explicitly.
4654
4697
  See [Enable the Parallelstore CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/parallelstore-csi-new-volume#enable) for more information.
4655
-
4656
- This example `addons_config` disables two addons:
4657
4698
  """
4658
4699
  ray_operator_configs: NotRequired[pulumi.Input[Sequence[pulumi.Input['ClusterAddonsConfigRayOperatorConfigArgsDict']]]]
4659
4700
  """
@@ -4693,6 +4734,7 @@ class ClusterAddonsConfigArgs:
4693
4734
  http_load_balancing: Optional[pulumi.Input['ClusterAddonsConfigHttpLoadBalancingArgs']] = None,
4694
4735
  istio_config: Optional[pulumi.Input['ClusterAddonsConfigIstioConfigArgs']] = None,
4695
4736
  kalm_config: Optional[pulumi.Input['ClusterAddonsConfigKalmConfigArgs']] = None,
4737
+ lustre_csi_driver_config: Optional[pulumi.Input['ClusterAddonsConfigLustreCsiDriverConfigArgs']] = None,
4696
4738
  network_policy_config: Optional[pulumi.Input['ClusterAddonsConfigNetworkPolicyConfigArgs']] = None,
4697
4739
  parallelstore_csi_driver_config: Optional[pulumi.Input['ClusterAddonsConfigParallelstoreCsiDriverConfigArgs']] = None,
4698
4740
  ray_operator_configs: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterAddonsConfigRayOperatorConfigArgs']]]] = None,
@@ -4733,6 +4775,16 @@ class ClusterAddonsConfigArgs:
4733
4775
  Structure is documented below.
4734
4776
  :param pulumi.Input['ClusterAddonsConfigKalmConfigArgs'] kalm_config: .
4735
4777
  Configuration for the KALM addon, which manages the lifecycle of k8s. It is disabled by default; Set `enabled = true` to enable.
4778
+ :param pulumi.Input['ClusterAddonsConfigLustreCsiDriverConfigArgs'] lustre_csi_driver_config: The status of the Lustre CSI driver addon,
4779
+ which allows the usage of a Lustre instances as volumes.
4780
+ It is disabled by default for Standard clusters; set `enabled = true` to enable.
4781
+ It is disabled by default for Autopilot clusters; set `enabled = true` to enable.
4782
+ Lustre CSI Driver Config has optional subfield
4783
+ `enable_legacy_lustre_port` which allows the Lustre CSI driver to initialize LNet (the virtual networklayer for Lustre kernel module) using port 6988.
4784
+ This flag is required to workaround a port conflict with the gke-metadata-server on GKE nodes.
4785
+ See [Enable Lustre CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/lustre-csi-driver-new-volume) for more information.
4786
+
4787
+ This example `addons_config` disables two addons:
4736
4788
  :param pulumi.Input['ClusterAddonsConfigNetworkPolicyConfigArgs'] network_policy_config: Whether we should enable the network policy addon
4737
4789
  for the master. This must be enabled in order to enable network policy for the nodes.
4738
4790
  To enable this, you must also define a `network_policy` block,
@@ -4744,8 +4796,6 @@ class ClusterAddonsConfigArgs:
4744
4796
  It is disabled by default for Standard clusters; set `enabled = true` to enable.
4745
4797
  It is enabled by default for Autopilot clusters with version 1.29 or later; set `enabled = true` to enable it explicitly.
4746
4798
  See [Enable the Parallelstore CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/parallelstore-csi-new-volume#enable) for more information.
4747
-
4748
- This example `addons_config` disables two addons:
4749
4799
  :param pulumi.Input[Sequence[pulumi.Input['ClusterAddonsConfigRayOperatorConfigArgs']]] ray_operator_configs: . The status of the [Ray Operator
4750
4800
  addon](https://cloud.google.com/kubernetes-engine/docs/add-on/ray-on-gke/concepts/overview).
4751
4801
  It is disabled by default. Set `enabled = true` to enable. The minimum
@@ -4784,6 +4834,8 @@ class ClusterAddonsConfigArgs:
4784
4834
  pulumi.set(__self__, "istio_config", istio_config)
4785
4835
  if kalm_config is not None:
4786
4836
  pulumi.set(__self__, "kalm_config", kalm_config)
4837
+ if lustre_csi_driver_config is not None:
4838
+ pulumi.set(__self__, "lustre_csi_driver_config", lustre_csi_driver_config)
4787
4839
  if network_policy_config is not None:
4788
4840
  pulumi.set(__self__, "network_policy_config", network_policy_config)
4789
4841
  if parallelstore_csi_driver_config is not None:
@@ -4949,6 +5001,27 @@ class ClusterAddonsConfigArgs:
4949
5001
  def kalm_config(self, value: Optional[pulumi.Input['ClusterAddonsConfigKalmConfigArgs']]):
4950
5002
  pulumi.set(self, "kalm_config", value)
4951
5003
 
5004
+ @_builtins.property
5005
+ @pulumi.getter(name="lustreCsiDriverConfig")
5006
+ def lustre_csi_driver_config(self) -> Optional[pulumi.Input['ClusterAddonsConfigLustreCsiDriverConfigArgs']]:
5007
+ """
5008
+ The status of the Lustre CSI driver addon,
5009
+ which allows the usage of a Lustre instances as volumes.
5010
+ It is disabled by default for Standard clusters; set `enabled = true` to enable.
5011
+ It is disabled by default for Autopilot clusters; set `enabled = true` to enable.
5012
+ Lustre CSI Driver Config has optional subfield
5013
+ `enable_legacy_lustre_port` which allows the Lustre CSI driver to initialize LNet (the virtual networklayer for Lustre kernel module) using port 6988.
5014
+ This flag is required to workaround a port conflict with the gke-metadata-server on GKE nodes.
5015
+ See [Enable Lustre CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/lustre-csi-driver-new-volume) for more information.
5016
+
5017
+ This example `addons_config` disables two addons:
5018
+ """
5019
+ return pulumi.get(self, "lustre_csi_driver_config")
5020
+
5021
+ @lustre_csi_driver_config.setter
5022
+ def lustre_csi_driver_config(self, value: Optional[pulumi.Input['ClusterAddonsConfigLustreCsiDriverConfigArgs']]):
5023
+ pulumi.set(self, "lustre_csi_driver_config", value)
5024
+
4952
5025
  @_builtins.property
4953
5026
  @pulumi.getter(name="networkPolicyConfig")
4954
5027
  def network_policy_config(self) -> Optional[pulumi.Input['ClusterAddonsConfigNetworkPolicyConfigArgs']]:
@@ -4975,8 +5048,6 @@ class ClusterAddonsConfigArgs:
4975
5048
  It is disabled by default for Standard clusters; set `enabled = true` to enable.
4976
5049
  It is enabled by default for Autopilot clusters with version 1.29 or later; set `enabled = true` to enable it explicitly.
4977
5050
  See [Enable the Parallelstore CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/parallelstore-csi-new-volume#enable) for more information.
4978
-
4979
- This example `addons_config` disables two addons:
4980
5051
  """
4981
5052
  return pulumi.get(self, "parallelstore_csi_driver_config")
4982
5053
 
@@ -5358,6 +5429,60 @@ class ClusterAddonsConfigKalmConfigArgs:
5358
5429
  pulumi.set(self, "enabled", value)
5359
5430
 
5360
5431
 
5432
if not MYPY:
    class ClusterAddonsConfigLustreCsiDriverConfigArgsDict(TypedDict):
        enabled: pulumi.Input[_builtins.bool]
        """
        Whether the Lustre CSI driver is enabled for this cluster.
        """
        enable_legacy_lustre_port: NotRequired[pulumi.Input[_builtins.bool]]
        """
        If set to true, the Lustre CSI driver will initialize LNet (the virtual network layer for Lustre kernel module) using port 6988.
        This flag is required to work around a port conflict with the gke-metadata-server on GKE nodes.
        """
elif False:
    ClusterAddonsConfigLustreCsiDriverConfigArgsDict: TypeAlias = Mapping[str, Any]
5445
+
5446
@pulumi.input_type
class ClusterAddonsConfigLustreCsiDriverConfigArgs:
    """Input args for the Lustre CSI driver addon of a GKE cluster's `addons_config`."""

    def __init__(__self__, *,
                 enabled: pulumi.Input[_builtins.bool],
                 enable_legacy_lustre_port: Optional[pulumi.Input[_builtins.bool]] = None):
        """
        :param pulumi.Input[_builtins.bool] enabled: Whether the Lustre CSI driver is enabled for this cluster.
        :param pulumi.Input[_builtins.bool] enable_legacy_lustre_port: If set to true, the Lustre CSI driver will initialize LNet (the virtual network layer for Lustre kernel module) using port 6988.
               This flag is required to work around a port conflict with the gke-metadata-server on GKE nodes.
        """
        pulumi.set(__self__, "enabled", enabled)
        # Optional field: only forward it when the caller actually set it.
        if enable_legacy_lustre_port is not None:
            pulumi.set(__self__, "enable_legacy_lustre_port", enable_legacy_lustre_port)

    @_builtins.property
    @pulumi.getter
    def enabled(self) -> pulumi.Input[_builtins.bool]:
        """
        Whether the Lustre CSI driver is enabled for this cluster.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: pulumi.Input[_builtins.bool]):
        pulumi.set(self, "enabled", value)

    @_builtins.property
    @pulumi.getter(name="enableLegacyLustrePort")
    def enable_legacy_lustre_port(self) -> Optional[pulumi.Input[_builtins.bool]]:
        """
        If set to true, the Lustre CSI driver will initialize LNet (the virtual network layer for Lustre kernel module) using port 6988.
        This flag is required to work around a port conflict with the gke-metadata-server on GKE nodes.
        """
        return pulumi.get(self, "enable_legacy_lustre_port")

    @enable_legacy_lustre_port.setter
    def enable_legacy_lustre_port(self, value: Optional[pulumi.Input[_builtins.bool]]):
        pulumi.set(self, "enable_legacy_lustre_port", value)
5484
+
5485
+
5361
5486
  if not MYPY:
5362
5487
  class ClusterAddonsConfigNetworkPolicyConfigArgsDict(TypedDict):
5363
5488
  disabled: pulumi.Input[_builtins.bool]
@@ -7261,6 +7386,11 @@ class ClusterIdentityServiceConfigArgs:
7261
7386
 
7262
7387
  if not MYPY:
7263
7388
  class ClusterIpAllocationPolicyArgsDict(TypedDict):
7389
+ additional_ip_ranges_configs: NotRequired[pulumi.Input[Sequence[pulumi.Input['ClusterIpAllocationPolicyAdditionalIpRangesConfigArgsDict']]]]
7390
+ """
7391
+ The configuration for individual additional subnetworks attached to the cluster.
7392
+ Structure is documented below.
7393
+ """
7264
7394
  additional_pod_ranges_config: NotRequired[pulumi.Input['ClusterIpAllocationPolicyAdditionalPodRangesConfigArgsDict']]
7265
7395
  """
7266
7396
  The configuration for additional pod secondary ranges at
@@ -7312,6 +7442,7 @@ elif False:
7312
7442
  @pulumi.input_type
7313
7443
  class ClusterIpAllocationPolicyArgs:
7314
7444
  def __init__(__self__, *,
7445
+ additional_ip_ranges_configs: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterIpAllocationPolicyAdditionalIpRangesConfigArgs']]]] = None,
7315
7446
  additional_pod_ranges_config: Optional[pulumi.Input['ClusterIpAllocationPolicyAdditionalPodRangesConfigArgs']] = None,
7316
7447
  cluster_ipv4_cidr_block: Optional[pulumi.Input[_builtins.str]] = None,
7317
7448
  cluster_secondary_range_name: Optional[pulumi.Input[_builtins.str]] = None,
@@ -7320,6 +7451,8 @@ class ClusterIpAllocationPolicyArgs:
7320
7451
  services_secondary_range_name: Optional[pulumi.Input[_builtins.str]] = None,
7321
7452
  stack_type: Optional[pulumi.Input[_builtins.str]] = None):
7322
7453
  """
7454
+ :param pulumi.Input[Sequence[pulumi.Input['ClusterIpAllocationPolicyAdditionalIpRangesConfigArgs']]] additional_ip_ranges_configs: The configuration for individual additional subnetworks attached to the cluster.
7455
+ Structure is documented below.
7323
7456
  :param pulumi.Input['ClusterIpAllocationPolicyAdditionalPodRangesConfigArgs'] additional_pod_ranges_config: The configuration for additional pod secondary ranges at
7324
7457
  the cluster level. Used for Autopilot clusters and Standard clusters with which control of the
7325
7458
  secondary Pod IP address assignment to node pools isn't needed. Structure is documented below.
@@ -7345,6 +7478,8 @@ class ClusterIpAllocationPolicyArgs:
7345
7478
  Default value is `IPV4`.
7346
7479
  Possible values are `IPV4` and `IPV4_IPV6`.
7347
7480
  """
7481
+ if additional_ip_ranges_configs is not None:
7482
+ pulumi.set(__self__, "additional_ip_ranges_configs", additional_ip_ranges_configs)
7348
7483
  if additional_pod_ranges_config is not None:
7349
7484
  pulumi.set(__self__, "additional_pod_ranges_config", additional_pod_ranges_config)
7350
7485
  if cluster_ipv4_cidr_block is not None:
@@ -7360,6 +7495,19 @@ class ClusterIpAllocationPolicyArgs:
7360
7495
  if stack_type is not None:
7361
7496
  pulumi.set(__self__, "stack_type", stack_type)
7362
7497
 
7498
    # Accessors for the `additional_ip_ranges_configs` input property;
    # `additionalIpRangesConfigs` is the camelCase name registered with the
    # Pulumi runtime via the getter decorator.
    @_builtins.property
    @pulumi.getter(name="additionalIpRangesConfigs")
    def additional_ip_ranges_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterIpAllocationPolicyAdditionalIpRangesConfigArgs']]]]:
        """
        The configuration for individual additional subnetworks attached to the cluster.
        Structure is documented below.
        """
        return pulumi.get(self, "additional_ip_ranges_configs")

    @additional_ip_ranges_configs.setter
    def additional_ip_ranges_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterIpAllocationPolicyAdditionalIpRangesConfigArgs']]]]):
        pulumi.set(self, "additional_ip_ranges_configs", value)
7510
+
7363
7511
  @_builtins.property
7364
7512
  @pulumi.getter(name="additionalPodRangesConfig")
7365
7513
  def additional_pod_ranges_config(self) -> Optional[pulumi.Input['ClusterIpAllocationPolicyAdditionalPodRangesConfigArgs']]:
@@ -7462,6 +7610,57 @@ class ClusterIpAllocationPolicyArgs:
7462
7610
  pulumi.set(self, "stack_type", value)
7463
7611
 
7464
7612
 
7613
if not MYPY:
    class ClusterIpAllocationPolicyAdditionalIpRangesConfigArgsDict(TypedDict):
        subnetwork: pulumi.Input[_builtins.str]
        """
        Name of the subnetwork. This can be the full path of the subnetwork or just the name.
        """
        pod_ipv4_range_names: NotRequired[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]
        """
        List of secondary range names within this subnetwork that can be used for pod IPs.
        """
elif False:
    ClusterIpAllocationPolicyAdditionalIpRangesConfigArgsDict: TypeAlias = Mapping[str, Any]
7625
+
7626
@pulumi.input_type
class ClusterIpAllocationPolicyAdditionalIpRangesConfigArgs:
    """Input args for one additional subnetwork attached to a GKE cluster's IP allocation policy."""

    def __init__(__self__, *,
                 subnetwork: pulumi.Input[_builtins.str],
                 pod_ipv4_range_names: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]] = None):
        """
        :param pulumi.Input[_builtins.str] subnetwork: Name of the subnetwork. This can be the full path of the subnetwork or just the name.
        :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] pod_ipv4_range_names: List of secondary range names within this subnetwork that can be used for pod IPs.
        """
        pulumi.set(__self__, "subnetwork", subnetwork)
        # Optional field: only forward it when the caller actually set it.
        if pod_ipv4_range_names is not None:
            pulumi.set(__self__, "pod_ipv4_range_names", pod_ipv4_range_names)

    @_builtins.property
    @pulumi.getter
    def subnetwork(self) -> pulumi.Input[_builtins.str]:
        """
        Name of the subnetwork. This can be the full path of the subnetwork or just the name.
        """
        return pulumi.get(self, "subnetwork")

    @subnetwork.setter
    def subnetwork(self, value: pulumi.Input[_builtins.str]):
        pulumi.set(self, "subnetwork", value)

    @_builtins.property
    @pulumi.getter(name="podIpv4RangeNames")
    def pod_ipv4_range_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]:
        """
        List of secondary range names within this subnetwork that can be used for pod IPs.
        """
        return pulumi.get(self, "pod_ipv4_range_names")

    @pod_ipv4_range_names.setter
    def pod_ipv4_range_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]]):
        pulumi.set(self, "pod_ipv4_range_names", value)
7662
+
7663
+
7465
7664
  if not MYPY:
7466
7665
  class ClusterIpAllocationPolicyAdditionalPodRangesConfigArgsDict(TypedDict):
7467
7666
  pod_range_names: pulumi.Input[Sequence[pulumi.Input[_builtins.str]]]
@@ -8607,6 +8806,10 @@ if not MYPY:
8607
8806
  Specifies options for controlling
8608
8807
  advanced machine features. Structure is documented below.
8609
8808
  """
8809
+ boot_disk: NotRequired[pulumi.Input['ClusterNodeConfigBootDiskArgsDict']]
8810
+ """
8811
+ Configuration of the node pool boot disk. Structure is documented below
8812
+ """
8610
8813
  boot_disk_kms_key: NotRequired[pulumi.Input[_builtins.str]]
8611
8814
  """
8612
8815
  The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
@@ -8622,12 +8825,13 @@ if not MYPY:
8622
8825
  disk_size_gb: NotRequired[pulumi.Input[_builtins.int]]
8623
8826
  """
8624
8827
  Size of the disk attached to each node, specified
8625
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
8828
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
8829
+ Prefer configuring `boot_disk`.
8626
8830
  """
8627
8831
  disk_type: NotRequired[pulumi.Input[_builtins.str]]
8628
8832
  """
8629
8833
  Type of the disk attached to each node
8630
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
8834
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
8631
8835
  """
8632
8836
  effective_taints: NotRequired[pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigEffectiveTaintArgsDict']]]]
8633
8837
  """
@@ -8806,7 +9010,7 @@ if not MYPY:
8806
9010
  """
8807
9011
  sole_tenant_config: NotRequired[pulumi.Input['ClusterNodeConfigSoleTenantConfigArgsDict']]
8808
9012
  """
8809
- Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
9013
+ Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
8810
9014
  """
8811
9015
  spot: NotRequired[pulumi.Input[_builtins.bool]]
8812
9016
  """
@@ -8850,6 +9054,7 @@ elif False:
8850
9054
  class ClusterNodeConfigArgs:
8851
9055
  def __init__(__self__, *,
8852
9056
  advanced_machine_features: Optional[pulumi.Input['ClusterNodeConfigAdvancedMachineFeaturesArgs']] = None,
9057
+ boot_disk: Optional[pulumi.Input['ClusterNodeConfigBootDiskArgs']] = None,
8853
9058
  boot_disk_kms_key: Optional[pulumi.Input[_builtins.str]] = None,
8854
9059
  confidential_nodes: Optional[pulumi.Input['ClusterNodeConfigConfidentialNodesArgs']] = None,
8855
9060
  containerd_config: Optional[pulumi.Input['ClusterNodeConfigContainerdConfigArgs']] = None,
@@ -8897,13 +9102,15 @@ class ClusterNodeConfigArgs:
8897
9102
  """
8898
9103
  :param pulumi.Input['ClusterNodeConfigAdvancedMachineFeaturesArgs'] advanced_machine_features: Specifies options for controlling
8899
9104
  advanced machine features. Structure is documented below.
9105
+ :param pulumi.Input['ClusterNodeConfigBootDiskArgs'] boot_disk: Configuration of the node pool boot disk. Structure is documented below
8900
9106
  :param pulumi.Input[_builtins.str] boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
8901
9107
  :param pulumi.Input['ClusterNodeConfigConfidentialNodesArgs'] confidential_nodes: Configuration for Confidential Nodes feature. Structure is documented below.
8902
9108
  :param pulumi.Input['ClusterNodeConfigContainerdConfigArgs'] containerd_config: Parameters to customize containerd runtime. Structure is documented below.
8903
9109
  :param pulumi.Input[_builtins.int] disk_size_gb: Size of the disk attached to each node, specified
8904
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
9110
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
9111
+ Prefer configuring `boot_disk`.
8905
9112
  :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
8906
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
9113
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
8907
9114
  :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
8908
9115
  :param pulumi.Input[_builtins.bool] enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
8909
9116
  :param pulumi.Input['ClusterNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -8983,7 +9190,7 @@ class ClusterNodeConfigArgs:
8983
9190
  :param pulumi.Input[_builtins.str] service_account: The service account to be used by the Node VMs.
8984
9191
  If not specified, the "default" service account is used.
8985
9192
  :param pulumi.Input['ClusterNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options. Structure is documented below.
8986
- :param pulumi.Input['ClusterNodeConfigSoleTenantConfigArgs'] sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
9193
+ :param pulumi.Input['ClusterNodeConfigSoleTenantConfigArgs'] sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
8987
9194
  :param pulumi.Input[_builtins.bool] spot: A boolean that represents whether the underlying node VMs are spot.
8988
9195
  See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms)
8989
9196
  for more information. Defaults to false.
@@ -9004,6 +9211,8 @@ class ClusterNodeConfigArgs:
9004
9211
  """
9005
9212
  if advanced_machine_features is not None:
9006
9213
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
9214
+ if boot_disk is not None:
9215
+ pulumi.set(__self__, "boot_disk", boot_disk)
9007
9216
  if boot_disk_kms_key is not None:
9008
9217
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
9009
9218
  if confidential_nodes is not None:
@@ -9106,6 +9315,18 @@ class ClusterNodeConfigArgs:
9106
9315
  def advanced_machine_features(self, value: Optional[pulumi.Input['ClusterNodeConfigAdvancedMachineFeaturesArgs']]):
9107
9316
  pulumi.set(self, "advanced_machine_features", value)
9108
9317
 
9318
    # Accessors for the `boot_disk` input property; `bootDisk` is the
    # camelCase name registered with the Pulumi runtime via the getter
    # decorator.
    @_builtins.property
    @pulumi.getter(name="bootDisk")
    def boot_disk(self) -> Optional[pulumi.Input['ClusterNodeConfigBootDiskArgs']]:
        """
        Configuration of the node pool boot disk. Structure is documented below.
        """
        return pulumi.get(self, "boot_disk")

    @boot_disk.setter
    def boot_disk(self, value: Optional[pulumi.Input['ClusterNodeConfigBootDiskArgs']]):
        pulumi.set(self, "boot_disk", value)
9329
+
9109
9330
  @_builtins.property
9110
9331
  @pulumi.getter(name="bootDiskKmsKey")
9111
9332
  def boot_disk_kms_key(self) -> Optional[pulumi.Input[_builtins.str]]:
@@ -9147,7 +9368,8 @@ class ClusterNodeConfigArgs:
9147
9368
  def disk_size_gb(self) -> Optional[pulumi.Input[_builtins.int]]:
9148
9369
  """
9149
9370
  Size of the disk attached to each node, specified
9150
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
9371
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
9372
+ Prefer configuring `boot_disk`.
9151
9373
  """
9152
9374
  return pulumi.get(self, "disk_size_gb")
9153
9375
 
@@ -9160,7 +9382,7 @@ class ClusterNodeConfigArgs:
9160
9382
  def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
9161
9383
  """
9162
9384
  Type of the disk attached to each node
9163
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
9385
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
9164
9386
  """
9165
9387
  return pulumi.get(self, "disk_type")
9166
9388
 
@@ -9603,7 +9825,7 @@ class ClusterNodeConfigArgs:
9603
9825
  @pulumi.getter(name="soleTenantConfig")
9604
9826
  def sole_tenant_config(self) -> Optional[pulumi.Input['ClusterNodeConfigSoleTenantConfigArgs']]:
9605
9827
  """
9606
- Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
9828
+ Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
9607
9829
  """
9608
9830
  return pulumi.get(self, "sole_tenant_config")
9609
9831
 
@@ -9766,6 +9988,104 @@ class ClusterNodeConfigAdvancedMachineFeaturesArgs:
9766
9988
  pulumi.set(self, "performance_monitoring_unit", value)
9767
9989
 
9768
9990
 
9991
+ if not MYPY:
9992
+ class ClusterNodeConfigBootDiskArgsDict(TypedDict):
9993
+ disk_type: NotRequired[pulumi.Input[_builtins.str]]
9994
+ """
9995
+ Type of the disk attached to each node
9996
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
9997
+ """
9998
+ provisioned_iops: NotRequired[pulumi.Input[_builtins.int]]
9999
+ """
10000
+ Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
10001
+ """
10002
+ provisioned_throughput: NotRequired[pulumi.Input[_builtins.int]]
10003
+ """
10004
+ Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
10005
+ """
10006
+ size_gb: NotRequired[pulumi.Input[_builtins.int]]
10007
+ """
10008
+ Size of the disk attached to each node, specified
10009
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
10010
+ """
10011
+ elif False:
10012
+ ClusterNodeConfigBootDiskArgsDict: TypeAlias = Mapping[str, Any]
10013
+
10014
+ @pulumi.input_type
10015
+ class ClusterNodeConfigBootDiskArgs:
10016
+ def __init__(__self__, *,
10017
+ disk_type: Optional[pulumi.Input[_builtins.str]] = None,
10018
+ provisioned_iops: Optional[pulumi.Input[_builtins.int]] = None,
10019
+ provisioned_throughput: Optional[pulumi.Input[_builtins.int]] = None,
10020
+ size_gb: Optional[pulumi.Input[_builtins.int]] = None):
10021
+ """
10022
+ :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
10023
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
10024
+ :param pulumi.Input[_builtins.int] provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
10025
+ :param pulumi.Input[_builtins.int] provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
10026
+ :param pulumi.Input[_builtins.int] size_gb: Size of the disk attached to each node, specified
10027
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
10028
+ """
10029
+ if disk_type is not None:
10030
+ pulumi.set(__self__, "disk_type", disk_type)
10031
+ if provisioned_iops is not None:
10032
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
10033
+ if provisioned_throughput is not None:
10034
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
10035
+ if size_gb is not None:
10036
+ pulumi.set(__self__, "size_gb", size_gb)
10037
+
10038
+ @_builtins.property
10039
+ @pulumi.getter(name="diskType")
10040
+ def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
10041
+ """
10042
+ Type of the disk attached to each node
10043
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
10044
+ """
10045
+ return pulumi.get(self, "disk_type")
10046
+
10047
+ @disk_type.setter
10048
+ def disk_type(self, value: Optional[pulumi.Input[_builtins.str]]):
10049
+ pulumi.set(self, "disk_type", value)
10050
+
10051
+ @_builtins.property
10052
+ @pulumi.getter(name="provisionedIops")
10053
+ def provisioned_iops(self) -> Optional[pulumi.Input[_builtins.int]]:
10054
+ """
10055
+ Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
10056
+ """
10057
+ return pulumi.get(self, "provisioned_iops")
10058
+
10059
+ @provisioned_iops.setter
10060
+ def provisioned_iops(self, value: Optional[pulumi.Input[_builtins.int]]):
10061
+ pulumi.set(self, "provisioned_iops", value)
10062
+
10063
+ @_builtins.property
10064
+ @pulumi.getter(name="provisionedThroughput")
10065
+ def provisioned_throughput(self) -> Optional[pulumi.Input[_builtins.int]]:
10066
+ """
10067
+ Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
10068
+ """
10069
+ return pulumi.get(self, "provisioned_throughput")
10070
+
10071
+ @provisioned_throughput.setter
10072
+ def provisioned_throughput(self, value: Optional[pulumi.Input[_builtins.int]]):
10073
+ pulumi.set(self, "provisioned_throughput", value)
10074
+
10075
+ @_builtins.property
10076
+ @pulumi.getter(name="sizeGb")
10077
+ def size_gb(self) -> Optional[pulumi.Input[_builtins.int]]:
10078
+ """
10079
+ Size of the disk attached to each node, specified
10080
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
10081
+ """
10082
+ return pulumi.get(self, "size_gb")
10083
+
10084
+ @size_gb.setter
10085
+ def size_gb(self, value: Optional[pulumi.Input[_builtins.int]]):
10086
+ pulumi.set(self, "size_gb", value)
10087
+
10088
+
9769
10089
  if not MYPY:
9770
10090
  class ClusterNodeConfigConfidentialNodesArgsDict(TypedDict):
9771
10091
  enabled: pulumi.Input[_builtins.bool]
@@ -10518,6 +10838,22 @@ if not MYPY:
10518
10838
  Prior to the 6.4.0 this field was marked as required. The workaround for the required field
10519
10839
  is setting the empty string `""`, which will function identically to not setting this field.
10520
10840
  """
10841
+ eviction_max_pod_grace_period_seconds: NotRequired[pulumi.Input[_builtins.int]]
10842
+ """
10843
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
10844
+ """
10845
+ eviction_minimum_reclaim: NotRequired[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict']]
10846
+ """
10847
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
10848
+ """
10849
+ eviction_soft: NotRequired[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftArgsDict']]
10850
+ """
10851
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
10852
+ """
10853
+ eviction_soft_grace_period: NotRequired[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict']]
10854
+ """
10855
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
10856
+ """
10521
10857
  image_gc_high_threshold_percent: NotRequired[pulumi.Input[_builtins.int]]
10522
10858
  """
10523
10859
  Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
@@ -10538,10 +10874,18 @@ if not MYPY:
10538
10874
  """
10539
10875
  Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
10540
10876
  """
10877
+ max_parallel_image_pulls: NotRequired[pulumi.Input[_builtins.int]]
10878
+ """
10879
+ Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
10880
+ """
10541
10881
  pod_pids_limit: NotRequired[pulumi.Input[_builtins.int]]
10542
10882
  """
10543
10883
  Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
10544
10884
  """
10885
+ single_process_oom_kill: NotRequired[pulumi.Input[_builtins.bool]]
10886
+ """
10887
+ Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
10888
+ """
10545
10889
  elif False:
10546
10890
  ClusterNodeConfigKubeletConfigArgsDict: TypeAlias = Mapping[str, Any]
10547
10891
 
@@ -10554,12 +10898,18 @@ class ClusterNodeConfigKubeletConfigArgs:
10554
10898
  cpu_cfs_quota: Optional[pulumi.Input[_builtins.bool]] = None,
10555
10899
  cpu_cfs_quota_period: Optional[pulumi.Input[_builtins.str]] = None,
10556
10900
  cpu_manager_policy: Optional[pulumi.Input[_builtins.str]] = None,
10901
+ eviction_max_pod_grace_period_seconds: Optional[pulumi.Input[_builtins.int]] = None,
10902
+ eviction_minimum_reclaim: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs']] = None,
10903
+ eviction_soft: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftArgs']] = None,
10904
+ eviction_soft_grace_period: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']] = None,
10557
10905
  image_gc_high_threshold_percent: Optional[pulumi.Input[_builtins.int]] = None,
10558
10906
  image_gc_low_threshold_percent: Optional[pulumi.Input[_builtins.int]] = None,
10559
10907
  image_maximum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
10560
10908
  image_minimum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
10561
10909
  insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[_builtins.str]] = None,
10562
- pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None):
10910
+ max_parallel_image_pulls: Optional[pulumi.Input[_builtins.int]] = None,
10911
+ pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None,
10912
+ single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None):
10563
10913
  """
10564
10914
  :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
10565
10915
  :param pulumi.Input[_builtins.int] container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -10579,12 +10929,18 @@ class ClusterNodeConfigKubeletConfigArgs:
10579
10929
  One of `"none"` or `"static"`. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
10580
10930
  Prior to the 6.4.0 this field was marked as required. The workaround for the required field
10581
10931
  is setting the empty string `""`, which will function identically to not setting this field.
10932
+ :param pulumi.Input[_builtins.int] eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
10933
+ :param pulumi.Input['ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs'] eviction_minimum_reclaim: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
10934
+ :param pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftArgs'] eviction_soft: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
10935
+ :param pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs'] eviction_soft_grace_period: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
10582
10936
  :param pulumi.Input[_builtins.int] image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
10583
10937
  :param pulumi.Input[_builtins.int] image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive.
10584
10938
  :param pulumi.Input[_builtins.str] image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`, and `"2h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration.
10585
10939
  :param pulumi.Input[_builtins.str] image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
10586
10940
  :param pulumi.Input[_builtins.str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
10941
+ :param pulumi.Input[_builtins.int] max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
10587
10942
  :param pulumi.Input[_builtins.int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
10943
+ :param pulumi.Input[_builtins.bool] single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
10588
10944
  """
10589
10945
  if allowed_unsafe_sysctls is not None:
10590
10946
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -10598,6 +10954,14 @@ class ClusterNodeConfigKubeletConfigArgs:
10598
10954
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
10599
10955
  if cpu_manager_policy is not None:
10600
10956
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
10957
+ if eviction_max_pod_grace_period_seconds is not None:
10958
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
10959
+ if eviction_minimum_reclaim is not None:
10960
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
10961
+ if eviction_soft is not None:
10962
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
10963
+ if eviction_soft_grace_period is not None:
10964
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
10601
10965
  if image_gc_high_threshold_percent is not None:
10602
10966
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
10603
10967
  if image_gc_low_threshold_percent is not None:
@@ -10608,8 +10972,12 @@ class ClusterNodeConfigKubeletConfigArgs:
10608
10972
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
10609
10973
  if insecure_kubelet_readonly_port_enabled is not None:
10610
10974
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
10975
+ if max_parallel_image_pulls is not None:
10976
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
10611
10977
  if pod_pids_limit is not None:
10612
10978
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
10979
+ if single_process_oom_kill is not None:
10980
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
10613
10981
 
10614
10982
  @_builtins.property
10615
10983
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -10695,6 +11063,54 @@ class ClusterNodeConfigKubeletConfigArgs:
10695
11063
  def cpu_manager_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
10696
11064
  pulumi.set(self, "cpu_manager_policy", value)
10697
11065
 
11066
+ @_builtins.property
11067
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
11068
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[pulumi.Input[_builtins.int]]:
11069
+ """
11070
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
11071
+ """
11072
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
11073
+
11074
+ @eviction_max_pod_grace_period_seconds.setter
11075
+ def eviction_max_pod_grace_period_seconds(self, value: Optional[pulumi.Input[_builtins.int]]):
11076
+ pulumi.set(self, "eviction_max_pod_grace_period_seconds", value)
11077
+
11078
+ @_builtins.property
11079
+ @pulumi.getter(name="evictionMinimumReclaim")
11080
+ def eviction_minimum_reclaim(self) -> Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs']]:
11081
+ """
11082
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
11083
+ """
11084
+ return pulumi.get(self, "eviction_minimum_reclaim")
11085
+
11086
+ @eviction_minimum_reclaim.setter
11087
+ def eviction_minimum_reclaim(self, value: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs']]):
11088
+ pulumi.set(self, "eviction_minimum_reclaim", value)
11089
+
11090
+ @_builtins.property
11091
+ @pulumi.getter(name="evictionSoft")
11092
+ def eviction_soft(self) -> Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftArgs']]:
11093
+ """
11094
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
11095
+ """
11096
+ return pulumi.get(self, "eviction_soft")
11097
+
11098
+ @eviction_soft.setter
11099
+ def eviction_soft(self, value: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftArgs']]):
11100
+ pulumi.set(self, "eviction_soft", value)
11101
+
11102
+ @_builtins.property
11103
+ @pulumi.getter(name="evictionSoftGracePeriod")
11104
+ def eviction_soft_grace_period(self) -> Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']]:
11105
+ """
11106
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
11107
+ """
11108
+ return pulumi.get(self, "eviction_soft_grace_period")
11109
+
11110
+ @eviction_soft_grace_period.setter
11111
+ def eviction_soft_grace_period(self, value: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']]):
11112
+ pulumi.set(self, "eviction_soft_grace_period", value)
11113
+
10698
11114
  @_builtins.property
10699
11115
  @pulumi.getter(name="imageGcHighThresholdPercent")
10700
11116
  def image_gc_high_threshold_percent(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -10755,6 +11171,18 @@ class ClusterNodeConfigKubeletConfigArgs:
10755
11171
  def insecure_kubelet_readonly_port_enabled(self, value: Optional[pulumi.Input[_builtins.str]]):
10756
11172
  pulumi.set(self, "insecure_kubelet_readonly_port_enabled", value)
10757
11173
 
11174
+ @_builtins.property
11175
+ @pulumi.getter(name="maxParallelImagePulls")
11176
+ def max_parallel_image_pulls(self) -> Optional[pulumi.Input[_builtins.int]]:
11177
+ """
11178
+ Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
11179
+ """
11180
+ return pulumi.get(self, "max_parallel_image_pulls")
11181
+
11182
+ @max_parallel_image_pulls.setter
11183
+ def max_parallel_image_pulls(self, value: Optional[pulumi.Input[_builtins.int]]):
11184
+ pulumi.set(self, "max_parallel_image_pulls", value)
11185
+
10758
11186
  @_builtins.property
10759
11187
  @pulumi.getter(name="podPidsLimit")
10760
11188
  def pod_pids_limit(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -10767,91 +11195,515 @@ class ClusterNodeConfigKubeletConfigArgs:
10767
11195
  def pod_pids_limit(self, value: Optional[pulumi.Input[_builtins.int]]):
10768
11196
  pulumi.set(self, "pod_pids_limit", value)
10769
11197
 
11198
+ @_builtins.property
11199
+ @pulumi.getter(name="singleProcessOomKill")
11200
+ def single_process_oom_kill(self) -> Optional[pulumi.Input[_builtins.bool]]:
11201
+ """
11202
+ Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
11203
+ """
11204
+ return pulumi.get(self, "single_process_oom_kill")
11205
+
11206
+ @single_process_oom_kill.setter
11207
+ def single_process_oom_kill(self, value: Optional[pulumi.Input[_builtins.bool]]):
11208
+ pulumi.set(self, "single_process_oom_kill", value)
11209
+
10770
11210
 
10771
11211
  if not MYPY:
10772
- class ClusterNodeConfigLinuxNodeConfigArgsDict(TypedDict):
10773
- cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
11212
+ class ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict(TypedDict):
11213
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
10774
11214
  """
10775
- Possible cgroup modes that can be used.
10776
- Accepted values are:
10777
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
10778
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
10779
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
11215
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
10780
11216
  """
10781
- hugepages_config: NotRequired[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgsDict']]
11217
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
10782
11218
  """
10783
- Amounts for 2M and 1G hugepages. Structure is documented below.
11219
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
10784
11220
  """
10785
- sysctls: NotRequired[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]
11221
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
10786
11222
  """
10787
- The Linux kernel parameters to be applied to the nodes
10788
- and all pods running on the nodes. Specified as a map from the key, such as
10789
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
10790
- Note that validations happen all server side. All attributes are optional.
11223
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11224
+ """
11225
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
11226
+ """
11227
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11228
+ """
11229
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
11230
+ """
11231
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11232
+ """
11233
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
11234
+ """
11235
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
10791
11236
  """
10792
11237
  elif False:
10793
- ClusterNodeConfigLinuxNodeConfigArgsDict: TypeAlias = Mapping[str, Any]
11238
+ ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict: TypeAlias = Mapping[str, Any]
10794
11239
 
10795
11240
  @pulumi.input_type
10796
- class ClusterNodeConfigLinuxNodeConfigArgs:
11241
+ class ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs:
10797
11242
  def __init__(__self__, *,
10798
- cgroup_mode: Optional[pulumi.Input[_builtins.str]] = None,
10799
- hugepages_config: Optional[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs']] = None,
10800
- sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None):
11243
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
11244
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
11245
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
11246
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
11247
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
11248
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
10801
11249
  """
10802
- :param pulumi.Input[_builtins.str] cgroup_mode: Possible cgroup modes that can be used.
10803
- Accepted values are:
10804
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
10805
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
10806
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
10807
- :param pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_config: Amounts for 2M and 1G hugepages. Structure is documented below.
10808
- :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] sysctls: The Linux kernel parameters to be applied to the nodes
10809
- and all pods running on the nodes. Specified as a map from the key, such as
10810
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
10811
- Note that validations happen all server side. All attributes are optional.
11250
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11251
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11252
+ :param pulumi.Input[_builtins.str] memory_available: Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11253
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11254
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11255
+ :param pulumi.Input[_builtins.str] pid_available: Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
10812
11256
  """
10813
- if cgroup_mode is not None:
10814
- pulumi.set(__self__, "cgroup_mode", cgroup_mode)
10815
- if hugepages_config is not None:
10816
- pulumi.set(__self__, "hugepages_config", hugepages_config)
10817
- if sysctls is not None:
10818
- pulumi.set(__self__, "sysctls", sysctls)
11257
+ if imagefs_available is not None:
11258
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
11259
+ if imagefs_inodes_free is not None:
11260
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
11261
+ if memory_available is not None:
11262
+ pulumi.set(__self__, "memory_available", memory_available)
11263
+ if nodefs_available is not None:
11264
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
11265
+ if nodefs_inodes_free is not None:
11266
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
11267
+ if pid_available is not None:
11268
+ pulumi.set(__self__, "pid_available", pid_available)
10819
11269
 
10820
11270
  @_builtins.property
10821
- @pulumi.getter(name="cgroupMode")
10822
- def cgroup_mode(self) -> Optional[pulumi.Input[_builtins.str]]:
11271
+ @pulumi.getter(name="imagefsAvailable")
11272
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
10823
11273
  """
10824
- Possible cgroup modes that can be used.
10825
- Accepted values are:
10826
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
10827
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
10828
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
11274
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
10829
11275
  """
10830
- return pulumi.get(self, "cgroup_mode")
11276
+ return pulumi.get(self, "imagefs_available")
10831
11277
 
10832
- @cgroup_mode.setter
10833
- def cgroup_mode(self, value: Optional[pulumi.Input[_builtins.str]]):
10834
- pulumi.set(self, "cgroup_mode", value)
11278
+ @imagefs_available.setter
11279
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11280
+ pulumi.set(self, "imagefs_available", value)
10835
11281
 
10836
11282
  @_builtins.property
10837
- @pulumi.getter(name="hugepagesConfig")
10838
- def hugepages_config(self) -> Optional[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs']]:
11283
+ @pulumi.getter(name="imagefsInodesFree")
11284
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
10839
11285
  """
10840
- Amounts for 2M and 1G hugepages. Structure is documented below.
11286
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
10841
11287
  """
10842
- return pulumi.get(self, "hugepages_config")
11288
+ return pulumi.get(self, "imagefs_inodes_free")
10843
11289
 
10844
- @hugepages_config.setter
10845
- def hugepages_config(self, value: Optional[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs']]):
10846
- pulumi.set(self, "hugepages_config", value)
11290
+ @imagefs_inodes_free.setter
11291
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
11292
+ pulumi.set(self, "imagefs_inodes_free", value)
10847
11293
 
10848
11294
  @_builtins.property
10849
- @pulumi.getter
10850
- def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
11295
+ @pulumi.getter(name="memoryAvailable")
11296
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
10851
11297
  """
10852
- The Linux kernel parameters to be applied to the nodes
10853
- and all pods running on the nodes. Specified as a map from the key, such as
10854
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
11298
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11299
+ """
11300
+ return pulumi.get(self, "memory_available")
11301
+
11302
+ @memory_available.setter
11303
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11304
+ pulumi.set(self, "memory_available", value)
11305
+
11306
+ @_builtins.property
11307
+ @pulumi.getter(name="nodefsAvailable")
11308
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11309
+ """
11310
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11311
+ """
11312
+ return pulumi.get(self, "nodefs_available")
11313
+
11314
+ @nodefs_available.setter
11315
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11316
+ pulumi.set(self, "nodefs_available", value)
11317
+
11318
+ @_builtins.property
11319
+ @pulumi.getter(name="nodefsInodesFree")
11320
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
11321
+ """
11322
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11323
+ """
11324
+ return pulumi.get(self, "nodefs_inodes_free")
11325
+
11326
+ @nodefs_inodes_free.setter
11327
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
11328
+ pulumi.set(self, "nodefs_inodes_free", value)
11329
+
11330
+ @_builtins.property
11331
+ @pulumi.getter(name="pidAvailable")
11332
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11333
+ """
11334
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11335
+ """
11336
+ return pulumi.get(self, "pid_available")
11337
+
11338
+ @pid_available.setter
11339
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11340
+ pulumi.set(self, "pid_available", value)
11341
+
11342
+
11343
+ if not MYPY:
11344
+ class ClusterNodeConfigKubeletConfigEvictionSoftArgsDict(TypedDict):
11345
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
11346
+ """
11347
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
11348
+ """
11349
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
11350
+ """
11351
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
11352
+ """
11353
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
11354
+ """
11355
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
11356
+ """
11357
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
11358
+ """
11359
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
11360
+ """
11361
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
11362
+ """
11363
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
11364
+ """
11365
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
11366
+ """
11367
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
11368
+ """
11369
+ elif False:
11370
+ ClusterNodeConfigKubeletConfigEvictionSoftArgsDict: TypeAlias = Mapping[str, Any]
11371
+
11372
+ @pulumi.input_type
11373
+ class ClusterNodeConfigKubeletConfigEvictionSoftArgs:
11374
+ def __init__(__self__, *,
11375
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
11376
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
11377
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
11378
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
11379
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
11380
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
11381
+ """
11382
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
11383
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
11384
+ :param pulumi.Input[_builtins.str] memory_available: Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
11385
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
11386
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
11387
+ :param pulumi.Input[_builtins.str] pid_available: Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
11388
+ """
11389
+ if imagefs_available is not None:
11390
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
11391
+ if imagefs_inodes_free is not None:
11392
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
11393
+ if memory_available is not None:
11394
+ pulumi.set(__self__, "memory_available", memory_available)
11395
+ if nodefs_available is not None:
11396
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
11397
+ if nodefs_inodes_free is not None:
11398
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
11399
+ if pid_available is not None:
11400
+ pulumi.set(__self__, "pid_available", pid_available)
11401
+
11402
+ @_builtins.property
11403
+ @pulumi.getter(name="imagefsAvailable")
11404
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11405
+ """
11406
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
11407
+ """
11408
+ return pulumi.get(self, "imagefs_available")
11409
+
11410
+ @imagefs_available.setter
11411
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11412
+ pulumi.set(self, "imagefs_available", value)
11413
+
11414
+ @_builtins.property
11415
+ @pulumi.getter(name="imagefsInodesFree")
11416
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
11417
+ """
11418
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
11419
+ """
11420
+ return pulumi.get(self, "imagefs_inodes_free")
11421
+
11422
+ @imagefs_inodes_free.setter
11423
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
11424
+ pulumi.set(self, "imagefs_inodes_free", value)
11425
+
11426
+ @_builtins.property
11427
+ @pulumi.getter(name="memoryAvailable")
11428
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11429
+ """
11430
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
11431
+ """
11432
+ return pulumi.get(self, "memory_available")
11433
+
11434
+ @memory_available.setter
11435
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11436
+ pulumi.set(self, "memory_available", value)
11437
+
11438
+ @_builtins.property
11439
+ @pulumi.getter(name="nodefsAvailable")
11440
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11441
+ """
11442
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
11443
+ """
11444
+ return pulumi.get(self, "nodefs_available")
11445
+
11446
+ @nodefs_available.setter
11447
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11448
+ pulumi.set(self, "nodefs_available", value)
11449
+
11450
+ @_builtins.property
11451
+ @pulumi.getter(name="nodefsInodesFree")
11452
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
11453
+ """
11454
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
11455
+ """
11456
+ return pulumi.get(self, "nodefs_inodes_free")
11457
+
11458
+ @nodefs_inodes_free.setter
11459
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
11460
+ pulumi.set(self, "nodefs_inodes_free", value)
11461
+
11462
+ @_builtins.property
11463
+ @pulumi.getter(name="pidAvailable")
11464
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11465
+ """
11466
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
11467
+ """
11468
+ return pulumi.get(self, "pid_available")
11469
+
11470
+ @pid_available.setter
11471
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11472
+ pulumi.set(self, "pid_available", value)
11473
+
11474
+
11475
+ if not MYPY:
11476
+ class ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict(TypedDict):
11477
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
11478
+ """
11479
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11480
+ """
11481
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
11482
+ """
11483
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11484
+ """
11485
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
11486
+ """
11487
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
11488
+ """
11489
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
11490
+ """
11491
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11492
+ """
11493
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
11494
+ """
11495
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11496
+ """
11497
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
11498
+ """
11499
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11500
+ """
11501
+ elif False:
11502
+ ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict: TypeAlias = Mapping[str, Any]
11503
+
11504
+ @pulumi.input_type
11505
+ class ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs:
11506
+ def __init__(__self__, *,
11507
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
11508
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
11509
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
11510
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
11511
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
11512
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
11513
+ """
11514
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11515
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11516
+ :param pulumi.Input[_builtins.str] memory_available: Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
11517
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11518
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11519
+ :param pulumi.Input[_builtins.str] pid_available: Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11520
+ """
11521
+ if imagefs_available is not None:
11522
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
11523
+ if imagefs_inodes_free is not None:
11524
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
11525
+ if memory_available is not None:
11526
+ pulumi.set(__self__, "memory_available", memory_available)
11527
+ if nodefs_available is not None:
11528
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
11529
+ if nodefs_inodes_free is not None:
11530
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
11531
+ if pid_available is not None:
11532
+ pulumi.set(__self__, "pid_available", pid_available)
11533
+
11534
+ @_builtins.property
11535
+ @pulumi.getter(name="imagefsAvailable")
11536
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11537
+ """
11538
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11539
+ """
11540
+ return pulumi.get(self, "imagefs_available")
11541
+
11542
+ @imagefs_available.setter
11543
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11544
+ pulumi.set(self, "imagefs_available", value)
11545
+
11546
+ @_builtins.property
11547
+ @pulumi.getter(name="imagefsInodesFree")
11548
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
11549
+ """
11550
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11551
+ """
11552
+ return pulumi.get(self, "imagefs_inodes_free")
11553
+
11554
+ @imagefs_inodes_free.setter
11555
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
11556
+ pulumi.set(self, "imagefs_inodes_free", value)
11557
+
11558
+ @_builtins.property
11559
+ @pulumi.getter(name="memoryAvailable")
11560
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11561
+ """
11562
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
11563
+ """
11564
+ return pulumi.get(self, "memory_available")
11565
+
11566
+ @memory_available.setter
11567
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11568
+ pulumi.set(self, "memory_available", value)
11569
+
11570
+ @_builtins.property
11571
+ @pulumi.getter(name="nodefsAvailable")
11572
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11573
+ """
11574
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11575
+ """
11576
+ return pulumi.get(self, "nodefs_available")
11577
+
11578
+ @nodefs_available.setter
11579
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11580
+ pulumi.set(self, "nodefs_available", value)
11581
+
11582
+ @_builtins.property
11583
+ @pulumi.getter(name="nodefsInodesFree")
11584
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
11585
+ """
11586
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11587
+ """
11588
+ return pulumi.get(self, "nodefs_inodes_free")
11589
+
11590
+ @nodefs_inodes_free.setter
11591
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
11592
+ pulumi.set(self, "nodefs_inodes_free", value)
11593
+
11594
+ @_builtins.property
11595
+ @pulumi.getter(name="pidAvailable")
11596
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11597
+ """
11598
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11599
+ """
11600
+ return pulumi.get(self, "pid_available")
11601
+
11602
+ @pid_available.setter
11603
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11604
+ pulumi.set(self, "pid_available", value)
11605
+
11606
+
11607
+ if not MYPY:
11608
+ class ClusterNodeConfigLinuxNodeConfigArgsDict(TypedDict):
11609
+ cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
11610
+ """
11611
+ Possible cgroup modes that can be used.
11612
+ Accepted values are:
11613
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
11614
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
11615
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
11616
+ """
11617
+ hugepages_config: NotRequired[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgsDict']]
11618
+ """
11619
+ Amounts for 2M and 1G hugepages. Structure is documented below.
11620
+ """
11621
+ sysctls: NotRequired[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]
11622
+ """
11623
+ The Linux kernel parameters to be applied to the nodes
11624
+ and all pods running on the nodes. Specified as a map from the key, such as
11625
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
11626
+ Note that validations happen all server side. All attributes are optional.
11627
+ """
11628
+ transparent_hugepage_defrag: NotRequired[pulumi.Input[_builtins.str]]
11629
+ """
11630
+ The Linux kernel transparent hugepage defrag setting.
11631
+ """
11632
+ transparent_hugepage_enabled: NotRequired[pulumi.Input[_builtins.str]]
11633
+ """
11634
+ The Linux kernel transparent hugepage setting.
11635
+ """
11636
+ elif False:
11637
+ ClusterNodeConfigLinuxNodeConfigArgsDict: TypeAlias = Mapping[str, Any]
11638
+
11639
+ @pulumi.input_type
11640
+ class ClusterNodeConfigLinuxNodeConfigArgs:
11641
+ def __init__(__self__, *,
11642
+ cgroup_mode: Optional[pulumi.Input[_builtins.str]] = None,
11643
+ hugepages_config: Optional[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs']] = None,
11644
+ sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
11645
+ transparent_hugepage_defrag: Optional[pulumi.Input[_builtins.str]] = None,
11646
+ transparent_hugepage_enabled: Optional[pulumi.Input[_builtins.str]] = None):
11647
+ """
11648
+ :param pulumi.Input[_builtins.str] cgroup_mode: Possible cgroup modes that can be used.
11649
+ Accepted values are:
11650
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
11651
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
11652
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
11653
+ :param pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_config: Amounts for 2M and 1G hugepages. Structure is documented below.
11654
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] sysctls: The Linux kernel parameters to be applied to the nodes
11655
+ and all pods running on the nodes. Specified as a map from the key, such as
11656
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
11657
+ Note that validations happen all server side. All attributes are optional.
11658
+ :param pulumi.Input[_builtins.str] transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
11659
+ :param pulumi.Input[_builtins.str] transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
11660
+ """
11661
+ if cgroup_mode is not None:
11662
+ pulumi.set(__self__, "cgroup_mode", cgroup_mode)
11663
+ if hugepages_config is not None:
11664
+ pulumi.set(__self__, "hugepages_config", hugepages_config)
11665
+ if sysctls is not None:
11666
+ pulumi.set(__self__, "sysctls", sysctls)
11667
+ if transparent_hugepage_defrag is not None:
11668
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
11669
+ if transparent_hugepage_enabled is not None:
11670
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
11671
+
11672
+ @_builtins.property
11673
+ @pulumi.getter(name="cgroupMode")
11674
+ def cgroup_mode(self) -> Optional[pulumi.Input[_builtins.str]]:
11675
+ """
11676
+ Possible cgroup modes that can be used.
11677
+ Accepted values are:
11678
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
11679
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
11680
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
11681
+ """
11682
+ return pulumi.get(self, "cgroup_mode")
11683
+
11684
+ @cgroup_mode.setter
11685
+ def cgroup_mode(self, value: Optional[pulumi.Input[_builtins.str]]):
11686
+ pulumi.set(self, "cgroup_mode", value)
11687
+
11688
+ @_builtins.property
11689
+ @pulumi.getter(name="hugepagesConfig")
11690
+ def hugepages_config(self) -> Optional[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs']]:
11691
+ """
11692
+ Amounts for 2M and 1G hugepages. Structure is documented below.
11693
+ """
11694
+ return pulumi.get(self, "hugepages_config")
11695
+
11696
+ @hugepages_config.setter
11697
+ def hugepages_config(self, value: Optional[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs']]):
11698
+ pulumi.set(self, "hugepages_config", value)
11699
+
11700
+ @_builtins.property
11701
+ @pulumi.getter
11702
+ def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
11703
+ """
11704
+ The Linux kernel parameters to be applied to the nodes
11705
+ and all pods running on the nodes. Specified as a map from the key, such as
11706
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
10855
11707
  Note that validations happen all server side. All attributes are optional.
10856
11708
  """
10857
11709
  return pulumi.get(self, "sysctls")
@@ -10860,6 +11712,30 @@ class ClusterNodeConfigLinuxNodeConfigArgs:
10860
11712
  def sysctls(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
10861
11713
  pulumi.set(self, "sysctls", value)
10862
11714
 
11715
+ @_builtins.property
11716
+ @pulumi.getter(name="transparentHugepageDefrag")
11717
+ def transparent_hugepage_defrag(self) -> Optional[pulumi.Input[_builtins.str]]:
11718
+ """
11719
+ The Linux kernel transparent hugepage defrag setting.
11720
+ """
11721
+ return pulumi.get(self, "transparent_hugepage_defrag")
11722
+
11723
+ @transparent_hugepage_defrag.setter
11724
+ def transparent_hugepage_defrag(self, value: Optional[pulumi.Input[_builtins.str]]):
11725
+ pulumi.set(self, "transparent_hugepage_defrag", value)
11726
+
11727
+ @_builtins.property
11728
+ @pulumi.getter(name="transparentHugepageEnabled")
11729
+ def transparent_hugepage_enabled(self) -> Optional[pulumi.Input[_builtins.str]]:
11730
+ """
11731
+ The Linux kernel transparent hugepage setting.
11732
+ """
11733
+ return pulumi.get(self, "transparent_hugepage_enabled")
11734
+
11735
+ @transparent_hugepage_enabled.setter
11736
+ def transparent_hugepage_enabled(self, value: Optional[pulumi.Input[_builtins.str]]):
11737
+ pulumi.set(self, "transparent_hugepage_enabled", value)
11738
+
10863
11739
 
10864
11740
  if not MYPY:
10865
11741
  class ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgsDict(TypedDict):
@@ -11195,7 +12071,11 @@ if not MYPY:
11195
12071
  class ClusterNodeConfigSoleTenantConfigArgsDict(TypedDict):
11196
12072
  node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgsDict']]]
11197
12073
  """
11198
- .
12074
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
12075
+ """
12076
+ min_node_cpus: NotRequired[pulumi.Input[_builtins.int]]
12077
+ """
12078
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feeature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
11199
12079
  """
11200
12080
  elif False:
11201
12081
  ClusterNodeConfigSoleTenantConfigArgsDict: TypeAlias = Mapping[str, Any]
@@ -11203,17 +12083,21 @@ elif False:
11203
12083
  @pulumi.input_type
11204
12084
  class ClusterNodeConfigSoleTenantConfigArgs:
11205
12085
  def __init__(__self__, *,
11206
- node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
12086
+ node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]],
12087
+ min_node_cpus: Optional[pulumi.Input[_builtins.int]] = None):
11207
12088
  """
11208
- :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: .
12089
+ :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: The node affinity settings for the sole tenant node pool. Structure is documented below.
12090
+ :param pulumi.Input[_builtins.int] min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feeature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
11209
12091
  """
11210
12092
  pulumi.set(__self__, "node_affinities", node_affinities)
12093
+ if min_node_cpus is not None:
12094
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
11211
12095
 
11212
12096
  @_builtins.property
11213
12097
  @pulumi.getter(name="nodeAffinities")
11214
12098
  def node_affinities(self) -> pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]]:
11215
12099
  """
11216
- .
12100
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
11217
12101
  """
11218
12102
  return pulumi.get(self, "node_affinities")
11219
12103
 
@@ -11221,6 +12105,18 @@ class ClusterNodeConfigSoleTenantConfigArgs:
11221
12105
  def node_affinities(self, value: pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
11222
12106
  pulumi.set(self, "node_affinities", value)
11223
12107
 
12108
+ @_builtins.property
12109
+ @pulumi.getter(name="minNodeCpus")
12110
+ def min_node_cpus(self) -> Optional[pulumi.Input[_builtins.int]]:
12111
+ """
12112
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feeature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
12113
+ """
12114
+ return pulumi.get(self, "min_node_cpus")
12115
+
12116
+ @min_node_cpus.setter
12117
+ def min_node_cpus(self, value: Optional[pulumi.Input[_builtins.int]]):
12118
+ pulumi.set(self, "min_node_cpus", value)
12119
+
11224
12120
 
11225
12121
  if not MYPY:
11226
12122
  class ClusterNodeConfigSoleTenantConfigNodeAffinityArgsDict(TypedDict):
@@ -12555,6 +13451,11 @@ if not MYPY:
12555
13451
  """
12556
13452
  The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
12557
13453
  """
13454
+ subnetwork: NotRequired[pulumi.Input[_builtins.str]]
13455
+ """
13456
+ The name or self_link of the Google Compute Engine
13457
+ subnetwork in which the cluster's instances are launched.
13458
+ """
12558
13459
  elif False:
12559
13460
  ClusterNodePoolNetworkConfigArgsDict: TypeAlias = Mapping[str, Any]
12560
13461
 
@@ -12568,7 +13469,8 @@ class ClusterNodePoolNetworkConfigArgs:
12568
13469
  network_performance_config: Optional[pulumi.Input['ClusterNodePoolNetworkConfigNetworkPerformanceConfigArgs']] = None,
12569
13470
  pod_cidr_overprovision_config: Optional[pulumi.Input['ClusterNodePoolNetworkConfigPodCidrOverprovisionConfigArgs']] = None,
12570
13471
  pod_ipv4_cidr_block: Optional[pulumi.Input[_builtins.str]] = None,
12571
- pod_range: Optional[pulumi.Input[_builtins.str]] = None):
13472
+ pod_range: Optional[pulumi.Input[_builtins.str]] = None,
13473
+ subnetwork: Optional[pulumi.Input[_builtins.str]] = None):
12572
13474
  """
12573
13475
  :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNetworkConfigAdditionalNodeNetworkConfigArgs']]] additional_node_network_configs: We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface
12574
13476
  :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs']]] additional_pod_network_configs: We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node
@@ -12578,6 +13480,8 @@ class ClusterNodePoolNetworkConfigArgs:
12578
13480
  :param pulumi.Input['ClusterNodePoolNetworkConfigPodCidrOverprovisionConfigArgs'] pod_cidr_overprovision_config: Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited
12579
13481
  :param pulumi.Input[_builtins.str] pod_ipv4_cidr_block: The IP address range for pod IPs in this node pool. Only applicable if create_pod_range is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
12580
13482
  :param pulumi.Input[_builtins.str] pod_range: The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
13483
+ :param pulumi.Input[_builtins.str] subnetwork: The name or self_link of the Google Compute Engine
13484
+ subnetwork in which the cluster's instances are launched.
12581
13485
  """
12582
13486
  if additional_node_network_configs is not None:
12583
13487
  pulumi.set(__self__, "additional_node_network_configs", additional_node_network_configs)
@@ -12595,6 +13499,8 @@ class ClusterNodePoolNetworkConfigArgs:
12595
13499
  pulumi.set(__self__, "pod_ipv4_cidr_block", pod_ipv4_cidr_block)
12596
13500
  if pod_range is not None:
12597
13501
  pulumi.set(__self__, "pod_range", pod_range)
13502
+ if subnetwork is not None:
13503
+ pulumi.set(__self__, "subnetwork", subnetwork)
12598
13504
 
12599
13505
  @_builtins.property
12600
13506
  @pulumi.getter(name="additionalNodeNetworkConfigs")
@@ -12692,6 +13598,19 @@ class ClusterNodePoolNetworkConfigArgs:
12692
13598
  def pod_range(self, value: Optional[pulumi.Input[_builtins.str]]):
12693
13599
  pulumi.set(self, "pod_range", value)
12694
13600
 
13601
+ @_builtins.property
13602
+ @pulumi.getter
13603
+ def subnetwork(self) -> Optional[pulumi.Input[_builtins.str]]:
13604
+ """
13605
+ The name or self_link of the Google Compute Engine
13606
+ subnetwork in which the cluster's instances are launched.
13607
+ """
13608
+ return pulumi.get(self, "subnetwork")
13609
+
13610
+ @subnetwork.setter
13611
+ def subnetwork(self, value: Optional[pulumi.Input[_builtins.str]]):
13612
+ pulumi.set(self, "subnetwork", value)
13613
+
12695
13614
 
12696
13615
  if not MYPY:
12697
13616
  class ClusterNodePoolNetworkConfigAdditionalNodeNetworkConfigArgsDict(TypedDict):
@@ -12904,6 +13823,10 @@ if not MYPY:
12904
13823
  Specifies options for controlling
12905
13824
  advanced machine features. Structure is documented below.
12906
13825
  """
13826
+ boot_disk: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigBootDiskArgsDict']]
13827
+ """
13828
+ Configuration of the node pool boot disk. Structure is documented below
13829
+ """
12907
13830
  boot_disk_kms_key: NotRequired[pulumi.Input[_builtins.str]]
12908
13831
  """
12909
13832
  The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
@@ -12919,12 +13842,13 @@ if not MYPY:
12919
13842
  disk_size_gb: NotRequired[pulumi.Input[_builtins.int]]
12920
13843
  """
12921
13844
  Size of the disk attached to each node, specified
12922
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
13845
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
13846
+ Prefer configuring `boot_disk`.
12923
13847
  """
12924
13848
  disk_type: NotRequired[pulumi.Input[_builtins.str]]
12925
13849
  """
12926
13850
  Type of the disk attached to each node
12927
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
13851
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
12928
13852
  """
12929
13853
  effective_taints: NotRequired[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigEffectiveTaintArgsDict']]]]
12930
13854
  """
@@ -13103,7 +14027,7 @@ if not MYPY:
13103
14027
  """
13104
14028
  sole_tenant_config: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigArgsDict']]
13105
14029
  """
13106
- Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
14030
+ Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
13107
14031
  """
13108
14032
  spot: NotRequired[pulumi.Input[_builtins.bool]]
13109
14033
  """
@@ -13147,6 +14071,7 @@ elif False:
13147
14071
  class ClusterNodePoolNodeConfigArgs:
13148
14072
  def __init__(__self__, *,
13149
14073
  advanced_machine_features: Optional[pulumi.Input['ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs']] = None,
14074
+ boot_disk: Optional[pulumi.Input['ClusterNodePoolNodeConfigBootDiskArgs']] = None,
13150
14075
  boot_disk_kms_key: Optional[pulumi.Input[_builtins.str]] = None,
13151
14076
  confidential_nodes: Optional[pulumi.Input['ClusterNodePoolNodeConfigConfidentialNodesArgs']] = None,
13152
14077
  containerd_config: Optional[pulumi.Input['ClusterNodePoolNodeConfigContainerdConfigArgs']] = None,
@@ -13194,13 +14119,15 @@ class ClusterNodePoolNodeConfigArgs:
13194
14119
  """
13195
14120
  :param pulumi.Input['ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs'] advanced_machine_features: Specifies options for controlling
13196
14121
  advanced machine features. Structure is documented below.
14122
+ :param pulumi.Input['ClusterNodePoolNodeConfigBootDiskArgs'] boot_disk: Configuration of the node pool boot disk. Structure is documented below
13197
14123
  :param pulumi.Input[_builtins.str] boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
13198
14124
  :param pulumi.Input['ClusterNodePoolNodeConfigConfidentialNodesArgs'] confidential_nodes: Configuration for Confidential Nodes feature. Structure is documented below.
13199
14125
  :param pulumi.Input['ClusterNodePoolNodeConfigContainerdConfigArgs'] containerd_config: Parameters to customize containerd runtime. Structure is documented below.
13200
14126
  :param pulumi.Input[_builtins.int] disk_size_gb: Size of the disk attached to each node, specified
13201
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
14127
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
14128
+ Prefer configuring `boot_disk`.
13202
14129
  :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
13203
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
14130
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
13204
14131
  :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
13205
14132
  :param pulumi.Input[_builtins.bool] enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
13206
14133
  :param pulumi.Input['ClusterNodePoolNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -13280,7 +14207,7 @@ class ClusterNodePoolNodeConfigArgs:
13280
14207
  :param pulumi.Input[_builtins.str] service_account: The service account to be used by the Node VMs.
13281
14208
  If not specified, the "default" service account is used.
13282
14209
  :param pulumi.Input['ClusterNodePoolNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options. Structure is documented below.
13283
- :param pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigArgs'] sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
14210
+ :param pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigArgs'] sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
13284
14211
  :param pulumi.Input[_builtins.bool] spot: A boolean that represents whether the underlying node VMs are spot.
13285
14212
  See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms)
13286
14213
  for more information. Defaults to false.
@@ -13301,6 +14228,8 @@ class ClusterNodePoolNodeConfigArgs:
13301
14228
  """
13302
14229
  if advanced_machine_features is not None:
13303
14230
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
14231
+ if boot_disk is not None:
14232
+ pulumi.set(__self__, "boot_disk", boot_disk)
13304
14233
  if boot_disk_kms_key is not None:
13305
14234
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
13306
14235
  if confidential_nodes is not None:
@@ -13404,12 +14333,24 @@ class ClusterNodePoolNodeConfigArgs:
13404
14333
  pulumi.set(self, "advanced_machine_features", value)
13405
14334
 
13406
14335
  @_builtins.property
13407
- @pulumi.getter(name="bootDiskKmsKey")
13408
- def boot_disk_kms_key(self) -> Optional[pulumi.Input[_builtins.str]]:
14336
+ @pulumi.getter(name="bootDisk")
14337
+ def boot_disk(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigBootDiskArgs']]:
13409
14338
  """
13410
- The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
14339
+ Configuration of the node pool boot disk. Structure is documented below
13411
14340
  """
13412
- return pulumi.get(self, "boot_disk_kms_key")
14341
+ return pulumi.get(self, "boot_disk")
14342
+
14343
+ @boot_disk.setter
14344
+ def boot_disk(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigBootDiskArgs']]):
14345
+ pulumi.set(self, "boot_disk", value)
14346
+
14347
+ @_builtins.property
14348
+ @pulumi.getter(name="bootDiskKmsKey")
14349
+ def boot_disk_kms_key(self) -> Optional[pulumi.Input[_builtins.str]]:
14350
+ """
14351
+ The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
14352
+ """
14353
+ return pulumi.get(self, "boot_disk_kms_key")
13413
14354
 
13414
14355
  @boot_disk_kms_key.setter
13415
14356
  def boot_disk_kms_key(self, value: Optional[pulumi.Input[_builtins.str]]):
@@ -13444,7 +14385,8 @@ class ClusterNodePoolNodeConfigArgs:
13444
14385
  def disk_size_gb(self) -> Optional[pulumi.Input[_builtins.int]]:
13445
14386
  """
13446
14387
  Size of the disk attached to each node, specified
13447
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
14388
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
14389
+ Prefer configuring `boot_disk`.
13448
14390
  """
13449
14391
  return pulumi.get(self, "disk_size_gb")
13450
14392
 
@@ -13457,7 +14399,7 @@ class ClusterNodePoolNodeConfigArgs:
13457
14399
  def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
13458
14400
  """
13459
14401
  Type of the disk attached to each node
13460
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
14402
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
13461
14403
  """
13462
14404
  return pulumi.get(self, "disk_type")
13463
14405
 
@@ -13900,7 +14842,7 @@ class ClusterNodePoolNodeConfigArgs:
13900
14842
  @pulumi.getter(name="soleTenantConfig")
13901
14843
  def sole_tenant_config(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigArgs']]:
13902
14844
  """
13903
- Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
14845
+ Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
13904
14846
  """
13905
14847
  return pulumi.get(self, "sole_tenant_config")
13906
14848
 
@@ -14063,6 +15005,104 @@ class ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs:
14063
15005
  pulumi.set(self, "performance_monitoring_unit", value)
14064
15006
 
14065
15007
 
15008
+ if not MYPY:
15009
+ class ClusterNodePoolNodeConfigBootDiskArgsDict(TypedDict):
15010
+ disk_type: NotRequired[pulumi.Input[_builtins.str]]
15011
+ """
15012
+ Type of the disk attached to each node
15013
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
15014
+ """
15015
+ provisioned_iops: NotRequired[pulumi.Input[_builtins.int]]
15016
+ """
15017
+ Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
15018
+ """
15019
+ provisioned_throughput: NotRequired[pulumi.Input[_builtins.int]]
15020
+ """
15021
+ Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
15022
+ """
15023
+ size_gb: NotRequired[pulumi.Input[_builtins.int]]
15024
+ """
15025
+ Size of the disk attached to each node, specified
15026
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
15027
+ """
15028
+ elif False:
15029
+ ClusterNodePoolNodeConfigBootDiskArgsDict: TypeAlias = Mapping[str, Any]
15030
+
15031
+ @pulumi.input_type
15032
+ class ClusterNodePoolNodeConfigBootDiskArgs:
15033
+ def __init__(__self__, *,
15034
+ disk_type: Optional[pulumi.Input[_builtins.str]] = None,
15035
+ provisioned_iops: Optional[pulumi.Input[_builtins.int]] = None,
15036
+ provisioned_throughput: Optional[pulumi.Input[_builtins.int]] = None,
15037
+ size_gb: Optional[pulumi.Input[_builtins.int]] = None):
15038
+ """
15039
+ :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
15040
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
15041
+ :param pulumi.Input[_builtins.int] provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
15042
+ :param pulumi.Input[_builtins.int] provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
15043
+ :param pulumi.Input[_builtins.int] size_gb: Size of the disk attached to each node, specified
15044
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
15045
+ """
15046
+ if disk_type is not None:
15047
+ pulumi.set(__self__, "disk_type", disk_type)
15048
+ if provisioned_iops is not None:
15049
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
15050
+ if provisioned_throughput is not None:
15051
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
15052
+ if size_gb is not None:
15053
+ pulumi.set(__self__, "size_gb", size_gb)
15054
+
15055
+ @_builtins.property
15056
+ @pulumi.getter(name="diskType")
15057
+ def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
15058
+ """
15059
+ Type of the disk attached to each node
15060
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
15061
+ """
15062
+ return pulumi.get(self, "disk_type")
15063
+
15064
+ @disk_type.setter
15065
+ def disk_type(self, value: Optional[pulumi.Input[_builtins.str]]):
15066
+ pulumi.set(self, "disk_type", value)
15067
+
15068
+ @_builtins.property
15069
+ @pulumi.getter(name="provisionedIops")
15070
+ def provisioned_iops(self) -> Optional[pulumi.Input[_builtins.int]]:
15071
+ """
15072
+ Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
15073
+ """
15074
+ return pulumi.get(self, "provisioned_iops")
15075
+
15076
+ @provisioned_iops.setter
15077
+ def provisioned_iops(self, value: Optional[pulumi.Input[_builtins.int]]):
15078
+ pulumi.set(self, "provisioned_iops", value)
15079
+
15080
+ @_builtins.property
15081
+ @pulumi.getter(name="provisionedThroughput")
15082
+ def provisioned_throughput(self) -> Optional[pulumi.Input[_builtins.int]]:
15083
+ """
15084
+ Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
15085
+ """
15086
+ return pulumi.get(self, "provisioned_throughput")
15087
+
15088
+ @provisioned_throughput.setter
15089
+ def provisioned_throughput(self, value: Optional[pulumi.Input[_builtins.int]]):
15090
+ pulumi.set(self, "provisioned_throughput", value)
15091
+
15092
+ @_builtins.property
15093
+ @pulumi.getter(name="sizeGb")
15094
+ def size_gb(self) -> Optional[pulumi.Input[_builtins.int]]:
15095
+ """
15096
+ Size of the disk attached to each node, specified
15097
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
15098
+ """
15099
+ return pulumi.get(self, "size_gb")
15100
+
15101
+ @size_gb.setter
15102
+ def size_gb(self, value: Optional[pulumi.Input[_builtins.int]]):
15103
+ pulumi.set(self, "size_gb", value)
15104
+
15105
+
14066
15106
  if not MYPY:
14067
15107
  class ClusterNodePoolNodeConfigConfidentialNodesArgsDict(TypedDict):
14068
15108
  enabled: pulumi.Input[_builtins.bool]
@@ -14815,6 +15855,22 @@ if not MYPY:
14815
15855
  Prior to the 6.4.0 this field was marked as required. The workaround for the required field
14816
15856
  is setting the empty string `""`, which will function identically to not setting this field.
14817
15857
  """
15858
+ eviction_max_pod_grace_period_seconds: NotRequired[pulumi.Input[_builtins.int]]
15859
+ """
15860
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
15861
+ """
15862
+ eviction_minimum_reclaim: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict']]
15863
+ """
15864
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
15865
+ """
15866
+ eviction_soft: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgsDict']]
15867
+ """
15868
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
15869
+ """
15870
+ eviction_soft_grace_period: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict']]
15871
+ """
15872
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
15873
+ """
14818
15874
  image_gc_high_threshold_percent: NotRequired[pulumi.Input[_builtins.int]]
14819
15875
  """
14820
15876
  Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
@@ -14835,10 +15891,18 @@ if not MYPY:
14835
15891
  """
14836
15892
  Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
14837
15893
  """
15894
+ max_parallel_image_pulls: NotRequired[pulumi.Input[_builtins.int]]
15895
+ """
15896
+ Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
15897
+ """
14838
15898
  pod_pids_limit: NotRequired[pulumi.Input[_builtins.int]]
14839
15899
  """
14840
15900
  Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
14841
15901
  """
15902
+ single_process_oom_kill: NotRequired[pulumi.Input[_builtins.bool]]
15903
+ """
15904
+ Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
15905
+ """
14842
15906
  elif False:
14843
15907
  ClusterNodePoolNodeConfigKubeletConfigArgsDict: TypeAlias = Mapping[str, Any]
14844
15908
 
@@ -14851,12 +15915,18 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
14851
15915
  cpu_cfs_quota: Optional[pulumi.Input[_builtins.bool]] = None,
14852
15916
  cpu_cfs_quota_period: Optional[pulumi.Input[_builtins.str]] = None,
14853
15917
  cpu_manager_policy: Optional[pulumi.Input[_builtins.str]] = None,
15918
+ eviction_max_pod_grace_period_seconds: Optional[pulumi.Input[_builtins.int]] = None,
15919
+ eviction_minimum_reclaim: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs']] = None,
15920
+ eviction_soft: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs']] = None,
15921
+ eviction_soft_grace_period: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']] = None,
14854
15922
  image_gc_high_threshold_percent: Optional[pulumi.Input[_builtins.int]] = None,
14855
15923
  image_gc_low_threshold_percent: Optional[pulumi.Input[_builtins.int]] = None,
14856
15924
  image_maximum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
14857
15925
  image_minimum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
14858
15926
  insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[_builtins.str]] = None,
14859
- pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None):
15927
+ max_parallel_image_pulls: Optional[pulumi.Input[_builtins.int]] = None,
15928
+ pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None,
15929
+ single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None):
14860
15930
  """
14861
15931
  :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
14862
15932
  :param pulumi.Input[_builtins.int] container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -14876,12 +15946,18 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
14876
15946
  One of `"none"` or `"static"`. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
14877
15947
  Prior to the 6.4.0 this field was marked as required. The workaround for the required field
14878
15948
  is setting the empty string `""`, which will function identically to not setting this field.
15949
+ :param pulumi.Input[_builtins.int] eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
15950
+ :param pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs'] eviction_minimum_reclaim: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
15951
+ :param pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs'] eviction_soft: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
15952
+ :param pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs'] eviction_soft_grace_period: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
14879
15953
  :param pulumi.Input[_builtins.int] image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
14880
15954
  :param pulumi.Input[_builtins.int] image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive.
14881
15955
  :param pulumi.Input[_builtins.str] image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`, and `"2h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration.
14882
15956
  :param pulumi.Input[_builtins.str] image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
14883
15957
  :param pulumi.Input[_builtins.str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
15958
+ :param pulumi.Input[_builtins.int] max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
14884
15959
  :param pulumi.Input[_builtins.int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
15960
+ :param pulumi.Input[_builtins.bool] single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
14885
15961
  """
14886
15962
  if allowed_unsafe_sysctls is not None:
14887
15963
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -14895,6 +15971,14 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
14895
15971
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
14896
15972
  if cpu_manager_policy is not None:
14897
15973
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
15974
+ if eviction_max_pod_grace_period_seconds is not None:
15975
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
15976
+ if eviction_minimum_reclaim is not None:
15977
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
15978
+ if eviction_soft is not None:
15979
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
15980
+ if eviction_soft_grace_period is not None:
15981
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
14898
15982
  if image_gc_high_threshold_percent is not None:
14899
15983
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
14900
15984
  if image_gc_low_threshold_percent is not None:
@@ -14905,8 +15989,12 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
14905
15989
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
14906
15990
  if insecure_kubelet_readonly_port_enabled is not None:
14907
15991
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
15992
+ if max_parallel_image_pulls is not None:
15993
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
14908
15994
  if pod_pids_limit is not None:
14909
15995
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
15996
+ if single_process_oom_kill is not None:
15997
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
14910
15998
 
14911
15999
  @_builtins.property
14912
16000
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -14992,6 +16080,54 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
14992
16080
  def cpu_manager_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
14993
16081
  pulumi.set(self, "cpu_manager_policy", value)
14994
16082
 
16083
+ @_builtins.property
16084
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
16085
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[pulumi.Input[_builtins.int]]:
16086
+ """
16087
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
16088
+ """
16089
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
16090
+
16091
+ @eviction_max_pod_grace_period_seconds.setter
16092
+ def eviction_max_pod_grace_period_seconds(self, value: Optional[pulumi.Input[_builtins.int]]):
16093
+ pulumi.set(self, "eviction_max_pod_grace_period_seconds", value)
16094
+
16095
+ @_builtins.property
16096
+ @pulumi.getter(name="evictionMinimumReclaim")
16097
+ def eviction_minimum_reclaim(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs']]:
16098
+ """
16099
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
16100
+ """
16101
+ return pulumi.get(self, "eviction_minimum_reclaim")
16102
+
16103
+ @eviction_minimum_reclaim.setter
16104
+ def eviction_minimum_reclaim(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs']]):
16105
+ pulumi.set(self, "eviction_minimum_reclaim", value)
16106
+
16107
+ @_builtins.property
16108
+ @pulumi.getter(name="evictionSoft")
16109
+ def eviction_soft(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs']]:
16110
+ """
16111
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
16112
+ """
16113
+ return pulumi.get(self, "eviction_soft")
16114
+
16115
+ @eviction_soft.setter
16116
+ def eviction_soft(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs']]):
16117
+ pulumi.set(self, "eviction_soft", value)
16118
+
16119
+ @_builtins.property
16120
+ @pulumi.getter(name="evictionSoftGracePeriod")
16121
+ def eviction_soft_grace_period(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']]:
16122
+ """
16123
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
16124
+ """
16125
+ return pulumi.get(self, "eviction_soft_grace_period")
16126
+
16127
+ @eviction_soft_grace_period.setter
16128
+ def eviction_soft_grace_period(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']]):
16129
+ pulumi.set(self, "eviction_soft_grace_period", value)
16130
+
14995
16131
  @_builtins.property
14996
16132
  @pulumi.getter(name="imageGcHighThresholdPercent")
14997
16133
  def image_gc_high_threshold_percent(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -15052,6 +16188,18 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
15052
16188
  def insecure_kubelet_readonly_port_enabled(self, value: Optional[pulumi.Input[_builtins.str]]):
15053
16189
  pulumi.set(self, "insecure_kubelet_readonly_port_enabled", value)
15054
16190
 
16191
+ @_builtins.property
16192
+ @pulumi.getter(name="maxParallelImagePulls")
16193
+ def max_parallel_image_pulls(self) -> Optional[pulumi.Input[_builtins.int]]:
16194
+ """
16195
+ Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
16196
+ """
16197
+ return pulumi.get(self, "max_parallel_image_pulls")
16198
+
16199
+ @max_parallel_image_pulls.setter
16200
+ def max_parallel_image_pulls(self, value: Optional[pulumi.Input[_builtins.int]]):
16201
+ pulumi.set(self, "max_parallel_image_pulls", value)
16202
+
15055
16203
  @_builtins.property
15056
16204
  @pulumi.getter(name="podPidsLimit")
15057
16205
  def pod_pids_limit(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -15064,189 +16212,637 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
15064
16212
  def pod_pids_limit(self, value: Optional[pulumi.Input[_builtins.int]]):
15065
16213
  pulumi.set(self, "pod_pids_limit", value)
15066
16214
 
16215
+ @_builtins.property
16216
+ @pulumi.getter(name="singleProcessOomKill")
16217
+ def single_process_oom_kill(self) -> Optional[pulumi.Input[_builtins.bool]]:
16218
+ """
16219
+ Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
16220
+ """
16221
+ return pulumi.get(self, "single_process_oom_kill")
16222
+
16223
+ @single_process_oom_kill.setter
16224
+ def single_process_oom_kill(self, value: Optional[pulumi.Input[_builtins.bool]]):
16225
+ pulumi.set(self, "single_process_oom_kill", value)
16226
+
15067
16227
 
15068
16228
  if not MYPY:
15069
- class ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict(TypedDict):
15070
- cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
16229
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict(TypedDict):
16230
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
15071
16231
  """
15072
- Possible cgroup modes that can be used.
15073
- Accepted values are:
15074
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
15075
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
15076
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
16232
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15077
16233
  """
15078
- hugepages_config: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict']]
16234
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
15079
16235
  """
15080
- Amounts for 2M and 1G hugepages. Structure is documented below.
16236
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15081
16237
  """
15082
- sysctls: NotRequired[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]
16238
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
15083
16239
  """
15084
- The Linux kernel parameters to be applied to the nodes
15085
- and all pods running on the nodes. Specified as a map from the key, such as
15086
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
15087
- Note that validations happen all server side. All attributes are optional.
16240
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16241
+ """
16242
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
16243
+ """
16244
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16245
+ """
16246
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
16247
+ """
16248
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16249
+ """
16250
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
16251
+ """
16252
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15088
16253
  """
15089
16254
  elif False:
15090
- ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict: TypeAlias = Mapping[str, Any]
16255
+ ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict: TypeAlias = Mapping[str, Any]
15091
16256
 
15092
16257
  @pulumi.input_type
15093
- class ClusterNodePoolNodeConfigLinuxNodeConfigArgs:
16258
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs:
15094
16259
  def __init__(__self__, *,
15095
- cgroup_mode: Optional[pulumi.Input[_builtins.str]] = None,
15096
- hugepages_config: Optional[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']] = None,
15097
- sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None):
16260
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
16261
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
16262
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
16263
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
16264
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
16265
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
15098
16266
  """
15099
- :param pulumi.Input[_builtins.str] cgroup_mode: Possible cgroup modes that can be used.
15100
- Accepted values are:
15101
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
15102
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
15103
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
15104
- :param pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_config: Amounts for 2M and 1G hugepages. Structure is documented below.
15105
- :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] sysctls: The Linux kernel parameters to be applied to the nodes
15106
- and all pods running on the nodes. Specified as a map from the key, such as
15107
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
15108
- Note that validations happen all server side. All attributes are optional.
16267
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16268
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16269
+ :param pulumi.Input[_builtins.str] memory_available: Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16270
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16271
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16272
+ :param pulumi.Input[_builtins.str] pid_available: Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15109
16273
  """
15110
- if cgroup_mode is not None:
15111
- pulumi.set(__self__, "cgroup_mode", cgroup_mode)
15112
- if hugepages_config is not None:
15113
- pulumi.set(__self__, "hugepages_config", hugepages_config)
15114
- if sysctls is not None:
15115
- pulumi.set(__self__, "sysctls", sysctls)
16274
+ if imagefs_available is not None:
16275
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
16276
+ if imagefs_inodes_free is not None:
16277
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
16278
+ if memory_available is not None:
16279
+ pulumi.set(__self__, "memory_available", memory_available)
16280
+ if nodefs_available is not None:
16281
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
16282
+ if nodefs_inodes_free is not None:
16283
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
16284
+ if pid_available is not None:
16285
+ pulumi.set(__self__, "pid_available", pid_available)
15116
16286
 
15117
16287
  @_builtins.property
15118
- @pulumi.getter(name="cgroupMode")
15119
- def cgroup_mode(self) -> Optional[pulumi.Input[_builtins.str]]:
16288
+ @pulumi.getter(name="imagefsAvailable")
16289
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
15120
16290
  """
15121
- Possible cgroup modes that can be used.
15122
- Accepted values are:
15123
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
15124
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
15125
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
16291
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15126
16292
  """
15127
- return pulumi.get(self, "cgroup_mode")
16293
+ return pulumi.get(self, "imagefs_available")
15128
16294
 
15129
- @cgroup_mode.setter
15130
- def cgroup_mode(self, value: Optional[pulumi.Input[_builtins.str]]):
15131
- pulumi.set(self, "cgroup_mode", value)
16295
+ @imagefs_available.setter
16296
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16297
+ pulumi.set(self, "imagefs_available", value)
15132
16298
 
15133
16299
  @_builtins.property
15134
- @pulumi.getter(name="hugepagesConfig")
15135
- def hugepages_config(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']]:
16300
+ @pulumi.getter(name="imagefsInodesFree")
16301
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
15136
16302
  """
15137
- Amounts for 2M and 1G hugepages. Structure is documented below.
16303
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15138
16304
  """
15139
- return pulumi.get(self, "hugepages_config")
16305
+ return pulumi.get(self, "imagefs_inodes_free")
15140
16306
 
15141
- @hugepages_config.setter
15142
- def hugepages_config(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']]):
15143
- pulumi.set(self, "hugepages_config", value)
16307
+ @imagefs_inodes_free.setter
16308
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
16309
+ pulumi.set(self, "imagefs_inodes_free", value)
15144
16310
 
15145
16311
  @_builtins.property
15146
- @pulumi.getter
15147
- def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
16312
+ @pulumi.getter(name="memoryAvailable")
16313
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
15148
16314
  """
15149
- The Linux kernel parameters to be applied to the nodes
15150
- and all pods running on the nodes. Specified as a map from the key, such as
15151
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
15152
- Note that validations happen all server side. All attributes are optional.
16315
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15153
16316
  """
15154
- return pulumi.get(self, "sysctls")
16317
+ return pulumi.get(self, "memory_available")
15155
16318
 
15156
- @sysctls.setter
15157
- def sysctls(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
15158
- pulumi.set(self, "sysctls", value)
16319
+ @memory_available.setter
16320
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16321
+ pulumi.set(self, "memory_available", value)
16322
+
16323
+ @_builtins.property
16324
+ @pulumi.getter(name="nodefsAvailable")
16325
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16326
+ """
16327
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16328
+ """
16329
+ return pulumi.get(self, "nodefs_available")
16330
+
16331
+ @nodefs_available.setter
16332
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16333
+ pulumi.set(self, "nodefs_available", value)
16334
+
16335
+ @_builtins.property
16336
+ @pulumi.getter(name="nodefsInodesFree")
16337
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
16338
+ """
16339
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16340
+ """
16341
+ return pulumi.get(self, "nodefs_inodes_free")
16342
+
16343
+ @nodefs_inodes_free.setter
16344
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
16345
+ pulumi.set(self, "nodefs_inodes_free", value)
16346
+
16347
+ @_builtins.property
16348
+ @pulumi.getter(name="pidAvailable")
16349
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16350
+ """
16351
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16352
+ """
16353
+ return pulumi.get(self, "pid_available")
16354
+
16355
+ @pid_available.setter
16356
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16357
+ pulumi.set(self, "pid_available", value)
15159
16358
 
15160
16359
 
15161
16360
  if not MYPY:
15162
- class ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict(TypedDict):
15163
- hugepage_size1g: NotRequired[pulumi.Input[_builtins.int]]
16361
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgsDict(TypedDict):
16362
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
15164
16363
  """
15165
- Amount of 1G hugepages.
16364
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
15166
16365
  """
15167
- hugepage_size2m: NotRequired[pulumi.Input[_builtins.int]]
16366
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
15168
16367
  """
15169
- Amount of 2M hugepages.
16368
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
16369
+ """
16370
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
16371
+ """
16372
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
16373
+ """
16374
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
16375
+ """
16376
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
16377
+ """
16378
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
16379
+ """
16380
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
16381
+ """
16382
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
16383
+ """
16384
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
15170
16385
  """
15171
16386
  elif False:
15172
- ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict: TypeAlias = Mapping[str, Any]
16387
+ ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgsDict: TypeAlias = Mapping[str, Any]
15173
16388
 
15174
16389
  @pulumi.input_type
15175
- class ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs:
16390
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs:
15176
16391
  def __init__(__self__, *,
15177
- hugepage_size1g: Optional[pulumi.Input[_builtins.int]] = None,
15178
- hugepage_size2m: Optional[pulumi.Input[_builtins.int]] = None):
16392
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
16393
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
16394
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
16395
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
16396
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
16397
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
15179
16398
  """
15180
- :param pulumi.Input[_builtins.int] hugepage_size1g: Amount of 1G hugepages.
15181
- :param pulumi.Input[_builtins.int] hugepage_size2m: Amount of 2M hugepages.
16399
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
16400
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
16401
+ :param pulumi.Input[_builtins.str] memory_available: Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
16402
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
16403
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
16404
+ :param pulumi.Input[_builtins.str] pid_available: Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
15182
16405
  """
15183
- if hugepage_size1g is not None:
15184
- pulumi.set(__self__, "hugepage_size1g", hugepage_size1g)
15185
- if hugepage_size2m is not None:
15186
- pulumi.set(__self__, "hugepage_size2m", hugepage_size2m)
16406
+ if imagefs_available is not None:
16407
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
16408
+ if imagefs_inodes_free is not None:
16409
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
16410
+ if memory_available is not None:
16411
+ pulumi.set(__self__, "memory_available", memory_available)
16412
+ if nodefs_available is not None:
16413
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
16414
+ if nodefs_inodes_free is not None:
16415
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
16416
+ if pid_available is not None:
16417
+ pulumi.set(__self__, "pid_available", pid_available)
15187
16418
 
15188
16419
  @_builtins.property
15189
- @pulumi.getter(name="hugepageSize1g")
15190
- def hugepage_size1g(self) -> Optional[pulumi.Input[_builtins.int]]:
16420
+ @pulumi.getter(name="imagefsAvailable")
16421
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
15191
16422
  """
15192
- Amount of 1G hugepages.
16423
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
15193
16424
  """
15194
- return pulumi.get(self, "hugepage_size1g")
16425
+ return pulumi.get(self, "imagefs_available")
15195
16426
 
15196
- @hugepage_size1g.setter
15197
- def hugepage_size1g(self, value: Optional[pulumi.Input[_builtins.int]]):
15198
- pulumi.set(self, "hugepage_size1g", value)
16427
+ @imagefs_available.setter
16428
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16429
+ pulumi.set(self, "imagefs_available", value)
15199
16430
 
15200
16431
  @_builtins.property
15201
- @pulumi.getter(name="hugepageSize2m")
15202
- def hugepage_size2m(self) -> Optional[pulumi.Input[_builtins.int]]:
16432
+ @pulumi.getter(name="imagefsInodesFree")
16433
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
15203
16434
  """
15204
- Amount of 2M hugepages.
16435
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
15205
16436
  """
15206
- return pulumi.get(self, "hugepage_size2m")
16437
+ return pulumi.get(self, "imagefs_inodes_free")
15207
16438
 
15208
- @hugepage_size2m.setter
15209
- def hugepage_size2m(self, value: Optional[pulumi.Input[_builtins.int]]):
15210
- pulumi.set(self, "hugepage_size2m", value)
16439
+ @imagefs_inodes_free.setter
16440
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
16441
+ pulumi.set(self, "imagefs_inodes_free", value)
15211
16442
 
16443
+ @_builtins.property
16444
+ @pulumi.getter(name="memoryAvailable")
16445
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16446
+ """
16447
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
16448
+ """
16449
+ return pulumi.get(self, "memory_available")
15212
16450
 
15213
- if not MYPY:
15214
- class ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgsDict(TypedDict):
15215
- local_ssd_count: pulumi.Input[_builtins.int]
16451
+ @memory_available.setter
16452
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16453
+ pulumi.set(self, "memory_available", value)
16454
+
16455
+ @_builtins.property
16456
+ @pulumi.getter(name="nodefsAvailable")
16457
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
15216
16458
  """
15217
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
15218
- > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
16459
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
15219
16460
  """
15220
- elif False:
15221
- ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgsDict: TypeAlias = Mapping[str, Any]
16461
+ return pulumi.get(self, "nodefs_available")
15222
16462
 
15223
- @pulumi.input_type
15224
- class ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgs:
15225
- def __init__(__self__, *,
15226
- local_ssd_count: pulumi.Input[_builtins.int]):
16463
+ @nodefs_available.setter
16464
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16465
+ pulumi.set(self, "nodefs_available", value)
16466
+
16467
+ @_builtins.property
16468
+ @pulumi.getter(name="nodefsInodesFree")
16469
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
15227
16470
  """
15228
- :param pulumi.Input[_builtins.int] local_ssd_count: Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
15229
- > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
16471
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
15230
16472
  """
15231
- pulumi.set(__self__, "local_ssd_count", local_ssd_count)
16473
+ return pulumi.get(self, "nodefs_inodes_free")
16474
+
16475
+ @nodefs_inodes_free.setter
16476
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
16477
+ pulumi.set(self, "nodefs_inodes_free", value)
15232
16478
 
15233
16479
  @_builtins.property
15234
- @pulumi.getter(name="localSsdCount")
15235
- def local_ssd_count(self) -> pulumi.Input[_builtins.int]:
16480
+ @pulumi.getter(name="pidAvailable")
16481
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
15236
16482
  """
15237
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
15238
- > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
16483
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
15239
16484
  """
15240
- return pulumi.get(self, "local_ssd_count")
16485
+ return pulumi.get(self, "pid_available")
15241
16486
 
15242
- @local_ssd_count.setter
15243
- def local_ssd_count(self, value: pulumi.Input[_builtins.int]):
15244
- pulumi.set(self, "local_ssd_count", value)
16487
+ @pid_available.setter
16488
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16489
+ pulumi.set(self, "pid_available", value)
15245
16490
 
15246
16491
 
15247
16492
  if not MYPY:
15248
- class ClusterNodePoolNodeConfigReservationAffinityArgsDict(TypedDict):
15249
- consume_reservation_type: pulumi.Input[_builtins.str]
16493
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict(TypedDict):
16494
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
16495
+ """
16496
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16497
+ """
16498
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
16499
+ """
16500
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16501
+ """
16502
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
16503
+ """
16504
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
16505
+ """
16506
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
16507
+ """
16508
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16509
+ """
16510
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
16511
+ """
16512
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16513
+ """
16514
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
16515
+ """
16516
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16517
+ """
16518
+ elif False:
16519
+ ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict: TypeAlias = Mapping[str, Any]
16520
+
16521
+ @pulumi.input_type
16522
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs:
16523
+ def __init__(__self__, *,
16524
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
16525
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
16526
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
16527
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
16528
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
16529
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
16530
+ """
16531
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16532
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16533
+ :param pulumi.Input[_builtins.str] memory_available: Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
16534
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16535
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16536
+ :param pulumi.Input[_builtins.str] pid_available: Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16537
+ """
16538
+ if imagefs_available is not None:
16539
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
16540
+ if imagefs_inodes_free is not None:
16541
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
16542
+ if memory_available is not None:
16543
+ pulumi.set(__self__, "memory_available", memory_available)
16544
+ if nodefs_available is not None:
16545
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
16546
+ if nodefs_inodes_free is not None:
16547
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
16548
+ if pid_available is not None:
16549
+ pulumi.set(__self__, "pid_available", pid_available)
16550
+
16551
+ @_builtins.property
16552
+ @pulumi.getter(name="imagefsAvailable")
16553
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16554
+ """
16555
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16556
+ """
16557
+ return pulumi.get(self, "imagefs_available")
16558
+
16559
+ @imagefs_available.setter
16560
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16561
+ pulumi.set(self, "imagefs_available", value)
16562
+
16563
+ @_builtins.property
16564
+ @pulumi.getter(name="imagefsInodesFree")
16565
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
16566
+ """
16567
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16568
+ """
16569
+ return pulumi.get(self, "imagefs_inodes_free")
16570
+
16571
+ @imagefs_inodes_free.setter
16572
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
16573
+ pulumi.set(self, "imagefs_inodes_free", value)
16574
+
16575
+ @_builtins.property
16576
+ @pulumi.getter(name="memoryAvailable")
16577
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16578
+ """
16579
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
16580
+ """
16581
+ return pulumi.get(self, "memory_available")
16582
+
16583
+ @memory_available.setter
16584
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16585
+ pulumi.set(self, "memory_available", value)
16586
+
16587
+ @_builtins.property
16588
+ @pulumi.getter(name="nodefsAvailable")
16589
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16590
+ """
16591
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16592
+ """
16593
+ return pulumi.get(self, "nodefs_available")
16594
+
16595
+ @nodefs_available.setter
16596
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16597
+ pulumi.set(self, "nodefs_available", value)
16598
+
16599
+ @_builtins.property
16600
+ @pulumi.getter(name="nodefsInodesFree")
16601
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
16602
+ """
16603
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16604
+ """
16605
+ return pulumi.get(self, "nodefs_inodes_free")
16606
+
16607
+ @nodefs_inodes_free.setter
16608
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
16609
+ pulumi.set(self, "nodefs_inodes_free", value)
16610
+
16611
+ @_builtins.property
16612
+ @pulumi.getter(name="pidAvailable")
16613
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16614
+ """
16615
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16616
+ """
16617
+ return pulumi.get(self, "pid_available")
16618
+
16619
+ @pid_available.setter
16620
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16621
+ pulumi.set(self, "pid_available", value)
16622
+
16623
+
16624
+ if not MYPY:
16625
+ class ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict(TypedDict):
16626
+ cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
16627
+ """
16628
+ Possible cgroup modes that can be used.
16629
+ Accepted values are:
16630
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
16631
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
16632
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
16633
+ """
16634
+ hugepages_config: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict']]
16635
+ """
16636
+ Amounts for 2M and 1G hugepages. Structure is documented below.
16637
+ """
16638
+ sysctls: NotRequired[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]
16639
+ """
16640
+ The Linux kernel parameters to be applied to the nodes
16641
+ and all pods running on the nodes. Specified as a map from the key, such as
16642
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
16643
+ Note that validations happen all server side. All attributes are optional.
16644
+ """
16645
+ transparent_hugepage_defrag: NotRequired[pulumi.Input[_builtins.str]]
16646
+ """
16647
+ The Linux kernel transparent hugepage defrag setting.
16648
+ """
16649
+ transparent_hugepage_enabled: NotRequired[pulumi.Input[_builtins.str]]
16650
+ """
16651
+ The Linux kernel transparent hugepage setting.
16652
+ """
16653
+ elif False:
16654
+ ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict: TypeAlias = Mapping[str, Any]
16655
+
16656
+ @pulumi.input_type
16657
+ class ClusterNodePoolNodeConfigLinuxNodeConfigArgs:
16658
+ def __init__(__self__, *,
16659
+ cgroup_mode: Optional[pulumi.Input[_builtins.str]] = None,
16660
+ hugepages_config: Optional[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']] = None,
16661
+ sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
16662
+ transparent_hugepage_defrag: Optional[pulumi.Input[_builtins.str]] = None,
16663
+ transparent_hugepage_enabled: Optional[pulumi.Input[_builtins.str]] = None):
16664
+ """
16665
+ :param pulumi.Input[_builtins.str] cgroup_mode: Possible cgroup modes that can be used.
16666
+ Accepted values are:
16667
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
16668
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
16669
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
16670
+ :param pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_config: Amounts for 2M and 1G hugepages. Structure is documented below.
16671
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] sysctls: The Linux kernel parameters to be applied to the nodes
16672
+ and all pods running on the nodes. Specified as a map from the key, such as
16673
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
16674
+ Note that validations happen all server side. All attributes are optional.
16675
+ :param pulumi.Input[_builtins.str] transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
16676
+ :param pulumi.Input[_builtins.str] transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
16677
+ """
16678
+ if cgroup_mode is not None:
16679
+ pulumi.set(__self__, "cgroup_mode", cgroup_mode)
16680
+ if hugepages_config is not None:
16681
+ pulumi.set(__self__, "hugepages_config", hugepages_config)
16682
+ if sysctls is not None:
16683
+ pulumi.set(__self__, "sysctls", sysctls)
16684
+ if transparent_hugepage_defrag is not None:
16685
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
16686
+ if transparent_hugepage_enabled is not None:
16687
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
16688
+
16689
+ @_builtins.property
16690
+ @pulumi.getter(name="cgroupMode")
16691
+ def cgroup_mode(self) -> Optional[pulumi.Input[_builtins.str]]:
16692
+ """
16693
+ Possible cgroup modes that can be used.
16694
+ Accepted values are:
16695
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
16696
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
16697
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
16698
+ """
16699
+ return pulumi.get(self, "cgroup_mode")
16700
+
16701
+ @cgroup_mode.setter
16702
+ def cgroup_mode(self, value: Optional[pulumi.Input[_builtins.str]]):
16703
+ pulumi.set(self, "cgroup_mode", value)
16704
+
16705
+ @_builtins.property
16706
+ @pulumi.getter(name="hugepagesConfig")
16707
+ def hugepages_config(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']]:
16708
+ """
16709
+ Amounts for 2M and 1G hugepages. Structure is documented below.
16710
+ """
16711
+ return pulumi.get(self, "hugepages_config")
16712
+
16713
+ @hugepages_config.setter
16714
+ def hugepages_config(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']]):
16715
+ pulumi.set(self, "hugepages_config", value)
16716
+
16717
+ @_builtins.property
16718
+ @pulumi.getter
16719
+ def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
16720
+ """
16721
+ The Linux kernel parameters to be applied to the nodes
16722
+ and all pods running on the nodes. Specified as a map from the key, such as
16723
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
16724
+ Note that validations happen all server side. All attributes are optional.
16725
+ """
16726
+ return pulumi.get(self, "sysctls")
16727
+
16728
+ @sysctls.setter
16729
+ def sysctls(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
16730
+ pulumi.set(self, "sysctls", value)
16731
+
16732
+ @_builtins.property
16733
+ @pulumi.getter(name="transparentHugepageDefrag")
16734
+ def transparent_hugepage_defrag(self) -> Optional[pulumi.Input[_builtins.str]]:
16735
+ """
16736
+ The Linux kernel transparent hugepage defrag setting.
16737
+ """
16738
+ return pulumi.get(self, "transparent_hugepage_defrag")
16739
+
16740
+ @transparent_hugepage_defrag.setter
16741
+ def transparent_hugepage_defrag(self, value: Optional[pulumi.Input[_builtins.str]]):
16742
+ pulumi.set(self, "transparent_hugepage_defrag", value)
16743
+
16744
+ @_builtins.property
16745
+ @pulumi.getter(name="transparentHugepageEnabled")
16746
+ def transparent_hugepage_enabled(self) -> Optional[pulumi.Input[_builtins.str]]:
16747
+ """
16748
+ The Linux kernel transparent hugepage setting.
16749
+ """
16750
+ return pulumi.get(self, "transparent_hugepage_enabled")
16751
+
16752
+ @transparent_hugepage_enabled.setter
16753
+ def transparent_hugepage_enabled(self, value: Optional[pulumi.Input[_builtins.str]]):
16754
+ pulumi.set(self, "transparent_hugepage_enabled", value)
16755
+
16756
+
16757
+ if not MYPY:
16758
+ class ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict(TypedDict):
16759
+ hugepage_size1g: NotRequired[pulumi.Input[_builtins.int]]
16760
+ """
16761
+ Amount of 1G hugepages.
16762
+ """
16763
+ hugepage_size2m: NotRequired[pulumi.Input[_builtins.int]]
16764
+ """
16765
+ Amount of 2M hugepages.
16766
+ """
16767
+ elif False:
16768
+ ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict: TypeAlias = Mapping[str, Any]
16769
+
16770
+ @pulumi.input_type
16771
+ class ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs:
16772
+ def __init__(__self__, *,
16773
+ hugepage_size1g: Optional[pulumi.Input[_builtins.int]] = None,
16774
+ hugepage_size2m: Optional[pulumi.Input[_builtins.int]] = None):
16775
+ """
16776
+ :param pulumi.Input[_builtins.int] hugepage_size1g: Amount of 1G hugepages.
16777
+ :param pulumi.Input[_builtins.int] hugepage_size2m: Amount of 2M hugepages.
16778
+ """
16779
+ if hugepage_size1g is not None:
16780
+ pulumi.set(__self__, "hugepage_size1g", hugepage_size1g)
16781
+ if hugepage_size2m is not None:
16782
+ pulumi.set(__self__, "hugepage_size2m", hugepage_size2m)
16783
+
16784
+ @_builtins.property
16785
+ @pulumi.getter(name="hugepageSize1g")
16786
+ def hugepage_size1g(self) -> Optional[pulumi.Input[_builtins.int]]:
16787
+ """
16788
+ Amount of 1G hugepages.
16789
+ """
16790
+ return pulumi.get(self, "hugepage_size1g")
16791
+
16792
+ @hugepage_size1g.setter
16793
+ def hugepage_size1g(self, value: Optional[pulumi.Input[_builtins.int]]):
16794
+ pulumi.set(self, "hugepage_size1g", value)
16795
+
16796
+ @_builtins.property
16797
+ @pulumi.getter(name="hugepageSize2m")
16798
+ def hugepage_size2m(self) -> Optional[pulumi.Input[_builtins.int]]:
16799
+ """
16800
+ Amount of 2M hugepages.
16801
+ """
16802
+ return pulumi.get(self, "hugepage_size2m")
16803
+
16804
+ @hugepage_size2m.setter
16805
+ def hugepage_size2m(self, value: Optional[pulumi.Input[_builtins.int]]):
16806
+ pulumi.set(self, "hugepage_size2m", value)
16807
+
16808
+
16809
+ if not MYPY:
16810
+ class ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgsDict(TypedDict):
16811
+ local_ssd_count: pulumi.Input[_builtins.int]
16812
+ """
16813
+ Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
16814
+ > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
16815
+ """
16816
+ elif False:
16817
+ ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgsDict: TypeAlias = Mapping[str, Any]
16818
+
16819
+ @pulumi.input_type
16820
+ class ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgs:
16821
+ def __init__(__self__, *,
16822
+ local_ssd_count: pulumi.Input[_builtins.int]):
16823
+ """
16824
+ :param pulumi.Input[_builtins.int] local_ssd_count: Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
16825
+ > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
16826
+ """
16827
+ pulumi.set(__self__, "local_ssd_count", local_ssd_count)
16828
+
16829
+ @_builtins.property
16830
+ @pulumi.getter(name="localSsdCount")
16831
+ def local_ssd_count(self) -> pulumi.Input[_builtins.int]:
16832
+ """
16833
+ Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
16834
+ > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
16835
+ """
16836
+ return pulumi.get(self, "local_ssd_count")
16837
+
16838
+ @local_ssd_count.setter
16839
+ def local_ssd_count(self, value: pulumi.Input[_builtins.int]):
16840
+ pulumi.set(self, "local_ssd_count", value)
16841
+
16842
+
16843
+ if not MYPY:
16844
+ class ClusterNodePoolNodeConfigReservationAffinityArgsDict(TypedDict):
16845
+ consume_reservation_type: pulumi.Input[_builtins.str]
15250
16846
  """
15251
16847
  The type of reservation consumption
15252
16848
  Accepted values are:
@@ -15492,7 +17088,11 @@ if not MYPY:
15492
17088
  class ClusterNodePoolNodeConfigSoleTenantConfigArgsDict(TypedDict):
15493
17089
  node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgsDict']]]
15494
17090
  """
15495
- .
17091
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
17092
+ """
17093
+ min_node_cpus: NotRequired[pulumi.Input[_builtins.int]]
17094
+ """
17095
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
15496
17096
  """
15497
17097
  elif False:
15498
17098
  ClusterNodePoolNodeConfigSoleTenantConfigArgsDict: TypeAlias = Mapping[str, Any]
@@ -15500,17 +17100,21 @@ elif False:
15500
17100
  @pulumi.input_type
15501
17101
  class ClusterNodePoolNodeConfigSoleTenantConfigArgs:
15502
17102
  def __init__(__self__, *,
15503
- node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
17103
+ node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]],
17104
+ min_node_cpus: Optional[pulumi.Input[_builtins.int]] = None):
15504
17105
  """
15505
- :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: .
17106
+ :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: The node affinity settings for the sole tenant node pool. Structure is documented below.
17107
+ :param pulumi.Input[_builtins.int] min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
15506
17108
  """
15507
17109
  pulumi.set(__self__, "node_affinities", node_affinities)
17110
+ if min_node_cpus is not None:
17111
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
15508
17112
 
15509
17113
  @_builtins.property
15510
17114
  @pulumi.getter(name="nodeAffinities")
15511
17115
  def node_affinities(self) -> pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]:
15512
17116
  """
15513
- .
17117
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
15514
17118
  """
15515
17119
  return pulumi.get(self, "node_affinities")
15516
17120
 
@@ -15518,6 +17122,18 @@ class ClusterNodePoolNodeConfigSoleTenantConfigArgs:
15518
17122
  def node_affinities(self, value: pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
15519
17123
  pulumi.set(self, "node_affinities", value)
15520
17124
 
17125
+ @_builtins.property
17126
+ @pulumi.getter(name="minNodeCpus")
17127
+ def min_node_cpus(self) -> Optional[pulumi.Input[_builtins.int]]:
17128
+ """
17129
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
17130
+ """
17131
+ return pulumi.get(self, "min_node_cpus")
17132
+
17133
+ @min_node_cpus.setter
17134
+ def min_node_cpus(self, value: Optional[pulumi.Input[_builtins.int]]):
17135
+ pulumi.set(self, "min_node_cpus", value)
17136
+
15521
17137
 
15522
17138
  if not MYPY:
15523
17139
  class ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgsDict(TypedDict):
@@ -16605,6 +18221,58 @@ class ClusterProtectConfigWorkloadConfigArgs:
16605
18221
  pulumi.set(self, "audit_mode", value)
16606
18222
 
16607
18223
 
18224
+ if not MYPY:
18225
+ class ClusterRbacBindingConfigArgsDict(TypedDict):
18226
+ enable_insecure_binding_system_authenticated: NotRequired[pulumi.Input[_builtins.bool]]
18227
+ """
18228
+ Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:authenticated.
18229
+ """
18230
+ enable_insecure_binding_system_unauthenticated: NotRequired[pulumi.Input[_builtins.bool]]
18231
+ """
18232
+ Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:anonymous or system:unauthenticated.
18233
+ """
18234
+ elif False:
18235
+ ClusterRbacBindingConfigArgsDict: TypeAlias = Mapping[str, Any]
18236
+
18237
+ @pulumi.input_type
18238
+ class ClusterRbacBindingConfigArgs:
18239
+ def __init__(__self__, *,
18240
+ enable_insecure_binding_system_authenticated: Optional[pulumi.Input[_builtins.bool]] = None,
18241
+ enable_insecure_binding_system_unauthenticated: Optional[pulumi.Input[_builtins.bool]] = None):
18242
+ """
18243
+ :param pulumi.Input[_builtins.bool] enable_insecure_binding_system_authenticated: Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:authenticated.
18244
+ :param pulumi.Input[_builtins.bool] enable_insecure_binding_system_unauthenticated: Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:anonymous or system:unauthenticated.
18245
+ """
18246
+ if enable_insecure_binding_system_authenticated is not None:
18247
+ pulumi.set(__self__, "enable_insecure_binding_system_authenticated", enable_insecure_binding_system_authenticated)
18248
+ if enable_insecure_binding_system_unauthenticated is not None:
18249
+ pulumi.set(__self__, "enable_insecure_binding_system_unauthenticated", enable_insecure_binding_system_unauthenticated)
18250
+
18251
+ @_builtins.property
18252
+ @pulumi.getter(name="enableInsecureBindingSystemAuthenticated")
18253
+ def enable_insecure_binding_system_authenticated(self) -> Optional[pulumi.Input[_builtins.bool]]:
18254
+ """
18255
+ Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:authenticated.
18256
+ """
18257
+ return pulumi.get(self, "enable_insecure_binding_system_authenticated")
18258
+
18259
+ @enable_insecure_binding_system_authenticated.setter
18260
+ def enable_insecure_binding_system_authenticated(self, value: Optional[pulumi.Input[_builtins.bool]]):
18261
+ pulumi.set(self, "enable_insecure_binding_system_authenticated", value)
18262
+
18263
+ @_builtins.property
18264
+ @pulumi.getter(name="enableInsecureBindingSystemUnauthenticated")
18265
+ def enable_insecure_binding_system_unauthenticated(self) -> Optional[pulumi.Input[_builtins.bool]]:
18266
+ """
18267
+ Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:anonymous or system:unauthenticated.
18268
+ """
18269
+ return pulumi.get(self, "enable_insecure_binding_system_unauthenticated")
18270
+
18271
+ @enable_insecure_binding_system_unauthenticated.setter
18272
+ def enable_insecure_binding_system_unauthenticated(self, value: Optional[pulumi.Input[_builtins.bool]]):
18273
+ pulumi.set(self, "enable_insecure_binding_system_unauthenticated", value)
18274
+
18275
+
16608
18276
  if not MYPY:
16609
18277
  class ClusterReleaseChannelArgsDict(TypedDict):
16610
18278
  channel: pulumi.Input[_builtins.str]
@@ -17529,6 +19197,10 @@ if not MYPY:
17529
19197
  """
17530
19198
  The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID.
17531
19199
  """
19200
+ subnetwork: NotRequired[pulumi.Input[_builtins.str]]
19201
+ """
19202
+ The subnetwork path for the node pool. Format: `projects/{project}/regions/{region}/subnetworks/{subnetwork}`. If the cluster is associated with multiple subnetworks, the subnetwork for the node pool is picked based on the IP utilization during node pool creation and is immutable
19203
+ """
17532
19204
  elif False:
17533
19205
  NodePoolNetworkConfigArgsDict: TypeAlias = Mapping[str, Any]
17534
19206
 
@@ -17542,7 +19214,8 @@ class NodePoolNetworkConfigArgs:
17542
19214
  network_performance_config: Optional[pulumi.Input['NodePoolNetworkConfigNetworkPerformanceConfigArgs']] = None,
17543
19215
  pod_cidr_overprovision_config: Optional[pulumi.Input['NodePoolNetworkConfigPodCidrOverprovisionConfigArgs']] = None,
17544
19216
  pod_ipv4_cidr_block: Optional[pulumi.Input[_builtins.str]] = None,
17545
- pod_range: Optional[pulumi.Input[_builtins.str]] = None):
19217
+ pod_range: Optional[pulumi.Input[_builtins.str]] = None,
19218
+ subnetwork: Optional[pulumi.Input[_builtins.str]] = None):
17546
19219
  """
17547
19220
  :param pulumi.Input[Sequence[pulumi.Input['NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs']]] additional_node_network_configs: We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface.
17548
19221
  Structure is documented below
@@ -17554,6 +19227,7 @@ class NodePoolNetworkConfigArgs:
17554
19227
  :param pulumi.Input['NodePoolNetworkConfigPodCidrOverprovisionConfigArgs'] pod_cidr_overprovision_config: Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
17555
19228
  :param pulumi.Input[_builtins.str] pod_ipv4_cidr_block: The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
17556
19229
  :param pulumi.Input[_builtins.str] pod_range: The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID.
19230
+ :param pulumi.Input[_builtins.str] subnetwork: The subnetwork path for the node pool. Format: `projects/{project}/regions/{region}/subnetworks/{subnetwork}`. If the cluster is associated with multiple subnetworks, the subnetwork for the node pool is picked based on the IP utilization during node pool creation and is immutable
17557
19231
  """
17558
19232
  if additional_node_network_configs is not None:
17559
19233
  pulumi.set(__self__, "additional_node_network_configs", additional_node_network_configs)
@@ -17571,6 +19245,8 @@ class NodePoolNetworkConfigArgs:
17571
19245
  pulumi.set(__self__, "pod_ipv4_cidr_block", pod_ipv4_cidr_block)
17572
19246
  if pod_range is not None:
17573
19247
  pulumi.set(__self__, "pod_range", pod_range)
19248
+ if subnetwork is not None:
19249
+ pulumi.set(__self__, "subnetwork", subnetwork)
17574
19250
 
17575
19251
  @_builtins.property
17576
19252
  @pulumi.getter(name="additionalNodeNetworkConfigs")
@@ -17670,6 +19346,18 @@ class NodePoolNetworkConfigArgs:
17670
19346
  def pod_range(self, value: Optional[pulumi.Input[_builtins.str]]):
17671
19347
  pulumi.set(self, "pod_range", value)
17672
19348
 
19349
+ @_builtins.property
19350
+ @pulumi.getter
19351
+ def subnetwork(self) -> Optional[pulumi.Input[_builtins.str]]:
19352
+ """
19353
+ The subnetwork path for the node pool. Format: `projects/{project}/regions/{region}/subnetworks/{subnetwork}`. If the cluster is associated with multiple subnetworks, the subnetwork for the node pool is picked based on the IP utilization during node pool creation and is immutable
19354
+ """
19355
+ return pulumi.get(self, "subnetwork")
19356
+
19357
+ @subnetwork.setter
19358
+ def subnetwork(self, value: Optional[pulumi.Input[_builtins.str]]):
19359
+ pulumi.set(self, "subnetwork", value)
19360
+
17673
19361
 
17674
19362
  if not MYPY:
17675
19363
  class NodePoolNetworkConfigAdditionalNodeNetworkConfigArgsDict(TypedDict):
@@ -17863,6 +19551,10 @@ if not MYPY:
17863
19551
  """
17864
19552
  Specifies options for controlling advanced machine features.
17865
19553
  """
19554
+ boot_disk: NotRequired[pulumi.Input['NodePoolNodeConfigBootDiskArgsDict']]
19555
+ """
19556
+ Boot disk configuration for node pools nodes.
19557
+ """
17866
19558
  boot_disk_kms_key: NotRequired[pulumi.Input[_builtins.str]]
17867
19559
  """
17868
19560
  The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
@@ -18046,6 +19738,7 @@ elif False:
18046
19738
  class NodePoolNodeConfigArgs:
18047
19739
  def __init__(__self__, *,
18048
19740
  advanced_machine_features: Optional[pulumi.Input['NodePoolNodeConfigAdvancedMachineFeaturesArgs']] = None,
19741
+ boot_disk: Optional[pulumi.Input['NodePoolNodeConfigBootDiskArgs']] = None,
18049
19742
  boot_disk_kms_key: Optional[pulumi.Input[_builtins.str]] = None,
18050
19743
  confidential_nodes: Optional[pulumi.Input['NodePoolNodeConfigConfidentialNodesArgs']] = None,
18051
19744
  containerd_config: Optional[pulumi.Input['NodePoolNodeConfigContainerdConfigArgs']] = None,
@@ -18092,6 +19785,7 @@ class NodePoolNodeConfigArgs:
18092
19785
  workload_metadata_config: Optional[pulumi.Input['NodePoolNodeConfigWorkloadMetadataConfigArgs']] = None):
18093
19786
  """
18094
19787
  :param pulumi.Input['NodePoolNodeConfigAdvancedMachineFeaturesArgs'] advanced_machine_features: Specifies options for controlling advanced machine features.
19788
+ :param pulumi.Input['NodePoolNodeConfigBootDiskArgs'] boot_disk: Boot disk configuration for node pools nodes.
18095
19789
  :param pulumi.Input[_builtins.str] boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
18096
19790
  :param pulumi.Input['NodePoolNodeConfigConfidentialNodesArgs'] confidential_nodes: Configuration for the confidential nodes feature, which makes nodes run on confidential VMs.
18097
19791
  :param pulumi.Input['NodePoolNodeConfigContainerdConfigArgs'] containerd_config: Parameters for containerd configuration.
@@ -18139,6 +19833,8 @@ class NodePoolNodeConfigArgs:
18139
19833
  """
18140
19834
  if advanced_machine_features is not None:
18141
19835
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
19836
+ if boot_disk is not None:
19837
+ pulumi.set(__self__, "boot_disk", boot_disk)
18142
19838
  if boot_disk_kms_key is not None:
18143
19839
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
18144
19840
  if confidential_nodes is not None:
@@ -18240,6 +19936,18 @@ class NodePoolNodeConfigArgs:
18240
19936
  def advanced_machine_features(self, value: Optional[pulumi.Input['NodePoolNodeConfigAdvancedMachineFeaturesArgs']]):
18241
19937
  pulumi.set(self, "advanced_machine_features", value)
18242
19938
 
19939
+ @_builtins.property
19940
+ @pulumi.getter(name="bootDisk")
19941
+ def boot_disk(self) -> Optional[pulumi.Input['NodePoolNodeConfigBootDiskArgs']]:
19942
+ """
19943
+ Boot disk configuration for node pools nodes.
19944
+ """
19945
+ return pulumi.get(self, "boot_disk")
19946
+
19947
+ @boot_disk.setter
19948
+ def boot_disk(self, value: Optional[pulumi.Input['NodePoolNodeConfigBootDiskArgs']]):
19949
+ pulumi.set(self, "boot_disk", value)
19950
+
18243
19951
  @_builtins.property
18244
19952
  @pulumi.getter(name="bootDiskKmsKey")
18245
19953
  def boot_disk_kms_key(self) -> Optional[pulumi.Input[_builtins.str]]:
@@ -18793,51 +20501,143 @@ class NodePoolNodeConfigAdvancedMachineFeaturesArgs:
18793
20501
  enable_nested_virtualization: Optional[pulumi.Input[_builtins.bool]] = None,
18794
20502
  performance_monitoring_unit: Optional[pulumi.Input[_builtins.str]] = None):
18795
20503
  """
18796
- :param pulumi.Input[_builtins.int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
18797
- :param pulumi.Input[_builtins.bool] enable_nested_virtualization: Whether the node should have nested virtualization enabled.
18798
- :param pulumi.Input[_builtins.str] performance_monitoring_unit: Level of Performance Monitoring Unit (PMU) requested. If unset, no access to the PMU is assumed.
20504
+ :param pulumi.Input[_builtins.int] threads_per_core: The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
20505
+ :param pulumi.Input[_builtins.bool] enable_nested_virtualization: Whether the node should have nested virtualization enabled.
20506
+ :param pulumi.Input[_builtins.str] performance_monitoring_unit: Level of Performance Monitoring Unit (PMU) requested. If unset, no access to the PMU is assumed.
20507
+ """
20508
+ pulumi.set(__self__, "threads_per_core", threads_per_core)
20509
+ if enable_nested_virtualization is not None:
20510
+ pulumi.set(__self__, "enable_nested_virtualization", enable_nested_virtualization)
20511
+ if performance_monitoring_unit is not None:
20512
+ pulumi.set(__self__, "performance_monitoring_unit", performance_monitoring_unit)
20513
+
20514
+ @_builtins.property
20515
+ @pulumi.getter(name="threadsPerCore")
20516
+ def threads_per_core(self) -> pulumi.Input[_builtins.int]:
20517
+ """
20518
+ The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
20519
+ """
20520
+ return pulumi.get(self, "threads_per_core")
20521
+
20522
+ @threads_per_core.setter
20523
+ def threads_per_core(self, value: pulumi.Input[_builtins.int]):
20524
+ pulumi.set(self, "threads_per_core", value)
20525
+
20526
+ @_builtins.property
20527
+ @pulumi.getter(name="enableNestedVirtualization")
20528
+ def enable_nested_virtualization(self) -> Optional[pulumi.Input[_builtins.bool]]:
20529
+ """
20530
+ Whether the node should have nested virtualization enabled.
20531
+ """
20532
+ return pulumi.get(self, "enable_nested_virtualization")
20533
+
20534
+ @enable_nested_virtualization.setter
20535
+ def enable_nested_virtualization(self, value: Optional[pulumi.Input[_builtins.bool]]):
20536
+ pulumi.set(self, "enable_nested_virtualization", value)
20537
+
20538
+ @_builtins.property
20539
+ @pulumi.getter(name="performanceMonitoringUnit")
20540
+ def performance_monitoring_unit(self) -> Optional[pulumi.Input[_builtins.str]]:
20541
+ """
20542
+ Level of Performance Monitoring Unit (PMU) requested. If unset, no access to the PMU is assumed.
20543
+ """
20544
+ return pulumi.get(self, "performance_monitoring_unit")
20545
+
20546
+ @performance_monitoring_unit.setter
20547
+ def performance_monitoring_unit(self, value: Optional[pulumi.Input[_builtins.str]]):
20548
+ pulumi.set(self, "performance_monitoring_unit", value)
20549
+
20550
+
20551
+ if not MYPY:
20552
+ class NodePoolNodeConfigBootDiskArgsDict(TypedDict):
20553
+ disk_type: NotRequired[pulumi.Input[_builtins.str]]
20554
+ """
20555
+ Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
20556
+ """
20557
+ provisioned_iops: NotRequired[pulumi.Input[_builtins.int]]
20558
+ """
20559
+ Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
20560
+ """
20561
+ provisioned_throughput: NotRequired[pulumi.Input[_builtins.int]]
20562
+ """
20563
+ Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
20564
+ """
20565
+ size_gb: NotRequired[pulumi.Input[_builtins.int]]
20566
+ """
20567
+ Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
20568
+ """
20569
+ elif False:
20570
+ NodePoolNodeConfigBootDiskArgsDict: TypeAlias = Mapping[str, Any]
20571
+
20572
+ @pulumi.input_type
20573
+ class NodePoolNodeConfigBootDiskArgs:
20574
+ def __init__(__self__, *,
20575
+ disk_type: Optional[pulumi.Input[_builtins.str]] = None,
20576
+ provisioned_iops: Optional[pulumi.Input[_builtins.int]] = None,
20577
+ provisioned_throughput: Optional[pulumi.Input[_builtins.int]] = None,
20578
+ size_gb: Optional[pulumi.Input[_builtins.int]] = None):
20579
+ """
20580
+ :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
20581
+ :param pulumi.Input[_builtins.int] provisioned_iops: Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
20582
+ :param pulumi.Input[_builtins.int] provisioned_throughput: Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
20583
+ :param pulumi.Input[_builtins.int] size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
20584
+ """
20585
+ if disk_type is not None:
20586
+ pulumi.set(__self__, "disk_type", disk_type)
20587
+ if provisioned_iops is not None:
20588
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
20589
+ if provisioned_throughput is not None:
20590
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
20591
+ if size_gb is not None:
20592
+ pulumi.set(__self__, "size_gb", size_gb)
20593
+
20594
+ @_builtins.property
20595
+ @pulumi.getter(name="diskType")
20596
+ def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
20597
+ """
20598
+ Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
18799
20599
  """
18800
- pulumi.set(__self__, "threads_per_core", threads_per_core)
18801
- if enable_nested_virtualization is not None:
18802
- pulumi.set(__self__, "enable_nested_virtualization", enable_nested_virtualization)
18803
- if performance_monitoring_unit is not None:
18804
- pulumi.set(__self__, "performance_monitoring_unit", performance_monitoring_unit)
20600
+ return pulumi.get(self, "disk_type")
20601
+
20602
+ @disk_type.setter
20603
+ def disk_type(self, value: Optional[pulumi.Input[_builtins.str]]):
20604
+ pulumi.set(self, "disk_type", value)
18805
20605
 
18806
20606
  @_builtins.property
18807
- @pulumi.getter(name="threadsPerCore")
18808
- def threads_per_core(self) -> pulumi.Input[_builtins.int]:
20607
+ @pulumi.getter(name="provisionedIops")
20608
+ def provisioned_iops(self) -> Optional[pulumi.Input[_builtins.int]]:
18809
20609
  """
18810
- The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.
20610
+ Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
18811
20611
  """
18812
- return pulumi.get(self, "threads_per_core")
20612
+ return pulumi.get(self, "provisioned_iops")
18813
20613
 
18814
- @threads_per_core.setter
18815
- def threads_per_core(self, value: pulumi.Input[_builtins.int]):
18816
- pulumi.set(self, "threads_per_core", value)
20614
+ @provisioned_iops.setter
20615
+ def provisioned_iops(self, value: Optional[pulumi.Input[_builtins.int]]):
20616
+ pulumi.set(self, "provisioned_iops", value)
18817
20617
 
18818
20618
  @_builtins.property
18819
- @pulumi.getter(name="enableNestedVirtualization")
18820
- def enable_nested_virtualization(self) -> Optional[pulumi.Input[_builtins.bool]]:
20619
+ @pulumi.getter(name="provisionedThroughput")
20620
+ def provisioned_throughput(self) -> Optional[pulumi.Input[_builtins.int]]:
18821
20621
  """
18822
- Whether the node should have nested virtualization enabled.
20622
+ Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
18823
20623
  """
18824
- return pulumi.get(self, "enable_nested_virtualization")
20624
+ return pulumi.get(self, "provisioned_throughput")
18825
20625
 
18826
- @enable_nested_virtualization.setter
18827
- def enable_nested_virtualization(self, value: Optional[pulumi.Input[_builtins.bool]]):
18828
- pulumi.set(self, "enable_nested_virtualization", value)
20626
+ @provisioned_throughput.setter
20627
+ def provisioned_throughput(self, value: Optional[pulumi.Input[_builtins.int]]):
20628
+ pulumi.set(self, "provisioned_throughput", value)
18829
20629
 
18830
20630
  @_builtins.property
18831
- @pulumi.getter(name="performanceMonitoringUnit")
18832
- def performance_monitoring_unit(self) -> Optional[pulumi.Input[_builtins.str]]:
20631
+ @pulumi.getter(name="sizeGb")
20632
+ def size_gb(self) -> Optional[pulumi.Input[_builtins.int]]:
18833
20633
  """
18834
- Level of Performance Monitoring Unit (PMU) requested. If unset, no access to the PMU is assumed.
20634
+ Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
18835
20635
  """
18836
- return pulumi.get(self, "performance_monitoring_unit")
20636
+ return pulumi.get(self, "size_gb")
18837
20637
 
18838
- @performance_monitoring_unit.setter
18839
- def performance_monitoring_unit(self, value: Optional[pulumi.Input[_builtins.str]]):
18840
- pulumi.set(self, "performance_monitoring_unit", value)
20638
+ @size_gb.setter
20639
+ def size_gb(self, value: Optional[pulumi.Input[_builtins.int]]):
20640
+ pulumi.set(self, "size_gb", value)
18841
20641
 
18842
20642
 
18843
20643
  if not MYPY:
@@ -19550,6 +21350,22 @@ if not MYPY:
19550
21350
  """
19551
21351
  Control the CPU management policy on the node.
19552
21352
  """
21353
+ eviction_max_pod_grace_period_seconds: NotRequired[pulumi.Input[_builtins.int]]
21354
+ """
21355
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
21356
+ """
21357
+ eviction_minimum_reclaim: NotRequired[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict']]
21358
+ """
21359
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
21360
+ """
21361
+ eviction_soft: NotRequired[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftArgsDict']]
21362
+ """
21363
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
21364
+ """
21365
+ eviction_soft_grace_period: NotRequired[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict']]
21366
+ """
21367
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
21368
+ """
19553
21369
  image_gc_high_threshold_percent: NotRequired[pulumi.Input[_builtins.int]]
19554
21370
  """
19555
21371
  Defines the percent of disk usage after which image garbage collection is always run.
@@ -19570,10 +21386,18 @@ if not MYPY:
19570
21386
  """
19571
21387
  Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
19572
21388
  """
21389
+ max_parallel_image_pulls: NotRequired[pulumi.Input[_builtins.int]]
21390
+ """
21391
+ Set the maximum number of image pulls in parallel.
21392
+ """
19573
21393
  pod_pids_limit: NotRequired[pulumi.Input[_builtins.int]]
19574
21394
  """
19575
21395
  Controls the maximum number of processes allowed to run in a pod.
19576
21396
  """
21397
+ single_process_oom_kill: NotRequired[pulumi.Input[_builtins.bool]]
21398
+ """
21399
+ Defines whether to enable single process OOM killer.
21400
+ """
19577
21401
  elif False:
19578
21402
  NodePoolNodeConfigKubeletConfigArgsDict: TypeAlias = Mapping[str, Any]
19579
21403
 
@@ -19586,12 +21410,18 @@ class NodePoolNodeConfigKubeletConfigArgs:
19586
21410
  cpu_cfs_quota: Optional[pulumi.Input[_builtins.bool]] = None,
19587
21411
  cpu_cfs_quota_period: Optional[pulumi.Input[_builtins.str]] = None,
19588
21412
  cpu_manager_policy: Optional[pulumi.Input[_builtins.str]] = None,
21413
+ eviction_max_pod_grace_period_seconds: Optional[pulumi.Input[_builtins.int]] = None,
21414
+ eviction_minimum_reclaim: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs']] = None,
21415
+ eviction_soft: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftArgs']] = None,
21416
+ eviction_soft_grace_period: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']] = None,
19589
21417
  image_gc_high_threshold_percent: Optional[pulumi.Input[_builtins.int]] = None,
19590
21418
  image_gc_low_threshold_percent: Optional[pulumi.Input[_builtins.int]] = None,
19591
21419
  image_maximum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
19592
21420
  image_minimum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
19593
21421
  insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[_builtins.str]] = None,
19594
- pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None):
21422
+ max_parallel_image_pulls: Optional[pulumi.Input[_builtins.int]] = None,
21423
+ pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None,
21424
+ single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None):
19595
21425
  """
19596
21426
  :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
19597
21427
  :param pulumi.Input[_builtins.int] container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -19599,12 +21429,18 @@ class NodePoolNodeConfigKubeletConfigArgs:
19599
21429
  :param pulumi.Input[_builtins.bool] cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
19600
21430
  :param pulumi.Input[_builtins.str] cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
19601
21431
  :param pulumi.Input[_builtins.str] cpu_manager_policy: Control the CPU management policy on the node.
21432
+ :param pulumi.Input[_builtins.int] eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
21433
+ :param pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs'] eviction_minimum_reclaim: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
21434
+ :param pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftArgs'] eviction_soft: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
21435
+ :param pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs'] eviction_soft_grace_period: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
19602
21436
  :param pulumi.Input[_builtins.int] image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run.
19603
21437
  :param pulumi.Input[_builtins.int] image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
19604
21438
  :param pulumi.Input[_builtins.str] image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected.
19605
21439
  :param pulumi.Input[_builtins.str] image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
19606
21440
  :param pulumi.Input[_builtins.str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
21441
+ :param pulumi.Input[_builtins.int] max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
19607
21442
  :param pulumi.Input[_builtins.int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
21443
+ :param pulumi.Input[_builtins.bool] single_process_oom_kill: Defines whether to enable single process OOM killer.
19608
21444
  """
19609
21445
  if allowed_unsafe_sysctls is not None:
19610
21446
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -19618,6 +21454,14 @@ class NodePoolNodeConfigKubeletConfigArgs:
19618
21454
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
19619
21455
  if cpu_manager_policy is not None:
19620
21456
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
21457
+ if eviction_max_pod_grace_period_seconds is not None:
21458
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
21459
+ if eviction_minimum_reclaim is not None:
21460
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
21461
+ if eviction_soft is not None:
21462
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
21463
+ if eviction_soft_grace_period is not None:
21464
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
19621
21465
  if image_gc_high_threshold_percent is not None:
19622
21466
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
19623
21467
  if image_gc_low_threshold_percent is not None:
@@ -19628,8 +21472,12 @@ class NodePoolNodeConfigKubeletConfigArgs:
19628
21472
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
19629
21473
  if insecure_kubelet_readonly_port_enabled is not None:
19630
21474
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
21475
+ if max_parallel_image_pulls is not None:
21476
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
19631
21477
  if pod_pids_limit is not None:
19632
21478
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
21479
+ if single_process_oom_kill is not None:
21480
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
19633
21481
 
19634
21482
  @_builtins.property
19635
21483
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -19703,6 +21551,54 @@ class NodePoolNodeConfigKubeletConfigArgs:
19703
21551
  def cpu_manager_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
19704
21552
  pulumi.set(self, "cpu_manager_policy", value)
19705
21553
 
21554
+ @_builtins.property
21555
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
21556
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[pulumi.Input[_builtins.int]]:
21557
+ """
21558
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
21559
+ """
21560
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
21561
+
21562
+ @eviction_max_pod_grace_period_seconds.setter
21563
+ def eviction_max_pod_grace_period_seconds(self, value: Optional[pulumi.Input[_builtins.int]]):
21564
+ pulumi.set(self, "eviction_max_pod_grace_period_seconds", value)
21565
+
21566
+ @_builtins.property
21567
+ @pulumi.getter(name="evictionMinimumReclaim")
21568
+ def eviction_minimum_reclaim(self) -> Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs']]:
21569
+ """
21570
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
21571
+ """
21572
+ return pulumi.get(self, "eviction_minimum_reclaim")
21573
+
21574
+ @eviction_minimum_reclaim.setter
21575
+ def eviction_minimum_reclaim(self, value: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs']]):
21576
+ pulumi.set(self, "eviction_minimum_reclaim", value)
21577
+
21578
+ @_builtins.property
21579
+ @pulumi.getter(name="evictionSoft")
21580
+ def eviction_soft(self) -> Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftArgs']]:
21581
+ """
21582
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
21583
+ """
21584
+ return pulumi.get(self, "eviction_soft")
21585
+
21586
+ @eviction_soft.setter
21587
+ def eviction_soft(self, value: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftArgs']]):
21588
+ pulumi.set(self, "eviction_soft", value)
21589
+
21590
+ @_builtins.property
21591
+ @pulumi.getter(name="evictionSoftGracePeriod")
21592
+ def eviction_soft_grace_period(self) -> Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']]:
21593
+ """
21594
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
21595
+ """
21596
+ return pulumi.get(self, "eviction_soft_grace_period")
21597
+
21598
+ @eviction_soft_grace_period.setter
21599
+ def eviction_soft_grace_period(self, value: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']]):
21600
+ pulumi.set(self, "eviction_soft_grace_period", value)
21601
+
19706
21602
  @_builtins.property
19707
21603
  @pulumi.getter(name="imageGcHighThresholdPercent")
19708
21604
  def image_gc_high_threshold_percent(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -19763,6 +21659,18 @@ class NodePoolNodeConfigKubeletConfigArgs:
19763
21659
  def insecure_kubelet_readonly_port_enabled(self, value: Optional[pulumi.Input[_builtins.str]]):
19764
21660
  pulumi.set(self, "insecure_kubelet_readonly_port_enabled", value)
19765
21661
 
21662
+ @_builtins.property
21663
+ @pulumi.getter(name="maxParallelImagePulls")
21664
+ def max_parallel_image_pulls(self) -> Optional[pulumi.Input[_builtins.int]]:
21665
+ """
21666
+ Set the maximum number of image pulls in parallel.
21667
+ """
21668
+ return pulumi.get(self, "max_parallel_image_pulls")
21669
+
21670
+ @max_parallel_image_pulls.setter
21671
+ def max_parallel_image_pulls(self, value: Optional[pulumi.Input[_builtins.int]]):
21672
+ pulumi.set(self, "max_parallel_image_pulls", value)
21673
+
19766
21674
  @_builtins.property
19767
21675
  @pulumi.getter(name="podPidsLimit")
19768
21676
  def pod_pids_limit(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -19775,6 +21683,414 @@ class NodePoolNodeConfigKubeletConfigArgs:
19775
21683
  def pod_pids_limit(self, value: Optional[pulumi.Input[_builtins.int]]):
19776
21684
  pulumi.set(self, "pod_pids_limit", value)
19777
21685
 
21686
+ @_builtins.property
21687
+ @pulumi.getter(name="singleProcessOomKill")
21688
+ def single_process_oom_kill(self) -> Optional[pulumi.Input[_builtins.bool]]:
21689
+ """
21690
+ Defines whether to enable single process OOM killer.
21691
+ """
21692
+ return pulumi.get(self, "single_process_oom_kill")
21693
+
21694
+ @single_process_oom_kill.setter
21695
+ def single_process_oom_kill(self, value: Optional[pulumi.Input[_builtins.bool]]):
21696
+ pulumi.set(self, "single_process_oom_kill", value)
21697
+
21698
+
21699
+ if not MYPY:
21700
+ class NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict(TypedDict):
21701
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
21702
+ """
21703
+ Defines percentage of minimum reclaim for imagefs.available.
21704
+ """
21705
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
21706
+ """
21707
+ Defines percentage of minimum reclaim for imagefs.inodesFree.
21708
+ """
21709
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
21710
+ """
21711
+ Defines percentage of minimum reclaim for memory.available.
21712
+ """
21713
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
21714
+ """
21715
+ Defines percentage of minimum reclaim for nodefs.available.
21716
+ """
21717
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
21718
+ """
21719
+ Defines percentage of minimum reclaim for nodefs.inodesFree.
21720
+ """
21721
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
21722
+ """
21723
+ Defines percentage of minimum reclaim for pid.available.
21724
+ """
21725
+ elif False:
21726
+ NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict: TypeAlias = Mapping[str, Any]
21727
+
21728
+ @pulumi.input_type
21729
+ class NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs:
21730
+ def __init__(__self__, *,
21731
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
21732
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
21733
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
21734
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
21735
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
21736
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
21737
+ """
21738
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines percentage of minimum reclaim for imagefs.available.
21739
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree.
21740
+ :param pulumi.Input[_builtins.str] memory_available: Defines percentage of minimum reclaim for memory.available.
21741
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines percentage of minimum reclaim for nodefs.available.
21742
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree.
21743
+ :param pulumi.Input[_builtins.str] pid_available: Defines percentage of minimum reclaim for pid.available.
21744
+ """
21745
+ if imagefs_available is not None:
21746
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
21747
+ if imagefs_inodes_free is not None:
21748
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
21749
+ if memory_available is not None:
21750
+ pulumi.set(__self__, "memory_available", memory_available)
21751
+ if nodefs_available is not None:
21752
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
21753
+ if nodefs_inodes_free is not None:
21754
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
21755
+ if pid_available is not None:
21756
+ pulumi.set(__self__, "pid_available", pid_available)
21757
+
21758
+ @_builtins.property
21759
+ @pulumi.getter(name="imagefsAvailable")
21760
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21761
+ """
21762
+ Defines percentage of minimum reclaim for imagefs.available.
21763
+ """
21764
+ return pulumi.get(self, "imagefs_available")
21765
+
21766
+ @imagefs_available.setter
21767
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21768
+ pulumi.set(self, "imagefs_available", value)
21769
+
21770
+ @_builtins.property
21771
+ @pulumi.getter(name="imagefsInodesFree")
21772
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
21773
+ """
21774
+ Defines percentage of minimum reclaim for imagefs.inodesFree.
21775
+ """
21776
+ return pulumi.get(self, "imagefs_inodes_free")
21777
+
21778
+ @imagefs_inodes_free.setter
21779
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
21780
+ pulumi.set(self, "imagefs_inodes_free", value)
21781
+
21782
+ @_builtins.property
21783
+ @pulumi.getter(name="memoryAvailable")
21784
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21785
+ """
21786
+ Defines percentage of minimum reclaim for memory.available.
21787
+ """
21788
+ return pulumi.get(self, "memory_available")
21789
+
21790
+ @memory_available.setter
21791
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21792
+ pulumi.set(self, "memory_available", value)
21793
+
21794
+ @_builtins.property
21795
+ @pulumi.getter(name="nodefsAvailable")
21796
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21797
+ """
21798
+ Defines percentage of minimum reclaim for nodefs.available.
21799
+ """
21800
+ return pulumi.get(self, "nodefs_available")
21801
+
21802
+ @nodefs_available.setter
21803
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21804
+ pulumi.set(self, "nodefs_available", value)
21805
+
21806
+ @_builtins.property
21807
+ @pulumi.getter(name="nodefsInodesFree")
21808
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
21809
+ """
21810
+ Defines percentage of minimum reclaim for nodefs.inodesFree.
21811
+ """
21812
+ return pulumi.get(self, "nodefs_inodes_free")
21813
+
21814
+ @nodefs_inodes_free.setter
21815
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
21816
+ pulumi.set(self, "nodefs_inodes_free", value)
21817
+
21818
+ @_builtins.property
21819
+ @pulumi.getter(name="pidAvailable")
21820
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21821
+ """
21822
+ Defines percentage of minimum reclaim for pid.available.
21823
+ """
21824
+ return pulumi.get(self, "pid_available")
21825
+
21826
+ @pid_available.setter
21827
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21828
+ pulumi.set(self, "pid_available", value)
21829
+
21830
+
21831
+ if not MYPY:
21832
+ class NodePoolNodeConfigKubeletConfigEvictionSoftArgsDict(TypedDict):
21833
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
21834
+ """
21835
+ Defines percentage of soft eviction threshold for imagefs.available.
21836
+ """
21837
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
21838
+ """
21839
+ Defines percentage of soft eviction threshold for imagefs.inodesFree.
21840
+ """
21841
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
21842
+ """
21843
+ Defines quantity of soft eviction threshold for memory.available.
21844
+ """
21845
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
21846
+ """
21847
+ Defines percentage of soft eviction threshold for nodefs.available.
21848
+ """
21849
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
21850
+ """
21851
+ Defines percentage of soft eviction threshold for nodefs.inodesFree.
21852
+ """
21853
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
21854
+ """
21855
+ Defines percentage of soft eviction threshold for pid.available.
21856
+ """
21857
+ elif False:
21858
+ NodePoolNodeConfigKubeletConfigEvictionSoftArgsDict: TypeAlias = Mapping[str, Any]
21859
+
21860
+ @pulumi.input_type
21861
+ class NodePoolNodeConfigKubeletConfigEvictionSoftArgs:
21862
+ def __init__(__self__, *,
21863
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
21864
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
21865
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
21866
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
21867
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
21868
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
21869
+ """
21870
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines percentage of soft eviction threshold for imagefs.available.
21871
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree.
21872
+ :param pulumi.Input[_builtins.str] memory_available: Defines quantity of soft eviction threshold for memory.available.
21873
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines percentage of soft eviction threshold for nodefs.available.
21874
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree.
21875
+ :param pulumi.Input[_builtins.str] pid_available: Defines percentage of soft eviction threshold for pid.available.
21876
+ """
21877
+ if imagefs_available is not None:
21878
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
21879
+ if imagefs_inodes_free is not None:
21880
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
21881
+ if memory_available is not None:
21882
+ pulumi.set(__self__, "memory_available", memory_available)
21883
+ if nodefs_available is not None:
21884
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
21885
+ if nodefs_inodes_free is not None:
21886
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
21887
+ if pid_available is not None:
21888
+ pulumi.set(__self__, "pid_available", pid_available)
21889
+
21890
+ @_builtins.property
21891
+ @pulumi.getter(name="imagefsAvailable")
21892
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21893
+ """
21894
+ Defines percentage of soft eviction threshold for imagefs.available.
21895
+ """
21896
+ return pulumi.get(self, "imagefs_available")
21897
+
21898
+ @imagefs_available.setter
21899
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21900
+ pulumi.set(self, "imagefs_available", value)
21901
+
21902
+ @_builtins.property
21903
+ @pulumi.getter(name="imagefsInodesFree")
21904
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
21905
+ """
21906
+ Defines percentage of soft eviction threshold for imagefs.inodesFree.
21907
+ """
21908
+ return pulumi.get(self, "imagefs_inodes_free")
21909
+
21910
+ @imagefs_inodes_free.setter
21911
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
21912
+ pulumi.set(self, "imagefs_inodes_free", value)
21913
+
21914
+ @_builtins.property
21915
+ @pulumi.getter(name="memoryAvailable")
21916
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21917
+ """
21918
+ Defines quantity of soft eviction threshold for memory.available.
21919
+ """
21920
+ return pulumi.get(self, "memory_available")
21921
+
21922
+ @memory_available.setter
21923
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21924
+ pulumi.set(self, "memory_available", value)
21925
+
21926
+ @_builtins.property
21927
+ @pulumi.getter(name="nodefsAvailable")
21928
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21929
+ """
21930
+ Defines percentage of soft eviction threshold for nodefs.available.
21931
+ """
21932
+ return pulumi.get(self, "nodefs_available")
21933
+
21934
+ @nodefs_available.setter
21935
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21936
+ pulumi.set(self, "nodefs_available", value)
21937
+
21938
+ @_builtins.property
21939
+ @pulumi.getter(name="nodefsInodesFree")
21940
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
21941
+ """
21942
+ Defines percentage of soft eviction threshold for nodefs.inodesFree.
21943
+ """
21944
+ return pulumi.get(self, "nodefs_inodes_free")
21945
+
21946
+ @nodefs_inodes_free.setter
21947
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
21948
+ pulumi.set(self, "nodefs_inodes_free", value)
21949
+
21950
+ @_builtins.property
21951
+ @pulumi.getter(name="pidAvailable")
21952
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21953
+ """
21954
+ Defines percentage of soft eviction threshold for pid.available.
21955
+ """
21956
+ return pulumi.get(self, "pid_available")
21957
+
21958
+ @pid_available.setter
21959
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21960
+ pulumi.set(self, "pid_available", value)
21961
+
21962
+
21963
+ if not MYPY:
21964
+ class NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict(TypedDict):
21965
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
21966
+ """
21967
+ Defines grace period for the imagefs.available soft eviction threshold
21968
+ """
21969
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
21970
+ """
21971
+ Defines grace period for the imagefs.inodesFree soft eviction threshold.
21972
+ """
21973
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
21974
+ """
21975
+ Defines grace period for the memory.available soft eviction threshold.
21976
+ """
21977
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
21978
+ """
21979
+ Defines grace period for the nodefs.available soft eviction threshold.
21980
+ """
21981
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
21982
+ """
21983
+ Defines grace period for the nodefs.inodesFree soft eviction threshold.
21984
+ """
21985
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
21986
+ """
21987
+ Defines grace period for the pid.available soft eviction threshold.
21988
+ """
21989
+ elif False:
21990
+ NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict: TypeAlias = Mapping[str, Any]
21991
+
21992
+ @pulumi.input_type
21993
+ class NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs:
21994
+ def __init__(__self__, *,
21995
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
21996
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
21997
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
21998
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
21999
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
22000
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
22001
+ """
22002
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines grace period for the imagefs.available soft eviction threshold
22003
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold.
22004
+ :param pulumi.Input[_builtins.str] memory_available: Defines grace period for the memory.available soft eviction threshold.
22005
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines grace period for the nodefs.available soft eviction threshold.
22006
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold.
22007
+ :param pulumi.Input[_builtins.str] pid_available: Defines grace period for the pid.available soft eviction threshold.
22008
+ """
22009
+ if imagefs_available is not None:
22010
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
22011
+ if imagefs_inodes_free is not None:
22012
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
22013
+ if memory_available is not None:
22014
+ pulumi.set(__self__, "memory_available", memory_available)
22015
+ if nodefs_available is not None:
22016
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
22017
+ if nodefs_inodes_free is not None:
22018
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
22019
+ if pid_available is not None:
22020
+ pulumi.set(__self__, "pid_available", pid_available)
22021
+
22022
+ @_builtins.property
22023
+ @pulumi.getter(name="imagefsAvailable")
22024
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
22025
+ """
22026
+ Defines grace period for the imagefs.available soft eviction threshold
22027
+ """
22028
+ return pulumi.get(self, "imagefs_available")
22029
+
22030
+ @imagefs_available.setter
22031
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
22032
+ pulumi.set(self, "imagefs_available", value)
22033
+
22034
+ @_builtins.property
22035
+ @pulumi.getter(name="imagefsInodesFree")
22036
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
22037
+ """
22038
+ Defines grace period for the imagefs.inodesFree soft eviction threshold.
22039
+ """
22040
+ return pulumi.get(self, "imagefs_inodes_free")
22041
+
22042
+ @imagefs_inodes_free.setter
22043
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
22044
+ pulumi.set(self, "imagefs_inodes_free", value)
22045
+
22046
+ @_builtins.property
22047
+ @pulumi.getter(name="memoryAvailable")
22048
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
22049
+ """
22050
+ Defines grace period for the memory.available soft eviction threshold.
22051
+ """
22052
+ return pulumi.get(self, "memory_available")
22053
+
22054
+ @memory_available.setter
22055
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
22056
+ pulumi.set(self, "memory_available", value)
22057
+
22058
+ @_builtins.property
22059
+ @pulumi.getter(name="nodefsAvailable")
22060
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
22061
+ """
22062
+ Defines grace period for the nodefs.available soft eviction threshold.
22063
+ """
22064
+ return pulumi.get(self, "nodefs_available")
22065
+
22066
+ @nodefs_available.setter
22067
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
22068
+ pulumi.set(self, "nodefs_available", value)
22069
+
22070
+ @_builtins.property
22071
+ @pulumi.getter(name="nodefsInodesFree")
22072
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
22073
+ """
22074
+ Defines grace period for the nodefs.inodesFree soft eviction threshold.
22075
+ """
22076
+ return pulumi.get(self, "nodefs_inodes_free")
22077
+
22078
+ @nodefs_inodes_free.setter
22079
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
22080
+ pulumi.set(self, "nodefs_inodes_free", value)
22081
+
22082
+ @_builtins.property
22083
+ @pulumi.getter(name="pidAvailable")
22084
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
22085
+ """
22086
+ Defines grace period for the pid.available soft eviction threshold.
22087
+ """
22088
+ return pulumi.get(self, "pid_available")
22089
+
22090
+ @pid_available.setter
22091
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
22092
+ pulumi.set(self, "pid_available", value)
22093
+
19778
22094
 
19779
22095
  if not MYPY:
19780
22096
  class NodePoolNodeConfigLinuxNodeConfigArgsDict(TypedDict):
@@ -19790,6 +22106,14 @@ if not MYPY:
19790
22106
  """
19791
22107
  The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
19792
22108
  """
22109
+ transparent_hugepage_defrag: NotRequired[pulumi.Input[_builtins.str]]
22110
+ """
22111
+ The Linux kernel transparent hugepage defrag setting.
22112
+ """
22113
+ transparent_hugepage_enabled: NotRequired[pulumi.Input[_builtins.str]]
22114
+ """
22115
+ The Linux kernel transparent hugepage setting.
22116
+ """
19793
22117
  elif False:
19794
22118
  NodePoolNodeConfigLinuxNodeConfigArgsDict: TypeAlias = Mapping[str, Any]
19795
22119
 
@@ -19798,11 +22122,15 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
19798
22122
  def __init__(__self__, *,
19799
22123
  cgroup_mode: Optional[pulumi.Input[_builtins.str]] = None,
19800
22124
  hugepages_config: Optional[pulumi.Input['NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']] = None,
19801
- sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None):
22125
+ sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
22126
+ transparent_hugepage_defrag: Optional[pulumi.Input[_builtins.str]] = None,
22127
+ transparent_hugepage_enabled: Optional[pulumi.Input[_builtins.str]] = None):
19802
22128
  """
19803
22129
  :param pulumi.Input[_builtins.str] cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
19804
22130
  :param pulumi.Input['NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_config: Amounts for 2M and 1G hugepages.
19805
22131
  :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
22132
+ :param pulumi.Input[_builtins.str] transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
22133
+ :param pulumi.Input[_builtins.str] transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
19806
22134
  """
19807
22135
  if cgroup_mode is not None:
19808
22136
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
@@ -19810,6 +22138,10 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
19810
22138
  pulumi.set(__self__, "hugepages_config", hugepages_config)
19811
22139
  if sysctls is not None:
19812
22140
  pulumi.set(__self__, "sysctls", sysctls)
22141
+ if transparent_hugepage_defrag is not None:
22142
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
22143
+ if transparent_hugepage_enabled is not None:
22144
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
19813
22145
 
19814
22146
  @_builtins.property
19815
22147
  @pulumi.getter(name="cgroupMode")
@@ -19847,6 +22179,30 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
19847
22179
  def sysctls(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
19848
22180
  pulumi.set(self, "sysctls", value)
19849
22181
 
22182
+ @_builtins.property
22183
+ @pulumi.getter(name="transparentHugepageDefrag")
22184
+ def transparent_hugepage_defrag(self) -> Optional[pulumi.Input[_builtins.str]]:
22185
+ """
22186
+ The Linux kernel transparent hugepage defrag setting.
22187
+ """
22188
+ return pulumi.get(self, "transparent_hugepage_defrag")
22189
+
22190
+ @transparent_hugepage_defrag.setter
22191
+ def transparent_hugepage_defrag(self, value: Optional[pulumi.Input[_builtins.str]]):
22192
+ pulumi.set(self, "transparent_hugepage_defrag", value)
22193
+
22194
+ @_builtins.property
22195
+ @pulumi.getter(name="transparentHugepageEnabled")
22196
+ def transparent_hugepage_enabled(self) -> Optional[pulumi.Input[_builtins.str]]:
22197
+ """
22198
+ The Linux kernel transparent hugepage setting.
22199
+ """
22200
+ return pulumi.get(self, "transparent_hugepage_enabled")
22201
+
22202
+ @transparent_hugepage_enabled.setter
22203
+ def transparent_hugepage_enabled(self, value: Optional[pulumi.Input[_builtins.str]]):
22204
+ pulumi.set(self, "transparent_hugepage_enabled", value)
22205
+
19850
22206
 
19851
22207
  if not MYPY:
19852
22208
  class NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict(TypedDict):
@@ -20160,17 +22516,25 @@ if not MYPY:
20160
22516
  """
20161
22517
  .
20162
22518
  """
22519
+ min_node_cpus: NotRequired[pulumi.Input[_builtins.int]]
22520
+ """
22521
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
22522
+ """
20163
22523
  elif False:
20164
22524
  NodePoolNodeConfigSoleTenantConfigArgsDict: TypeAlias = Mapping[str, Any]
20165
22525
 
20166
22526
  @pulumi.input_type
20167
22527
  class NodePoolNodeConfigSoleTenantConfigArgs:
20168
22528
  def __init__(__self__, *,
20169
- node_affinities: pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
22529
+ node_affinities: pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]],
22530
+ min_node_cpus: Optional[pulumi.Input[_builtins.int]] = None):
20170
22531
  """
20171
22532
  :param pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: .
22533
+ :param pulumi.Input[_builtins.int] min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
20172
22534
  """
20173
22535
  pulumi.set(__self__, "node_affinities", node_affinities)
22536
+ if min_node_cpus is not None:
22537
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
20174
22538
 
20175
22539
  @_builtins.property
20176
22540
  @pulumi.getter(name="nodeAffinities")
@@ -20184,6 +22548,18 @@ class NodePoolNodeConfigSoleTenantConfigArgs:
20184
22548
  def node_affinities(self, value: pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
20185
22549
  pulumi.set(self, "node_affinities", value)
20186
22550
 
22551
+ @_builtins.property
22552
+ @pulumi.getter(name="minNodeCpus")
22553
+ def min_node_cpus(self) -> Optional[pulumi.Input[_builtins.int]]:
22554
+ """
22555
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
22556
+ """
22557
+ return pulumi.get(self, "min_node_cpus")
22558
+
22559
+ @min_node_cpus.setter
22560
+ def min_node_cpus(self, value: Optional[pulumi.Input[_builtins.int]]):
22561
+ pulumi.set(self, "min_node_cpus", value)
22562
+
20187
22563
 
20188
22564
  if not MYPY:
20189
22565
  class NodePoolNodeConfigSoleTenantConfigNodeAffinityArgsDict(TypedDict):