pulumi-gcp 8.40.0a1754721948__py3-none-any.whl → 8.41.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (469)
  1. pulumi_gcp/__init__.py +152 -0
  2. pulumi_gcp/accesscontextmanager/_inputs.py +24 -4
  3. pulumi_gcp/accesscontextmanager/access_policy_iam_binding.py +2 -0
  4. pulumi_gcp/accesscontextmanager/access_policy_iam_member.py +2 -0
  5. pulumi_gcp/accesscontextmanager/access_policy_iam_policy.py +2 -0
  6. pulumi_gcp/accesscontextmanager/outputs.py +15 -3
  7. pulumi_gcp/apigateway/api_config_iam_binding.py +2 -0
  8. pulumi_gcp/apigateway/api_config_iam_member.py +2 -0
  9. pulumi_gcp/apigateway/api_config_iam_policy.py +2 -0
  10. pulumi_gcp/apigateway/api_iam_binding.py +2 -0
  11. pulumi_gcp/apigateway/api_iam_member.py +2 -0
  12. pulumi_gcp/apigateway/api_iam_policy.py +2 -0
  13. pulumi_gcp/apigateway/gateway_iam_binding.py +2 -0
  14. pulumi_gcp/apigateway/gateway_iam_member.py +2 -0
  15. pulumi_gcp/apigateway/gateway_iam_policy.py +2 -0
  16. pulumi_gcp/apigee/__init__.py +2 -0
  17. pulumi_gcp/apigee/_inputs.py +1435 -0
  18. pulumi_gcp/apigee/api_product.py +1698 -0
  19. pulumi_gcp/apigee/environment_iam_binding.py +2 -0
  20. pulumi_gcp/apigee/environment_iam_member.py +2 -0
  21. pulumi_gcp/apigee/environment_iam_policy.py +2 -0
  22. pulumi_gcp/apigee/outputs.py +1081 -0
  23. pulumi_gcp/apigee/security_action.py +1010 -0
  24. pulumi_gcp/artifactregistry/__init__.py +6 -0
  25. pulumi_gcp/artifactregistry/get_docker_images.py +164 -0
  26. pulumi_gcp/artifactregistry/get_package.py +220 -0
  27. pulumi_gcp/artifactregistry/get_repositories.py +160 -0
  28. pulumi_gcp/artifactregistry/get_tag.py +187 -0
  29. pulumi_gcp/artifactregistry/get_tags.py +200 -0
  30. pulumi_gcp/artifactregistry/get_version.py +261 -0
  31. pulumi_gcp/artifactregistry/outputs.py +239 -2
  32. pulumi_gcp/artifactregistry/repository.py +6 -6
  33. pulumi_gcp/artifactregistry/repository_iam_binding.py +2 -0
  34. pulumi_gcp/artifactregistry/repository_iam_member.py +2 -0
  35. pulumi_gcp/artifactregistry/repository_iam_policy.py +2 -0
  36. pulumi_gcp/backupdisasterrecovery/backup_plan.py +114 -7
  37. pulumi_gcp/backupdisasterrecovery/backup_vault.py +56 -0
  38. pulumi_gcp/backupdisasterrecovery/get_backup_plan.py +12 -1
  39. pulumi_gcp/backupdisasterrecovery/get_backup_vault.py +12 -1
  40. pulumi_gcp/beyondcorp/application_iam_binding.py +8 -0
  41. pulumi_gcp/beyondcorp/application_iam_member.py +8 -0
  42. pulumi_gcp/beyondcorp/application_iam_policy.py +8 -0
  43. pulumi_gcp/beyondcorp/get_application_iam_policy.py +4 -0
  44. pulumi_gcp/beyondcorp/security_gateway_application_iam_binding.py +2 -0
  45. pulumi_gcp/beyondcorp/security_gateway_application_iam_member.py +2 -0
  46. pulumi_gcp/beyondcorp/security_gateway_application_iam_policy.py +2 -0
  47. pulumi_gcp/beyondcorp/security_gateway_iam_binding.py +2 -0
  48. pulumi_gcp/beyondcorp/security_gateway_iam_member.py +2 -0
  49. pulumi_gcp/beyondcorp/security_gateway_iam_policy.py +2 -0
  50. pulumi_gcp/bigquery/_inputs.py +6 -0
  51. pulumi_gcp/bigquery/connection_iam_binding.py +2 -0
  52. pulumi_gcp/bigquery/connection_iam_member.py +2 -0
  53. pulumi_gcp/bigquery/connection_iam_policy.py +2 -0
  54. pulumi_gcp/bigquery/data_transfer_config.py +2 -0
  55. pulumi_gcp/bigquery/dataset.py +2 -2
  56. pulumi_gcp/bigquery/get_table.py +23 -1
  57. pulumi_gcp/bigquery/iam_binding.py +2 -0
  58. pulumi_gcp/bigquery/iam_member.py +2 -0
  59. pulumi_gcp/bigquery/iam_policy.py +2 -0
  60. pulumi_gcp/bigquery/outputs.py +4 -0
  61. pulumi_gcp/bigquery/reservation.py +535 -0
  62. pulumi_gcp/bigquery/table.py +62 -0
  63. pulumi_gcp/bigqueryanalyticshub/_inputs.py +180 -0
  64. pulumi_gcp/bigqueryanalyticshub/data_exchange.py +80 -0
  65. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_binding.py +2 -0
  66. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_member.py +2 -0
  67. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_policy.py +2 -0
  68. pulumi_gcp/bigqueryanalyticshub/listing.py +322 -2
  69. pulumi_gcp/bigqueryanalyticshub/listing_iam_binding.py +2 -0
  70. pulumi_gcp/bigqueryanalyticshub/listing_iam_member.py +2 -0
  71. pulumi_gcp/bigqueryanalyticshub/listing_iam_policy.py +2 -0
  72. pulumi_gcp/bigqueryanalyticshub/listing_subscription.py +32 -0
  73. pulumi_gcp/bigqueryanalyticshub/outputs.py +159 -0
  74. pulumi_gcp/bigquerydatapolicy/data_policy_iam_binding.py +2 -0
  75. pulumi_gcp/bigquerydatapolicy/data_policy_iam_member.py +2 -0
  76. pulumi_gcp/bigquerydatapolicy/data_policy_iam_policy.py +2 -0
  77. pulumi_gcp/bigtable/__init__.py +1 -0
  78. pulumi_gcp/bigtable/_inputs.py +33 -0
  79. pulumi_gcp/bigtable/outputs.py +36 -0
  80. pulumi_gcp/bigtable/schema_bundle.py +568 -0
  81. pulumi_gcp/binaryauthorization/attestor_iam_binding.py +2 -0
  82. pulumi_gcp/binaryauthorization/attestor_iam_member.py +2 -0
  83. pulumi_gcp/binaryauthorization/attestor_iam_policy.py +2 -0
  84. pulumi_gcp/certificateauthority/ca_pool_iam_binding.py +2 -0
  85. pulumi_gcp/certificateauthority/ca_pool_iam_member.py +2 -0
  86. pulumi_gcp/certificateauthority/ca_pool_iam_policy.py +2 -0
  87. pulumi_gcp/certificateauthority/certificate_template_iam_binding.py +2 -0
  88. pulumi_gcp/certificateauthority/certificate_template_iam_member.py +2 -0
  89. pulumi_gcp/certificateauthority/certificate_template_iam_policy.py +2 -0
  90. pulumi_gcp/cloudbuildv2/connection_iam_binding.py +2 -0
  91. pulumi_gcp/cloudbuildv2/connection_iam_member.py +2 -0
  92. pulumi_gcp/cloudbuildv2/connection_iam_policy.py +2 -0
  93. pulumi_gcp/clouddeploy/_inputs.py +48 -48
  94. pulumi_gcp/clouddeploy/deploy_policy.py +54 -74
  95. pulumi_gcp/clouddeploy/outputs.py +32 -32
  96. pulumi_gcp/cloudfunctions/_inputs.py +48 -0
  97. pulumi_gcp/cloudfunctions/function.py +94 -0
  98. pulumi_gcp/cloudfunctions/function_iam_binding.py +2 -0
  99. pulumi_gcp/cloudfunctions/function_iam_member.py +2 -0
  100. pulumi_gcp/cloudfunctions/function_iam_policy.py +2 -0
  101. pulumi_gcp/cloudfunctions/get_function.py +23 -1
  102. pulumi_gcp/cloudfunctions/outputs.py +70 -0
  103. pulumi_gcp/cloudfunctionsv2/function_iam_binding.py +2 -0
  104. pulumi_gcp/cloudfunctionsv2/function_iam_member.py +2 -0
  105. pulumi_gcp/cloudfunctionsv2/function_iam_policy.py +2 -0
  106. pulumi_gcp/cloudrun/iam_binding.py +2 -0
  107. pulumi_gcp/cloudrun/iam_member.py +2 -0
  108. pulumi_gcp/cloudrun/iam_policy.py +2 -0
  109. pulumi_gcp/cloudrunv2/_inputs.py +20 -0
  110. pulumi_gcp/cloudrunv2/job.py +2 -0
  111. pulumi_gcp/cloudrunv2/job_iam_binding.py +2 -0
  112. pulumi_gcp/cloudrunv2/job_iam_member.py +2 -0
  113. pulumi_gcp/cloudrunv2/job_iam_policy.py +2 -0
  114. pulumi_gcp/cloudrunv2/outputs.py +25 -0
  115. pulumi_gcp/cloudrunv2/service_iam_binding.py +2 -0
  116. pulumi_gcp/cloudrunv2/service_iam_member.py +2 -0
  117. pulumi_gcp/cloudrunv2/service_iam_policy.py +2 -0
  118. pulumi_gcp/cloudrunv2/worker_pool.py +2 -0
  119. pulumi_gcp/cloudrunv2/worker_pool_iam_binding.py +2 -0
  120. pulumi_gcp/cloudrunv2/worker_pool_iam_member.py +2 -0
  121. pulumi_gcp/cloudrunv2/worker_pool_iam_policy.py +2 -0
  122. pulumi_gcp/cloudtasks/queue_iam_binding.py +2 -0
  123. pulumi_gcp/cloudtasks/queue_iam_member.py +2 -0
  124. pulumi_gcp/cloudtasks/queue_iam_policy.py +2 -0
  125. pulumi_gcp/colab/runtime_template_iam_binding.py +2 -0
  126. pulumi_gcp/colab/runtime_template_iam_member.py +2 -0
  127. pulumi_gcp/colab/runtime_template_iam_policy.py +2 -0
  128. pulumi_gcp/composer/user_workloads_config_map.py +26 -2
  129. pulumi_gcp/compute/__init__.py +1 -0
  130. pulumi_gcp/compute/_inputs.py +1068 -22
  131. pulumi_gcp/compute/disk_iam_binding.py +2 -0
  132. pulumi_gcp/compute/disk_iam_member.py +2 -0
  133. pulumi_gcp/compute/disk_iam_policy.py +2 -0
  134. pulumi_gcp/compute/firewall_policy_with_rules.py +66 -0
  135. pulumi_gcp/compute/forwarding_rule.py +0 -21
  136. pulumi_gcp/compute/get_region_backend_service.py +12 -1
  137. pulumi_gcp/compute/get_router.py +12 -1
  138. pulumi_gcp/compute/image_iam_binding.py +2 -0
  139. pulumi_gcp/compute/image_iam_member.py +2 -0
  140. pulumi_gcp/compute/image_iam_policy.py +2 -0
  141. pulumi_gcp/compute/instance_iam_binding.py +2 -0
  142. pulumi_gcp/compute/instance_iam_member.py +2 -0
  143. pulumi_gcp/compute/instance_iam_policy.py +2 -0
  144. pulumi_gcp/compute/instance_template_iam_binding.py +2 -0
  145. pulumi_gcp/compute/instance_template_iam_member.py +2 -0
  146. pulumi_gcp/compute/instance_template_iam_policy.py +2 -0
  147. pulumi_gcp/compute/instant_snapshot_iam_binding.py +2 -0
  148. pulumi_gcp/compute/instant_snapshot_iam_member.py +2 -0
  149. pulumi_gcp/compute/instant_snapshot_iam_policy.py +2 -0
  150. pulumi_gcp/compute/machine_image_iam_binding.py +2 -0
  151. pulumi_gcp/compute/machine_image_iam_member.py +2 -0
  152. pulumi_gcp/compute/machine_image_iam_policy.py +2 -0
  153. pulumi_gcp/compute/outputs.py +966 -22
  154. pulumi_gcp/compute/preview_feature.py +396 -0
  155. pulumi_gcp/compute/region_backend_service.py +257 -0
  156. pulumi_gcp/compute/region_disk_iam_binding.py +2 -0
  157. pulumi_gcp/compute/region_disk_iam_member.py +2 -0
  158. pulumi_gcp/compute/region_disk_iam_policy.py +2 -0
  159. pulumi_gcp/compute/region_security_policy.py +54 -0
  160. pulumi_gcp/compute/region_url_map.py +392 -0
  161. pulumi_gcp/compute/reservation.py +4 -4
  162. pulumi_gcp/compute/router.py +54 -0
  163. pulumi_gcp/compute/service_attachment.py +126 -0
  164. pulumi_gcp/compute/snapshot_iam_binding.py +2 -0
  165. pulumi_gcp/compute/snapshot_iam_member.py +2 -0
  166. pulumi_gcp/compute/snapshot_iam_policy.py +2 -0
  167. pulumi_gcp/compute/storage_pool.py +154 -0
  168. pulumi_gcp/compute/storage_pool_iam_binding.py +2 -0
  169. pulumi_gcp/compute/storage_pool_iam_member.py +2 -0
  170. pulumi_gcp/compute/storage_pool_iam_policy.py +2 -0
  171. pulumi_gcp/compute/subnetwork.py +54 -0
  172. pulumi_gcp/compute/subnetwork_iam_binding.py +2 -0
  173. pulumi_gcp/compute/subnetwork_iam_member.py +2 -0
  174. pulumi_gcp/compute/subnetwork_iam_policy.py +2 -0
  175. pulumi_gcp/config/__init__.pyi +2 -4
  176. pulumi_gcp/config/vars.py +4 -8
  177. pulumi_gcp/container/_inputs.py +2622 -246
  178. pulumi_gcp/container/cluster.py +61 -21
  179. pulumi_gcp/container/get_cluster.py +12 -1
  180. pulumi_gcp/container/outputs.py +2877 -133
  181. pulumi_gcp/containeranalysis/note_iam_binding.py +2 -0
  182. pulumi_gcp/containeranalysis/note_iam_member.py +2 -0
  183. pulumi_gcp/containeranalysis/note_iam_policy.py +2 -0
  184. pulumi_gcp/datacatalog/entry_group_iam_binding.py +2 -0
  185. pulumi_gcp/datacatalog/entry_group_iam_member.py +2 -0
  186. pulumi_gcp/datacatalog/entry_group_iam_policy.py +2 -0
  187. pulumi_gcp/datacatalog/policy_tag_iam_binding.py +2 -0
  188. pulumi_gcp/datacatalog/policy_tag_iam_member.py +2 -0
  189. pulumi_gcp/datacatalog/policy_tag_iam_policy.py +2 -0
  190. pulumi_gcp/datacatalog/tag_template_iam_binding.py +2 -0
  191. pulumi_gcp/datacatalog/tag_template_iam_member.py +2 -0
  192. pulumi_gcp/datacatalog/tag_template_iam_policy.py +2 -0
  193. pulumi_gcp/datacatalog/taxonomy_iam_binding.py +2 -0
  194. pulumi_gcp/datacatalog/taxonomy_iam_member.py +2 -0
  195. pulumi_gcp/datacatalog/taxonomy_iam_policy.py +2 -0
  196. pulumi_gcp/datafusion/instance.py +18 -4
  197. pulumi_gcp/dataplex/aspect_type_iam_binding.py +2 -0
  198. pulumi_gcp/dataplex/aspect_type_iam_member.py +2 -0
  199. pulumi_gcp/dataplex/aspect_type_iam_policy.py +2 -0
  200. pulumi_gcp/dataplex/asset_iam_binding.py +2 -0
  201. pulumi_gcp/dataplex/asset_iam_member.py +2 -0
  202. pulumi_gcp/dataplex/asset_iam_policy.py +2 -0
  203. pulumi_gcp/dataplex/datascan_iam_binding.py +2 -0
  204. pulumi_gcp/dataplex/datascan_iam_member.py +2 -0
  205. pulumi_gcp/dataplex/datascan_iam_policy.py +2 -0
  206. pulumi_gcp/dataplex/entry_group_iam_binding.py +2 -0
  207. pulumi_gcp/dataplex/entry_group_iam_member.py +2 -0
  208. pulumi_gcp/dataplex/entry_group_iam_policy.py +2 -0
  209. pulumi_gcp/dataplex/entry_type_iam_binding.py +2 -0
  210. pulumi_gcp/dataplex/entry_type_iam_member.py +2 -0
  211. pulumi_gcp/dataplex/entry_type_iam_policy.py +2 -0
  212. pulumi_gcp/dataplex/glossary_iam_binding.py +2 -0
  213. pulumi_gcp/dataplex/glossary_iam_member.py +2 -0
  214. pulumi_gcp/dataplex/glossary_iam_policy.py +2 -0
  215. pulumi_gcp/dataplex/lake_iam_binding.py +2 -0
  216. pulumi_gcp/dataplex/lake_iam_member.py +2 -0
  217. pulumi_gcp/dataplex/lake_iam_policy.py +2 -0
  218. pulumi_gcp/dataplex/task_iam_binding.py +2 -0
  219. pulumi_gcp/dataplex/task_iam_member.py +2 -0
  220. pulumi_gcp/dataplex/task_iam_policy.py +2 -0
  221. pulumi_gcp/dataplex/zone_iam_binding.py +2 -0
  222. pulumi_gcp/dataplex/zone_iam_member.py +2 -0
  223. pulumi_gcp/dataplex/zone_iam_policy.py +2 -0
  224. pulumi_gcp/dataproc/_inputs.py +249 -14
  225. pulumi_gcp/dataproc/autoscaling_policy_iam_binding.py +2 -0
  226. pulumi_gcp/dataproc/autoscaling_policy_iam_member.py +2 -0
  227. pulumi_gcp/dataproc/autoscaling_policy_iam_policy.py +2 -0
  228. pulumi_gcp/dataproc/batch.py +6 -0
  229. pulumi_gcp/dataproc/cluster.py +2 -0
  230. pulumi_gcp/dataproc/metastore_database_iam_binding.py +2 -0
  231. pulumi_gcp/dataproc/metastore_database_iam_member.py +2 -0
  232. pulumi_gcp/dataproc/metastore_database_iam_policy.py +2 -0
  233. pulumi_gcp/dataproc/metastore_federation_iam_binding.py +2 -0
  234. pulumi_gcp/dataproc/metastore_federation_iam_member.py +2 -0
  235. pulumi_gcp/dataproc/metastore_federation_iam_policy.py +2 -0
  236. pulumi_gcp/dataproc/metastore_service_iam_binding.py +2 -0
  237. pulumi_gcp/dataproc/metastore_service_iam_member.py +2 -0
  238. pulumi_gcp/dataproc/metastore_service_iam_policy.py +2 -0
  239. pulumi_gcp/dataproc/metastore_table_iam_binding.py +2 -0
  240. pulumi_gcp/dataproc/metastore_table_iam_member.py +2 -0
  241. pulumi_gcp/dataproc/metastore_table_iam_policy.py +2 -0
  242. pulumi_gcp/dataproc/outputs.py +215 -12
  243. pulumi_gcp/dataproc/session_template.py +14 -2
  244. pulumi_gcp/developerconnect/__init__.py +1 -0
  245. pulumi_gcp/developerconnect/_inputs.py +583 -0
  246. pulumi_gcp/developerconnect/insights_config.py +895 -0
  247. pulumi_gcp/developerconnect/outputs.py +442 -0
  248. pulumi_gcp/diagflow/__init__.py +3 -0
  249. pulumi_gcp/diagflow/_inputs.py +11899 -7963
  250. pulumi_gcp/diagflow/conversation_profile.py +959 -0
  251. pulumi_gcp/diagflow/cx_generator.py +636 -0
  252. pulumi_gcp/diagflow/cx_playbook.py +967 -0
  253. pulumi_gcp/diagflow/cx_tool.py +2 -2
  254. pulumi_gcp/diagflow/cx_webhook.py +380 -36
  255. pulumi_gcp/diagflow/outputs.py +9099 -5946
  256. pulumi_gcp/discoveryengine/__init__.py +2 -0
  257. pulumi_gcp/discoveryengine/_inputs.py +465 -0
  258. pulumi_gcp/discoveryengine/cmek_config.py +707 -0
  259. pulumi_gcp/discoveryengine/outputs.py +412 -0
  260. pulumi_gcp/discoveryengine/recommendation_engine.py +813 -0
  261. pulumi_gcp/dns/dns_managed_zone_iam_binding.py +2 -0
  262. pulumi_gcp/dns/dns_managed_zone_iam_member.py +2 -0
  263. pulumi_gcp/dns/dns_managed_zone_iam_policy.py +2 -0
  264. pulumi_gcp/endpoints/service_iam_binding.py +2 -0
  265. pulumi_gcp/endpoints/service_iam_member.py +2 -0
  266. pulumi_gcp/endpoints/service_iam_policy.py +2 -0
  267. pulumi_gcp/firestore/field.py +6 -6
  268. pulumi_gcp/gemini/gemini_gcp_enablement_setting.py +107 -9
  269. pulumi_gcp/gemini/gemini_gcp_enablement_setting_binding.py +2 -2
  270. pulumi_gcp/gemini/repository_group_iam_binding.py +2 -0
  271. pulumi_gcp/gemini/repository_group_iam_member.py +2 -0
  272. pulumi_gcp/gemini/repository_group_iam_policy.py +2 -0
  273. pulumi_gcp/gkebackup/backup_plan_iam_binding.py +2 -0
  274. pulumi_gcp/gkebackup/backup_plan_iam_member.py +2 -0
  275. pulumi_gcp/gkebackup/backup_plan_iam_policy.py +2 -0
  276. pulumi_gcp/gkebackup/restore_plan_iam_binding.py +2 -0
  277. pulumi_gcp/gkebackup/restore_plan_iam_member.py +2 -0
  278. pulumi_gcp/gkebackup/restore_plan_iam_policy.py +2 -0
  279. pulumi_gcp/gkehub/feature_iam_binding.py +2 -0
  280. pulumi_gcp/gkehub/feature_iam_member.py +2 -0
  281. pulumi_gcp/gkehub/feature_iam_policy.py +2 -0
  282. pulumi_gcp/gkehub/membership_binding.py +6 -6
  283. pulumi_gcp/gkehub/membership_iam_binding.py +2 -0
  284. pulumi_gcp/gkehub/membership_iam_member.py +2 -0
  285. pulumi_gcp/gkehub/membership_iam_policy.py +2 -0
  286. pulumi_gcp/gkehub/membership_rbac_role_binding.py +4 -4
  287. pulumi_gcp/gkehub/namespace.py +4 -4
  288. pulumi_gcp/gkehub/scope_iam_binding.py +2 -0
  289. pulumi_gcp/gkehub/scope_iam_member.py +2 -0
  290. pulumi_gcp/gkehub/scope_iam_policy.py +2 -0
  291. pulumi_gcp/gkehub/scope_rbac_role_binding.py +8 -8
  292. pulumi_gcp/gkeonprem/vmware_admin_cluster.py +24 -3
  293. pulumi_gcp/healthcare/consent_store_iam_binding.py +2 -0
  294. pulumi_gcp/healthcare/consent_store_iam_member.py +2 -0
  295. pulumi_gcp/healthcare/consent_store_iam_policy.py +2 -0
  296. pulumi_gcp/iam/__init__.py +4 -0
  297. pulumi_gcp/iam/_inputs.py +98 -0
  298. pulumi_gcp/iam/get_workforce_pool_iam_policy.py +161 -0
  299. pulumi_gcp/iam/outputs.py +56 -0
  300. pulumi_gcp/iam/workforce_pool_iam_binding.py +763 -0
  301. pulumi_gcp/iam/workforce_pool_iam_member.py +763 -0
  302. pulumi_gcp/iam/workforce_pool_iam_policy.py +602 -0
  303. pulumi_gcp/iap/app_engine_service_iam_binding.py +2 -0
  304. pulumi_gcp/iap/app_engine_service_iam_member.py +2 -0
  305. pulumi_gcp/iap/app_engine_service_iam_policy.py +2 -0
  306. pulumi_gcp/iap/app_engine_version_iam_binding.py +2 -0
  307. pulumi_gcp/iap/app_engine_version_iam_member.py +2 -0
  308. pulumi_gcp/iap/app_engine_version_iam_policy.py +2 -0
  309. pulumi_gcp/iap/tunnel_dest_group.py +2 -2
  310. pulumi_gcp/iap/tunnel_dest_group_iam_binding.py +2 -0
  311. pulumi_gcp/iap/tunnel_dest_group_iam_member.py +2 -0
  312. pulumi_gcp/iap/tunnel_dest_group_iam_policy.py +2 -0
  313. pulumi_gcp/iap/tunnel_iam_binding.py +2 -0
  314. pulumi_gcp/iap/tunnel_iam_member.py +2 -0
  315. pulumi_gcp/iap/tunnel_iam_policy.py +2 -0
  316. pulumi_gcp/iap/tunnel_instance_iam_binding.py +2 -0
  317. pulumi_gcp/iap/tunnel_instance_iam_member.py +2 -0
  318. pulumi_gcp/iap/tunnel_instance_iam_policy.py +2 -0
  319. pulumi_gcp/iap/web_backend_service_iam_binding.py +2 -0
  320. pulumi_gcp/iap/web_backend_service_iam_member.py +2 -0
  321. pulumi_gcp/iap/web_backend_service_iam_policy.py +2 -0
  322. pulumi_gcp/iap/web_cloud_run_service_iam_binding.py +2 -0
  323. pulumi_gcp/iap/web_cloud_run_service_iam_member.py +2 -0
  324. pulumi_gcp/iap/web_cloud_run_service_iam_policy.py +2 -0
  325. pulumi_gcp/iap/web_iam_binding.py +2 -0
  326. pulumi_gcp/iap/web_iam_member.py +2 -0
  327. pulumi_gcp/iap/web_iam_policy.py +2 -0
  328. pulumi_gcp/iap/web_region_backend_service_iam_binding.py +2 -0
  329. pulumi_gcp/iap/web_region_backend_service_iam_member.py +2 -0
  330. pulumi_gcp/iap/web_region_backend_service_iam_policy.py +2 -0
  331. pulumi_gcp/iap/web_type_app_enging_iam_binding.py +2 -0
  332. pulumi_gcp/iap/web_type_app_enging_iam_member.py +2 -0
  333. pulumi_gcp/iap/web_type_app_enging_iam_policy.py +2 -0
  334. pulumi_gcp/iap/web_type_compute_iam_binding.py +2 -0
  335. pulumi_gcp/iap/web_type_compute_iam_member.py +2 -0
  336. pulumi_gcp/iap/web_type_compute_iam_policy.py +2 -0
  337. pulumi_gcp/integrationconnectors/managed_zone.py +8 -8
  338. pulumi_gcp/kms/crypto_key.py +7 -0
  339. pulumi_gcp/kms/ekm_connection_iam_binding.py +2 -0
  340. pulumi_gcp/kms/ekm_connection_iam_member.py +2 -0
  341. pulumi_gcp/kms/ekm_connection_iam_policy.py +2 -0
  342. pulumi_gcp/kms/outputs.py +2 -0
  343. pulumi_gcp/logging/log_view_iam_binding.py +2 -0
  344. pulumi_gcp/logging/log_view_iam_member.py +2 -0
  345. pulumi_gcp/logging/log_view_iam_policy.py +2 -0
  346. pulumi_gcp/looker/instance.py +28 -7
  347. pulumi_gcp/managedkafka/_inputs.py +127 -0
  348. pulumi_gcp/managedkafka/cluster.py +131 -1
  349. pulumi_gcp/managedkafka/connect_cluster.py +4 -4
  350. pulumi_gcp/managedkafka/connector.py +4 -4
  351. pulumi_gcp/managedkafka/outputs.py +128 -0
  352. pulumi_gcp/memorystore/get_instance.py +12 -1
  353. pulumi_gcp/memorystore/instance.py +78 -12
  354. pulumi_gcp/modelarmor/__init__.py +1 -0
  355. pulumi_gcp/modelarmor/_inputs.py +683 -0
  356. pulumi_gcp/modelarmor/floorsetting.py +736 -0
  357. pulumi_gcp/modelarmor/outputs.py +618 -0
  358. pulumi_gcp/monitoring/_inputs.py +3 -3
  359. pulumi_gcp/monitoring/outputs.py +2 -2
  360. pulumi_gcp/networkconnectivity/_inputs.py +60 -0
  361. pulumi_gcp/networkconnectivity/internal_range.py +136 -0
  362. pulumi_gcp/networkconnectivity/outputs.py +55 -0
  363. pulumi_gcp/networkconnectivity/spoke.py +14 -14
  364. pulumi_gcp/networkmanagement/vpc_flow_logs_config.py +213 -168
  365. pulumi_gcp/notebooks/instance.py +18 -18
  366. pulumi_gcp/notebooks/instance_iam_binding.py +2 -0
  367. pulumi_gcp/notebooks/instance_iam_member.py +2 -0
  368. pulumi_gcp/notebooks/instance_iam_policy.py +2 -0
  369. pulumi_gcp/notebooks/runtime_iam_binding.py +2 -0
  370. pulumi_gcp/notebooks/runtime_iam_member.py +2 -0
  371. pulumi_gcp/notebooks/runtime_iam_policy.py +2 -0
  372. pulumi_gcp/oracledatabase/__init__.py +2 -0
  373. pulumi_gcp/oracledatabase/autonomous_database.py +262 -38
  374. pulumi_gcp/oracledatabase/cloud_vm_cluster.py +314 -50
  375. pulumi_gcp/oracledatabase/get_autonomous_database.py +23 -1
  376. pulumi_gcp/oracledatabase/get_cloud_vm_cluster.py +34 -1
  377. pulumi_gcp/oracledatabase/odb_network.py +721 -0
  378. pulumi_gcp/oracledatabase/odb_subnet.py +803 -0
  379. pulumi_gcp/oracledatabase/outputs.py +83 -0
  380. pulumi_gcp/organizations/folder.py +56 -0
  381. pulumi_gcp/organizations/get_folder.py +29 -1
  382. pulumi_gcp/orgpolicy/policy.py +2 -2
  383. pulumi_gcp/parametermanager/parameter_version.py +62 -0
  384. pulumi_gcp/parametermanager/regional_parameter_version.py +64 -0
  385. pulumi_gcp/projects/api_key.py +88 -1
  386. pulumi_gcp/provider.py +20 -40
  387. pulumi_gcp/pubsub/schema_iam_binding.py +2 -0
  388. pulumi_gcp/pubsub/schema_iam_member.py +2 -0
  389. pulumi_gcp/pubsub/schema_iam_policy.py +2 -0
  390. pulumi_gcp/pubsub/subscription.py +130 -6
  391. pulumi_gcp/pubsub/topic.py +116 -0
  392. pulumi_gcp/pubsub/topic_iam_binding.py +2 -0
  393. pulumi_gcp/pubsub/topic_iam_member.py +2 -0
  394. pulumi_gcp/pubsub/topic_iam_policy.py +2 -0
  395. pulumi_gcp/pulumi-plugin.json +1 -1
  396. pulumi_gcp/redis/cluster.py +70 -0
  397. pulumi_gcp/redis/get_cluster.py +12 -1
  398. pulumi_gcp/redis/instance.py +8 -12
  399. pulumi_gcp/secretmanager/get_regional_secret.py +12 -1
  400. pulumi_gcp/secretmanager/get_secret.py +12 -1
  401. pulumi_gcp/secretmanager/outputs.py +30 -0
  402. pulumi_gcp/secretmanager/regional_secret.py +61 -0
  403. pulumi_gcp/secretmanager/regional_secret_iam_binding.py +2 -0
  404. pulumi_gcp/secretmanager/regional_secret_iam_member.py +2 -0
  405. pulumi_gcp/secretmanager/regional_secret_iam_policy.py +2 -0
  406. pulumi_gcp/secretmanager/secret.py +61 -0
  407. pulumi_gcp/secretmanager/secret_iam_binding.py +2 -0
  408. pulumi_gcp/secretmanager/secret_iam_member.py +2 -0
  409. pulumi_gcp/secretmanager/secret_iam_policy.py +2 -0
  410. pulumi_gcp/secretmanager/secret_version.py +1 -48
  411. pulumi_gcp/securesourcemanager/branch_rule.py +16 -8
  412. pulumi_gcp/securesourcemanager/instance.py +112 -4
  413. pulumi_gcp/securesourcemanager/repository.py +112 -8
  414. pulumi_gcp/securesourcemanager/repository_iam_binding.py +2 -0
  415. pulumi_gcp/securesourcemanager/repository_iam_member.py +2 -0
  416. pulumi_gcp/securesourcemanager/repository_iam_policy.py +2 -0
  417. pulumi_gcp/securitycenter/instance_iam_binding.py +18 -4
  418. pulumi_gcp/securitycenter/instance_iam_member.py +18 -4
  419. pulumi_gcp/securitycenter/instance_iam_policy.py +18 -4
  420. pulumi_gcp/securitycenter/v2_organization_source_iam_binding.py +2 -0
  421. pulumi_gcp/securitycenter/v2_organization_source_iam_member.py +2 -0
  422. pulumi_gcp/securitycenter/v2_organization_source_iam_policy.py +2 -0
  423. pulumi_gcp/serviceaccount/get_account_key.py +1 -0
  424. pulumi_gcp/servicedirectory/namespace_iam_binding.py +2 -0
  425. pulumi_gcp/servicedirectory/namespace_iam_member.py +2 -0
  426. pulumi_gcp/servicedirectory/namespace_iam_policy.py +2 -0
  427. pulumi_gcp/servicedirectory/service_iam_binding.py +2 -0
  428. pulumi_gcp/servicedirectory/service_iam_member.py +2 -0
  429. pulumi_gcp/servicedirectory/service_iam_policy.py +2 -0
  430. pulumi_gcp/sourcerepo/repository_iam_binding.py +2 -0
  431. pulumi_gcp/sourcerepo/repository_iam_member.py +2 -0
  432. pulumi_gcp/sourcerepo/repository_iam_policy.py +2 -0
  433. pulumi_gcp/sql/_inputs.py +88 -10
  434. pulumi_gcp/sql/database.py +0 -12
  435. pulumi_gcp/sql/database_instance.py +108 -7
  436. pulumi_gcp/sql/get_database_instance.py +12 -1
  437. pulumi_gcp/sql/outputs.py +158 -11
  438. pulumi_gcp/storage/__init__.py +2 -0
  439. pulumi_gcp/storage/_inputs.py +555 -12
  440. pulumi_gcp/storage/bucket.py +7 -7
  441. pulumi_gcp/storage/bucket_object.py +34 -0
  442. pulumi_gcp/storage/get_bucket_object.py +12 -1
  443. pulumi_gcp/storage/get_bucket_object_content.py +12 -1
  444. pulumi_gcp/storage/get_insights_dataset_config.py +363 -0
  445. pulumi_gcp/storage/insights_dataset_config.py +1280 -0
  446. pulumi_gcp/storage/outputs.py +703 -7
  447. pulumi_gcp/tags/tag_key_iam_binding.py +2 -0
  448. pulumi_gcp/tags/tag_key_iam_member.py +2 -0
  449. pulumi_gcp/tags/tag_key_iam_policy.py +2 -0
  450. pulumi_gcp/tags/tag_value_iam_binding.py +2 -0
  451. pulumi_gcp/tags/tag_value_iam_member.py +2 -0
  452. pulumi_gcp/tags/tag_value_iam_policy.py +2 -0
  453. pulumi_gcp/tpu/get_tensorflow_versions.py +10 -0
  454. pulumi_gcp/vertex/__init__.py +2 -0
  455. pulumi_gcp/vertex/_inputs.py +3768 -3
  456. pulumi_gcp/vertex/ai_endpoint.py +4 -4
  457. pulumi_gcp/vertex/ai_endpoint_with_model_garden_deployment.py +940 -0
  458. pulumi_gcp/vertex/ai_feature_online_store_featureview.py +4 -4
  459. pulumi_gcp/vertex/ai_index.py +21 -7
  460. pulumi_gcp/vertex/ai_rag_engine_config.py +354 -0
  461. pulumi_gcp/vertex/outputs.py +2678 -2
  462. pulumi_gcp/vmwareengine/network_peering.py +7 -7
  463. pulumi_gcp/workbench/_inputs.py +118 -0
  464. pulumi_gcp/workbench/instance.py +171 -2
  465. pulumi_gcp/workbench/outputs.py +91 -0
  466. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.41.0.dist-info}/METADATA +1 -1
  467. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.41.0.dist-info}/RECORD +469 -442
  468. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.41.0.dist-info}/WHEEL +0 -0
  469. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.41.0.dist-info}/top_level.txt +0 -0
pulumi_gcp/container/outputs.py

@@ -97,6 +97,7 @@ __all__ = [
     'ClusterAddonsConfigHttpLoadBalancing',
     'ClusterAddonsConfigIstioConfig',
     'ClusterAddonsConfigKalmConfig',
+    'ClusterAddonsConfigLustreCsiDriverConfig',
     'ClusterAddonsConfigNetworkPolicyConfig',
     'ClusterAddonsConfigParallelstoreCsiDriverConfig',
     'ClusterAddonsConfigRayOperatorConfig',
@@ -131,6 +132,7 @@ __all__ = [
     'ClusterGkeAutoUpgradeConfig',
     'ClusterIdentityServiceConfig',
     'ClusterIpAllocationPolicy',
+    'ClusterIpAllocationPolicyAdditionalIpRangesConfig',
     'ClusterIpAllocationPolicyAdditionalPodRangesConfig',
     'ClusterIpAllocationPolicyPodCidrOverprovisionConfig',
     'ClusterLoggingConfig',
@@ -152,6 +154,7 @@ __all__ = [
     'ClusterNetworkPolicy',
     'ClusterNodeConfig',
     'ClusterNodeConfigAdvancedMachineFeatures',
+    'ClusterNodeConfigBootDisk',
     'ClusterNodeConfigConfidentialNodes',
     'ClusterNodeConfigContainerdConfig',
     'ClusterNodeConfigContainerdConfigPrivateRegistryAccessConfig',
@@ -168,6 +171,9 @@ __all__ = [
     'ClusterNodeConfigGvnic',
     'ClusterNodeConfigHostMaintenancePolicy',
     'ClusterNodeConfigKubeletConfig',
+    'ClusterNodeConfigKubeletConfigEvictionMinimumReclaim',
+    'ClusterNodeConfigKubeletConfigEvictionSoft',
+    'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod',
     'ClusterNodeConfigLinuxNodeConfig',
     'ClusterNodeConfigLinuxNodeConfigHugepagesConfig',
     'ClusterNodeConfigLocalNvmeSsdBlockConfig',
@@ -201,6 +207,7 @@ __all__ = [
     'ClusterNodePoolNetworkConfigPodCidrOverprovisionConfig',
     'ClusterNodePoolNodeConfig',
     'ClusterNodePoolNodeConfigAdvancedMachineFeatures',
+    'ClusterNodePoolNodeConfigBootDisk',
     'ClusterNodePoolNodeConfigConfidentialNodes',
     'ClusterNodePoolNodeConfigContainerdConfig',
     'ClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig',
@@ -217,6 +224,9 @@ __all__ = [
     'ClusterNodePoolNodeConfigGvnic',
     'ClusterNodePoolNodeConfigHostMaintenancePolicy',
     'ClusterNodePoolNodeConfigKubeletConfig',
+    'ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim',
+    'ClusterNodePoolNodeConfigKubeletConfigEvictionSoft',
+    'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod',
     'ClusterNodePoolNodeConfigLinuxNodeConfig',
     'ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig',
     'ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfig',
@@ -243,6 +253,7 @@ __all__ = [
     'ClusterPrivateClusterConfigMasterGlobalAccessConfig',
     'ClusterProtectConfig',
     'ClusterProtectConfigWorkloadConfig',
+    'ClusterRbacBindingConfig',
     'ClusterReleaseChannel',
     'ClusterResourceUsageExportConfig',
     'ClusterResourceUsageExportConfigBigqueryDestination',
@@ -264,6 +275,7 @@ __all__ = [
     'NodePoolNetworkConfigPodCidrOverprovisionConfig',
     'NodePoolNodeConfig',
     'NodePoolNodeConfigAdvancedMachineFeatures',
+    'NodePoolNodeConfigBootDisk',
     'NodePoolNodeConfigConfidentialNodes',
     'NodePoolNodeConfigContainerdConfig',
     'NodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfig',
@@ -280,6 +292,9 @@ __all__ = [
     'NodePoolNodeConfigGvnic',
     'NodePoolNodeConfigHostMaintenancePolicy',
     'NodePoolNodeConfigKubeletConfig',
+    'NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim',
+    'NodePoolNodeConfigKubeletConfigEvictionSoft',
+    'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod',
     'NodePoolNodeConfigLinuxNodeConfig',
     'NodePoolNodeConfigLinuxNodeConfigHugepagesConfig',
     'NodePoolNodeConfigLocalNvmeSsdBlockConfig',
@@ -309,6 +324,7 @@ __all__ = [
     'GetClusterAddonsConfigHttpLoadBalancingResult',
     'GetClusterAddonsConfigIstioConfigResult',
     'GetClusterAddonsConfigKalmConfigResult',
+    'GetClusterAddonsConfigLustreCsiDriverConfigResult',
     'GetClusterAddonsConfigNetworkPolicyConfigResult',
     'GetClusterAddonsConfigParallelstoreCsiDriverConfigResult',
     'GetClusterAddonsConfigRayOperatorConfigResult',
@@ -343,6 +359,7 @@ __all__ = [
     'GetClusterGkeAutoUpgradeConfigResult',
     'GetClusterIdentityServiceConfigResult',
     'GetClusterIpAllocationPolicyResult',
+    'GetClusterIpAllocationPolicyAdditionalIpRangesConfigResult',
     'GetClusterIpAllocationPolicyAdditionalPodRangesConfigResult',
     'GetClusterIpAllocationPolicyPodCidrOverprovisionConfigResult',
     'GetClusterLoggingConfigResult',
@@ -364,6 +381,7 @@ __all__ = [
     'GetClusterNetworkPolicyResult',
     'GetClusterNodeConfigResult',
     'GetClusterNodeConfigAdvancedMachineFeatureResult',
+    'GetClusterNodeConfigBootDiskResult',
     'GetClusterNodeConfigConfidentialNodeResult',
     'GetClusterNodeConfigContainerdConfigResult',
     'GetClusterNodeConfigContainerdConfigPrivateRegistryAccessConfigResult',
@@ -380,6 +398,9 @@ __all__ = [
     'GetClusterNodeConfigGvnicResult',
     'GetClusterNodeConfigHostMaintenancePolicyResult',
     'GetClusterNodeConfigKubeletConfigResult',
+    'GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult',
+    'GetClusterNodeConfigKubeletConfigEvictionSoftResult',
+    'GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult',
     'GetClusterNodeConfigLinuxNodeConfigResult',
     'GetClusterNodeConfigLinuxNodeConfigHugepagesConfigResult',
     'GetClusterNodeConfigLocalNvmeSsdBlockConfigResult',
@@ -413,6 +434,7 @@ __all__ = [
     'GetClusterNodePoolNetworkConfigPodCidrOverprovisionConfigResult',
     'GetClusterNodePoolNodeConfigResult',
     'GetClusterNodePoolNodeConfigAdvancedMachineFeatureResult',
+    'GetClusterNodePoolNodeConfigBootDiskResult',
     'GetClusterNodePoolNodeConfigConfidentialNodeResult',
     'GetClusterNodePoolNodeConfigContainerdConfigResult',
     'GetClusterNodePoolNodeConfigContainerdConfigPrivateRegistryAccessConfigResult',
@@ -429,6 +451,9 @@ __all__ = [
     'GetClusterNodePoolNodeConfigGvnicResult',
     'GetClusterNodePoolNodeConfigHostMaintenancePolicyResult',
     'GetClusterNodePoolNodeConfigKubeletConfigResult',
+    'GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult',
+    'GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult',
+    'GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult',
     'GetClusterNodePoolNodeConfigLinuxNodeConfigResult',
     'GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigResult',
     'GetClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigResult',
@@ -455,6 +480,7 @@ __all__ = [
     'GetClusterPrivateClusterConfigMasterGlobalAccessConfigResult',
     'GetClusterProtectConfigResult',
     'GetClusterProtectConfigWorkloadConfigResult',
+    'GetClusterRbacBindingConfigResult',
     'GetClusterReleaseChannelResult',
     'GetClusterResourceUsageExportConfigResult',
     'GetClusterResourceUsageExportConfigBigqueryDestinationResult',
@@ -3919,6 +3945,8 @@ class ClusterAddonsConfig(dict):
             suggest = "istio_config"
         elif key == "kalmConfig":
             suggest = "kalm_config"
+        elif key == "lustreCsiDriverConfig":
+            suggest = "lustre_csi_driver_config"
         elif key == "networkPolicyConfig":
             suggest = "network_policy_config"
         elif key == "parallelstoreCsiDriverConfig":
@@ -3951,6 +3979,7 @@ class ClusterAddonsConfig(dict):
                  http_load_balancing: Optional['outputs.ClusterAddonsConfigHttpLoadBalancing'] = None,
                  istio_config: Optional['outputs.ClusterAddonsConfigIstioConfig'] = None,
                  kalm_config: Optional['outputs.ClusterAddonsConfigKalmConfig'] = None,
+                 lustre_csi_driver_config: Optional['outputs.ClusterAddonsConfigLustreCsiDriverConfig'] = None,
                  network_policy_config: Optional['outputs.ClusterAddonsConfigNetworkPolicyConfig'] = None,
                  parallelstore_csi_driver_config: Optional['outputs.ClusterAddonsConfigParallelstoreCsiDriverConfig'] = None,
                  ray_operator_configs: Optional[Sequence['outputs.ClusterAddonsConfigRayOperatorConfig']] = None,
@@ -3991,6 +4020,16 @@ class ClusterAddonsConfig(dict):
                Structure is documented below.
         :param 'ClusterAddonsConfigKalmConfigArgs' kalm_config: .
                Configuration for the KALM addon, which manages the lifecycle of k8s. It is disabled by default; Set `enabled = true` to enable.
+        :param 'ClusterAddonsConfigLustreCsiDriverConfigArgs' lustre_csi_driver_config: The status of the Lustre CSI driver addon,
+               which allows the usage of Lustre instances as volumes.
+               It is disabled by default for Standard clusters; set `enabled = true` to enable.
+               It is disabled by default for Autopilot clusters; set `enabled = true` to enable.
+               Lustre CSI Driver Config has an optional subfield
+               `enable_legacy_lustre_port`, which allows the Lustre CSI driver to initialize LNet (the virtual network layer for the Lustre kernel module) using port 6988.
+               This flag is required to work around a port conflict with the gke-metadata-server on GKE nodes.
+               See [Enable Lustre CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/lustre-csi-driver-new-volume) for more information.
+
+               This example `addons_config` disables two addons:
         :param 'ClusterAddonsConfigNetworkPolicyConfigArgs' network_policy_config: Whether we should enable the network policy addon
                for the master. This must be enabled in order to enable network policy for the nodes.
                To enable this, you must also define a `network_policy` block,
@@ -4002,8 +4041,6 @@ class ClusterAddonsConfig(dict):
                It is disabled by default for Standard clusters; set `enabled = true` to enable.
                It is enabled by default for Autopilot clusters with version 1.29 or later; set `enabled = true` to enable it explicitly.
                See [Enable the Parallelstore CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/parallelstore-csi-new-volume#enable) for more information.
-
-               This example `addons_config` disables two addons:
         :param Sequence['ClusterAddonsConfigRayOperatorConfigArgs'] ray_operator_configs: . The status of the [Ray Operator
                addon](https://cloud.google.com/kubernetes-engine/docs/add-on/ray-on-gke/concepts/overview).
                It is disabled by default. Set `enabled = true` to enable. The minimum
@@ -4042,6 +4079,8 @@ class ClusterAddonsConfig(dict):
             pulumi.set(__self__, "istio_config", istio_config)
         if kalm_config is not None:
             pulumi.set(__self__, "kalm_config", kalm_config)
+        if lustre_csi_driver_config is not None:
+            pulumi.set(__self__, "lustre_csi_driver_config", lustre_csi_driver_config)
         if network_policy_config is not None:
             pulumi.set(__self__, "network_policy_config", network_policy_config)
         if parallelstore_csi_driver_config is not None:
@@ -4163,6 +4202,23 @@ class ClusterAddonsConfig(dict):
         """
         return pulumi.get(self, "kalm_config")
 
+    @_builtins.property
+    @pulumi.getter(name="lustreCsiDriverConfig")
+    def lustre_csi_driver_config(self) -> Optional['outputs.ClusterAddonsConfigLustreCsiDriverConfig']:
+        """
+        The status of the Lustre CSI driver addon,
+        which allows the usage of Lustre instances as volumes.
+        It is disabled by default for Standard clusters; set `enabled = true` to enable.
+        It is disabled by default for Autopilot clusters; set `enabled = true` to enable.
+        Lustre CSI Driver Config has an optional subfield
+        `enable_legacy_lustre_port`, which allows the Lustre CSI driver to initialize LNet (the virtual network layer for the Lustre kernel module) using port 6988.
+        This flag is required to work around a port conflict with the gke-metadata-server on GKE nodes.
+        See [Enable Lustre CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/lustre-csi-driver-new-volume) for more information.
+
+        This example `addons_config` disables two addons:
+        """
+        return pulumi.get(self, "lustre_csi_driver_config")
+
     @_builtins.property
     @pulumi.getter(name="networkPolicyConfig")
     def network_policy_config(self) -> Optional['outputs.ClusterAddonsConfigNetworkPolicyConfig']:
@@ -4185,8 +4241,6 @@ class ClusterAddonsConfig(dict):
         It is disabled by default for Standard clusters; set `enabled = true` to enable.
         It is enabled by default for Autopilot clusters with version 1.29 or later; set `enabled = true` to enable it explicitly.
         See [Enable the Parallelstore CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/parallelstore-csi-new-volume#enable) for more information.
-
-        This example `addons_config` disables two addons:
         """
         return pulumi.get(self, "parallelstore_csi_driver_config")
 
@@ -4429,6 +4483,55 @@ class ClusterAddonsConfigKalmConfig(dict):
         return pulumi.get(self, "enabled")
 
 
+@pulumi.output_type
+class ClusterAddonsConfigLustreCsiDriverConfig(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "enableLegacyLustrePort":
+            suggest = "enable_legacy_lustre_port"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ClusterAddonsConfigLustreCsiDriverConfig. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ClusterAddonsConfigLustreCsiDriverConfig.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ClusterAddonsConfigLustreCsiDriverConfig.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 enabled: _builtins.bool,
+                 enable_legacy_lustre_port: Optional[_builtins.bool] = None):
+        """
+        :param _builtins.bool enabled: Whether the Lustre CSI driver is enabled for this cluster.
+        :param _builtins.bool enable_legacy_lustre_port: If set to true, the Lustre CSI driver will initialize LNet (the virtual network layer for the Lustre kernel module) using port 6988.
+               This flag is required to work around a port conflict with the gke-metadata-server on GKE nodes.
+        """
+        pulumi.set(__self__, "enabled", enabled)
+        if enable_legacy_lustre_port is not None:
+            pulumi.set(__self__, "enable_legacy_lustre_port", enable_legacy_lustre_port)
+
+    @_builtins.property
+    @pulumi.getter
+    def enabled(self) -> _builtins.bool:
+        """
+        Whether the Lustre CSI driver is enabled for this cluster.
+        """
+        return pulumi.get(self, "enabled")
+
+    @_builtins.property
+    @pulumi.getter(name="enableLegacyLustrePort")
+    def enable_legacy_lustre_port(self) -> Optional[_builtins.bool]:
+        """
+        If set to true, the Lustre CSI driver will initialize LNet (the virtual network layer for the Lustre kernel module) using port 6988.
+        This flag is required to work around a port conflict with the gke-metadata-server on GKE nodes.
+        """
+        return pulumi.get(self, "enable_legacy_lustre_port")
+
+
 @pulumi.output_type
 class ClusterAddonsConfigNetworkPolicyConfig(dict):
     def __init__(__self__, *,
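A minimal sketch of how the new `lustre_csi_driver_config` block can be set from a Pulumi Python program; the cluster name, location, and node count below are hypothetical placeholders, and the field names follow the type added above:

    import pulumi_gcp as gcp

    # Hypothetical cluster enabling the Lustre CSI driver addon.
    # enable_legacy_lustre_port makes the driver initialize LNet on
    # port 6988 to work around the gke-metadata-server port conflict
    # described in the docstring above.
    cluster = gcp.container.Cluster(
        "lustre-example",
        location="us-central1",
        initial_node_count=1,
        addons_config={
            "lustre_csi_driver_config": {
                "enabled": True,
                "enable_legacy_lustre_port": True,
            },
        },
    )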
@@ -5975,7 +6078,9 @@ class ClusterIpAllocationPolicy(dict):
     @staticmethod
     def __key_warning(key: str):
         suggest = None
-        if key == "additionalPodRangesConfig":
+        if key == "additionalIpRangesConfigs":
+            suggest = "additional_ip_ranges_configs"
+        elif key == "additionalPodRangesConfig":
             suggest = "additional_pod_ranges_config"
         elif key == "clusterIpv4CidrBlock":
             suggest = "cluster_ipv4_cidr_block"
@@ -6002,6 +6107,7 @@ class ClusterIpAllocationPolicy(dict):
         return super().get(key, default)
 
     def __init__(__self__, *,
+                 additional_ip_ranges_configs: Optional[Sequence['outputs.ClusterIpAllocationPolicyAdditionalIpRangesConfig']] = None,
                  additional_pod_ranges_config: Optional['outputs.ClusterIpAllocationPolicyAdditionalPodRangesConfig'] = None,
                  cluster_ipv4_cidr_block: Optional[_builtins.str] = None,
                  cluster_secondary_range_name: Optional[_builtins.str] = None,
@@ -6010,6 +6116,8 @@ class ClusterIpAllocationPolicy(dict):
                  services_secondary_range_name: Optional[_builtins.str] = None,
                  stack_type: Optional[_builtins.str] = None):
         """
+        :param Sequence['ClusterIpAllocationPolicyAdditionalIpRangesConfigArgs'] additional_ip_ranges_configs: The configuration for individual additional subnetworks attached to the cluster.
+               Structure is documented below.
         :param 'ClusterIpAllocationPolicyAdditionalPodRangesConfigArgs' additional_pod_ranges_config: The configuration for additional pod secondary ranges at
                the cluster level. Used for Autopilot clusters and Standard clusters with which control of the
                secondary Pod IP address assignment to node pools isn't needed. Structure is documented below.
@@ -6035,6 +6143,8 @@ class ClusterIpAllocationPolicy(dict):
                Default value is `IPV4`.
                Possible values are `IPV4` and `IPV4_IPV6`.
         """
+        if additional_ip_ranges_configs is not None:
+            pulumi.set(__self__, "additional_ip_ranges_configs", additional_ip_ranges_configs)
         if additional_pod_ranges_config is not None:
             pulumi.set(__self__, "additional_pod_ranges_config", additional_pod_ranges_config)
         if cluster_ipv4_cidr_block is not None:
@@ -6050,6 +6160,15 @@ class ClusterIpAllocationPolicy(dict):
         if stack_type is not None:
             pulumi.set(__self__, "stack_type", stack_type)
 
+    @_builtins.property
+    @pulumi.getter(name="additionalIpRangesConfigs")
+    def additional_ip_ranges_configs(self) -> Optional[Sequence['outputs.ClusterIpAllocationPolicyAdditionalIpRangesConfig']]:
+        """
+        The configuration for individual additional subnetworks attached to the cluster.
+        Structure is documented below.
+        """
+        return pulumi.get(self, "additional_ip_ranges_configs")
+
     @_builtins.property
     @pulumi.getter(name="additionalPodRangesConfig")
     def additional_pod_ranges_config(self) -> Optional['outputs.ClusterIpAllocationPolicyAdditionalPodRangesConfig']:
@@ -6124,6 +6243,53 @@ class ClusterIpAllocationPolicy(dict):
         return pulumi.get(self, "stack_type")
 
 
+@pulumi.output_type
+class ClusterIpAllocationPolicyAdditionalIpRangesConfig(dict):
+    @staticmethod
+    def __key_warning(key: str):
+        suggest = None
+        if key == "podIpv4RangeNames":
+            suggest = "pod_ipv4_range_names"
+
+        if suggest:
+            pulumi.log.warn(f"Key '{key}' not found in ClusterIpAllocationPolicyAdditionalIpRangesConfig. Access the value via the '{suggest}' property getter instead.")
+
+    def __getitem__(self, key: str) -> Any:
+        ClusterIpAllocationPolicyAdditionalIpRangesConfig.__key_warning(key)
+        return super().__getitem__(key)
+
+    def get(self, key: str, default = None) -> Any:
+        ClusterIpAllocationPolicyAdditionalIpRangesConfig.__key_warning(key)
+        return super().get(key, default)
+
+    def __init__(__self__, *,
+                 subnetwork: _builtins.str,
+                 pod_ipv4_range_names: Optional[Sequence[_builtins.str]] = None):
+        """
+        :param _builtins.str subnetwork: Name of the subnetwork. This can be the full path of the subnetwork or just the name.
+        :param Sequence[_builtins.str] pod_ipv4_range_names: List of secondary range names within this subnetwork that can be used for pod IPs.
+        """
+        pulumi.set(__self__, "subnetwork", subnetwork)
+        if pod_ipv4_range_names is not None:
+            pulumi.set(__self__, "pod_ipv4_range_names", pod_ipv4_range_names)
+
+    @_builtins.property
+    @pulumi.getter
+    def subnetwork(self) -> _builtins.str:
+        """
+        Name of the subnetwork. This can be the full path of the subnetwork or just the name.
+        """
+        return pulumi.get(self, "subnetwork")
+
+    @_builtins.property
+    @pulumi.getter(name="podIpv4RangeNames")
+    def pod_ipv4_range_names(self) -> Optional[Sequence[_builtins.str]]:
+        """
+        List of secondary range names within this subnetwork that can be used for pod IPs.
+        """
+        return pulumi.get(self, "pod_ipv4_range_names")
+
+
 @pulumi.output_type
 class ClusterIpAllocationPolicyAdditionalPodRangesConfig(dict):
     @staticmethod
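A sketch of how `additional_ip_ranges_configs` fits into a cluster's `ip_allocation_policy`, assuming a hypothetical VPC-native cluster whose extra subnetwork `extra-subnet` carries a secondary range `extra-pods` (all names are placeholders):

    import pulumi_gcp as gcp

    # Hypothetical cluster attaching an additional subnetwork and
    # allowing its secondary range "extra-pods" to be used for pod IPs.
    cluster = gcp.container.Cluster(
        "ip-ranges-example",
        location="us-central1",
        initial_node_count=1,
        networking_mode="VPC_NATIVE",
        ip_allocation_policy={
            "cluster_secondary_range_name": "pods",
            "services_secondary_range_name": "services",
            "additional_ip_ranges_configs": [{
                "subnetwork": "extra-subnet",
                "pod_ipv4_range_names": ["extra-pods"],
            }],
        },
    )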
@@ -7102,6 +7268,8 @@ class ClusterNodeConfig(dict):
         suggest = None
         if key == "advancedMachineFeatures":
             suggest = "advanced_machine_features"
+        elif key == "bootDisk":
+            suggest = "boot_disk"
         elif key == "bootDiskKmsKey":
             suggest = "boot_disk_kms_key"
         elif key == "confidentialNodes":
@@ -7190,6 +7358,7 @@ class ClusterNodeConfig(dict):
 
     def __init__(__self__, *,
                  advanced_machine_features: Optional['outputs.ClusterNodeConfigAdvancedMachineFeatures'] = None,
+                 boot_disk: Optional['outputs.ClusterNodeConfigBootDisk'] = None,
                  boot_disk_kms_key: Optional[_builtins.str] = None,
                  confidential_nodes: Optional['outputs.ClusterNodeConfigConfidentialNodes'] = None,
                  containerd_config: Optional['outputs.ClusterNodeConfigContainerdConfig'] = None,
@@ -7237,13 +7406,15 @@ class ClusterNodeConfig(dict):
         """
         :param 'ClusterNodeConfigAdvancedMachineFeaturesArgs' advanced_machine_features: Specifies options for controlling
                advanced machine features. Structure is documented below.
+        :param 'ClusterNodeConfigBootDiskArgs' boot_disk: Configuration of the node pool boot disk. Structure is documented below.
         :param _builtins.str boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
         :param 'ClusterNodeConfigConfidentialNodesArgs' confidential_nodes: Configuration for Confidential Nodes feature. Structure is documented below.
         :param 'ClusterNodeConfigContainerdConfigArgs' containerd_config: Parameters to customize containerd runtime. Structure is documented below.
         :param _builtins.int disk_size_gb: Size of the disk attached to each node, specified
-               in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
+               in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
+               Prefer configuring `boot_disk`.
         :param _builtins.str disk_type: Type of the disk attached to each node
-               (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
+               (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
         :param Sequence['ClusterNodeConfigEffectiveTaintArgs'] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
         :param _builtins.bool enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
         :param 'ClusterNodeConfigEphemeralStorageConfigArgs' ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -7323,7 +7494,7 @@ class ClusterNodeConfig(dict):
         :param _builtins.str service_account: The service account to be used by the Node VMs.
                If not specified, the "default" service account is used.
         :param 'ClusterNodeConfigShieldedInstanceConfigArgs' shielded_instance_config: Shielded Instance options. Structure is documented below.
-        :param 'ClusterNodeConfigSoleTenantConfigArgs' sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
+        :param 'ClusterNodeConfigSoleTenantConfigArgs' sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
         :param _builtins.bool spot: A boolean that represents whether the underlying node VMs are spot.
                See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms)
                for more information. Defaults to false.
@@ -7344,6 +7515,8 @@ class ClusterNodeConfig(dict):
         """
         if advanced_machine_features is not None:
             pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
+        if boot_disk is not None:
+            pulumi.set(__self__, "boot_disk", boot_disk)
         if boot_disk_kms_key is not None:
             pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
         if confidential_nodes is not None:
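A sketch of the preferred `boot_disk` form of the disk settings described above, assuming a hypothetical cluster; the machine type and performance values are illustrative only, and the IOPS/throughput knobs apply only when `disk_type` is 'hyperdisk-balanced' (the `ClusterNodeConfigBootDisk` type itself appears further below):

    import pulumi_gcp as gcp

    # Hypothetical cluster configuring the node boot disk through the new
    # boot_disk block instead of the legacy disk_size_gb/disk_type fields.
    # Per the docs above, if both forms are set their values must match.
    cluster = gcp.container.Cluster(
        "boot-disk-example",
        location="us-central1",
        initial_node_count=1,
        node_config={
            "machine_type": "c3-standard-4",
            "boot_disk": {
                "disk_type": "hyperdisk-balanced",
                "size_gb": 100,
                # Illustrative values; see the hyperdisk performance limits
                # documentation for the valid range on a given machine type.
                "provisioned_iops": 3600,
                "provisioned_throughput": 290,
            },
        },
    )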
@@ -7442,6 +7615,14 @@ class ClusterNodeConfig(dict):
         """
         return pulumi.get(self, "advanced_machine_features")
 
+    @_builtins.property
+    @pulumi.getter(name="bootDisk")
+    def boot_disk(self) -> Optional['outputs.ClusterNodeConfigBootDisk']:
+        """
+        Configuration of the node pool boot disk. Structure is documented below.
+        """
+        return pulumi.get(self, "boot_disk")
+
     @_builtins.property
     @pulumi.getter(name="bootDiskKmsKey")
     def boot_disk_kms_key(self) -> Optional[_builtins.str]:
@@ -7471,7 +7652,8 @@ class ClusterNodeConfig(dict):
     def disk_size_gb(self) -> Optional[_builtins.int]:
         """
         Size of the disk attached to each node, specified
-        in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
+        in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
+        Prefer configuring `boot_disk`.
         """
         return pulumi.get(self, "disk_size_gb")
 
@@ -7480,7 +7662,7 @@ class ClusterNodeConfig(dict):
     def disk_type(self) -> Optional[_builtins.str]:
         """
         Type of the disk attached to each node
-        (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
+        (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
         """
         return pulumi.get(self, "disk_type")
 
@@ -7791,7 +7973,7 @@ class ClusterNodeConfig(dict):
     @pulumi.getter(name="soleTenantConfig")
     def sole_tenant_config(self) -> Optional['outputs.ClusterNodeConfigSoleTenantConfig']:
         """
-        Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
+        Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
         """
         return pulumi.get(self, "sole_tenant_config")
 
@@ -7918,6 +8100,88 @@ class ClusterNodeConfigAdvancedMachineFeatures(dict):
7918
8100
  return pulumi.get(self, "performance_monitoring_unit")
7919
8101
 
7920
8102
 
8103
+ @pulumi.output_type
8104
+ class ClusterNodeConfigBootDisk(dict):
8105
+ @staticmethod
8106
+ def __key_warning(key: str):
8107
+ suggest = None
8108
+ if key == "diskType":
8109
+ suggest = "disk_type"
8110
+ elif key == "provisionedIops":
8111
+ suggest = "provisioned_iops"
8112
+ elif key == "provisionedThroughput":
8113
+ suggest = "provisioned_throughput"
8114
+ elif key == "sizeGb":
8115
+ suggest = "size_gb"
8116
+
8117
+ if suggest:
8118
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigBootDisk. Access the value via the '{suggest}' property getter instead.")
8119
+
8120
+ def __getitem__(self, key: str) -> Any:
8121
+ ClusterNodeConfigBootDisk.__key_warning(key)
8122
+ return super().__getitem__(key)
8123
+
8124
+ def get(self, key: str, default = None) -> Any:
8125
+ ClusterNodeConfigBootDisk.__key_warning(key)
8126
+ return super().get(key, default)
8127
+
8128
+ def __init__(__self__, *,
8129
+ disk_type: Optional[_builtins.str] = None,
8130
+ provisioned_iops: Optional[_builtins.int] = None,
8131
+ provisioned_throughput: Optional[_builtins.int] = None,
8132
+ size_gb: Optional[_builtins.int] = None):
8133
+ """
8134
+ :param _builtins.str disk_type: Type of the disk attached to each node
8135
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced'. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
8136
+ :param _builtins.int provisioned_iops: Configures disk IOPS. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See the [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
8137
+ :param _builtins.int provisioned_throughput: Configures disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See the [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
8138
+ :param _builtins.int size_gb: Size of the disk attached to each node, specified
8139
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
8140
+ """
8141
+ if disk_type is not None:
8142
+ pulumi.set(__self__, "disk_type", disk_type)
8143
+ if provisioned_iops is not None:
8144
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
8145
+ if provisioned_throughput is not None:
8146
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
8147
+ if size_gb is not None:
8148
+ pulumi.set(__self__, "size_gb", size_gb)
8149
+
8150
+ @_builtins.property
8151
+ @pulumi.getter(name="diskType")
8152
+ def disk_type(self) -> Optional[_builtins.str]:
8153
+ """
8154
+ Type of the disk attached to each node
8155
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced'. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
8156
+ """
8157
+ return pulumi.get(self, "disk_type")
8158
+
8159
+ @_builtins.property
8160
+ @pulumi.getter(name="provisionedIops")
8161
+ def provisioned_iops(self) -> Optional[_builtins.int]:
8162
+ """
8163
+ Configures disk IOPS. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See the [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
8164
+ """
8165
+ return pulumi.get(self, "provisioned_iops")
8166
+
8167
+ @_builtins.property
8168
+ @pulumi.getter(name="provisionedThroughput")
8169
+ def provisioned_throughput(self) -> Optional[_builtins.int]:
8170
+ """
8171
+ Configures disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See the [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
8172
+ """
8173
+ return pulumi.get(self, "provisioned_throughput")
8174
+
8175
+ @_builtins.property
8176
+ @pulumi.getter(name="sizeGb")
8177
+ def size_gb(self) -> Optional[_builtins.int]:
8178
+ """
8179
+ Size of the disk attached to each node, specified
8180
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
8181
+ """
8182
+ return pulumi.get(self, "size_gb")
8183
+
8184
+
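
The `boot_disk` block above supersedes the top-level `disk_size_gb`/`disk_type` fields. A minimal sketch of how it might be wired up on a cluster's default node pool (resource names, machine type, and performance values are illustrative; valid IOPS/throughput ranges depend on the machine type, per the Hyperdisk performance-limit docs):

```python
import pulumi_gcp as gcp

# Sketch: boot each node from a Hyperdisk Balanced volume.
# provisioned_iops/provisioned_throughput are only valid when
# disk_type is 'hyperdisk-balanced'.
cluster = gcp.container.Cluster(
    "example-cluster",
    location="us-central1-a",
    initial_node_count=1,
    node_config={
        "machine_type": "c3-standard-4",
        # Prefer boot_disk over the legacy disk_size_gb/disk_type;
        # if both are set, the values must match.
        "boot_disk": {
            "disk_type": "hyperdisk-balanced",
            "size_gb": 100,
            "provisioned_iops": 3000,
            "provisioned_throughput": 140,
        },
    },
)
```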
7921
8185
  @pulumi.output_type
7922
8186
  class ClusterNodeConfigConfidentialNodes(dict):
7923
8187
  @staticmethod
@@ -8551,6 +8815,14 @@ class ClusterNodeConfigKubeletConfig(dict):
8551
8815
  suggest = "cpu_cfs_quota_period"
8552
8816
  elif key == "cpuManagerPolicy":
8553
8817
  suggest = "cpu_manager_policy"
8818
+ elif key == "evictionMaxPodGracePeriodSeconds":
8819
+ suggest = "eviction_max_pod_grace_period_seconds"
8820
+ elif key == "evictionMinimumReclaim":
8821
+ suggest = "eviction_minimum_reclaim"
8822
+ elif key == "evictionSoft":
8823
+ suggest = "eviction_soft"
8824
+ elif key == "evictionSoftGracePeriod":
8825
+ suggest = "eviction_soft_grace_period"
8554
8826
  elif key == "imageGcHighThresholdPercent":
8555
8827
  suggest = "image_gc_high_threshold_percent"
8556
8828
  elif key == "imageGcLowThresholdPercent":
@@ -8561,8 +8833,12 @@ class ClusterNodeConfigKubeletConfig(dict):
8561
8833
  suggest = "image_minimum_gc_age"
8562
8834
  elif key == "insecureKubeletReadonlyPortEnabled":
8563
8835
  suggest = "insecure_kubelet_readonly_port_enabled"
8836
+ elif key == "maxParallelImagePulls":
8837
+ suggest = "max_parallel_image_pulls"
8564
8838
  elif key == "podPidsLimit":
8565
8839
  suggest = "pod_pids_limit"
8840
+ elif key == "singleProcessOomKill":
8841
+ suggest = "single_process_oom_kill"
8566
8842
 
8567
8843
  if suggest:
8568
8844
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfig. Access the value via the '{suggest}' property getter instead.")
@@ -8582,12 +8858,18 @@ class ClusterNodeConfigKubeletConfig(dict):
8582
8858
  cpu_cfs_quota: Optional[_builtins.bool] = None,
8583
8859
  cpu_cfs_quota_period: Optional[_builtins.str] = None,
8584
8860
  cpu_manager_policy: Optional[_builtins.str] = None,
8861
+ eviction_max_pod_grace_period_seconds: Optional[_builtins.int] = None,
8862
+ eviction_minimum_reclaim: Optional['outputs.ClusterNodeConfigKubeletConfigEvictionMinimumReclaim'] = None,
8863
+ eviction_soft: Optional['outputs.ClusterNodeConfigKubeletConfigEvictionSoft'] = None,
8864
+ eviction_soft_grace_period: Optional['outputs.ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod'] = None,
8585
8865
  image_gc_high_threshold_percent: Optional[_builtins.int] = None,
8586
8866
  image_gc_low_threshold_percent: Optional[_builtins.int] = None,
8587
8867
  image_maximum_gc_age: Optional[_builtins.str] = None,
8588
8868
  image_minimum_gc_age: Optional[_builtins.str] = None,
8589
8869
  insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
8590
- pod_pids_limit: Optional[_builtins.int] = None):
8870
+ max_parallel_image_pulls: Optional[_builtins.int] = None,
8871
+ pod_pids_limit: Optional[_builtins.int] = None,
8872
+ single_process_oom_kill: Optional[_builtins.bool] = None):
8591
8873
  """
8592
8874
  :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
8593
8875
  :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -8607,12 +8889,18 @@ class ClusterNodeConfigKubeletConfig(dict):
8607
8889
  One of `"none"` or `"static"`. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
8608
8890
  Prior to the 6.4.0 this field was marked as required. The workaround for the required field
8609
8891
  is setting the empty string `""`, which will function identically to not setting this field.
8892
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
8893
+ :param 'ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs' eviction_minimum_reclaim: Defines a map of signal names to percentages that define minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
8894
+ :param 'ClusterNodeConfigKubeletConfigEvictionSoftArgs' eviction_soft: Defines a map of signal names to quantities or percentages that define soft eviction thresholds. Structure is documented below.
8895
+ :param 'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs' eviction_soft_grace_period: Defines a map of signal names to durations that define grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
8610
8896
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
8611
8897
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive.
8612
8898
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`, and `"2h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration.
8613
8899
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
8614
8900
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
8901
+ :param _builtins.int max_parallel_image_pulls: Sets the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
8615
8902
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
8903
+ :param _builtins.bool single_process_oom_kill: Defines whether to enable the single-process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
8616
8904
  """
8617
8905
  if allowed_unsafe_sysctls is not None:
8618
8906
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -8626,6 +8914,14 @@ class ClusterNodeConfigKubeletConfig(dict):
8626
8914
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
8627
8915
  if cpu_manager_policy is not None:
8628
8916
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
8917
+ if eviction_max_pod_grace_period_seconds is not None:
8918
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
8919
+ if eviction_minimum_reclaim is not None:
8920
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
8921
+ if eviction_soft is not None:
8922
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
8923
+ if eviction_soft_grace_period is not None:
8924
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
8629
8925
  if image_gc_high_threshold_percent is not None:
8630
8926
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
8631
8927
  if image_gc_low_threshold_percent is not None:
@@ -8636,8 +8932,12 @@ class ClusterNodeConfigKubeletConfig(dict):
8636
8932
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
8637
8933
  if insecure_kubelet_readonly_port_enabled is not None:
8638
8934
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
8935
+ if max_parallel_image_pulls is not None:
8936
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
8639
8937
  if pod_pids_limit is not None:
8640
8938
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
8939
+ if single_process_oom_kill is not None:
8940
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
8641
8941
 
8642
8942
  @_builtins.property
8643
8943
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -8699,6 +8999,38 @@ class ClusterNodeConfigKubeletConfig(dict):
8699
8999
  """
8700
9000
  return pulumi.get(self, "cpu_manager_policy")
8701
9001
 
9002
+ @_builtins.property
9003
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
9004
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[_builtins.int]:
9005
+ """
9006
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
9007
+ """
9008
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
9009
+
9010
+ @_builtins.property
9011
+ @pulumi.getter(name="evictionMinimumReclaim")
9012
+ def eviction_minimum_reclaim(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigEvictionMinimumReclaim']:
9013
+ """
9014
+ Defines a map of signal names to percentages that define minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
9015
+ """
9016
+ return pulumi.get(self, "eviction_minimum_reclaim")
9017
+
9018
+ @_builtins.property
9019
+ @pulumi.getter(name="evictionSoft")
9020
+ def eviction_soft(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigEvictionSoft']:
9021
+ """
9022
+ Defines a map of signal names to quantities or percentages that define soft eviction thresholds. Structure is documented below.
9023
+ """
9024
+ return pulumi.get(self, "eviction_soft")
9025
+
9026
+ @_builtins.property
9027
+ @pulumi.getter(name="evictionSoftGracePeriod")
9028
+ def eviction_soft_grace_period(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod']:
9029
+ """
9030
+ Defines a map of signal names to durations that define grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
9031
+ """
9032
+ return pulumi.get(self, "eviction_soft_grace_period")
9033
+
8702
9034
  @_builtins.property
8703
9035
  @pulumi.getter(name="imageGcHighThresholdPercent")
8704
9036
  def image_gc_high_threshold_percent(self) -> Optional[_builtins.int]:
@@ -8739,6 +9071,14 @@ class ClusterNodeConfigKubeletConfig(dict):
8739
9071
  """
8740
9072
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
8741
9073
 
9074
+ @_builtins.property
9075
+ @pulumi.getter(name="maxParallelImagePulls")
9076
+ def max_parallel_image_pulls(self) -> Optional[_builtins.int]:
9077
+ """
9078
+ Sets the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
9079
+ """
9080
+ return pulumi.get(self, "max_parallel_image_pulls")
9081
+
8742
9082
  @_builtins.property
8743
9083
  @pulumi.getter(name="podPidsLimit")
8744
9084
  def pod_pids_limit(self) -> Optional[_builtins.int]:
@@ -8747,146 +9087,500 @@ class ClusterNodeConfigKubeletConfig(dict):
8747
9087
  """
8748
9088
  return pulumi.get(self, "pod_pids_limit")
8749
9089
 
9090
+ @_builtins.property
9091
+ @pulumi.getter(name="singleProcessOomKill")
9092
+ def single_process_oom_kill(self) -> Optional[_builtins.bool]:
9093
+ """
9094
+ Defines whether to enable the single-process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
9095
+ """
9096
+ return pulumi.get(self, "single_process_oom_kill")
9097
+
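
Taken together, the new kubelet fields cover soft eviction end to end. A hedged sketch with values chosen to sit inside the ranges documented above; note that every signal in `eviction_soft` is paired with a grace period, as required:

```python
import pulumi_gcp as gcp

cluster = gcp.container.Cluster(
    "kubelet-tuned",
    location="us-central1-a",
    initial_node_count=1,
    node_config={
        "kubelet_config": {
            # Soft thresholds: memory.available takes a quantity,
            # filesystem and pid signals take percentages.
            "eviction_soft": {
                "memory_available": "250Mi",
                "nodefs_available": "20%",
            },
            # Each soft threshold needs a matching grace period (<= "5m").
            "eviction_soft_grace_period": {
                "memory_available": "30s",
                "nodefs_available": "1m30s",
            },
            # Minimum reclaim past the threshold (<= "10%").
            "eviction_minimum_reclaim": {
                "memory_available": "5%",
            },
            "eviction_max_pod_grace_period_seconds": 60,  # positive, <= 300
            "max_parallel_image_pulls": 3,                # between 2 and 5
            "single_process_oom_kill": True,
        },
    },
)
```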
8750
9098
 
8751
9099
  @pulumi.output_type
8752
- class ClusterNodeConfigLinuxNodeConfig(dict):
9100
+ class ClusterNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
8753
9101
  @staticmethod
8754
9102
  def __key_warning(key: str):
8755
9103
  suggest = None
8756
- if key == "cgroupMode":
8757
- suggest = "cgroup_mode"
8758
- elif key == "hugepagesConfig":
8759
- suggest = "hugepages_config"
9104
+ if key == "imagefsAvailable":
9105
+ suggest = "imagefs_available"
9106
+ elif key == "imagefsInodesFree":
9107
+ suggest = "imagefs_inodes_free"
9108
+ elif key == "memoryAvailable":
9109
+ suggest = "memory_available"
9110
+ elif key == "nodefsAvailable":
9111
+ suggest = "nodefs_available"
9112
+ elif key == "nodefsInodesFree":
9113
+ suggest = "nodefs_inodes_free"
9114
+ elif key == "pidAvailable":
9115
+ suggest = "pid_available"
8760
9116
 
8761
9117
  if suggest:
8762
- pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigLinuxNodeConfig. Access the value via the '{suggest}' property getter instead.")
9118
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfigEvictionMinimumReclaim. Access the value via the '{suggest}' property getter instead.")
8763
9119
 
8764
9120
  def __getitem__(self, key: str) -> Any:
8765
- ClusterNodeConfigLinuxNodeConfig.__key_warning(key)
9121
+ ClusterNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
8766
9122
  return super().__getitem__(key)
8767
9123
 
8768
9124
  def get(self, key: str, default = None) -> Any:
8769
- ClusterNodeConfigLinuxNodeConfig.__key_warning(key)
9125
+ ClusterNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
8770
9126
  return super().get(key, default)
8771
9127
 
8772
9128
  def __init__(__self__, *,
8773
- cgroup_mode: Optional[_builtins.str] = None,
8774
- hugepages_config: Optional['outputs.ClusterNodeConfigLinuxNodeConfigHugepagesConfig'] = None,
8775
- sysctls: Optional[Mapping[str, _builtins.str]] = None):
9129
+ imagefs_available: Optional[_builtins.str] = None,
9130
+ imagefs_inodes_free: Optional[_builtins.str] = None,
9131
+ memory_available: Optional[_builtins.str] = None,
9132
+ nodefs_available: Optional[_builtins.str] = None,
9133
+ nodefs_inodes_free: Optional[_builtins.str] = None,
9134
+ pid_available: Optional[_builtins.str] = None):
8776
9135
  """
8777
- :param _builtins.str cgroup_mode: Possible cgroup modes that can be used.
8778
- Accepted values are:
8779
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
8780
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
8781
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
8782
- :param 'ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs' hugepages_config: Amounts for 2M and 1G hugepages. Structure is documented below.
8783
- :param Mapping[str, _builtins.str] sysctls: The Linux kernel parameters to be applied to the nodes
8784
- and all pods running on the nodes. Specified as a map from the key, such as
8785
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
8786
- Note that validations happen all server side. All attributes are optional.
9136
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9137
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9138
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9139
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9140
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9141
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
8787
9142
  """
8788
- if cgroup_mode is not None:
8789
- pulumi.set(__self__, "cgroup_mode", cgroup_mode)
8790
- if hugepages_config is not None:
8791
- pulumi.set(__self__, "hugepages_config", hugepages_config)
8792
- if sysctls is not None:
8793
- pulumi.set(__self__, "sysctls", sysctls)
9143
+ if imagefs_available is not None:
9144
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
9145
+ if imagefs_inodes_free is not None:
9146
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
9147
+ if memory_available is not None:
9148
+ pulumi.set(__self__, "memory_available", memory_available)
9149
+ if nodefs_available is not None:
9150
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
9151
+ if nodefs_inodes_free is not None:
9152
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
9153
+ if pid_available is not None:
9154
+ pulumi.set(__self__, "pid_available", pid_available)
8794
9155
 
8795
9156
  @_builtins.property
8796
- @pulumi.getter(name="cgroupMode")
8797
- def cgroup_mode(self) -> Optional[_builtins.str]:
9157
+ @pulumi.getter(name="imagefsAvailable")
9158
+ def imagefs_available(self) -> Optional[_builtins.str]:
8798
9159
  """
8799
- Possible cgroup modes that can be used.
8800
- Accepted values are:
8801
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
8802
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
8803
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
9160
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
8804
9161
  """
8805
- return pulumi.get(self, "cgroup_mode")
9162
+ return pulumi.get(self, "imagefs_available")
8806
9163
 
8807
9164
  @_builtins.property
8808
- @pulumi.getter(name="hugepagesConfig")
8809
- def hugepages_config(self) -> Optional['outputs.ClusterNodeConfigLinuxNodeConfigHugepagesConfig']:
9165
+ @pulumi.getter(name="imagefsInodesFree")
9166
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
8810
9167
  """
8811
- Amounts for 2M and 1G hugepages. Structure is documented below.
9168
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
8812
9169
  """
8813
- return pulumi.get(self, "hugepages_config")
9170
+ return pulumi.get(self, "imagefs_inodes_free")
8814
9171
 
8815
9172
  @_builtins.property
8816
- @pulumi.getter
8817
- def sysctls(self) -> Optional[Mapping[str, _builtins.str]]:
9173
+ @pulumi.getter(name="memoryAvailable")
9174
+ def memory_available(self) -> Optional[_builtins.str]:
8818
9175
  """
8819
- The Linux kernel parameters to be applied to the nodes
8820
- and all pods running on the nodes. Specified as a map from the key, such as
8821
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
8822
- Note that validations happen all server side. All attributes are optional.
9176
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
8823
9177
  """
8824
- return pulumi.get(self, "sysctls")
9178
+ return pulumi.get(self, "memory_available")
9179
+
9180
+ @_builtins.property
9181
+ @pulumi.getter(name="nodefsAvailable")
9182
+ def nodefs_available(self) -> Optional[_builtins.str]:
9183
+ """
9184
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9185
+ """
9186
+ return pulumi.get(self, "nodefs_available")
9187
+
9188
+ @_builtins.property
9189
+ @pulumi.getter(name="nodefsInodesFree")
9190
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
9191
+ """
9192
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9193
+ """
9194
+ return pulumi.get(self, "nodefs_inodes_free")
9195
+
9196
+ @_builtins.property
9197
+ @pulumi.getter(name="pidAvailable")
9198
+ def pid_available(self) -> Optional[_builtins.str]:
9199
+ """
9200
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9201
+ """
9202
+ return pulumi.get(self, "pid_available")
8825
9203
 
8826
9204
 
8827
9205
  @pulumi.output_type
8828
- class ClusterNodeConfigLinuxNodeConfigHugepagesConfig(dict):
9206
+ class ClusterNodeConfigKubeletConfigEvictionSoft(dict):
8829
9207
  @staticmethod
8830
9208
  def __key_warning(key: str):
8831
9209
  suggest = None
8832
- if key == "hugepageSize1g":
8833
- suggest = "hugepage_size1g"
8834
- elif key == "hugepageSize2m":
8835
- suggest = "hugepage_size2m"
9210
+ if key == "imagefsAvailable":
9211
+ suggest = "imagefs_available"
9212
+ elif key == "imagefsInodesFree":
9213
+ suggest = "imagefs_inodes_free"
9214
+ elif key == "memoryAvailable":
9215
+ suggest = "memory_available"
9216
+ elif key == "nodefsAvailable":
9217
+ suggest = "nodefs_available"
9218
+ elif key == "nodefsInodesFree":
9219
+ suggest = "nodefs_inodes_free"
9220
+ elif key == "pidAvailable":
9221
+ suggest = "pid_available"
8836
9222
 
8837
9223
  if suggest:
8838
- pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigLinuxNodeConfigHugepagesConfig. Access the value via the '{suggest}' property getter instead.")
9224
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfigEvictionSoft. Access the value via the '{suggest}' property getter instead.")
8839
9225
 
8840
9226
  def __getitem__(self, key: str) -> Any:
8841
- ClusterNodeConfigLinuxNodeConfigHugepagesConfig.__key_warning(key)
9227
+ ClusterNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
8842
9228
  return super().__getitem__(key)
8843
9229
 
8844
9230
  def get(self, key: str, default = None) -> Any:
8845
- ClusterNodeConfigLinuxNodeConfigHugepagesConfig.__key_warning(key)
9231
+ ClusterNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
8846
9232
  return super().get(key, default)
8847
9233
 
8848
9234
  def __init__(__self__, *,
8849
- hugepage_size1g: Optional[_builtins.int] = None,
8850
- hugepage_size2m: Optional[_builtins.int] = None):
9235
+ imagefs_available: Optional[_builtins.str] = None,
9236
+ imagefs_inodes_free: Optional[_builtins.str] = None,
9237
+ memory_available: Optional[_builtins.str] = None,
9238
+ nodefs_available: Optional[_builtins.str] = None,
9239
+ nodefs_inodes_free: Optional[_builtins.str] = None,
9240
+ pid_available: Optional[_builtins.str] = None):
8851
9241
  """
8852
- :param _builtins.int hugepage_size1g: Amount of 1G hugepages.
8853
- :param _builtins.int hugepage_size2m: Amount of 2M hugepages.
9242
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
9243
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
9244
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
9245
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
9246
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
9247
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
8854
9248
  """
8855
- if hugepage_size1g is not None:
8856
- pulumi.set(__self__, "hugepage_size1g", hugepage_size1g)
8857
- if hugepage_size2m is not None:
8858
- pulumi.set(__self__, "hugepage_size2m", hugepage_size2m)
9249
+ if imagefs_available is not None:
9250
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
9251
+ if imagefs_inodes_free is not None:
9252
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
9253
+ if memory_available is not None:
9254
+ pulumi.set(__self__, "memory_available", memory_available)
9255
+ if nodefs_available is not None:
9256
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
9257
+ if nodefs_inodes_free is not None:
9258
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
9259
+ if pid_available is not None:
9260
+ pulumi.set(__self__, "pid_available", pid_available)
8859
9261
 
8860
9262
  @_builtins.property
8861
- @pulumi.getter(name="hugepageSize1g")
8862
- def hugepage_size1g(self) -> Optional[_builtins.int]:
9263
+ @pulumi.getter(name="imagefsAvailable")
9264
+ def imagefs_available(self) -> Optional[_builtins.str]:
8863
9265
  """
8864
- Amount of 1G hugepages.
9266
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
8865
9267
  """
8866
- return pulumi.get(self, "hugepage_size1g")
9268
+ return pulumi.get(self, "imagefs_available")
8867
9269
 
8868
9270
  @_builtins.property
8869
- @pulumi.getter(name="hugepageSize2m")
8870
- def hugepage_size2m(self) -> Optional[_builtins.int]:
9271
+ @pulumi.getter(name="imagefsInodesFree")
9272
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
8871
9273
  """
8872
- Amount of 2M hugepages.
9274
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
8873
9275
  """
8874
- return pulumi.get(self, "hugepage_size2m")
9276
+ return pulumi.get(self, "imagefs_inodes_free")
9277
+
9278
+ @_builtins.property
9279
+ @pulumi.getter(name="memoryAvailable")
9280
+ def memory_available(self) -> Optional[_builtins.str]:
9281
+ """
9282
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
9283
+ """
9284
+ return pulumi.get(self, "memory_available")
9285
+
9286
+ @_builtins.property
9287
+ @pulumi.getter(name="nodefsAvailable")
9288
+ def nodefs_available(self) -> Optional[_builtins.str]:
9289
+ """
9290
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
9291
+ """
9292
+ return pulumi.get(self, "nodefs_available")
9293
+
9294
+ @_builtins.property
9295
+ @pulumi.getter(name="nodefsInodesFree")
9296
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
9297
+ """
9298
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
9299
+ """
9300
+ return pulumi.get(self, "nodefs_inodes_free")
9301
+
9302
+ @_builtins.property
9303
+ @pulumi.getter(name="pidAvailable")
9304
+ def pid_available(self) -> Optional[_builtins.str]:
9305
+ """
9306
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
9307
+ """
9308
+ return pulumi.get(self, "pid_available")
8875
9309
 
8876
9310
 
8877
9311
  @pulumi.output_type
8878
- class ClusterNodeConfigLocalNvmeSsdBlockConfig(dict):
9312
+ class ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
8879
9313
  @staticmethod
8880
9314
  def __key_warning(key: str):
8881
9315
  suggest = None
8882
- if key == "localSsdCount":
8883
- suggest = "local_ssd_count"
9316
+ if key == "imagefsAvailable":
9317
+ suggest = "imagefs_available"
9318
+ elif key == "imagefsInodesFree":
9319
+ suggest = "imagefs_inodes_free"
9320
+ elif key == "memoryAvailable":
9321
+ suggest = "memory_available"
9322
+ elif key == "nodefsAvailable":
9323
+ suggest = "nodefs_available"
9324
+ elif key == "nodefsInodesFree":
9325
+ suggest = "nodefs_inodes_free"
9326
+ elif key == "pidAvailable":
9327
+ suggest = "pid_available"
8884
9328
 
8885
9329
  if suggest:
8886
- pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigLocalNvmeSsdBlockConfig. Access the value via the '{suggest}' property getter instead.")
9330
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod. Access the value via the '{suggest}' property getter instead.")
8887
9331
 
8888
9332
  def __getitem__(self, key: str) -> Any:
8889
- ClusterNodeConfigLocalNvmeSsdBlockConfig.__key_warning(key)
9333
+ ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
9334
+ return super().__getitem__(key)
9335
+
9336
+ def get(self, key: str, default = None) -> Any:
9337
+ ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
9338
+ return super().get(key, default)
9339
+
9340
+ def __init__(__self__, *,
9341
+ imagefs_available: Optional[_builtins.str] = None,
9342
+ imagefs_inodes_free: Optional[_builtins.str] = None,
9343
+ memory_available: Optional[_builtins.str] = None,
9344
+ nodefs_available: Optional[_builtins.str] = None,
9345
+ nodefs_inodes_free: Optional[_builtins.str] = None,
9346
+ pid_available: Optional[_builtins.str] = None):
9347
+ """
9348
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9349
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9350
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
9351
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9352
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9353
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9354
+ """
9355
+ if imagefs_available is not None:
9356
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
9357
+ if imagefs_inodes_free is not None:
9358
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
9359
+ if memory_available is not None:
9360
+ pulumi.set(__self__, "memory_available", memory_available)
9361
+ if nodefs_available is not None:
9362
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
9363
+ if nodefs_inodes_free is not None:
9364
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
9365
+ if pid_available is not None:
9366
+ pulumi.set(__self__, "pid_available", pid_available)
9367
+
9368
+ @_builtins.property
9369
+ @pulumi.getter(name="imagefsAvailable")
9370
+ def imagefs_available(self) -> Optional[_builtins.str]:
9371
+ """
9372
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9373
+ """
9374
+ return pulumi.get(self, "imagefs_available")
9375
+
9376
+ @_builtins.property
9377
+ @pulumi.getter(name="imagefsInodesFree")
9378
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
9379
+ """
9380
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9381
+ """
9382
+ return pulumi.get(self, "imagefs_inodes_free")
9383
+
9384
+ @_builtins.property
9385
+ @pulumi.getter(name="memoryAvailable")
9386
+ def memory_available(self) -> Optional[_builtins.str]:
9387
+ """
9388
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
9389
+ """
9390
+ return pulumi.get(self, "memory_available")
9391
+
9392
+ @_builtins.property
9393
+ @pulumi.getter(name="nodefsAvailable")
9394
+ def nodefs_available(self) -> Optional[_builtins.str]:
9395
+ """
9396
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9397
+ """
9398
+ return pulumi.get(self, "nodefs_available")
9399
+
9400
+ @_builtins.property
9401
+ @pulumi.getter(name="nodefsInodesFree")
9402
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
9403
+ """
9404
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9405
+ """
9406
+ return pulumi.get(self, "nodefs_inodes_free")
9407
+
9408
+ @_builtins.property
9409
+ @pulumi.getter(name="pidAvailable")
9410
+ def pid_available(self) -> Optional[_builtins.str]:
9411
+ """
9412
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9413
+ """
9414
+ return pulumi.get(self, "pid_available")
9415
+
9416
+
9417
+ @pulumi.output_type
9418
+ class ClusterNodeConfigLinuxNodeConfig(dict):
9419
+ @staticmethod
9420
+ def __key_warning(key: str):
9421
+ suggest = None
9422
+ if key == "cgroupMode":
9423
+ suggest = "cgroup_mode"
9424
+ elif key == "hugepagesConfig":
9425
+ suggest = "hugepages_config"
9426
+ elif key == "transparentHugepageDefrag":
9427
+ suggest = "transparent_hugepage_defrag"
9428
+ elif key == "transparentHugepageEnabled":
9429
+ suggest = "transparent_hugepage_enabled"
9430
+
9431
+ if suggest:
9432
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigLinuxNodeConfig. Access the value via the '{suggest}' property getter instead.")
9433
+
9434
+ def __getitem__(self, key: str) -> Any:
9435
+ ClusterNodeConfigLinuxNodeConfig.__key_warning(key)
9436
+ return super().__getitem__(key)
9437
+
9438
+ def get(self, key: str, default = None) -> Any:
9439
+ ClusterNodeConfigLinuxNodeConfig.__key_warning(key)
9440
+ return super().get(key, default)
9441
+
9442
+ def __init__(__self__, *,
9443
+ cgroup_mode: Optional[_builtins.str] = None,
9444
+ hugepages_config: Optional['outputs.ClusterNodeConfigLinuxNodeConfigHugepagesConfig'] = None,
9445
+ sysctls: Optional[Mapping[str, _builtins.str]] = None,
9446
+ transparent_hugepage_defrag: Optional[_builtins.str] = None,
9447
+ transparent_hugepage_enabled: Optional[_builtins.str] = None):
9448
+ """
9449
+ :param _builtins.str cgroup_mode: Possible cgroup modes that can be used.
9450
+ Accepted values are:
9451
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
9452
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
9453
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
9454
+ :param 'ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs' hugepages_config: Amounts for 2M and 1G hugepages. Structure is documented below.
9455
+ :param Mapping[str, _builtins.str] sysctls: The Linux kernel parameters to be applied to the nodes
9456
+ and all pods running on the nodes. Specified as a map from the key, such as
9457
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
9458
+ Note that all validation happens server side. All attributes are optional.
9459
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
9460
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
9461
+ """
9462
+ if cgroup_mode is not None:
9463
+ pulumi.set(__self__, "cgroup_mode", cgroup_mode)
9464
+ if hugepages_config is not None:
9465
+ pulumi.set(__self__, "hugepages_config", hugepages_config)
9466
+ if sysctls is not None:
9467
+ pulumi.set(__self__, "sysctls", sysctls)
9468
+ if transparent_hugepage_defrag is not None:
9469
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
9470
+ if transparent_hugepage_enabled is not None:
9471
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
9472
+
9473
+ @_builtins.property
9474
+ @pulumi.getter(name="cgroupMode")
9475
+ def cgroup_mode(self) -> Optional[_builtins.str]:
9476
+ """
9477
+ Possible cgroup modes that can be used.
9478
+ Accepted values are:
9479
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
9480
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
9481
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
9482
+ """
9483
+ return pulumi.get(self, "cgroup_mode")
9484
+
9485
+ @_builtins.property
9486
+ @pulumi.getter(name="hugepagesConfig")
9487
+ def hugepages_config(self) -> Optional['outputs.ClusterNodeConfigLinuxNodeConfigHugepagesConfig']:
9488
+ """
9489
+ Amounts for 2M and 1G hugepages. Structure is documented below.
9490
+ """
9491
+ return pulumi.get(self, "hugepages_config")
9492
+
9493
+ @_builtins.property
9494
+ @pulumi.getter
9495
+ def sysctls(self) -> Optional[Mapping[str, _builtins.str]]:
9496
+ """
9497
+ The Linux kernel parameters to be applied to the nodes
9498
+ and all pods running on the nodes. Specified as a map from the key, such as
9499
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
9500
+ Note that all validation happens server side. All attributes are optional.
9501
+ """
9502
+ return pulumi.get(self, "sysctls")
9503
+
9504
+ @_builtins.property
9505
+ @pulumi.getter(name="transparentHugepageDefrag")
9506
+ def transparent_hugepage_defrag(self) -> Optional[_builtins.str]:
9507
+ """
9508
+ The Linux kernel transparent hugepage defrag setting.
9509
+ """
9510
+ return pulumi.get(self, "transparent_hugepage_defrag")
9511
+
9512
+ @_builtins.property
9513
+ @pulumi.getter(name="transparentHugepageEnabled")
9514
+ def transparent_hugepage_enabled(self) -> Optional[_builtins.str]:
9515
+ """
9516
+ The Linux kernel transparent hugepage setting.
9517
+ """
9518
+ return pulumi.get(self, "transparent_hugepage_enabled")
9519
+
9520
+
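
The two transparent-hugepage knobs sit alongside the existing cgroup, sysctl, and hugepages settings. A sketch of a combined `linux_node_config`; the enum strings for the hugepage fields are an assumption based on GKE API naming, since the docstrings above describe the fields only generically:

```python
import pulumi_gcp as gcp

cluster = gcp.container.Cluster(
    "linux-tuned",
    location="us-central1-a",
    initial_node_count=1,
    node_config={
        "linux_node_config": {
            "cgroup_mode": "CGROUP_MODE_V2",
            "sysctls": {"net.core.somaxconn": "4096"},
            "hugepages_config": {"hugepage_size2m": 1024},
            # Assumed GKE API enum values; verify against the provider docs.
            "transparent_hugepage_enabled": "TRANSPARENT_HUGEPAGE_ENABLED_MADVISE",
            "transparent_hugepage_defrag": "TRANSPARENT_HUGEPAGE_DEFRAG_DEFER",
        },
    },
)
```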
9521
+ @pulumi.output_type
9522
+ class ClusterNodeConfigLinuxNodeConfigHugepagesConfig(dict):
9523
+ @staticmethod
9524
+ def __key_warning(key: str):
9525
+ suggest = None
9526
+ if key == "hugepageSize1g":
9527
+ suggest = "hugepage_size1g"
9528
+ elif key == "hugepageSize2m":
9529
+ suggest = "hugepage_size2m"
9530
+
9531
+ if suggest:
9532
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigLinuxNodeConfigHugepagesConfig. Access the value via the '{suggest}' property getter instead.")
9533
+
9534
+ def __getitem__(self, key: str) -> Any:
9535
+ ClusterNodeConfigLinuxNodeConfigHugepagesConfig.__key_warning(key)
9536
+ return super().__getitem__(key)
9537
+
9538
+ def get(self, key: str, default = None) -> Any:
9539
+ ClusterNodeConfigLinuxNodeConfigHugepagesConfig.__key_warning(key)
9540
+ return super().get(key, default)
9541
+
9542
+ def __init__(__self__, *,
9543
+ hugepage_size1g: Optional[_builtins.int] = None,
9544
+ hugepage_size2m: Optional[_builtins.int] = None):
9545
+ """
9546
+ :param _builtins.int hugepage_size1g: Amount of 1G hugepages.
9547
+ :param _builtins.int hugepage_size2m: Amount of 2M hugepages.
9548
+ """
9549
+ if hugepage_size1g is not None:
9550
+ pulumi.set(__self__, "hugepage_size1g", hugepage_size1g)
9551
+ if hugepage_size2m is not None:
9552
+ pulumi.set(__self__, "hugepage_size2m", hugepage_size2m)
9553
+
9554
+ @_builtins.property
9555
+ @pulumi.getter(name="hugepageSize1g")
9556
+ def hugepage_size1g(self) -> Optional[_builtins.int]:
9557
+ """
9558
+ Amount of 1G hugepages.
9559
+ """
9560
+ return pulumi.get(self, "hugepage_size1g")
9561
+
9562
+ @_builtins.property
9563
+ @pulumi.getter(name="hugepageSize2m")
9564
+ def hugepage_size2m(self) -> Optional[_builtins.int]:
9565
+ """
9566
+ Amount of 2M hugepages.
9567
+ """
9568
+ return pulumi.get(self, "hugepage_size2m")
9569
+
9570
+
9571
+ @pulumi.output_type
9572
+ class ClusterNodeConfigLocalNvmeSsdBlockConfig(dict):
9573
+ @staticmethod
9574
+ def __key_warning(key: str):
9575
+ suggest = None
9576
+ if key == "localSsdCount":
9577
+ suggest = "local_ssd_count"
9578
+
9579
+ if suggest:
9580
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigLocalNvmeSsdBlockConfig. Access the value via the '{suggest}' property getter instead.")
9581
+
9582
+ def __getitem__(self, key: str) -> Any:
9583
+ ClusterNodeConfigLocalNvmeSsdBlockConfig.__key_warning(key)
8890
9584
  return super().__getitem__(key)
8891
9585
 
8892
9586
  def get(self, key: str, default = None) -> Any:
@@ -9135,6 +9829,8 @@ class ClusterNodeConfigSoleTenantConfig(dict):
9135
9829
  suggest = None
9136
9830
  if key == "nodeAffinities":
9137
9831
  suggest = "node_affinities"
9832
+ elif key == "minNodeCpus":
9833
+ suggest = "min_node_cpus"
9138
9834
 
9139
9835
  if suggest:
9140
9836
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigSoleTenantConfig. Access the value via the '{suggest}' property getter instead.")
@@ -9148,20 +9844,32 @@ class ClusterNodeConfigSoleTenantConfig(dict):
9148
9844
  return super().get(key, default)
9149
9845
 
9150
9846
  def __init__(__self__, *,
9151
- node_affinities: Sequence['outputs.ClusterNodeConfigSoleTenantConfigNodeAffinity']):
9847
+ node_affinities: Sequence['outputs.ClusterNodeConfigSoleTenantConfigNodeAffinity'],
9848
+ min_node_cpus: Optional[_builtins.int] = None):
9152
9849
  """
9153
- :param Sequence['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
9850
+ :param Sequence['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: The node affinity settings for the sole tenant node pool. Structure is documented below.
9851
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
9154
9852
  """
9155
9853
  pulumi.set(__self__, "node_affinities", node_affinities)
9854
+ if min_node_cpus is not None:
9855
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
9156
9856
 
9157
9857
  @_builtins.property
9158
9858
  @pulumi.getter(name="nodeAffinities")
9159
9859
  def node_affinities(self) -> Sequence['outputs.ClusterNodeConfigSoleTenantConfigNodeAffinity']:
9160
9860
  """
9161
- .
9861
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
9162
9862
  """
9163
9863
  return pulumi.get(self, "node_affinities")
9164
9864
 
9865
+ @_builtins.property
9866
+ @pulumi.getter(name="minNodeCpus")
9867
+ def min_node_cpus(self) -> Optional[_builtins.int]:
9868
+ """
9869
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
9870
+ """
9871
+ return pulumi.get(self, "min_node_cpus")
9872
+
9165
9873
 
9166
9874
  @pulumi.output_type
9167
9875
  class ClusterNodeConfigSoleTenantConfigNodeAffinity(dict):
@@ -10230,7 +10938,8 @@ class ClusterNodePoolNetworkConfig(dict):
10230
10938
  network_performance_config: Optional['outputs.ClusterNodePoolNetworkConfigNetworkPerformanceConfig'] = None,
10231
10939
  pod_cidr_overprovision_config: Optional['outputs.ClusterNodePoolNetworkConfigPodCidrOverprovisionConfig'] = None,
10232
10940
  pod_ipv4_cidr_block: Optional[_builtins.str] = None,
10233
- pod_range: Optional[_builtins.str] = None):
10941
+ pod_range: Optional[_builtins.str] = None,
10942
+ subnetwork: Optional[_builtins.str] = None):
10234
10943
  """
10235
10944
  :param Sequence['ClusterNodePoolNetworkConfigAdditionalNodeNetworkConfigArgs'] additional_node_network_configs: The additional node networks for this node pool, specified as a list. Each node network corresponds to an additional interface.
10236
10945
  :param Sequence['ClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs'] additional_pod_network_configs: The additional pod networks for this node pool, specified as a list. Each pod network corresponds to an additional alias IP range for the node.
@@ -10240,6 +10949,8 @@ class ClusterNodePoolNetworkConfig(dict):
10240
10949
  :param 'ClusterNodePoolNetworkConfigPodCidrOverprovisionConfigArgs' pod_cidr_overprovision_config: Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited
10241
10950
  :param _builtins.str pod_ipv4_cidr_block: The IP address range for pod IPs in this node pool. Only applicable if create_pod_range is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
10242
10951
  :param _builtins.str pod_range: The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
10952
+ :param _builtins.str subnetwork: The name or self_link of the Google Compute Engine
10953
+ subnetwork in which the cluster's instances are launched.
10243
10954
  """
10244
10955
  if additional_node_network_configs is not None:
10245
10956
  pulumi.set(__self__, "additional_node_network_configs", additional_node_network_configs)
@@ -10257,6 +10968,8 @@ class ClusterNodePoolNetworkConfig(dict):
10257
10968
  pulumi.set(__self__, "pod_ipv4_cidr_block", pod_ipv4_cidr_block)
10258
10969
  if pod_range is not None:
10259
10970
  pulumi.set(__self__, "pod_range", pod_range)
10971
+ if subnetwork is not None:
10972
+ pulumi.set(__self__, "subnetwork", subnetwork)
10260
10973
 
10261
10974
  @_builtins.property
10262
10975
  @pulumi.getter(name="additionalNodeNetworkConfigs")
@@ -10322,6 +11035,15 @@ class ClusterNodePoolNetworkConfig(dict):
10322
11035
  """
10323
11036
  return pulumi.get(self, "pod_range")
10324
11037
 
11038
+ @_builtins.property
11039
+ @pulumi.getter
11040
+ def subnetwork(self) -> Optional[_builtins.str]:
11041
+ """
11042
+ The name or self_link of the Google Compute Engine
11043
+ subnetwork in which the cluster's instances are launched.
11044
+ """
11045
+ return pulumi.get(self, "subnetwork")
11046
+
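
The node-pool-level `subnetwork` mirrors the cluster-level field, letting a pool land in a different subnetwork of the cluster's VPC. A sketch with placeholder names:

```python
import pulumi_gcp as gcp

pool = gcp.container.NodePool(
    "alt-subnet-pool",
    cluster="my-cluster",       # placeholder cluster name
    location="us-central1-a",
    node_count=1,
    network_config={
        # Name or self_link of an existing subnetwork.
        "subnetwork": "my-alt-subnetwork",
        # Reuse an existing secondary range for pod IPs.
        "pod_range": "alt-pod-range",
    },
)
```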
10325
11047
 
10326
11048
  @pulumi.output_type
10327
11049
  class ClusterNodePoolNetworkConfigAdditionalNodeNetworkConfig(dict):
@@ -10488,6 +11210,8 @@ class ClusterNodePoolNodeConfig(dict):
10488
11210
  suggest = None
10489
11211
  if key == "advancedMachineFeatures":
10490
11212
  suggest = "advanced_machine_features"
11213
+ elif key == "bootDisk":
11214
+ suggest = "boot_disk"
10491
11215
  elif key == "bootDiskKmsKey":
10492
11216
  suggest = "boot_disk_kms_key"
10493
11217
  elif key == "confidentialNodes":
@@ -10576,6 +11300,7 @@ class ClusterNodePoolNodeConfig(dict):
10576
11300
 
10577
11301
  def __init__(__self__, *,
10578
11302
  advanced_machine_features: Optional['outputs.ClusterNodePoolNodeConfigAdvancedMachineFeatures'] = None,
11303
+ boot_disk: Optional['outputs.ClusterNodePoolNodeConfigBootDisk'] = None,
10579
11304
  boot_disk_kms_key: Optional[_builtins.str] = None,
10580
11305
  confidential_nodes: Optional['outputs.ClusterNodePoolNodeConfigConfidentialNodes'] = None,
10581
11306
  containerd_config: Optional['outputs.ClusterNodePoolNodeConfigContainerdConfig'] = None,
@@ -10623,13 +11348,15 @@ class ClusterNodePoolNodeConfig(dict):
10623
11348
  """
10624
11349
  :param 'ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs' advanced_machine_features: Specifies options for controlling
10625
11350
  advanced machine features. Structure is documented below.
11351
+ :param 'ClusterNodePoolNodeConfigBootDiskArgs' boot_disk: Configuration of the node pool boot disk. Structure is documented below.
10626
11352
  :param _builtins.str boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
10627
11353
  :param 'ClusterNodePoolNodeConfigConfidentialNodesArgs' confidential_nodes: Configuration for Confidential Nodes feature. Structure is documented below.
10628
11354
  :param 'ClusterNodePoolNodeConfigContainerdConfigArgs' containerd_config: Parameters to customize containerd runtime. Structure is documented below.
10629
11355
  :param _builtins.int disk_size_gb: Size of the disk attached to each node, specified
10630
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
11356
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
11357
+ Prefer configuring `boot_disk`.
10631
11358
  :param _builtins.str disk_type: Type of the disk attached to each node
10632
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
11359
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
10633
11360
  :param Sequence['ClusterNodePoolNodeConfigEffectiveTaintArgs'] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
10634
11361
  :param _builtins.bool enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
10635
11362
  :param 'ClusterNodePoolNodeConfigEphemeralStorageConfigArgs' ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -10709,7 +11436,7 @@ class ClusterNodePoolNodeConfig(dict):
  :param _builtins.str service_account: The service account to be used by the Node VMs.
  If not specified, the "default" service account is used.
  :param 'ClusterNodePoolNodeConfigShieldedInstanceConfigArgs' shielded_instance_config: Shielded Instance options. Structure is documented below.
- :param 'ClusterNodePoolNodeConfigSoleTenantConfigArgs' sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
+ :param 'ClusterNodePoolNodeConfigSoleTenantConfigArgs' sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
  :param _builtins.bool spot: A boolean that represents whether the underlying node VMs are spot.
  See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms)
  for more information. Defaults to false.
@@ -10730,6 +11457,8 @@ class ClusterNodePoolNodeConfig(dict):
  """
  if advanced_machine_features is not None:
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
+ if boot_disk is not None:
+ pulumi.set(__self__, "boot_disk", boot_disk)
  if boot_disk_kms_key is not None:
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
  if confidential_nodes is not None:
@@ -10828,6 +11557,14 @@ class ClusterNodePoolNodeConfig(dict):
  """
  return pulumi.get(self, "advanced_machine_features")

+ @_builtins.property
+ @pulumi.getter(name="bootDisk")
+ def boot_disk(self) -> Optional['outputs.ClusterNodePoolNodeConfigBootDisk']:
+ """
+ Configuration of the node pool boot disk. Structure is documented below.
+ """
+ return pulumi.get(self, "boot_disk")
+
  @_builtins.property
  @pulumi.getter(name="bootDiskKmsKey")
  def boot_disk_kms_key(self) -> Optional[_builtins.str]:
@@ -10857,7 +11594,8 @@ class ClusterNodePoolNodeConfig(dict):
  def disk_size_gb(self) -> Optional[_builtins.int]:
  """
  Size of the disk attached to each node, specified
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
+ Prefer configuring `boot_disk`.
  """
  return pulumi.get(self, "disk_size_gb")

@@ -10866,7 +11604,7 @@ class ClusterNodePoolNodeConfig(dict):
  def disk_type(self) -> Optional[_builtins.str]:
  """
  Type of the disk attached to each node
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'. This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
  """
  return pulumi.get(self, "disk_type")

@@ -11177,7 +11915,7 @@ class ClusterNodePoolNodeConfig(dict):
  @pulumi.getter(name="soleTenantConfig")
  def sole_tenant_config(self) -> Optional['outputs.ClusterNodePoolNodeConfigSoleTenantConfig']:
  """
- Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
+ Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
  """
  return pulumi.get(self, "sole_tenant_config")

@@ -11304,6 +12042,88 @@ class ClusterNodePoolNodeConfigAdvancedMachineFeatures(dict):
  return pulumi.get(self, "performance_monitoring_unit")


+ @pulumi.output_type
+ class ClusterNodePoolNodeConfigBootDisk(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "diskType":
+ suggest = "disk_type"
+ elif key == "provisionedIops":
+ suggest = "provisioned_iops"
+ elif key == "provisionedThroughput":
+ suggest = "provisioned_throughput"
+ elif key == "sizeGb":
+ suggest = "size_gb"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigBootDisk. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ ClusterNodePoolNodeConfigBootDisk.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ ClusterNodePoolNodeConfigBootDisk.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ disk_type: Optional[_builtins.str] = None,
+ provisioned_iops: Optional[_builtins.int] = None,
+ provisioned_throughput: Optional[_builtins.int] = None,
+ size_gb: Optional[_builtins.int] = None):
+ """
+ :param _builtins.str disk_type: Type of the disk attached to each node
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced'. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ :param _builtins.int provisioned_iops: Configure disk IOPS. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
+ :param _builtins.int provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
+ :param _builtins.int size_gb: Size of the disk attached to each node, specified
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
+ """
+ if disk_type is not None:
+ pulumi.set(__self__, "disk_type", disk_type)
+ if provisioned_iops is not None:
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
+ if provisioned_throughput is not None:
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
+ if size_gb is not None:
+ pulumi.set(__self__, "size_gb", size_gb)
+
+ @_builtins.property
+ @pulumi.getter(name="diskType")
+ def disk_type(self) -> Optional[_builtins.str]:
+ """
+ Type of the disk attached to each node
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced'. This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
+ """
+ return pulumi.get(self, "disk_type")
+
+ @_builtins.property
+ @pulumi.getter(name="provisionedIops")
+ def provisioned_iops(self) -> Optional[_builtins.int]:
+ """
+ Configure disk IOPS. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
+ """
+ return pulumi.get(self, "provisioned_iops")
+
+ @_builtins.property
+ @pulumi.getter(name="provisionedThroughput")
+ def provisioned_throughput(self) -> Optional[_builtins.int]:
+ """
+ Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documentation](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
+ """
+ return pulumi.get(self, "provisioned_throughput")
+
+ @_builtins.property
+ @pulumi.getter(name="sizeGb")
+ def size_gb(self) -> Optional[_builtins.int]:
+ """
+ Size of the disk attached to each node, specified
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
+ """
+ return pulumi.get(self, "size_gb")
+
+
  @pulumi.output_type
  class ClusterNodePoolNodeConfigConfidentialNodes(dict):
  @staticmethod
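As a usage sketch (not a definitive recipe), the new cluster-level `boot_disk` block can be set from a Pulumi Python program roughly like this; the resource name, location, and machine type are placeholders:

```python
import pulumi_gcp as gcp

# Hypothetical cluster; names, location and machine type are placeholders.
cluster = gcp.container.Cluster(
    "example-cluster",
    location="us-central1",
    initial_node_count=1,
    node_config={
        "machine_type": "e2-standard-4",
        # boot_disk supersedes the top-level disk_size_gb/disk_type fields;
        # per the docstrings above, if both are set the values must match.
        "boot_disk": {
            "disk_type": "pd-balanced",
            "size_gb": 100,
        },
    },
)
```

Preferring `boot_disk` over the legacy fields avoids the must-match constraint during the migration window.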
@@ -11937,6 +12757,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  suggest = "cpu_cfs_quota_period"
  elif key == "cpuManagerPolicy":
  suggest = "cpu_manager_policy"
+ elif key == "evictionMaxPodGracePeriodSeconds":
+ suggest = "eviction_max_pod_grace_period_seconds"
+ elif key == "evictionMinimumReclaim":
+ suggest = "eviction_minimum_reclaim"
+ elif key == "evictionSoft":
+ suggest = "eviction_soft"
+ elif key == "evictionSoftGracePeriod":
+ suggest = "eviction_soft_grace_period"
  elif key == "imageGcHighThresholdPercent":
  suggest = "image_gc_high_threshold_percent"
  elif key == "imageGcLowThresholdPercent":
@@ -11947,8 +12775,12 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  suggest = "image_minimum_gc_age"
  elif key == "insecureKubeletReadonlyPortEnabled":
  suggest = "insecure_kubelet_readonly_port_enabled"
+ elif key == "maxParallelImagePulls":
+ suggest = "max_parallel_image_pulls"
  elif key == "podPidsLimit":
  suggest = "pod_pids_limit"
+ elif key == "singleProcessOomKill":
+ suggest = "single_process_oom_kill"

  if suggest:
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfig. Access the value via the '{suggest}' property getter instead.")
@@ -11968,12 +12800,18 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  cpu_cfs_quota: Optional[_builtins.bool] = None,
  cpu_cfs_quota_period: Optional[_builtins.str] = None,
  cpu_manager_policy: Optional[_builtins.str] = None,
+ eviction_max_pod_grace_period_seconds: Optional[_builtins.int] = None,
+ eviction_minimum_reclaim: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim'] = None,
+ eviction_soft: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionSoft'] = None,
+ eviction_soft_grace_period: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod'] = None,
  image_gc_high_threshold_percent: Optional[_builtins.int] = None,
  image_gc_low_threshold_percent: Optional[_builtins.int] = None,
  image_maximum_gc_age: Optional[_builtins.str] = None,
  image_minimum_gc_age: Optional[_builtins.str] = None,
  insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
- pod_pids_limit: Optional[_builtins.int] = None):
+ max_parallel_image_pulls: Optional[_builtins.int] = None,
+ pod_pids_limit: Optional[_builtins.int] = None,
+ single_process_oom_kill: Optional[_builtins.bool] = None):
  """
  :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
  :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -11993,12 +12831,18 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  One of `"none"` or `"static"`. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
  Prior to 6.4.0, this field was marked as required. The workaround for the required field
  is setting the empty string `""`, which will function identically to not setting this field.
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
+ :param 'ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs' eviction_minimum_reclaim: Defines a map of signal names to percentages that define minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
+ :param 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs' eviction_soft: Defines a map of signal names to quantities or percentages that define soft eviction thresholds. Structure is documented below.
+ :param 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs' eviction_soft_grace_period: Defines a map of signal names to durations that define grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive.
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`, and `"2h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration.
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
+ :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
  """
  if allowed_unsafe_sysctls is not None:
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -12012,6 +12856,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
  if cpu_manager_policy is not None:
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
+ if eviction_max_pod_grace_period_seconds is not None:
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
+ if eviction_minimum_reclaim is not None:
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
+ if eviction_soft is not None:
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
+ if eviction_soft_grace_period is not None:
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
  if image_gc_high_threshold_percent is not None:
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
  if image_gc_low_threshold_percent is not None:
@@ -12022,8 +12874,12 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
  if insecure_kubelet_readonly_port_enabled is not None:
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
+ if max_parallel_image_pulls is not None:
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
  if pod_pids_limit is not None:
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
+ if single_process_oom_kill is not None:
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)

  @_builtins.property
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -12086,20 +12942,52 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  return pulumi.get(self, "cpu_manager_policy")

  @_builtins.property
- @pulumi.getter(name="imageGcHighThresholdPercent")
- def image_gc_high_threshold_percent(self) -> Optional[_builtins.int]:
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[_builtins.int]:
  """
- Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
  """
- return pulumi.get(self, "image_gc_high_threshold_percent")
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")

  @_builtins.property
- @pulumi.getter(name="imageGcLowThresholdPercent")
- def image_gc_low_threshold_percent(self) -> Optional[_builtins.int]:
+ @pulumi.getter(name="evictionMinimumReclaim")
+ def eviction_minimum_reclaim(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim']:
  """
- Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive.
+ Defines a map of signal names to percentages that define minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
  """
- return pulumi.get(self, "image_gc_low_threshold_percent")
+ return pulumi.get(self, "eviction_minimum_reclaim")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionSoft")
+ def eviction_soft(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionSoft']:
+ """
+ Defines a map of signal names to quantities or percentages that define soft eviction thresholds. Structure is documented below.
+ """
+ return pulumi.get(self, "eviction_soft")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionSoftGracePeriod")
+ def eviction_soft_grace_period(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod']:
+ """
+ Defines a map of signal names to durations that define grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
+ """
+ return pulumi.get(self, "eviction_soft_grace_period")
+
+ @_builtins.property
+ @pulumi.getter(name="imageGcHighThresholdPercent")
+ def image_gc_high_threshold_percent(self) -> Optional[_builtins.int]:
+ """
+ Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
+ """
+ return pulumi.get(self, "image_gc_high_threshold_percent")
+
+ @_builtins.property
+ @pulumi.getter(name="imageGcLowThresholdPercent")
+ def image_gc_low_threshold_percent(self) -> Optional[_builtins.int]:
+ """
+ Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive.
+ """
+ return pulumi.get(self, "image_gc_low_threshold_percent")

  @_builtins.property
  @pulumi.getter(name="imageMaximumGcAge")
@@ -12125,6 +13013,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")

+ @_builtins.property
+ @pulumi.getter(name="maxParallelImagePulls")
+ def max_parallel_image_pulls(self) -> Optional[_builtins.int]:
+ """
+ Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
+ """
+ return pulumi.get(self, "max_parallel_image_pulls")
+
  @_builtins.property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> Optional[_builtins.int]:
@@ -12133,6 +13029,332 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "pod_pids_limit")

+ @_builtins.property
+ @pulumi.getter(name="singleProcessOomKill")
+ def single_process_oom_kill(self) -> Optional[_builtins.bool]:
+ """
+ Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
+ """
+ return pulumi.get(self, "single_process_oom_kill")
+
+
+ @pulumi.output_type
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "imagefsAvailable":
+ suggest = "imagefs_available"
+ elif key == "imagefsInodesFree":
+ suggest = "imagefs_inodes_free"
+ elif key == "memoryAvailable":
+ suggest = "memory_available"
+ elif key == "nodefsAvailable":
+ suggest = "nodefs_available"
+ elif key == "nodefsInodesFree":
+ suggest = "nodefs_inodes_free"
+ elif key == "pidAvailable":
+ suggest = "pid_available"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ imagefs_available: Optional[_builtins.str] = None,
+ imagefs_inodes_free: Optional[_builtins.str] = None,
+ memory_available: Optional[_builtins.str] = None,
+ nodefs_available: Optional[_builtins.str] = None,
+ nodefs_inodes_free: Optional[_builtins.str] = None,
+ pid_available: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ if imagefs_available is not None:
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ if imagefs_inodes_free is not None:
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ if memory_available is not None:
+ pulumi.set(__self__, "memory_available", memory_available)
+ if nodefs_available is not None:
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ if nodefs_inodes_free is not None:
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ if pid_available is not None:
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
+ """
+ return pulumi.get(self, "pid_available")
+
+
+ @pulumi.output_type
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoft(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "imagefsAvailable":
+ suggest = "imagefs_available"
+ elif key == "imagefsInodesFree":
+ suggest = "imagefs_inodes_free"
+ elif key == "memoryAvailable":
+ suggest = "memory_available"
+ elif key == "nodefsAvailable":
+ suggest = "nodefs_available"
+ elif key == "nodefsInodesFree":
+ suggest = "nodefs_inodes_free"
+ elif key == "pidAvailable":
+ suggest = "pid_available"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfigEvictionSoft. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ ClusterNodePoolNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ ClusterNodePoolNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ imagefs_available: Optional[_builtins.str] = None,
+ imagefs_inodes_free: Optional[_builtins.str] = None,
+ memory_available: Optional[_builtins.str] = None,
+ nodefs_available: Optional[_builtins.str] = None,
+ nodefs_inodes_free: Optional[_builtins.str] = None,
+ pid_available: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
+ """
+ if imagefs_available is not None:
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ if imagefs_inodes_free is not None:
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ if memory_available is not None:
+ pulumi.set(__self__, "memory_available", memory_available)
+ if nodefs_available is not None:
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ if nodefs_inodes_free is not None:
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ if pid_available is not None:
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> Optional[_builtins.str]:
+ """
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
+ """
+ return pulumi.get(self, "pid_available")
+
+
+ @pulumi.output_type
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "imagefsAvailable":
+ suggest = "imagefs_available"
+ elif key == "imagefsInodesFree":
+ suggest = "imagefs_inodes_free"
+ elif key == "memoryAvailable":
+ suggest = "memory_available"
+ elif key == "nodefsAvailable":
+ suggest = "nodefs_available"
+ elif key == "nodefsInodesFree":
+ suggest = "nodefs_inodes_free"
+ elif key == "pidAvailable":
+ suggest = "pid_available"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ imagefs_available: Optional[_builtins.str] = None,
+ imagefs_inodes_free: Optional[_builtins.str] = None,
+ memory_available: Optional[_builtins.str] = None,
+ nodefs_available: Optional[_builtins.str] = None,
+ nodefs_inodes_free: Optional[_builtins.str] = None,
+ pid_available: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+ """
+ if imagefs_available is not None:
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ if imagefs_inodes_free is not None:
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ if memory_available is not None:
+ pulumi.set(__self__, "memory_available", memory_available)
+ if nodefs_available is not None:
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ if nodefs_inodes_free is not None:
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ if pid_available is not None:
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> Optional[_builtins.str]:
+ """
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> Optional[_builtins.str]:
+ """
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
+ """
+ return pulumi.get(self, "pid_available")
+

  @pulumi.output_type
  class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
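The new kubelet eviction knobs work in concert: each `eviction_soft` signal needs a matching `eviction_soft_grace_period` entry, and `eviction_minimum_reclaim` bounds how much is reclaimed past a threshold per eviction. A hedged sketch of a node pool using them (cluster name, location, and all threshold values are illustrative placeholders):

```python
import pulumi_gcp as gcp

# Hypothetical node pool on an existing cluster; all names are placeholders.
pool = gcp.container.NodePool(
    "eviction-tuned-pool",
    cluster="example-cluster",
    location="us-central1",
    node_count=1,
    node_config={
        "kubelet_config": {
            # Each eviction_soft signal needs a matching grace period entry.
            "eviction_soft": {
                "memory_available": "200Mi",
                "nodefs_available": "20%",
            },
            "eviction_soft_grace_period": {
                "memory_available": "30s",
                "nodefs_available": "1m30s",
            },
            # Reclaim at least this much past the threshold per eviction
            # (must be a percentage no more than "10%").
            "eviction_minimum_reclaim": {
                "memory_available": "5%",
            },
            # Cap on the pod termination grace period during soft evictions
            # (positive, at most 300 seconds).
            "eviction_max_pod_grace_period_seconds": 60,
            # Between 2 and 5, inclusive, per the docstring above.
            "max_parallel_image_pulls": 3,
            # OOM-kill container processes individually rather than as a group.
            "single_process_oom_kill": True,
        },
    },
)
```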
@@ -12143,6 +13365,10 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
  suggest = "cgroup_mode"
  elif key == "hugepagesConfig":
  suggest = "hugepages_config"
+ elif key == "transparentHugepageDefrag":
+ suggest = "transparent_hugepage_defrag"
+ elif key == "transparentHugepageEnabled":
+ suggest = "transparent_hugepage_enabled"

  if suggest:
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigLinuxNodeConfig. Access the value via the '{suggest}' property getter instead.")
@@ -12158,7 +13384,9 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
  def __init__(__self__, *,
  cgroup_mode: Optional[_builtins.str] = None,
  hugepages_config: Optional['outputs.ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig'] = None,
- sysctls: Optional[Mapping[str, _builtins.str]] = None):
+ sysctls: Optional[Mapping[str, _builtins.str]] = None,
+ transparent_hugepage_defrag: Optional[_builtins.str] = None,
+ transparent_hugepage_enabled: Optional[_builtins.str] = None):
  """
  :param _builtins.str cgroup_mode: Possible cgroup modes that can be used.
  Accepted values are:
@@ -12170,6 +13398,8 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
  and all pods running on the nodes. Specified as a map from the key, such as
  `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
  Note that validations all happen server-side. All attributes are optional.
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
  """
  if cgroup_mode is not None:
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
@@ -12177,6 +13407,10 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
  pulumi.set(__self__, "hugepages_config", hugepages_config)
  if sysctls is not None:
  pulumi.set(__self__, "sysctls", sysctls)
+ if transparent_hugepage_defrag is not None:
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
+ if transparent_hugepage_enabled is not None:
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)

  @_builtins.property
  @pulumi.getter(name="cgroupMode")
@@ -12209,6 +13443,22 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
  """
  return pulumi.get(self, "sysctls")

+ @_builtins.property
+ @pulumi.getter(name="transparentHugepageDefrag")
+ def transparent_hugepage_defrag(self) -> Optional[_builtins.str]:
+ """
+ The Linux kernel transparent hugepage defrag setting.
+ """
+ return pulumi.get(self, "transparent_hugepage_defrag")
+
+ @_builtins.property
+ @pulumi.getter(name="transparentHugepageEnabled")
+ def transparent_hugepage_enabled(self) -> Optional[_builtins.str]:
+ """
+ The Linux kernel transparent hugepage setting.
+ """
+ return pulumi.get(self, "transparent_hugepage_enabled")
+

  @pulumi.output_type
  class ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig(dict):
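The transparent hugepage fields are plain strings whose accepted values the docstrings here do not enumerate; the value strings in this sketch are assumptions modeled on the GKE API enum naming, so verify them against the provider docs before use:

```python
import pulumi_gcp as gcp

# Placeholder node pool; the transparent hugepage values below are assumed
# enum strings (not enumerated anywhere in this diff).
pool = gcp.container.NodePool(
    "thp-tuned-pool",
    cluster="example-cluster",
    location="us-central1",
    node_count=1,
    node_config={
        "linux_node_config": {
            # Assumed values mirroring the kernel's always/madvise/never and
            # defrag settings, in the API's SCREAMING_SNAKE_CASE style.
            "transparent_hugepage_enabled": "TRANSPARENT_HUGEPAGE_ENABLED_ALWAYS",
            "transparent_hugepage_defrag": "TRANSPARENT_HUGEPAGE_DEFRAG_DEFER",
        },
    },
)
```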
@@ -12521,6 +13771,8 @@ class ClusterNodePoolNodeConfigSoleTenantConfig(dict):
  suggest = None
  if key == "nodeAffinities":
  suggest = "node_affinities"
+ elif key == "minNodeCpus":
+ suggest = "min_node_cpus"

  if suggest:
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigSoleTenantConfig. Access the value via the '{suggest}' property getter instead.")
@@ -12534,20 +13786,32 @@ class ClusterNodePoolNodeConfigSoleTenantConfig(dict):
  return super().get(key, default)

  def __init__(__self__, *,
- node_affinities: Sequence['outputs.ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity']):
+ node_affinities: Sequence['outputs.ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity'],
+ min_node_cpus: Optional[_builtins.int] = None):
  """
- :param Sequence['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
+ :param Sequence['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: The node affinity settings for the sole tenant node pool. Structure is documented below.
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
  """
  pulumi.set(__self__, "node_affinities", node_affinities)
+ if min_node_cpus is not None:
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)

  @_builtins.property
  @pulumi.getter(name="nodeAffinities")
  def node_affinities(self) -> Sequence['outputs.ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity']:
  """
- .
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
  """
  return pulumi.get(self, "node_affinities")

+ @_builtins.property
+ @pulumi.getter(name="minNodeCpus")
+ def min_node_cpus(self) -> Optional[_builtins.int]:
+ """
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
+ """
+ return pulumi.get(self, "min_node_cpus")
+

  @pulumi.output_type
  class ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity(dict):
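For orientation, a minimal sketch of a sole-tenant node pool using the new `min_node_cpus` field; the node group name, cluster, and location are placeholders:

```python
import pulumi_gcp as gcp

# Placeholder sole-tenant node pool; the node group name is illustrative.
pool = gcp.container.NodePool(
    "sole-tenant-pool",
    cluster="example-cluster",
    location="us-central1",
    node_count=1,
    node_config={
        "machine_type": "n1-standard-8",
        "sole_tenant_config": {
            "node_affinities": [{
                "key": "compute.googleapis.com/node-group-name",
                "operator": "IN",
                "values": ["example-node-group"],
            }],
            # Enables CPU overcommit; should be at least half the machine
            # type's vCPU count (here: 8 vCPUs, so 4 is the minimum).
            "min_node_cpus": 4,
        },
    },
)
```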
@@ -13377,6 +14641,56 @@ class ClusterProtectConfigWorkloadConfig(dict):
  return pulumi.get(self, "audit_mode")


+ @pulumi.output_type
+ class ClusterRbacBindingConfig(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "enableInsecureBindingSystemAuthenticated":
+ suggest = "enable_insecure_binding_system_authenticated"
+ elif key == "enableInsecureBindingSystemUnauthenticated":
+ suggest = "enable_insecure_binding_system_unauthenticated"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in ClusterRbacBindingConfig. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ ClusterRbacBindingConfig.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ ClusterRbacBindingConfig.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ enable_insecure_binding_system_authenticated: Optional[_builtins.bool] = None,
+ enable_insecure_binding_system_unauthenticated: Optional[_builtins.bool] = None):
+ """
+ :param _builtins.bool enable_insecure_binding_system_authenticated: Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:authenticated.
+ :param _builtins.bool enable_insecure_binding_system_unauthenticated: Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:anonymous or system:unauthenticated.
+ """
+ if enable_insecure_binding_system_authenticated is not None:
+ pulumi.set(__self__, "enable_insecure_binding_system_authenticated", enable_insecure_binding_system_authenticated)
+ if enable_insecure_binding_system_unauthenticated is not None:
+ pulumi.set(__self__, "enable_insecure_binding_system_unauthenticated", enable_insecure_binding_system_unauthenticated)
+
+ @_builtins.property
+ @pulumi.getter(name="enableInsecureBindingSystemAuthenticated")
+ def enable_insecure_binding_system_authenticated(self) -> Optional[_builtins.bool]:
+ """
+ Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:authenticated.
+ """
+ return pulumi.get(self, "enable_insecure_binding_system_authenticated")
+
+ @_builtins.property
+ @pulumi.getter(name="enableInsecureBindingSystemUnauthenticated")
+ def enable_insecure_binding_system_unauthenticated(self) -> Optional[_builtins.bool]:
+ """
+ Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:anonymous or system:unauthenticated.
+ """
+ return pulumi.get(self, "enable_insecure_binding_system_unauthenticated")
+
+
  @pulumi.output_type
  class ClusterReleaseChannel(dict):
  def __init__(__self__, *,
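The `ClusterRbacBindingConfig` output type implies a cluster-level `rbac_binding_config` argument; that argument name is inferred from the type name here, so treat it as an assumption. A sketch that explicitly disallows the insecure bindings:

```python
import pulumi_gcp as gcp

# Assumed cluster argument name derived from ClusterRbacBindingConfig;
# setting both flags to False rejects RBAC bindings that grant access to
# system:authenticated, system:anonymous, or system:unauthenticated.
cluster = gcp.container.Cluster(
    "locked-down-cluster",
    location="us-central1",
    initial_node_count=1,
    rbac_binding_config={
        "enable_insecure_binding_system_authenticated": False,
        "enable_insecure_binding_system_unauthenticated": False,
    },
)
```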
@@ -14166,7 +15480,8 @@ class NodePoolNetworkConfig(dict):
  network_performance_config: Optional['outputs.NodePoolNetworkConfigNetworkPerformanceConfig'] = None,
  pod_cidr_overprovision_config: Optional['outputs.NodePoolNetworkConfigPodCidrOverprovisionConfig'] = None,
  pod_ipv4_cidr_block: Optional[_builtins.str] = None,
- pod_range: Optional[_builtins.str] = None):
+ pod_range: Optional[_builtins.str] = None,
+ subnetwork: Optional[_builtins.str] = None):
  """
  :param Sequence['NodePoolNetworkConfigAdditionalNodeNetworkConfigArgs'] additional_node_network_configs: We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface.
  Structure is documented below.
@@ -14178,6 +15493,7 @@ class NodePoolNetworkConfig(dict):
  :param 'NodePoolNetworkConfigPodCidrOverprovisionConfigArgs' pod_cidr_overprovision_config: Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is documented below.
  :param _builtins.str pod_ipv4_cidr_block: The IP address range for pod IPs in this node pool. Only applicable if createPodRange is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
  :param _builtins.str pod_range: The ID of the secondary range for pod IPs. If `create_pod_range` is true, this ID is used for the new range. If `create_pod_range` is false, uses an existing secondary range with this ID.
+ :param _builtins.str subnetwork: The subnetwork path for the node pool. Format: `projects/{project}/regions/{region}/subnetworks/{subnetwork}`. If the cluster is associated with multiple subnetworks, the subnetwork for the node pool is picked based on the IP utilization during node pool creation and is immutable.
  """
  if additional_node_network_configs is not None:
  pulumi.set(__self__, "additional_node_network_configs", additional_node_network_configs)
@@ -14195,6 +15511,8 @@ class NodePoolNetworkConfig(dict):
  pulumi.set(__self__, "pod_ipv4_cidr_block", pod_ipv4_cidr_block)
  if pod_range is not None:
  pulumi.set(__self__, "pod_range", pod_range)
+ if subnetwork is not None:
+ pulumi.set(__self__, "subnetwork", subnetwork)

  @_builtins.property
  @pulumi.getter(name="additionalNodeNetworkConfigs")
@@ -14262,6 +15580,14 @@ class NodePoolNetworkConfig(dict):
  """
  return pulumi.get(self, "pod_range")

+ @_builtins.property
+ @pulumi.getter
+ def subnetwork(self) -> Optional[_builtins.str]:
+ """
+ The subnetwork path for the node pool. Format: `projects/{project}/regions/{region}/subnetworks/{subnetwork}`. If the cluster is associated with multiple subnetworks, the subnetwork for the node pool is picked based on the IP utilization during node pool creation and is immutable.
+ """
+ return pulumi.get(self, "subnetwork")
+

  @pulumi.output_type
  class NodePoolNetworkConfigAdditionalNodeNetworkConfig(dict):
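A sketch of pinning a node pool to one subnetwork of a multi-subnetwork cluster; the project, region, and subnetwork names are placeholders, and note the field is immutable once the pool is created:

```python
import pulumi_gcp as gcp

# Placeholder node pool pinned to one subnetwork of a multi-subnet cluster.
pool = gcp.container.NodePool(
    "subnet-pinned-pool",
    cluster="example-cluster",
    location="us-central1",
    node_count=1,
    network_config={
        # Full resource path per the docstring; immutable after creation.
        "subnetwork": "projects/my-project/regions/us-central1/subnetworks/my-subnet",
    },
)
```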
@@ -14416,6 +15742,8 @@ class NodePoolNodeConfig(dict):
  suggest = None
  if key == "advancedMachineFeatures":
  suggest = "advanced_machine_features"
+ elif key == "bootDisk":
+ suggest = "boot_disk"
  elif key == "bootDiskKmsKey":
  suggest = "boot_disk_kms_key"
  elif key == "confidentialNodes":
@@ -14504,6 +15832,7 @@ class NodePoolNodeConfig(dict):

  def __init__(__self__, *,
  advanced_machine_features: Optional['outputs.NodePoolNodeConfigAdvancedMachineFeatures'] = None,
+ boot_disk: Optional['outputs.NodePoolNodeConfigBootDisk'] = None,
  boot_disk_kms_key: Optional[_builtins.str] = None,
  confidential_nodes: Optional['outputs.NodePoolNodeConfigConfidentialNodes'] = None,
  containerd_config: Optional['outputs.NodePoolNodeConfigContainerdConfig'] = None,
@@ -14550,6 +15879,7 @@ class NodePoolNodeConfig(dict):
  workload_metadata_config: Optional['outputs.NodePoolNodeConfigWorkloadMetadataConfig'] = None):
  """
  :param 'NodePoolNodeConfigAdvancedMachineFeaturesArgs' advanced_machine_features: Specifies options for controlling advanced machine features.
+ :param 'NodePoolNodeConfigBootDiskArgs' boot_disk: Boot disk configuration for node pool nodes.
  :param _builtins.str boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
  :param 'NodePoolNodeConfigConfidentialNodesArgs' confidential_nodes: Configuration for the confidential nodes feature, which makes nodes run on confidential VMs.
  :param 'NodePoolNodeConfigContainerdConfigArgs' containerd_config: Parameters for containerd configuration.
@@ -14597,6 +15927,8 @@ class NodePoolNodeConfig(dict):
  """
  if advanced_machine_features is not None:
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
+ if boot_disk is not None:
+ pulumi.set(__self__, "boot_disk", boot_disk)
  if boot_disk_kms_key is not None:
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
  if confidential_nodes is not None:
@@ -14694,6 +16026,14 @@ class NodePoolNodeConfig(dict):
  """
  return pulumi.get(self, "advanced_machine_features")

+ @_builtins.property
+ @pulumi.getter(name="bootDisk")
+ def boot_disk(self) -> Optional['outputs.NodePoolNodeConfigBootDisk']:
+ """
+ Boot disk configuration for node pool nodes.
+ """
+ return pulumi.get(self, "boot_disk")
+
  @_builtins.property
  @pulumi.getter(name="bootDiskKmsKey")
  def boot_disk_kms_key(self) -> Optional[_builtins.str]:
@@ -15110,6 +16450,84 @@ class NodePoolNodeConfigAdvancedMachineFeatures(dict):
15110
16450
  return pulumi.get(self, "performance_monitoring_unit")
15111
16451
 
15112
16452
 
16453
+ @pulumi.output_type
16454
+ class NodePoolNodeConfigBootDisk(dict):
16455
+ @staticmethod
16456
+ def __key_warning(key: str):
16457
+ suggest = None
16458
+ if key == "diskType":
16459
+ suggest = "disk_type"
16460
+ elif key == "provisionedIops":
16461
+ suggest = "provisioned_iops"
16462
+ elif key == "provisionedThroughput":
16463
+ suggest = "provisioned_throughput"
16464
+ elif key == "sizeGb":
16465
+ suggest = "size_gb"
16466
+
16467
+ if suggest:
16468
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigBootDisk. Access the value via the '{suggest}' property getter instead.")
16469
+
16470
+ def __getitem__(self, key: str) -> Any:
16471
+ NodePoolNodeConfigBootDisk.__key_warning(key)
16472
+ return super().__getitem__(key)
16473
+
16474
+ def get(self, key: str, default = None) -> Any:
16475
+ NodePoolNodeConfigBootDisk.__key_warning(key)
16476
+ return super().get(key, default)
16477
+
16478
+ def __init__(__self__, *,
16479
+ disk_type: Optional[_builtins.str] = None,
16480
+ provisioned_iops: Optional[_builtins.int] = None,
16481
+ provisioned_throughput: Optional[_builtins.int] = None,
16482
+ size_gb: Optional[_builtins.int] = None):
16483
+ """
16484
+ :param _builtins.str disk_type: Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
16485
+ :param _builtins.int provisioned_iops: Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
16486
+ :param _builtins.int provisioned_throughput: Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
16487
+ :param _builtins.int size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
16488
+ """
16489
+ if disk_type is not None:
16490
+ pulumi.set(__self__, "disk_type", disk_type)
16491
+ if provisioned_iops is not None:
16492
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
16493
+ if provisioned_throughput is not None:
16494
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
16495
+ if size_gb is not None:
16496
+ pulumi.set(__self__, "size_gb", size_gb)
16497
+
16498
+ @_builtins.property
16499
+ @pulumi.getter(name="diskType")
16500
+ def disk_type(self) -> Optional[_builtins.str]:
16501
+ """
16502
+ Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
16503
+ """
16504
+ return pulumi.get(self, "disk_type")
16505
+
16506
+ @_builtins.property
16507
+ @pulumi.getter(name="provisionedIops")
16508
+ def provisioned_iops(self) -> Optional[_builtins.int]:
16509
+ """
16510
+ Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
16511
+ """
16512
+ return pulumi.get(self, "provisioned_iops")
16513
+
16514
+ @_builtins.property
16515
+ @pulumi.getter(name="provisionedThroughput")
16516
+ def provisioned_throughput(self) -> Optional[_builtins.int]:
16517
+ """
16518
+ Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
16519
+ """
16520
+ return pulumi.get(self, "provisioned_throughput")
16521
+
16522
+ @_builtins.property
16523
+ @pulumi.getter(name="sizeGb")
16524
+ def size_gb(self) -> Optional[_builtins.int]:
16525
+ """
16526
+ Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
16527
+ """
16528
+ return pulumi.get(self, "size_gb")
16529
+
16530
+
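A minimal create-side sketch of the new boot_disk block, assuming the NodePoolNodeConfigBootDiskArgs input type mirrors the output type above; the cluster name, location, and machine type are placeholders, not taken from this diff:

    import pulumi_gcp as gcp

    # Sketch only: boots each node from a hyperdisk-balanced disk.
    # provisioned_iops and provisioned_throughput are only valid with
    # disk_type "hyperdisk-balanced", per the docstrings above.
    pool = gcp.container.NodePool(
        "hyperdisk-pool",
        cluster="my-cluster",            # assumed existing cluster
        location="us-central1",          # assumed location
        node_count=1,
        node_config={
            "machine_type": "n4-standard-4",  # assumed hyperdisk-capable type
            "boot_disk": {
                "disk_type": "hyperdisk-balanced",
                "size_gb": 100,               # must be at least 10
                "provisioned_iops": 3000,
                "provisioned_throughput": 140,
            },
        },
    )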
  @pulumi.output_type
  class NodePoolNodeConfigConfidentialNodes(dict):
  @staticmethod
@@ -15723,6 +17141,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
  suggest = "cpu_cfs_quota_period"
  elif key == "cpuManagerPolicy":
  suggest = "cpu_manager_policy"
+ elif key == "evictionMaxPodGracePeriodSeconds":
+ suggest = "eviction_max_pod_grace_period_seconds"
+ elif key == "evictionMinimumReclaim":
+ suggest = "eviction_minimum_reclaim"
+ elif key == "evictionSoft":
+ suggest = "eviction_soft"
+ elif key == "evictionSoftGracePeriod":
+ suggest = "eviction_soft_grace_period"
  elif key == "imageGcHighThresholdPercent":
  suggest = "image_gc_high_threshold_percent"
  elif key == "imageGcLowThresholdPercent":
@@ -15733,8 +17159,12 @@ class NodePoolNodeConfigKubeletConfig(dict):
  suggest = "image_minimum_gc_age"
  elif key == "insecureKubeletReadonlyPortEnabled":
  suggest = "insecure_kubelet_readonly_port_enabled"
+ elif key == "maxParallelImagePulls":
+ suggest = "max_parallel_image_pulls"
  elif key == "podPidsLimit":
  suggest = "pod_pids_limit"
+ elif key == "singleProcessOomKill":
+ suggest = "single_process_oom_kill"

  if suggest:
  pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfig. Access the value via the '{suggest}' property getter instead.")
@@ -15754,12 +17184,18 @@ class NodePoolNodeConfigKubeletConfig(dict):
  cpu_cfs_quota: Optional[_builtins.bool] = None,
  cpu_cfs_quota_period: Optional[_builtins.str] = None,
  cpu_manager_policy: Optional[_builtins.str] = None,
+ eviction_max_pod_grace_period_seconds: Optional[_builtins.int] = None,
+ eviction_minimum_reclaim: Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim'] = None,
+ eviction_soft: Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionSoft'] = None,
+ eviction_soft_grace_period: Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod'] = None,
  image_gc_high_threshold_percent: Optional[_builtins.int] = None,
  image_gc_low_threshold_percent: Optional[_builtins.int] = None,
  image_maximum_gc_age: Optional[_builtins.str] = None,
  image_minimum_gc_age: Optional[_builtins.str] = None,
  insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
- pod_pids_limit: Optional[_builtins.int] = None):
+ max_parallel_image_pulls: Optional[_builtins.int] = None,
+ pod_pids_limit: Optional[_builtins.int] = None,
+ single_process_oom_kill: Optional[_builtins.bool] = None):
  """
  :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
  :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -15767,12 +17203,18 @@ class NodePoolNodeConfigKubeletConfig(dict):
  :param _builtins.bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
  :param _builtins.str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
  :param _builtins.str cpu_manager_policy: Control the CPU management policy on the node.
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ :param 'NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs' eviction_minimum_reclaim: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
+ :param 'NodePoolNodeConfigKubeletConfigEvictionSoftArgs' eviction_soft: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
+ :param 'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs' eviction_soft_grace_period: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run.
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected.
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
+ :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer.
  """
  if allowed_unsafe_sysctls is not None:
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -15786,6 +17228,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
  if cpu_manager_policy is not None:
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
+ if eviction_max_pod_grace_period_seconds is not None:
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
+ if eviction_minimum_reclaim is not None:
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
+ if eviction_soft is not None:
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
+ if eviction_soft_grace_period is not None:
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
  if image_gc_high_threshold_percent is not None:
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
  if image_gc_low_threshold_percent is not None:
@@ -15796,8 +17246,12 @@ class NodePoolNodeConfigKubeletConfig(dict):
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
  if insecure_kubelet_readonly_port_enabled is not None:
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
+ if max_parallel_image_pulls is not None:
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
  if pod_pids_limit is not None:
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
+ if single_process_oom_kill is not None:
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)

  @_builtins.property
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -15847,6 +17301,38 @@ class NodePoolNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "cpu_manager_policy")

+ @_builtins.property
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[_builtins.int]:
+ """
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ """
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionMinimumReclaim")
+ def eviction_minimum_reclaim(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim']:
+ """
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
+ """
+ return pulumi.get(self, "eviction_minimum_reclaim")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionSoft")
+ def eviction_soft(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionSoft']:
+ """
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
+ """
+ return pulumi.get(self, "eviction_soft")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionSoftGracePeriod")
+ def eviction_soft_grace_period(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod']:
+ """
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
+ """
+ return pulumi.get(self, "eviction_soft_grace_period")
+
  @_builtins.property
  @pulumi.getter(name="imageGcHighThresholdPercent")
  def image_gc_high_threshold_percent(self) -> Optional[_builtins.int]:
@@ -15887,6 +17373,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")

+ @_builtins.property
+ @pulumi.getter(name="maxParallelImagePulls")
+ def max_parallel_image_pulls(self) -> Optional[_builtins.int]:
+ """
+ Set the maximum number of image pulls in parallel.
+ """
+ return pulumi.get(self, "max_parallel_image_pulls")
+
  @_builtins.property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> Optional[_builtins.int]:
@@ -15895,6 +17389,332 @@ class NodePoolNodeConfigKubeletConfig(dict):
  """
  return pulumi.get(self, "pod_pids_limit")

+ @_builtins.property
+ @pulumi.getter(name="singleProcessOomKill")
+ def single_process_oom_kill(self) -> Optional[_builtins.bool]:
+ """
+ Defines whether to enable single process OOM killer.
+ """
+ return pulumi.get(self, "single_process_oom_kill")
+
+
+ @pulumi.output_type
+ class NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "imagefsAvailable":
+ suggest = "imagefs_available"
+ elif key == "imagefsInodesFree":
+ suggest = "imagefs_inodes_free"
+ elif key == "memoryAvailable":
+ suggest = "memory_available"
+ elif key == "nodefsAvailable":
+ suggest = "nodefs_available"
+ elif key == "nodefsInodesFree":
+ suggest = "nodefs_inodes_free"
+ elif key == "pidAvailable":
+ suggest = "pid_available"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ imagefs_available: Optional[_builtins.str] = None,
+ imagefs_inodes_free: Optional[_builtins.str] = None,
+ memory_available: Optional[_builtins.str] = None,
+ nodefs_available: Optional[_builtins.str] = None,
+ nodefs_inodes_free: Optional[_builtins.str] = None,
+ pid_available: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available.
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree.
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available.
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available.
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree.
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available.
+ """
+ if imagefs_available is not None:
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ if imagefs_inodes_free is not None:
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ if memory_available is not None:
+ pulumi.set(__self__, "memory_available", memory_available)
+ if nodefs_available is not None:
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ if nodefs_inodes_free is not None:
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ if pid_available is not None:
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for imagefs.available.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for imagefs.inodesFree.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for memory.available.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for nodefs.available.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for nodefs.inodesFree.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of minimum reclaim for pid.available.
+ """
+ return pulumi.get(self, "pid_available")
+
+
+ @pulumi.output_type
+ class NodePoolNodeConfigKubeletConfigEvictionSoft(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "imagefsAvailable":
+ suggest = "imagefs_available"
+ elif key == "imagefsInodesFree":
+ suggest = "imagefs_inodes_free"
+ elif key == "memoryAvailable":
+ suggest = "memory_available"
+ elif key == "nodefsAvailable":
+ suggest = "nodefs_available"
+ elif key == "nodefsInodesFree":
+ suggest = "nodefs_inodes_free"
+ elif key == "pidAvailable":
+ suggest = "pid_available"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfigEvictionSoft. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ NodePoolNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ NodePoolNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ imagefs_available: Optional[_builtins.str] = None,
+ imagefs_inodes_free: Optional[_builtins.str] = None,
+ memory_available: Optional[_builtins.str] = None,
+ nodefs_available: Optional[_builtins.str] = None,
+ nodefs_inodes_free: Optional[_builtins.str] = None,
+ pid_available: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available.
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree.
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available.
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available.
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree.
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available.
+ """
+ if imagefs_available is not None:
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ if imagefs_inodes_free is not None:
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ if memory_available is not None:
+ pulumi.set(__self__, "memory_available", memory_available)
+ if nodefs_available is not None:
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ if nodefs_inodes_free is not None:
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ if pid_available is not None:
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for imagefs.available.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for imagefs.inodesFree.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> Optional[_builtins.str]:
+ """
+ Defines quantity of soft eviction threshold for memory.available.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for nodefs.available.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for nodefs.inodesFree.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> Optional[_builtins.str]:
+ """
+ Defines percentage of soft eviction threshold for pid.available.
+ """
+ return pulumi.get(self, "pid_available")
+
+
+ @pulumi.output_type
+ class NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
+ @staticmethod
+ def __key_warning(key: str):
+ suggest = None
+ if key == "imagefsAvailable":
+ suggest = "imagefs_available"
+ elif key == "imagefsInodesFree":
+ suggest = "imagefs_inodes_free"
+ elif key == "memoryAvailable":
+ suggest = "memory_available"
+ elif key == "nodefsAvailable":
+ suggest = "nodefs_available"
+ elif key == "nodefsInodesFree":
+ suggest = "nodefs_inodes_free"
+ elif key == "pidAvailable":
+ suggest = "pid_available"
+
+ if suggest:
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod. Access the value via the '{suggest}' property getter instead.")
+
+ def __getitem__(self, key: str) -> Any:
+ NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
+ return super().__getitem__(key)
+
+ def get(self, key: str, default = None) -> Any:
+ NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
+ return super().get(key, default)
+
+ def __init__(__self__, *,
+ imagefs_available: Optional[_builtins.str] = None,
+ imagefs_inodes_free: Optional[_builtins.str] = None,
+ memory_available: Optional[_builtins.str] = None,
+ nodefs_available: Optional[_builtins.str] = None,
+ nodefs_inodes_free: Optional[_builtins.str] = None,
+ pid_available: Optional[_builtins.str] = None):
+ """
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold.
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold.
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold.
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold.
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold.
+ """
+ if imagefs_available is not None:
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ if imagefs_inodes_free is not None:
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ if memory_available is not None:
+ pulumi.set(__self__, "memory_available", memory_available)
+ if nodefs_available is not None:
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ if nodefs_inodes_free is not None:
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ if pid_available is not None:
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines grace period for the imagefs.available soft eviction threshold
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines grace period for the imagefs.inodesFree soft eviction threshold.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> Optional[_builtins.str]:
+ """
+ Defines grace period for the memory.available soft eviction threshold.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> Optional[_builtins.str]:
+ """
+ Defines grace period for the nodefs.available soft eviction threshold.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
+ """
+ Defines grace period for the nodefs.inodesFree soft eviction threshold.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> Optional[_builtins.str]:
+ """
+ Defines grace period for the pid.available soft eviction threshold.
+ """
+ return pulumi.get(self, "pid_available")
+
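A hedged sketch of the new kubelet eviction knobs on a node pool; the signal names follow the eviction classes above, and each eviction_soft entry is paired with a grace period, as the docstrings require. Cluster name and location are placeholders:

    import pulumi_gcp as gcp

    # Sketch only: soft-evict pods when memory.available drops below 300Mi
    # for 30s, reclaim at least 5% on eviction, and cap the per-pod
    # termination grace period at 60s during soft evictions.
    pool = gcp.container.NodePool(
        "eviction-tuned-pool",
        cluster="my-cluster",
        location="us-central1",
        node_config={
            "kubelet_config": {
                "eviction_soft": {"memory_available": "300Mi"},
                "eviction_soft_grace_period": {"memory_available": "30s"},
                "eviction_minimum_reclaim": {"memory_available": "5%"},
                "eviction_max_pod_grace_period_seconds": 60,
                "max_parallel_image_pulls": 3,
                "single_process_oom_kill": True,
            },
        },
    )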

  @pulumi.output_type
  class NodePoolNodeConfigLinuxNodeConfig(dict):
@@ -15905,6 +17725,10 @@ class NodePoolNodeConfigLinuxNodeConfig(dict):
  suggest = "cgroup_mode"
  elif key == "hugepagesConfig":
  suggest = "hugepages_config"
+ elif key == "transparentHugepageDefrag":
+ suggest = "transparent_hugepage_defrag"
+ elif key == "transparentHugepageEnabled":
+ suggest = "transparent_hugepage_enabled"

  if suggest:
  pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigLinuxNodeConfig. Access the value via the '{suggest}' property getter instead.")
@@ -15920,11 +17744,15 @@ class NodePoolNodeConfigLinuxNodeConfig(dict):
  def __init__(__self__, *,
  cgroup_mode: Optional[_builtins.str] = None,
  hugepages_config: Optional['outputs.NodePoolNodeConfigLinuxNodeConfigHugepagesConfig'] = None,
- sysctls: Optional[Mapping[str, _builtins.str]] = None):
+ sysctls: Optional[Mapping[str, _builtins.str]] = None,
+ transparent_hugepage_defrag: Optional[_builtins.str] = None,
+ transparent_hugepage_enabled: Optional[_builtins.str] = None):
  """
  :param _builtins.str cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
  :param 'NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs' hugepages_config: Amounts for 2M and 1G hugepages.
  :param Mapping[str, _builtins.str] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
  """
  if cgroup_mode is not None:
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
@@ -15932,6 +17760,10 @@ class NodePoolNodeConfigLinuxNodeConfig(dict):
  pulumi.set(__self__, "hugepages_config", hugepages_config)
  if sysctls is not None:
  pulumi.set(__self__, "sysctls", sysctls)
+ if transparent_hugepage_defrag is not None:
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
+ if transparent_hugepage_enabled is not None:
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)

  @_builtins.property
  @pulumi.getter(name="cgroupMode")
@@ -15957,6 +17789,22 @@ class NodePoolNodeConfigLinuxNodeConfig(dict):
  """
  return pulumi.get(self, "sysctls")

+ @_builtins.property
+ @pulumi.getter(name="transparentHugepageDefrag")
+ def transparent_hugepage_defrag(self) -> Optional[_builtins.str]:
+ """
+ The Linux kernel transparent hugepage defrag setting.
+ """
+ return pulumi.get(self, "transparent_hugepage_defrag")
+
+ @_builtins.property
+ @pulumi.getter(name="transparentHugepageEnabled")
+ def transparent_hugepage_enabled(self) -> Optional[_builtins.str]:
+ """
+ The Linux kernel transparent hugepage setting.
+ """
+ return pulumi.get(self, "transparent_hugepage_enabled")
+
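The transparent hugepage settings slot into linux_node_config alongside the existing sysctls and cgroup options. A sketch follows; the enum spellings are assumptions based on the GKE API and are not confirmed by this diff:

    import pulumi_gcp as gcp

    # Sketch only: enum values below are assumed, not taken from this diff.
    pool = gcp.container.NodePool(
        "thp-tuned-pool",
        cluster="my-cluster",
        location="us-central1",
        node_config={
            "linux_node_config": {
                "transparent_hugepage_enabled": "TRANSPARENT_HUGEPAGE_ENABLED_ALWAYS",
                "transparent_hugepage_defrag": "TRANSPARENT_HUGEPAGE_DEFRAG_DEFER",
            },
        },
    )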

  @pulumi.output_type
  class NodePoolNodeConfigLinuxNodeConfigHugepagesConfig(dict):
@@ -16253,6 +18101,8 @@ class NodePoolNodeConfigSoleTenantConfig(dict):
  suggest = None
  if key == "nodeAffinities":
  suggest = "node_affinities"
+ elif key == "minNodeCpus":
+ suggest = "min_node_cpus"

  if suggest:
  pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigSoleTenantConfig. Access the value via the '{suggest}' property getter instead.")
@@ -16266,11 +18116,15 @@ class NodePoolNodeConfigSoleTenantConfig(dict):
  return super().get(key, default)

  def __init__(__self__, *,
- node_affinities: Sequence['outputs.NodePoolNodeConfigSoleTenantConfigNodeAffinity']):
+ node_affinities: Sequence['outputs.NodePoolNodeConfigSoleTenantConfigNodeAffinity'],
+ min_node_cpus: Optional[_builtins.int] = None):
  """
  :param Sequence['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
  """
  pulumi.set(__self__, "node_affinities", node_affinities)
+ if min_node_cpus is not None:
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)

  @_builtins.property
  @pulumi.getter(name="nodeAffinities")
@@ -16280,6 +18134,14 @@ class NodePoolNodeConfigSoleTenantConfig(dict):
  """
  return pulumi.get(self, "node_affinities")

+ @_builtins.property
+ @pulumi.getter(name="minNodeCpus")
+ def min_node_cpus(self) -> Optional[_builtins.int]:
+ """
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
+ """
+ return pulumi.get(self, "min_node_cpus")
+
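min_node_cpus enables CPU overcommit on sole-tenant nodes and sits next to the required node_affinities. A sketch, with the node-group name as a placeholder:

    import pulumi_gcp as gcp

    # Sketch only: schedules the pool onto an existing sole-tenant node group
    # and turns on CPU overcommit by reserving at least 2 vCPUs per node.
    pool = gcp.container.NodePool(
        "sole-tenant-pool",
        cluster="my-cluster",
        location="us-central1",
        node_config={
            "sole_tenant_config": {
                "node_affinities": [{
                    "key": "compute.googleapis.com/node-group-name",
                    "operator": "IN",
                    "values": ["my-node-group"],  # placeholder node group
                }],
                "min_node_cpus": 2,
            },
        },
    )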

  @pulumi.output_type
  class NodePoolNodeConfigSoleTenantConfigNodeAffinity(dict):
@@ -16704,6 +18566,7 @@ class GetClusterAddonsConfigResult(dict):
  http_load_balancings: Sequence['outputs.GetClusterAddonsConfigHttpLoadBalancingResult'],
  istio_configs: Sequence['outputs.GetClusterAddonsConfigIstioConfigResult'],
  kalm_configs: Sequence['outputs.GetClusterAddonsConfigKalmConfigResult'],
+ lustre_csi_driver_configs: Sequence['outputs.GetClusterAddonsConfigLustreCsiDriverConfigResult'],
  network_policy_configs: Sequence['outputs.GetClusterAddonsConfigNetworkPolicyConfigResult'],
  parallelstore_csi_driver_configs: Sequence['outputs.GetClusterAddonsConfigParallelstoreCsiDriverConfigResult'],
  ray_operator_configs: Sequence['outputs.GetClusterAddonsConfigRayOperatorConfigResult'],
@@ -16720,6 +18583,7 @@ class GetClusterAddonsConfigResult(dict):
  :param Sequence['GetClusterAddonsConfigHttpLoadBalancingArgs'] http_load_balancings: The status of the HTTP (L7) load balancing controller addon, which makes it easy to set up HTTP load balancers for services in a cluster. It is enabled by default; set disabled = true to disable.
  :param Sequence['GetClusterAddonsConfigIstioConfigArgs'] istio_configs: The status of the Istio addon.
  :param Sequence['GetClusterAddonsConfigKalmConfigArgs'] kalm_configs: Configuration for the KALM addon, which manages the lifecycle of k8s. It is disabled by default; Set enabled = true to enable.
+ :param Sequence['GetClusterAddonsConfigLustreCsiDriverConfigArgs'] lustre_csi_driver_configs: Configuration for the Lustre CSI driver. Defaults to disabled; set enabled = true to enable.
  :param Sequence['GetClusterAddonsConfigNetworkPolicyConfigArgs'] network_policy_configs: Whether we should enable the network policy addon for the master. This must be enabled in order to enable network policy for the nodes. To enable this, you must also define a network_policy block, otherwise nothing will happen. It can only be disabled if the nodes already do not have network policies enabled. Defaults to disabled; set disabled = false to enable.
  :param Sequence['GetClusterAddonsConfigParallelstoreCsiDriverConfigArgs'] parallelstore_csi_driver_configs: The status of the Parallelstore CSI driver addon, which allows the usage of Parallelstore instances as volumes. Defaults to disabled; set enabled = true to enable.
  :param Sequence['GetClusterAddonsConfigRayOperatorConfigArgs'] ray_operator_configs: The status of the Ray Operator addon, which enabled management of Ray AI/ML jobs on GKE. Defaults to disabled; set enabled = true to enable.
@@ -16736,6 +18600,7 @@ class GetClusterAddonsConfigResult(dict):
  pulumi.set(__self__, "http_load_balancings", http_load_balancings)
  pulumi.set(__self__, "istio_configs", istio_configs)
  pulumi.set(__self__, "kalm_configs", kalm_configs)
+ pulumi.set(__self__, "lustre_csi_driver_configs", lustre_csi_driver_configs)
  pulumi.set(__self__, "network_policy_configs", network_policy_configs)
  pulumi.set(__self__, "parallelstore_csi_driver_configs", parallelstore_csi_driver_configs)
  pulumi.set(__self__, "ray_operator_configs", ray_operator_configs)
@@ -16829,6 +18694,14 @@ class GetClusterAddonsConfigResult(dict):
  """
  return pulumi.get(self, "kalm_configs")

+ @_builtins.property
+ @pulumi.getter(name="lustreCsiDriverConfigs")
+ def lustre_csi_driver_configs(self) -> Sequence['outputs.GetClusterAddonsConfigLustreCsiDriverConfigResult']:
+ """
+ Configuration for the Lustre CSI driver. Defaults to disabled; set enabled = true to enable.
+ """
+ return pulumi.get(self, "lustre_csi_driver_configs")
+
  @_builtins.property
  @pulumi.getter(name="networkPolicyConfigs")
  def network_policy_configs(self) -> Sequence['outputs.GetClusterAddonsConfigNetworkPolicyConfigResult']:
@@ -17018,6 +18891,37 @@ class GetClusterAddonsConfigKalmConfigResult(dict):
  return pulumi.get(self, "enabled")


+ @pulumi.output_type
+ class GetClusterAddonsConfigLustreCsiDriverConfigResult(dict):
+ def __init__(__self__, *,
+ enable_legacy_lustre_port: _builtins.bool,
+ enabled: _builtins.bool):
+ """
+ :param _builtins.bool enable_legacy_lustre_port: If set to true, the Lustre CSI driver will initialize LNet (the virtual network layer for Lustre kernel module) using port 6988.
+ This flag is required to workaround a port conflict with the gke-metadata-server on GKE nodes.
+ :param _builtins.bool enabled: Whether the Lustre CSI driver is enabled for this cluster.
+ """
+ pulumi.set(__self__, "enable_legacy_lustre_port", enable_legacy_lustre_port)
+ pulumi.set(__self__, "enabled", enabled)
+
+ @_builtins.property
+ @pulumi.getter(name="enableLegacyLustrePort")
+ def enable_legacy_lustre_port(self) -> _builtins.bool:
+ """
+ If set to true, the Lustre CSI driver will initialize LNet (the virtual network layer for Lustre kernel module) using port 6988.
+ This flag is required to workaround a port conflict with the gke-metadata-server on GKE nodes.
+ """
+ return pulumi.get(self, "enable_legacy_lustre_port")
+
+ @_builtins.property
+ @pulumi.getter
+ def enabled(self) -> _builtins.bool:
+ """
+ Whether the Lustre CSI driver is enabled for this cluster.
+ """
+ return pulumi.get(self, "enabled")
+
+
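A read-side sketch for the new Lustre result type; it assumes the get_cluster data source surfaces these entries under an addons_configs list, mirroring the result classes above:

    import pulumi_gcp as gcp

    # Sketch only: inspect whether the Lustre CSI driver (and its legacy
    # port 6988 workaround) is enabled on an existing cluster.
    cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")
    for addons in cluster.addons_configs:
        for lustre in addons.lustre_csi_driver_configs:
            print("lustre enabled:", lustre.enabled,
                  "legacy port:", lustre.enable_legacy_lustre_port)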
  @pulumi.output_type
  class GetClusterAddonsConfigNetworkPolicyConfigResult(dict):
  def __init__(__self__, *,
@@ -18039,6 +19943,7 @@ class GetClusterIdentityServiceConfigResult(dict):
  @pulumi.output_type
  class GetClusterIpAllocationPolicyResult(dict):
  def __init__(__self__, *,
+ additional_ip_ranges_configs: Sequence['outputs.GetClusterIpAllocationPolicyAdditionalIpRangesConfigResult'],
  additional_pod_ranges_configs: Sequence['outputs.GetClusterIpAllocationPolicyAdditionalPodRangesConfigResult'],
  cluster_ipv4_cidr_block: _builtins.str,
  cluster_secondary_range_name: _builtins.str,
@@ -18047,6 +19952,7 @@ class GetClusterIpAllocationPolicyResult(dict):
  services_secondary_range_name: _builtins.str,
  stack_type: _builtins.str):
  """
+ :param Sequence['GetClusterIpAllocationPolicyAdditionalIpRangesConfigArgs'] additional_ip_ranges_configs: AdditionalIPRangesConfig is the configuration for individual additional subnetworks attached to the cluster
  :param Sequence['GetClusterIpAllocationPolicyAdditionalPodRangesConfigArgs'] additional_pod_ranges_configs: AdditionalPodRangesConfig is the configuration for additional pod secondary ranges supporting the ClusterUpdate message.
  :param _builtins.str cluster_ipv4_cidr_block: The IP address range for the cluster pod IPs. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use.
  :param _builtins.str cluster_secondary_range_name: The name of the existing secondary range in the cluster's subnetwork to use for pod IP addresses. Alternatively, cluster_ipv4_cidr_block can be used to automatically create a GKE-managed one.
@@ -18055,6 +19961,7 @@ class GetClusterIpAllocationPolicyResult(dict):
  :param _builtins.str services_secondary_range_name: The name of the existing secondary range in the cluster's subnetwork to use for service ClusterIPs. Alternatively, services_ipv4_cidr_block can be used to automatically create a GKE-managed one.
  :param _builtins.str stack_type: The IP Stack type of the cluster. Choose between IPV4 and IPV4_IPV6. Default type is IPV4 Only if not set
  """
+ pulumi.set(__self__, "additional_ip_ranges_configs", additional_ip_ranges_configs)
  pulumi.set(__self__, "additional_pod_ranges_configs", additional_pod_ranges_configs)
  pulumi.set(__self__, "cluster_ipv4_cidr_block", cluster_ipv4_cidr_block)
  pulumi.set(__self__, "cluster_secondary_range_name", cluster_secondary_range_name)
@@ -18063,6 +19970,14 @@ class GetClusterIpAllocationPolicyResult(dict):
  pulumi.set(__self__, "services_secondary_range_name", services_secondary_range_name)
  pulumi.set(__self__, "stack_type", stack_type)

+ @_builtins.property
+ @pulumi.getter(name="additionalIpRangesConfigs")
+ def additional_ip_ranges_configs(self) -> Sequence['outputs.GetClusterIpAllocationPolicyAdditionalIpRangesConfigResult']:
+ """
+ AdditionalIPRangesConfig is the configuration for individual additional subnetworks attached to the cluster
+ """
+ return pulumi.get(self, "additional_ip_ranges_configs")
+
  @_builtins.property
  @pulumi.getter(name="additionalPodRangesConfigs")
  def additional_pod_ranges_configs(self) -> Sequence['outputs.GetClusterIpAllocationPolicyAdditionalPodRangesConfigResult']:
@@ -18120,6 +20035,35 @@ class GetClusterIpAllocationPolicyResult(dict):
  return pulumi.get(self, "stack_type")


+ @pulumi.output_type
+ class GetClusterIpAllocationPolicyAdditionalIpRangesConfigResult(dict):
+ def __init__(__self__, *,
+ pod_ipv4_range_names: Sequence[_builtins.str],
+ subnetwork: _builtins.str):
+ """
+ :param Sequence[_builtins.str] pod_ipv4_range_names: List of secondary ranges names within this subnetwork that can be used for pod IPs.
+ :param _builtins.str subnetwork: Name of the subnetwork. This can be the full path of the subnetwork or just the name.
+ """
+ pulumi.set(__self__, "pod_ipv4_range_names", pod_ipv4_range_names)
+ pulumi.set(__self__, "subnetwork", subnetwork)
+
+ @_builtins.property
+ @pulumi.getter(name="podIpv4RangeNames")
+ def pod_ipv4_range_names(self) -> Sequence[_builtins.str]:
+ """
+ List of secondary ranges names within this subnetwork that can be used for pod IPs.
+ """
+ return pulumi.get(self, "pod_ipv4_range_names")
+
+ @_builtins.property
+ @pulumi.getter
+ def subnetwork(self) -> _builtins.str:
+ """
+ Name of the subnetwork. This can be the full path of the subnetwork or just the name.
+ """
+ return pulumi.get(self, "subnetwork")
+
+
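A sketch of reading the new additional-subnetwork data; the ip_allocation_policies attribute name on the get_cluster result is assumed from the result classes above:

    import pulumi_gcp as gcp

    # Sketch only: list extra subnetworks attached to the cluster and the
    # secondary ranges within each that can serve pod IPs.
    cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")
    for policy in cluster.ip_allocation_policies:
        for extra in policy.additional_ip_ranges_configs:
            print(extra.subnetwork, list(extra.pod_ipv4_range_names))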
  @pulumi.output_type
  class GetClusterIpAllocationPolicyAdditionalPodRangesConfigResult(dict):
  def __init__(__self__, *,
@@ -18634,6 +20578,7 @@ class GetClusterNodeConfigResult(dict):
  def __init__(__self__, *,
  advanced_machine_features: Sequence['outputs.GetClusterNodeConfigAdvancedMachineFeatureResult'],
  boot_disk_kms_key: _builtins.str,
+ boot_disks: Sequence['outputs.GetClusterNodeConfigBootDiskResult'],
  confidential_nodes: Sequence['outputs.GetClusterNodeConfigConfidentialNodeResult'],
  containerd_configs: Sequence['outputs.GetClusterNodeConfigContainerdConfigResult'],
  disk_size_gb: _builtins.int,
@@ -18680,6 +20625,7 @@ class GetClusterNodeConfigResult(dict):
  """
  :param Sequence['GetClusterNodeConfigAdvancedMachineFeatureArgs'] advanced_machine_features: Specifies options for controlling advanced machine features.
  :param _builtins.str boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
+ :param Sequence['GetClusterNodeConfigBootDiskArgs'] boot_disks: Boot disk configuration for node pools nodes.
  :param Sequence['GetClusterNodeConfigConfidentialNodeArgs'] confidential_nodes: Configuration for the confidential nodes feature, which makes nodes run on confidential VMs.
  :param Sequence['GetClusterNodeConfigContainerdConfigArgs'] containerd_configs: Parameters for containerd configuration.
  :param _builtins.int disk_size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
@@ -18726,6 +20672,7 @@ class GetClusterNodeConfigResult(dict):
  """
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
+ pulumi.set(__self__, "boot_disks", boot_disks)
  pulumi.set(__self__, "confidential_nodes", confidential_nodes)
  pulumi.set(__self__, "containerd_configs", containerd_configs)
  pulumi.set(__self__, "disk_size_gb", disk_size_gb)
@@ -18786,6 +20733,14 @@ class GetClusterNodeConfigResult(dict):
  """
  return pulumi.get(self, "boot_disk_kms_key")

+ @_builtins.property
+ @pulumi.getter(name="bootDisks")
+ def boot_disks(self) -> Sequence['outputs.GetClusterNodeConfigBootDiskResult']:
+ """
+ Boot disk configuration for node pools nodes.
+ """
+ return pulumi.get(self, "boot_disks")
+
  @_builtins.property
  @pulumi.getter(name="confidentialNodes")
  def confidential_nodes(self) -> Sequence['outputs.GetClusterNodeConfigConfidentialNodeResult']:
@@ -19171,6 +21126,57 @@ class GetClusterNodeConfigAdvancedMachineFeatureResult(dict):
  return pulumi.get(self, "threads_per_core")


+ @pulumi.output_type
+ class GetClusterNodeConfigBootDiskResult(dict):
+ def __init__(__self__, *,
+ disk_type: _builtins.str,
+ provisioned_iops: _builtins.int,
+ provisioned_throughput: _builtins.int,
+ size_gb: _builtins.int):
+ """
+ :param _builtins.str disk_type: Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
+ :param _builtins.int provisioned_iops: Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
+ :param _builtins.int provisioned_throughput: Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
+ :param _builtins.int size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
+ """
+ pulumi.set(__self__, "disk_type", disk_type)
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
+ pulumi.set(__self__, "size_gb", size_gb)
+
+ @_builtins.property
+ @pulumi.getter(name="diskType")
+ def disk_type(self) -> _builtins.str:
+ """
+ Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
+ """
+ return pulumi.get(self, "disk_type")
+
+ @_builtins.property
+ @pulumi.getter(name="provisionedIops")
+ def provisioned_iops(self) -> _builtins.int:
+ """
+ Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
+ """
+ return pulumi.get(self, "provisioned_iops")
+
+ @_builtins.property
+ @pulumi.getter(name="provisionedThroughput")
+ def provisioned_throughput(self) -> _builtins.int:
+ """
+ Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
+ """
+ return pulumi.get(self, "provisioned_throughput")
+
+ @_builtins.property
+ @pulumi.getter(name="sizeGb")
+ def size_gb(self) -> _builtins.int:
+ """
+ Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
+ """
+ return pulumi.get(self, "size_gb")
+
+
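The data-source mirror of boot_disk is a list of results; a short read sketch, assuming the get_cluster result exposes node_configs as a list:

    import pulumi_gcp as gcp

    # Sketch only: report each node pool's boot disk shape for an
    # existing cluster.
    cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")
    for node_config in cluster.node_configs:
        for disk in node_config.boot_disks:
            print(disk.disk_type, disk.size_gb, disk.provisioned_iops)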
  @pulumi.output_type
  class GetClusterNodeConfigConfidentialNodeResult(dict):
  def __init__(__self__, *,
@@ -19571,12 +21577,18 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  cpu_cfs_quota: _builtins.bool,
  cpu_cfs_quota_period: _builtins.str,
  cpu_manager_policy: _builtins.str,
+ eviction_max_pod_grace_period_seconds: _builtins.int,
+ eviction_minimum_reclaims: Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult'],
+ eviction_soft_grace_periods: Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult'],
+ eviction_softs: Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionSoftResult'],
  image_gc_high_threshold_percent: _builtins.int,
  image_gc_low_threshold_percent: _builtins.int,
  image_maximum_gc_age: _builtins.str,
  image_minimum_gc_age: _builtins.str,
  insecure_kubelet_readonly_port_enabled: _builtins.str,
- pod_pids_limit: _builtins.int):
+ max_parallel_image_pulls: _builtins.int,
+ pod_pids_limit: _builtins.int,
+ single_process_oom_kill: _builtins.bool):
  """
  :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
  :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -19584,12 +21596,18 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  :param _builtins.bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
  :param _builtins.str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
  :param _builtins.str cpu_manager_policy: Control the CPU management policy on the node.
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ :param Sequence['GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs'] eviction_minimum_reclaims: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
+ :param Sequence['GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs'] eviction_soft_grace_periods: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
+ :param Sequence['GetClusterNodeConfigKubeletConfigEvictionSoftArgs'] eviction_softs: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run.
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected.
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
+ :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer.
  """
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
  pulumi.set(__self__, "container_log_max_files", container_log_max_files)
@@ -19597,12 +21615,18 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
+ pulumi.set(__self__, "eviction_minimum_reclaims", eviction_minimum_reclaims)
+ pulumi.set(__self__, "eviction_soft_grace_periods", eviction_soft_grace_periods)
+ pulumi.set(__self__, "eviction_softs", eviction_softs)
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
  pulumi.set(__self__, "image_gc_low_threshold_percent", image_gc_low_threshold_percent)
  pulumi.set(__self__, "image_maximum_gc_age", image_maximum_gc_age)
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)

  @_builtins.property
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -19653,32 +21677,64 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  return pulumi.get(self, "cpu_manager_policy")

  @_builtins.property
- @pulumi.getter(name="imageGcHighThresholdPercent")
- def image_gc_high_threshold_percent(self) -> _builtins.int:
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
+ def eviction_max_pod_grace_period_seconds(self) -> _builtins.int:
  """
- Defines the percent of disk usage after which image garbage collection is always run.
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
  """
- return pulumi.get(self, "image_gc_high_threshold_percent")
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")

  @_builtins.property
- @pulumi.getter(name="imageGcLowThresholdPercent")
- def image_gc_low_threshold_percent(self) -> _builtins.int:
+ @pulumi.getter(name="evictionMinimumReclaims")
+ def eviction_minimum_reclaims(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult']:
  """
- Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
  """
- return pulumi.get(self, "image_gc_low_threshold_percent")
+ return pulumi.get(self, "eviction_minimum_reclaims")

  @_builtins.property
- @pulumi.getter(name="imageMaximumGcAge")
- def image_maximum_gc_age(self) -> _builtins.str:
+ @pulumi.getter(name="evictionSoftGracePeriods")
+ def eviction_soft_grace_periods(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult']:
  """
- Defines the maximum age an image can be unused before it is garbage collected.
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
  """
- return pulumi.get(self, "image_maximum_gc_age")
+ return pulumi.get(self, "eviction_soft_grace_periods")

  @_builtins.property
- @pulumi.getter(name="imageMinimumGcAge")
- def image_minimum_gc_age(self) -> _builtins.str:
+ @pulumi.getter(name="evictionSofts")
+ def eviction_softs(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionSoftResult']:
+ """
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
+ """
+ return pulumi.get(self, "eviction_softs")
+
+ @_builtins.property
+ @pulumi.getter(name="imageGcHighThresholdPercent")
+ def image_gc_high_threshold_percent(self) -> _builtins.int:
+ """
+ Defines the percent of disk usage after which image garbage collection is always run.
+ """
+ return pulumi.get(self, "image_gc_high_threshold_percent")
+
+ @_builtins.property
+ @pulumi.getter(name="imageGcLowThresholdPercent")
+ def image_gc_low_threshold_percent(self) -> _builtins.int:
+ """
+ Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
+ """
+ return pulumi.get(self, "image_gc_low_threshold_percent")
+
+ @_builtins.property
+ @pulumi.getter(name="imageMaximumGcAge")
+ def image_maximum_gc_age(self) -> _builtins.str:
+ """
+ Defines the maximum age an image can be unused before it is garbage collected.
+ """
+ return pulumi.get(self, "image_maximum_gc_age")
+
+ @_builtins.property
+ @pulumi.getter(name="imageMinimumGcAge")
+ def image_minimum_gc_age(self) -> _builtins.str:
  """
  Defines the minimum age for an unused image before it is garbage collected.
  """
@@ -19692,6 +21748,14 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  """
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")

+ @_builtins.property
+ @pulumi.getter(name="maxParallelImagePulls")
+ def max_parallel_image_pulls(self) -> _builtins.int:
+ """
+ Set the maximum number of image pulls in parallel.
+ """
+ return pulumi.get(self, "max_parallel_image_pulls")
+
  @_builtins.property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> _builtins.int:
@@ -19700,21 +21764,254 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
  """
  return pulumi.get(self, "pod_pids_limit")

+ @_builtins.property
+ @pulumi.getter(name="singleProcessOomKill")
21769
+ def single_process_oom_kill(self) -> _builtins.bool:
21770
+ """
21771
+ Defines whether to enable single process OOM killer.
21772
+ """
21773
+ return pulumi.get(self, "single_process_oom_kill")
21774
+
21775
+
21776
+ @pulumi.output_type
21777
+ class GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult(dict):
21778
+ def __init__(__self__, *,
21779
+ imagefs_available: _builtins.str,
21780
+ imagefs_inodes_free: _builtins.str,
21781
+ memory_available: _builtins.str,
21782
+ nodefs_available: _builtins.str,
21783
+ nodefs_inodes_free: _builtins.str,
21784
+ pid_available: _builtins.str):
21785
+ """
21786
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available.
21787
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree.
21788
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available.
21789
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available.
21790
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree.
21791
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available.
21792
+ """
21793
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
21794
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
21795
+ pulumi.set(__self__, "memory_available", memory_available)
21796
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
21797
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
21798
+ pulumi.set(__self__, "pid_available", pid_available)
21799
+
21800
+ @_builtins.property
21801
+ @pulumi.getter(name="imagefsAvailable")
21802
+ def imagefs_available(self) -> _builtins.str:
21803
+ """
21804
+ Defines percentage of minimum reclaim for imagefs.available.
21805
+ """
21806
+ return pulumi.get(self, "imagefs_available")
21807
+
21808
+ @_builtins.property
21809
+ @pulumi.getter(name="imagefsInodesFree")
21810
+ def imagefs_inodes_free(self) -> _builtins.str:
21811
+ """
21812
+ Defines percentage of minimum reclaim for imagefs.inodesFree.
21813
+ """
21814
+ return pulumi.get(self, "imagefs_inodes_free")
21815
+
21816
+ @_builtins.property
21817
+ @pulumi.getter(name="memoryAvailable")
21818
+ def memory_available(self) -> _builtins.str:
21819
+ """
21820
+ Defines percentage of minimum reclaim for memory.available.
21821
+ """
21822
+ return pulumi.get(self, "memory_available")
21823
+
21824
+ @_builtins.property
21825
+ @pulumi.getter(name="nodefsAvailable")
21826
+ def nodefs_available(self) -> _builtins.str:
21827
+ """
21828
+ Defines percentage of minimum reclaim for nodefs.available.
21829
+ """
21830
+ return pulumi.get(self, "nodefs_available")
21831
+
21832
+ @_builtins.property
21833
+ @pulumi.getter(name="nodefsInodesFree")
21834
+ def nodefs_inodes_free(self) -> _builtins.str:
21835
+ """
21836
+ Defines percentage of minimum reclaim for nodefs.inodesFree.
21837
+ """
21838
+ return pulumi.get(self, "nodefs_inodes_free")
21839
+
21840
+ @_builtins.property
21841
+ @pulumi.getter(name="pidAvailable")
21842
+ def pid_available(self) -> _builtins.str:
21843
+ """
21844
+ Defines percentage of minimum reclaim for pid.available.
21845
+ """
21846
+ return pulumi.get(self, "pid_available")
21847
+
21848
+
21849
+ @pulumi.output_type
21850
+ class GetClusterNodeConfigKubeletConfigEvictionSoftResult(dict):
21851
+ def __init__(__self__, *,
21852
+ imagefs_available: _builtins.str,
21853
+ imagefs_inodes_free: _builtins.str,
21854
+ memory_available: _builtins.str,
21855
+ nodefs_available: _builtins.str,
21856
+ nodefs_inodes_free: _builtins.str,
21857
+ pid_available: _builtins.str):
21858
+ """
21859
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available.
21860
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree.
21861
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available.
21862
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available.
21863
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree.
21864
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available.
21865
+ """
21866
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
21867
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
21868
+ pulumi.set(__self__, "memory_available", memory_available)
21869
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
21870
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
21871
+ pulumi.set(__self__, "pid_available", pid_available)
21872
+
21873
+ @_builtins.property
21874
+ @pulumi.getter(name="imagefsAvailable")
21875
+ def imagefs_available(self) -> _builtins.str:
21876
+ """
21877
+ Defines percentage of soft eviction threshold for imagefs.available.
21878
+ """
21879
+ return pulumi.get(self, "imagefs_available")
21880
+
21881
+ @_builtins.property
21882
+ @pulumi.getter(name="imagefsInodesFree")
21883
+ def imagefs_inodes_free(self) -> _builtins.str:
21884
+ """
21885
+ Defines percentage of soft eviction threshold for imagefs.inodesFree.
21886
+ """
21887
+ return pulumi.get(self, "imagefs_inodes_free")
21888
+
21889
+ @_builtins.property
21890
+ @pulumi.getter(name="memoryAvailable")
21891
+ def memory_available(self) -> _builtins.str:
21892
+ """
21893
+ Defines quantity of soft eviction threshold for memory.available.
21894
+ """
21895
+ return pulumi.get(self, "memory_available")
21896
+
21897
+ @_builtins.property
21898
+ @pulumi.getter(name="nodefsAvailable")
21899
+ def nodefs_available(self) -> _builtins.str:
21900
+ """
21901
+ Defines percentage of soft eviction threshold for nodefs.available.
21902
+ """
21903
+ return pulumi.get(self, "nodefs_available")
21904
+
21905
+ @_builtins.property
21906
+ @pulumi.getter(name="nodefsInodesFree")
21907
+ def nodefs_inodes_free(self) -> _builtins.str:
21908
+ """
21909
+ Defines percentage of soft eviction threshold for nodefs.inodesFree.
21910
+ """
21911
+ return pulumi.get(self, "nodefs_inodes_free")
21912
+
21913
+ @_builtins.property
21914
+ @pulumi.getter(name="pidAvailable")
21915
+ def pid_available(self) -> _builtins.str:
21916
+ """
21917
+ Defines percentage of soft eviction threshold for pid.available.
21918
+ """
21919
+ return pulumi.get(self, "pid_available")
21920
+
21921
+
21922
+ @pulumi.output_type
21923
+ class GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult(dict):
21924
+ def __init__(__self__, *,
21925
+ imagefs_available: _builtins.str,
21926
+ imagefs_inodes_free: _builtins.str,
21927
+ memory_available: _builtins.str,
21928
+ nodefs_available: _builtins.str,
21929
+ nodefs_inodes_free: _builtins.str,
21930
+ pid_available: _builtins.str):
21931
+ """
21932
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold
21933
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold.
21934
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold.
21935
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold.
21936
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold.
21937
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold.
21938
+ """
21939
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
21940
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
21941
+ pulumi.set(__self__, "memory_available", memory_available)
21942
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
21943
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
21944
+ pulumi.set(__self__, "pid_available", pid_available)
21945
+
21946
+ @_builtins.property
21947
+ @pulumi.getter(name="imagefsAvailable")
21948
+ def imagefs_available(self) -> _builtins.str:
21949
+ """
21950
+ Defines grace period for the imagefs.available soft eviction threshold
21951
+ """
21952
+ return pulumi.get(self, "imagefs_available")
21953
+
21954
+ @_builtins.property
21955
+ @pulumi.getter(name="imagefsInodesFree")
21956
+ def imagefs_inodes_free(self) -> _builtins.str:
21957
+ """
21958
+ Defines grace period for the imagefs.inodesFree soft eviction threshold.
21959
+ """
21960
+ return pulumi.get(self, "imagefs_inodes_free")
21961
+
21962
+ @_builtins.property
21963
+ @pulumi.getter(name="memoryAvailable")
21964
+ def memory_available(self) -> _builtins.str:
21965
+ """
21966
+ Defines grace period for the memory.available soft eviction threshold.
21967
+ """
21968
+ return pulumi.get(self, "memory_available")
21969
+
21970
+ @_builtins.property
21971
+ @pulumi.getter(name="nodefsAvailable")
21972
+ def nodefs_available(self) -> _builtins.str:
21973
+ """
21974
+ Defines grace period for the nodefs.available soft eviction threshold.
21975
+ """
21976
+ return pulumi.get(self, "nodefs_available")
21977
+
21978
+ @_builtins.property
21979
+ @pulumi.getter(name="nodefsInodesFree")
21980
+ def nodefs_inodes_free(self) -> _builtins.str:
21981
+ """
21982
+ Defines grace period for the nodefs.inodesFree soft eviction threshold.
21983
+ """
21984
+ return pulumi.get(self, "nodefs_inodes_free")
21985
+
21986
+ @_builtins.property
21987
+ @pulumi.getter(name="pidAvailable")
21988
+ def pid_available(self) -> _builtins.str:
21989
+ """
21990
+ Defines grace period for the pid.available soft eviction threshold.
21991
+ """
21992
+ return pulumi.get(self, "pid_available")
21993
+
19703
21994
 
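The hunks above extend `GetClusterNodeConfigKubeletConfigResult` with kubelet eviction outputs plus `max_parallel_image_pulls` and `single_process_oom_kill`. As a minimal sketch of consuming them from a Pulumi program: the cluster name and location are placeholders, and the pluralized accessors (`node_configs`, `kubelet_configs`, `eviction_softs`) are assumed from the generated schema's usual conventions rather than confirmed by this diff:

```python
import pulumi
import pulumi_gcp as gcp

# Hypothetical lookup; substitute a real cluster name and location.
cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

# Nested blocks surface as lists in the generated result types.
kubelet = cluster.node_configs[0].kubelet_configs[0]

pulumi.export("maxParallelImagePulls", kubelet.max_parallel_image_pulls)
pulumi.export("singleProcessOomKill", kubelet.single_process_oom_kill)
pulumi.export("evictionMaxPodGracePeriodSeconds",
              kubelet.eviction_max_pod_grace_period_seconds)
# Each eviction_softs entry carries the per-signal thresholds defined above.
if kubelet.eviction_softs:
    pulumi.export("evictionSoftMemoryAvailable",
                  kubelet.eviction_softs[0].memory_available)
```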
  @pulumi.output_type
  class GetClusterNodeConfigLinuxNodeConfigResult(dict):
  def __init__(__self__, *,
  cgroup_mode: _builtins.str,
  hugepages_configs: Sequence['outputs.GetClusterNodeConfigLinuxNodeConfigHugepagesConfigResult'],
- sysctls: Mapping[str, _builtins.str]):
+ sysctls: Mapping[str, _builtins.str],
+ transparent_hugepage_defrag: _builtins.str,
+ transparent_hugepage_enabled: _builtins.str):
  """
  :param _builtins.str cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
  :param Sequence['GetClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_configs: Amounts for 2M and 1G hugepages.
  :param Mapping[str, _builtins.str] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
  """
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
  pulumi.set(__self__, "hugepages_configs", hugepages_configs)
  pulumi.set(__self__, "sysctls", sysctls)
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
 
  @_builtins.property
  @pulumi.getter(name="cgroupMode")
@@ -19740,6 +22037,22 @@ class GetClusterNodeConfigLinuxNodeConfigResult(dict):
  """
  return pulumi.get(self, "sysctls")
 
+ @_builtins.property
+ @pulumi.getter(name="transparentHugepageDefrag")
+ def transparent_hugepage_defrag(self) -> _builtins.str:
+ """
+ The Linux kernel transparent hugepage defrag setting.
+ """
+ return pulumi.get(self, "transparent_hugepage_defrag")
+
+ @_builtins.property
+ @pulumi.getter(name="transparentHugepageEnabled")
+ def transparent_hugepage_enabled(self) -> _builtins.str:
+ """
+ The Linux kernel transparent hugepage setting.
+ """
+ return pulumi.get(self, "transparent_hugepage_enabled")
+
 
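`transparent_hugepage_enabled` and `transparent_hugepage_defrag` mirror the kernel's THP knobs on the node. A hedged read-back sketch, reusing the hypothetical lookup and assumed pluralized accessors from the previous example:

```python
import pulumi
import pulumi_gcp as gcp

cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")
linux = cluster.node_configs[0].linux_node_configs[0]

# Both values come back as strings describing the kernel THP configuration.
pulumi.export("thpEnabled", linux.transparent_hugepage_enabled)
pulumi.export("thpDefrag", linux.transparent_hugepage_defrag)
```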
  @pulumi.output_type
  class GetClusterNodeConfigLinuxNodeConfigHugepagesConfigResult(dict):
@@ -19907,12 +22220,23 @@ class GetClusterNodeConfigShieldedInstanceConfigResult(dict):
  @pulumi.output_type
  class GetClusterNodeConfigSoleTenantConfigResult(dict):
  def __init__(__self__, *,
+ min_node_cpus: _builtins.int,
  node_affinities: Sequence['outputs.GetClusterNodeConfigSoleTenantConfigNodeAffinityResult']):
  """
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
  :param Sequence['GetClusterNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
  """
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
  pulumi.set(__self__, "node_affinities", node_affinities)
 
+ @_builtins.property
+ @pulumi.getter(name="minNodeCpus")
+ def min_node_cpus(self) -> _builtins.int:
+ """
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
+ """
+ return pulumi.get(self, "min_node_cpus")
+
  @_builtins.property
  @pulumi.getter(name="nodeAffinities")
  def node_affinities(self) -> Sequence['outputs.GetClusterNodeConfigSoleTenantConfigNodeAffinityResult']:
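
`min_node_cpus` gates CPU overcommit on sole-tenant nodes; per the docstring, leaving it unset disables the feature. A minimal read-back sketch under the same assumptions as the earlier examples:

```python
import pulumi
import pulumi_gcp as gcp

cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")
sole_tenant = cluster.node_configs[0].sole_tenant_configs[0]

# Unset/zero means CPU overcommit is disabled for the sole-tenant nodes.
pulumi.export("minNodeCpus", sole_tenant.min_node_cpus)
```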
@@ -20604,7 +22928,8 @@ class GetClusterNodePoolNetworkConfigResult(dict):
  network_performance_configs: Sequence['outputs.GetClusterNodePoolNetworkConfigNetworkPerformanceConfigResult'],
  pod_cidr_overprovision_configs: Sequence['outputs.GetClusterNodePoolNetworkConfigPodCidrOverprovisionConfigResult'],
  pod_ipv4_cidr_block: _builtins.str,
- pod_range: _builtins.str):
+ pod_range: _builtins.str,
+ subnetwork: _builtins.str):
  """
  :param Sequence['GetClusterNodePoolNetworkConfigAdditionalNodeNetworkConfigArgs'] additional_node_network_configs: We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface
  :param Sequence['GetClusterNodePoolNetworkConfigAdditionalPodNetworkConfigArgs'] additional_pod_network_configs: We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node
@@ -20614,6 +22939,7 @@ class GetClusterNodePoolNetworkConfigResult(dict):
  :param Sequence['GetClusterNodePoolNetworkConfigPodCidrOverprovisionConfigArgs'] pod_cidr_overprovision_configs: Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited
  :param _builtins.str pod_ipv4_cidr_block: The IP address range for pod IPs in this node pool. Only applicable if create_pod_range is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.
  :param _builtins.str pod_range: The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.
+ :param _builtins.str subnetwork: The subnetwork path for the node pool. Format: projects/{project}/regions/{region}/subnetworks/{subnetwork} . If the cluster is associated with multiple subnetworks, the subnetwork for the node pool is picked based on the IP utilization during node pool creation and is immutable.
  """
  pulumi.set(__self__, "additional_node_network_configs", additional_node_network_configs)
  pulumi.set(__self__, "additional_pod_network_configs", additional_pod_network_configs)
@@ -20623,6 +22949,7 @@ class GetClusterNodePoolNetworkConfigResult(dict):
  pulumi.set(__self__, "pod_cidr_overprovision_configs", pod_cidr_overprovision_configs)
  pulumi.set(__self__, "pod_ipv4_cidr_block", pod_ipv4_cidr_block)
  pulumi.set(__self__, "pod_range", pod_range)
+ pulumi.set(__self__, "subnetwork", subnetwork)
 
  @_builtins.property
  @pulumi.getter(name="additionalNodeNetworkConfigs")
@@ -20688,6 +23015,14 @@ class GetClusterNodePoolNetworkConfigResult(dict):
  """
  return pulumi.get(self, "pod_range")
 
+ @_builtins.property
+ @pulumi.getter
+ def subnetwork(self) -> _builtins.str:
+ """
+ The subnetwork path for the node pool. Format: projects/{project}/regions/{region}/subnetworks/{subnetwork} . If the cluster is associated with multiple subnetworks, the subnetwork for the node pool is picked based on the IP utilization during node pool creation and is immutable.
+ """
+ return pulumi.get(self, "subnetwork")
+
 
  @pulumi.output_type
  class GetClusterNodePoolNetworkConfigAdditionalNodeNetworkConfigResult(dict):
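
The new `subnetwork` output records which subnetwork each node pool landed in (immutable once chosen, per the docstring). A sketch that surfaces it for every pool, assuming the usual `node_pools`/`network_configs` accessors on the cluster lookup:

```python
import pulumi
import pulumi_gcp as gcp

cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

for i, pool in enumerate(cluster.node_pools):
    if pool.network_configs:
        # Full resource path: projects/{project}/regions/{region}/subnetworks/{subnetwork}
        pulumi.export(f"pool{i}Subnetwork", pool.network_configs[0].subnetwork)
```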
@@ -20793,6 +23128,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
  def __init__(__self__, *,
  advanced_machine_features: Sequence['outputs.GetClusterNodePoolNodeConfigAdvancedMachineFeatureResult'],
  boot_disk_kms_key: _builtins.str,
+ boot_disks: Sequence['outputs.GetClusterNodePoolNodeConfigBootDiskResult'],
  confidential_nodes: Sequence['outputs.GetClusterNodePoolNodeConfigConfidentialNodeResult'],
  containerd_configs: Sequence['outputs.GetClusterNodePoolNodeConfigContainerdConfigResult'],
  disk_size_gb: _builtins.int,
@@ -20839,6 +23175,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
  """
  :param Sequence['GetClusterNodePoolNodeConfigAdvancedMachineFeatureArgs'] advanced_machine_features: Specifies options for controlling advanced machine features.
  :param _builtins.str boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
+ :param Sequence['GetClusterNodePoolNodeConfigBootDiskArgs'] boot_disks: Boot disk configuration for the node pool's nodes.
  :param Sequence['GetClusterNodePoolNodeConfigConfidentialNodeArgs'] confidential_nodes: Configuration for the confidential nodes feature, which makes nodes run on confidential VMs.
  :param Sequence['GetClusterNodePoolNodeConfigContainerdConfigArgs'] containerd_configs: Parameters for containerd configuration.
  :param _builtins.int disk_size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
@@ -20885,6 +23222,7 @@ class GetClusterNodePoolNodeConfigResult(dict):
  """
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
+ pulumi.set(__self__, "boot_disks", boot_disks)
  pulumi.set(__self__, "confidential_nodes", confidential_nodes)
  pulumi.set(__self__, "containerd_configs", containerd_configs)
  pulumi.set(__self__, "disk_size_gb", disk_size_gb)
@@ -20945,6 +23283,14 @@ class GetClusterNodePoolNodeConfigResult(dict):
  """
  return pulumi.get(self, "boot_disk_kms_key")
 
+ @_builtins.property
+ @pulumi.getter(name="bootDisks")
+ def boot_disks(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigBootDiskResult']:
+ """
+ Boot disk configuration for the node pool's nodes.
+ """
+ return pulumi.get(self, "boot_disks")
+
  @_builtins.property
  @pulumi.getter(name="confidentialNodes")
  def confidential_nodes(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigConfidentialNodeResult']:
@@ -21330,6 +23676,57 @@ class GetClusterNodePoolNodeConfigAdvancedMachineFeatureResult(dict):
  return pulumi.get(self, "threads_per_core")
 
 
+ @pulumi.output_type
+ class GetClusterNodePoolNodeConfigBootDiskResult(dict):
+ def __init__(__self__, *,
+ disk_type: _builtins.str,
+ provisioned_iops: _builtins.int,
+ provisioned_throughput: _builtins.int,
+ size_gb: _builtins.int):
+ """
+ :param _builtins.str disk_type: Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
+ :param _builtins.int provisioned_iops: Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
+ :param _builtins.int provisioned_throughput: Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
+ :param _builtins.int size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
+ """
+ pulumi.set(__self__, "disk_type", disk_type)
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
+ pulumi.set(__self__, "size_gb", size_gb)
+
+ @_builtins.property
+ @pulumi.getter(name="diskType")
+ def disk_type(self) -> _builtins.str:
+ """
+ Type of the disk attached to each node, such as pd-standard, pd-balanced, or pd-ssd.
+ """
+ return pulumi.get(self, "disk_type")
+
+ @_builtins.property
+ @pulumi.getter(name="provisionedIops")
+ def provisioned_iops(self) -> _builtins.int:
+ """
+ Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
+ """
+ return pulumi.get(self, "provisioned_iops")
+
+ @_builtins.property
+ @pulumi.getter(name="provisionedThroughput")
+ def provisioned_throughput(self) -> _builtins.int:
+ """
+ Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
+ """
+ return pulumi.get(self, "provisioned_throughput")
+
+ @_builtins.property
+ @pulumi.getter(name="sizeGb")
+ def size_gb(self) -> _builtins.int:
+ """
+ Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
+ """
+ return pulumi.get(self, "size_gb")
+
+
  @pulumi.output_type
  class GetClusterNodePoolNodeConfigConfidentialNodeResult(dict):
  def __init__(__self__, *,
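
`boot_disks` breaks the node boot disk out into its own block, including the hyperdisk-only provisioning knobs. A hedged read-back sketch, same hypothetical lookup and assumed accessors as before:

```python
import pulumi
import pulumi_gcp as gcp

cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")
boot_disk = cluster.node_pools[0].node_configs[0].boot_disks[0]

pulumi.export("bootDiskType", boot_disk.disk_type)
pulumi.export("bootDiskSizeGb", boot_disk.size_gb)
# provisioned_iops / provisioned_throughput are only meaningful when
# disk_type is hyperdisk-balanced.
pulumi.export("bootDiskIops", boot_disk.provisioned_iops)
```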
@@ -21730,12 +24127,18 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  cpu_cfs_quota: _builtins.bool,
  cpu_cfs_quota_period: _builtins.str,
  cpu_manager_policy: _builtins.str,
+ eviction_max_pod_grace_period_seconds: _builtins.int,
+ eviction_minimum_reclaims: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult'],
+ eviction_soft_grace_periods: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult'],
+ eviction_softs: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult'],
  image_gc_high_threshold_percent: _builtins.int,
  image_gc_low_threshold_percent: _builtins.int,
  image_maximum_gc_age: _builtins.str,
  image_minimum_gc_age: _builtins.str,
  insecure_kubelet_readonly_port_enabled: _builtins.str,
- pod_pids_limit: _builtins.int):
+ max_parallel_image_pulls: _builtins.int,
+ pod_pids_limit: _builtins.int,
+ single_process_oom_kill: _builtins.bool):
  """
  :param Sequence[_builtins.str] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
  :param _builtins.int container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -21743,12 +24146,18 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  :param _builtins.bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
  :param _builtins.str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
  :param _builtins.str cpu_manager_policy: Control the CPU management policy on the node.
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs'] eviction_minimum_reclaims: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
+ :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs'] eviction_soft_grace_periods: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
+ :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs'] eviction_softs: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run.
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected.
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
+ :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer.
  """
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
  pulumi.set(__self__, "container_log_max_files", container_log_max_files)
@@ -21756,12 +24165,18 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
+ pulumi.set(__self__, "eviction_minimum_reclaims", eviction_minimum_reclaims)
+ pulumi.set(__self__, "eviction_soft_grace_periods", eviction_soft_grace_periods)
+ pulumi.set(__self__, "eviction_softs", eviction_softs)
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
  pulumi.set(__self__, "image_gc_low_threshold_percent", image_gc_low_threshold_percent)
  pulumi.set(__self__, "image_maximum_gc_age", image_maximum_gc_age)
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
 
  @_builtins.property
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -21811,6 +24226,38 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  """
  return pulumi.get(self, "cpu_manager_policy")
 
+ @_builtins.property
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
+ def eviction_max_pod_grace_period_seconds(self) -> _builtins.int:
+ """
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+ """
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionMinimumReclaims")
+ def eviction_minimum_reclaims(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult']:
+ """
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
+ """
+ return pulumi.get(self, "eviction_minimum_reclaims")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionSoftGracePeriods")
+ def eviction_soft_grace_periods(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult']:
+ """
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
+ """
+ return pulumi.get(self, "eviction_soft_grace_periods")
+
+ @_builtins.property
+ @pulumi.getter(name="evictionSofts")
+ def eviction_softs(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult']:
+ """
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
+ """
+ return pulumi.get(self, "eviction_softs")
+
  @_builtins.property
  @pulumi.getter(name="imageGcHighThresholdPercent")
  def image_gc_high_threshold_percent(self) -> _builtins.int:
@@ -21851,6 +24298,14 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  """
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
 
+ @_builtins.property
+ @pulumi.getter(name="maxParallelImagePulls")
+ def max_parallel_image_pulls(self) -> _builtins.int:
+ """
+ Set the maximum number of image pulls in parallel.
+ """
+ return pulumi.get(self, "max_parallel_image_pulls")
+
  @_builtins.property
  @pulumi.getter(name="podPidsLimit")
  def pod_pids_limit(self) -> _builtins.int:
@@ -21859,21 +24314,254 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
  """
  return pulumi.get(self, "pod_pids_limit")
 
+ @_builtins.property
+ @pulumi.getter(name="singleProcessOomKill")
+ def single_process_oom_kill(self) -> _builtins.bool:
+ """
+ Defines whether to enable single process OOM killer.
+ """
+ return pulumi.get(self, "single_process_oom_kill")
+
+
+ @pulumi.output_type
+ class GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult(dict):
+ def __init__(__self__, *,
+ imagefs_available: _builtins.str,
+ imagefs_inodes_free: _builtins.str,
+ memory_available: _builtins.str,
+ nodefs_available: _builtins.str,
+ nodefs_inodes_free: _builtins.str,
+ pid_available: _builtins.str):
+ """
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available.
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree.
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available.
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available.
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree.
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available.
+ """
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ pulumi.set(__self__, "memory_available", memory_available)
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for imagefs.available.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for imagefs.inodesFree.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for memory.available.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for nodefs.available.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for nodefs.inodesFree.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> _builtins.str:
+ """
+ Defines percentage of minimum reclaim for pid.available.
+ """
+ return pulumi.get(self, "pid_available")
+
+
+ @pulumi.output_type
+ class GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult(dict):
+ def __init__(__self__, *,
+ imagefs_available: _builtins.str,
+ imagefs_inodes_free: _builtins.str,
+ memory_available: _builtins.str,
+ nodefs_available: _builtins.str,
+ nodefs_inodes_free: _builtins.str,
+ pid_available: _builtins.str):
+ """
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available.
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree.
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available.
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available.
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree.
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available.
+ """
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ pulumi.set(__self__, "memory_available", memory_available)
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for imagefs.available.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for imagefs.inodesFree.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> _builtins.str:
+ """
+ Defines quantity of soft eviction threshold for memory.available.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for nodefs.available.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for nodefs.inodesFree.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> _builtins.str:
+ """
+ Defines percentage of soft eviction threshold for pid.available.
+ """
+ return pulumi.get(self, "pid_available")
+
+
+ @pulumi.output_type
+ class GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult(dict):
+ def __init__(__self__, *,
+ imagefs_available: _builtins.str,
+ imagefs_inodes_free: _builtins.str,
+ memory_available: _builtins.str,
+ nodefs_available: _builtins.str,
+ nodefs_inodes_free: _builtins.str,
+ pid_available: _builtins.str):
+ """
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold.
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold.
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold.
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold.
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold.
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold.
+ """
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
+ pulumi.set(__self__, "memory_available", memory_available)
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
+ pulumi.set(__self__, "pid_available", pid_available)
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsAvailable")
+ def imagefs_available(self) -> _builtins.str:
+ """
+ Defines grace period for the imagefs.available soft eviction threshold.
+ """
+ return pulumi.get(self, "imagefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="imagefsInodesFree")
+ def imagefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines grace period for the imagefs.inodesFree soft eviction threshold.
+ """
+ return pulumi.get(self, "imagefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="memoryAvailable")
+ def memory_available(self) -> _builtins.str:
+ """
+ Defines grace period for the memory.available soft eviction threshold.
+ """
+ return pulumi.get(self, "memory_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsAvailable")
+ def nodefs_available(self) -> _builtins.str:
+ """
+ Defines grace period for the nodefs.available soft eviction threshold.
+ """
+ return pulumi.get(self, "nodefs_available")
+
+ @_builtins.property
+ @pulumi.getter(name="nodefsInodesFree")
+ def nodefs_inodes_free(self) -> _builtins.str:
+ """
+ Defines grace period for the nodefs.inodesFree soft eviction threshold.
+ """
+ return pulumi.get(self, "nodefs_inodes_free")
+
+ @_builtins.property
+ @pulumi.getter(name="pidAvailable")
+ def pid_available(self) -> _builtins.str:
+ """
+ Defines grace period for the pid.available soft eviction threshold.
+ """
+ return pulumi.get(self, "pid_available")
+
 
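The node-pool-level kubelet additions above mirror the cluster-level ones field for field; only the access path differs. A short sketch under the same assumptions as the earlier examples:

```python
import pulumi
import pulumi_gcp as gcp

cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")
pool_kubelet = cluster.node_pools[0].node_configs[0].kubelet_configs[0]

pulumi.export("poolMaxParallelImagePulls", pool_kubelet.max_parallel_image_pulls)
pulumi.export("poolSingleProcessOomKill", pool_kubelet.single_process_oom_kill)
```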
  @pulumi.output_type
  class GetClusterNodePoolNodeConfigLinuxNodeConfigResult(dict):
  def __init__(__self__, *,
  cgroup_mode: _builtins.str,
  hugepages_configs: Sequence['outputs.GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigResult'],
- sysctls: Mapping[str, _builtins.str]):
+ sysctls: Mapping[str, _builtins.str],
+ transparent_hugepage_defrag: _builtins.str,
+ transparent_hugepage_enabled: _builtins.str):
  """
  :param _builtins.str cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
  :param Sequence['GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_configs: Amounts for 2M and 1G hugepages.
  :param Mapping[str, _builtins.str] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
  """
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
  pulumi.set(__self__, "hugepages_configs", hugepages_configs)
  pulumi.set(__self__, "sysctls", sysctls)
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
 
  @_builtins.property
  @pulumi.getter(name="cgroupMode")
@@ -21899,6 +24587,22 @@ class GetClusterNodePoolNodeConfigLinuxNodeConfigResult(dict):
  """
  return pulumi.get(self, "sysctls")
 
+ @_builtins.property
+ @pulumi.getter(name="transparentHugepageDefrag")
+ def transparent_hugepage_defrag(self) -> _builtins.str:
+ """
+ The Linux kernel transparent hugepage defrag setting.
+ """
+ return pulumi.get(self, "transparent_hugepage_defrag")
+
+ @_builtins.property
+ @pulumi.getter(name="transparentHugepageEnabled")
+ def transparent_hugepage_enabled(self) -> _builtins.str:
+ """
+ The Linux kernel transparent hugepage setting.
+ """
+ return pulumi.get(self, "transparent_hugepage_enabled")
+
 
  @pulumi.output_type
  class GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigResult(dict):
@@ -22066,12 +24770,23 @@ class GetClusterNodePoolNodeConfigShieldedInstanceConfigResult(dict):
  @pulumi.output_type
  class GetClusterNodePoolNodeConfigSoleTenantConfigResult(dict):
  def __init__(__self__, *,
+ min_node_cpus: _builtins.int,
  node_affinities: Sequence['outputs.GetClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityResult']):
  """
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
  :param Sequence['GetClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
  """
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
  pulumi.set(__self__, "node_affinities", node_affinities)
 
+ @_builtins.property
+ @pulumi.getter(name="minNodeCpus")
+ def min_node_cpus(self) -> _builtins.int:
+ """
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
+ """
+ return pulumi.get(self, "min_node_cpus")
+
  @_builtins.property
  @pulumi.getter(name="nodeAffinities")
  def node_affinities(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityResult']:
@@ -22653,6 +25368,35 @@ class GetClusterProtectConfigWorkloadConfigResult(dict):
  return pulumi.get(self, "audit_mode")
 
 
+ @pulumi.output_type
+ class GetClusterRbacBindingConfigResult(dict):
+ def __init__(__self__, *,
+ enable_insecure_binding_system_authenticated: _builtins.bool,
+ enable_insecure_binding_system_unauthenticated: _builtins.bool):
+ """
+ :param _builtins.bool enable_insecure_binding_system_authenticated: Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:authenticated.
+ :param _builtins.bool enable_insecure_binding_system_unauthenticated: Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:anonymous or system:unauthenticated.
+ """
+ pulumi.set(__self__, "enable_insecure_binding_system_authenticated", enable_insecure_binding_system_authenticated)
+ pulumi.set(__self__, "enable_insecure_binding_system_unauthenticated", enable_insecure_binding_system_unauthenticated)
+
+ @_builtins.property
+ @pulumi.getter(name="enableInsecureBindingSystemAuthenticated")
+ def enable_insecure_binding_system_authenticated(self) -> _builtins.bool:
+ """
+ Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:authenticated.
+ """
+ return pulumi.get(self, "enable_insecure_binding_system_authenticated")
+
+ @_builtins.property
+ @pulumi.getter(name="enableInsecureBindingSystemUnauthenticated")
+ def enable_insecure_binding_system_unauthenticated(self) -> _builtins.bool:
+ """
+ Setting this to true will allow any ClusterRoleBinding and RoleBinding with subjects system:anonymous or system:unauthenticated.
+ """
+ return pulumi.get(self, "enable_insecure_binding_system_unauthenticated")
+
+
  @pulumi.output_type
  class GetClusterReleaseChannelResult(dict):
  def __init__(__self__, *,
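
`GetClusterRbacBindingConfigResult` exposes the cluster's RBAC binding guardrails. Both flags are security-sensitive: true re-allows insecure bindings for the `system:authenticated` / `system:anonymous` / `system:unauthenticated` subjects. A hedged sketch of auditing them, assuming the result is surfaced as `rbac_binding_configs` on the same hypothetical cluster lookup used above:

```python
import pulumi
import pulumi_gcp as gcp

cluster = gcp.container.get_cluster(name="my-cluster", location="us-central1")

for rbac in cluster.rbac_binding_configs:
    # Both flags should normally be False on hardened clusters.
    pulumi.export("allowsSystemAuthenticatedBindings",
                  rbac.enable_insecure_binding_system_authenticated)
    pulumi.export("allowsSystemUnauthenticatedBindings",
                  rbac.enable_insecure_binding_system_unauthenticated)
```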