pulumi-gcp 8.41.0a1755297349__py3-none-any.whl → 8.42.0a1756095712__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (343) hide show
  1. pulumi_gcp/__init__.py +24 -0
  2. pulumi_gcp/accesscontextmanager/access_policy_iam_binding.py +2 -0
  3. pulumi_gcp/accesscontextmanager/access_policy_iam_member.py +2 -0
  4. pulumi_gcp/accesscontextmanager/access_policy_iam_policy.py +2 -0
  5. pulumi_gcp/apigateway/api_config_iam_binding.py +2 -0
  6. pulumi_gcp/apigateway/api_config_iam_member.py +2 -0
  7. pulumi_gcp/apigateway/api_config_iam_policy.py +2 -0
  8. pulumi_gcp/apigateway/api_iam_binding.py +2 -0
  9. pulumi_gcp/apigateway/api_iam_member.py +2 -0
  10. pulumi_gcp/apigateway/api_iam_policy.py +2 -0
  11. pulumi_gcp/apigateway/gateway_iam_binding.py +2 -0
  12. pulumi_gcp/apigateway/gateway_iam_member.py +2 -0
  13. pulumi_gcp/apigateway/gateway_iam_policy.py +2 -0
  14. pulumi_gcp/apigee/environment_iam_binding.py +2 -0
  15. pulumi_gcp/apigee/environment_iam_member.py +2 -0
  16. pulumi_gcp/apigee/environment_iam_policy.py +2 -0
  17. pulumi_gcp/artifactregistry/__init__.py +5 -0
  18. pulumi_gcp/artifactregistry/get_package.py +220 -0
  19. pulumi_gcp/artifactregistry/get_repositories.py +160 -0
  20. pulumi_gcp/artifactregistry/get_tag.py +187 -0
  21. pulumi_gcp/artifactregistry/get_tags.py +200 -0
  22. pulumi_gcp/artifactregistry/get_version.py +261 -0
  23. pulumi_gcp/artifactregistry/outputs.py +130 -0
  24. pulumi_gcp/artifactregistry/repository_iam_binding.py +2 -0
  25. pulumi_gcp/artifactregistry/repository_iam_member.py +2 -0
  26. pulumi_gcp/artifactregistry/repository_iam_policy.py +2 -0
  27. pulumi_gcp/backupdisasterrecovery/backup_plan.py +114 -7
  28. pulumi_gcp/backupdisasterrecovery/get_backup_plan.py +12 -1
  29. pulumi_gcp/beyondcorp/application_iam_binding.py +8 -0
  30. pulumi_gcp/beyondcorp/application_iam_member.py +8 -0
  31. pulumi_gcp/beyondcorp/application_iam_policy.py +8 -0
  32. pulumi_gcp/beyondcorp/get_application_iam_policy.py +4 -0
  33. pulumi_gcp/beyondcorp/security_gateway_application_iam_binding.py +2 -0
  34. pulumi_gcp/beyondcorp/security_gateway_application_iam_member.py +2 -0
  35. pulumi_gcp/beyondcorp/security_gateway_application_iam_policy.py +2 -0
  36. pulumi_gcp/beyondcorp/security_gateway_iam_binding.py +2 -0
  37. pulumi_gcp/beyondcorp/security_gateway_iam_member.py +2 -0
  38. pulumi_gcp/beyondcorp/security_gateway_iam_policy.py +2 -0
  39. pulumi_gcp/bigquery/connection_iam_binding.py +2 -0
  40. pulumi_gcp/bigquery/connection_iam_member.py +2 -0
  41. pulumi_gcp/bigquery/connection_iam_policy.py +2 -0
  42. pulumi_gcp/bigquery/data_transfer_config.py +2 -0
  43. pulumi_gcp/bigquery/dataset.py +2 -2
  44. pulumi_gcp/bigquery/iam_binding.py +2 -0
  45. pulumi_gcp/bigquery/iam_member.py +2 -0
  46. pulumi_gcp/bigquery/iam_policy.py +2 -0
  47. pulumi_gcp/bigquery/reservation.py +535 -0
  48. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_binding.py +2 -0
  49. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_member.py +2 -0
  50. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_policy.py +2 -0
  51. pulumi_gcp/bigqueryanalyticshub/listing_iam_binding.py +2 -0
  52. pulumi_gcp/bigqueryanalyticshub/listing_iam_member.py +2 -0
  53. pulumi_gcp/bigqueryanalyticshub/listing_iam_policy.py +2 -0
  54. pulumi_gcp/bigquerydatapolicy/data_policy_iam_binding.py +2 -0
  55. pulumi_gcp/bigquerydatapolicy/data_policy_iam_member.py +2 -0
  56. pulumi_gcp/bigquerydatapolicy/data_policy_iam_policy.py +2 -0
  57. pulumi_gcp/binaryauthorization/attestor_iam_binding.py +2 -0
  58. pulumi_gcp/binaryauthorization/attestor_iam_member.py +2 -0
  59. pulumi_gcp/binaryauthorization/attestor_iam_policy.py +2 -0
  60. pulumi_gcp/certificateauthority/ca_pool_iam_binding.py +2 -0
  61. pulumi_gcp/certificateauthority/ca_pool_iam_member.py +2 -0
  62. pulumi_gcp/certificateauthority/ca_pool_iam_policy.py +2 -0
  63. pulumi_gcp/certificateauthority/certificate_template_iam_binding.py +2 -0
  64. pulumi_gcp/certificateauthority/certificate_template_iam_member.py +2 -0
  65. pulumi_gcp/certificateauthority/certificate_template_iam_policy.py +2 -0
  66. pulumi_gcp/cloudbuildv2/connection_iam_binding.py +2 -0
  67. pulumi_gcp/cloudbuildv2/connection_iam_member.py +2 -0
  68. pulumi_gcp/cloudbuildv2/connection_iam_policy.py +2 -0
  69. pulumi_gcp/clouddeploy/_inputs.py +48 -48
  70. pulumi_gcp/clouddeploy/deploy_policy.py +54 -74
  71. pulumi_gcp/clouddeploy/outputs.py +32 -32
  72. pulumi_gcp/cloudfunctions/function_iam_binding.py +2 -0
  73. pulumi_gcp/cloudfunctions/function_iam_member.py +2 -0
  74. pulumi_gcp/cloudfunctions/function_iam_policy.py +2 -0
  75. pulumi_gcp/cloudfunctionsv2/function_iam_binding.py +2 -0
  76. pulumi_gcp/cloudfunctionsv2/function_iam_member.py +2 -0
  77. pulumi_gcp/cloudfunctionsv2/function_iam_policy.py +2 -0
  78. pulumi_gcp/cloudrun/iam_binding.py +2 -0
  79. pulumi_gcp/cloudrun/iam_member.py +2 -0
  80. pulumi_gcp/cloudrun/iam_policy.py +2 -0
  81. pulumi_gcp/cloudrunv2/job_iam_binding.py +2 -0
  82. pulumi_gcp/cloudrunv2/job_iam_member.py +2 -0
  83. pulumi_gcp/cloudrunv2/job_iam_policy.py +2 -0
  84. pulumi_gcp/cloudrunv2/service_iam_binding.py +2 -0
  85. pulumi_gcp/cloudrunv2/service_iam_member.py +2 -0
  86. pulumi_gcp/cloudrunv2/service_iam_policy.py +2 -0
  87. pulumi_gcp/cloudrunv2/worker_pool_iam_binding.py +2 -0
  88. pulumi_gcp/cloudrunv2/worker_pool_iam_member.py +2 -0
  89. pulumi_gcp/cloudrunv2/worker_pool_iam_policy.py +2 -0
  90. pulumi_gcp/cloudtasks/queue_iam_binding.py +2 -0
  91. pulumi_gcp/cloudtasks/queue_iam_member.py +2 -0
  92. pulumi_gcp/cloudtasks/queue_iam_policy.py +2 -0
  93. pulumi_gcp/colab/runtime_template_iam_binding.py +2 -0
  94. pulumi_gcp/colab/runtime_template_iam_member.py +2 -0
  95. pulumi_gcp/colab/runtime_template_iam_policy.py +2 -0
  96. pulumi_gcp/composer/user_workloads_config_map.py +26 -2
  97. pulumi_gcp/compute/_inputs.py +355 -0
  98. pulumi_gcp/compute/disk_iam_binding.py +2 -0
  99. pulumi_gcp/compute/disk_iam_member.py +2 -0
  100. pulumi_gcp/compute/disk_iam_policy.py +2 -0
  101. pulumi_gcp/compute/get_region_backend_service.py +12 -1
  102. pulumi_gcp/compute/image_iam_binding.py +2 -0
  103. pulumi_gcp/compute/image_iam_member.py +2 -0
  104. pulumi_gcp/compute/image_iam_policy.py +2 -0
  105. pulumi_gcp/compute/instance_iam_binding.py +2 -0
  106. pulumi_gcp/compute/instance_iam_member.py +2 -0
  107. pulumi_gcp/compute/instance_iam_policy.py +2 -0
  108. pulumi_gcp/compute/instance_template_iam_binding.py +2 -0
  109. pulumi_gcp/compute/instance_template_iam_member.py +2 -0
  110. pulumi_gcp/compute/instance_template_iam_policy.py +2 -0
  111. pulumi_gcp/compute/instant_snapshot_iam_binding.py +2 -0
  112. pulumi_gcp/compute/instant_snapshot_iam_member.py +2 -0
  113. pulumi_gcp/compute/instant_snapshot_iam_policy.py +2 -0
  114. pulumi_gcp/compute/machine_image_iam_binding.py +2 -0
  115. pulumi_gcp/compute/machine_image_iam_member.py +2 -0
  116. pulumi_gcp/compute/machine_image_iam_policy.py +2 -0
  117. pulumi_gcp/compute/outputs.py +404 -0
  118. pulumi_gcp/compute/region_backend_service.py +257 -0
  119. pulumi_gcp/compute/region_disk_iam_binding.py +2 -0
  120. pulumi_gcp/compute/region_disk_iam_member.py +2 -0
  121. pulumi_gcp/compute/region_disk_iam_policy.py +2 -0
  122. pulumi_gcp/compute/region_security_policy.py +54 -0
  123. pulumi_gcp/compute/service_attachment.py +126 -0
  124. pulumi_gcp/compute/snapshot_iam_binding.py +2 -0
  125. pulumi_gcp/compute/snapshot_iam_member.py +2 -0
  126. pulumi_gcp/compute/snapshot_iam_policy.py +2 -0
  127. pulumi_gcp/compute/storage_pool_iam_binding.py +2 -0
  128. pulumi_gcp/compute/storage_pool_iam_member.py +2 -0
  129. pulumi_gcp/compute/storage_pool_iam_policy.py +2 -0
  130. pulumi_gcp/compute/subnetwork_iam_binding.py +2 -0
  131. pulumi_gcp/compute/subnetwork_iam_member.py +2 -0
  132. pulumi_gcp/compute/subnetwork_iam_policy.py +2 -0
  133. pulumi_gcp/config/__init__.pyi +0 -4
  134. pulumi_gcp/config/vars.py +0 -8
  135. pulumi_gcp/container/_inputs.py +2373 -267
  136. pulumi_gcp/container/outputs.py +2481 -81
  137. pulumi_gcp/containeranalysis/note_iam_binding.py +2 -0
  138. pulumi_gcp/containeranalysis/note_iam_member.py +2 -0
  139. pulumi_gcp/containeranalysis/note_iam_policy.py +2 -0
  140. pulumi_gcp/datacatalog/entry_group_iam_binding.py +2 -0
  141. pulumi_gcp/datacatalog/entry_group_iam_member.py +2 -0
  142. pulumi_gcp/datacatalog/entry_group_iam_policy.py +2 -0
  143. pulumi_gcp/datacatalog/policy_tag_iam_binding.py +2 -0
  144. pulumi_gcp/datacatalog/policy_tag_iam_member.py +2 -0
  145. pulumi_gcp/datacatalog/policy_tag_iam_policy.py +2 -0
  146. pulumi_gcp/datacatalog/tag_template_iam_binding.py +2 -0
  147. pulumi_gcp/datacatalog/tag_template_iam_member.py +2 -0
  148. pulumi_gcp/datacatalog/tag_template_iam_policy.py +2 -0
  149. pulumi_gcp/datacatalog/taxonomy_iam_binding.py +2 -0
  150. pulumi_gcp/datacatalog/taxonomy_iam_member.py +2 -0
  151. pulumi_gcp/datacatalog/taxonomy_iam_policy.py +2 -0
  152. pulumi_gcp/datafusion/instance.py +18 -4
  153. pulumi_gcp/dataplex/aspect_type_iam_binding.py +2 -0
  154. pulumi_gcp/dataplex/aspect_type_iam_member.py +2 -0
  155. pulumi_gcp/dataplex/aspect_type_iam_policy.py +2 -0
  156. pulumi_gcp/dataplex/asset_iam_binding.py +2 -0
  157. pulumi_gcp/dataplex/asset_iam_member.py +2 -0
  158. pulumi_gcp/dataplex/asset_iam_policy.py +2 -0
  159. pulumi_gcp/dataplex/datascan_iam_binding.py +2 -0
  160. pulumi_gcp/dataplex/datascan_iam_member.py +2 -0
  161. pulumi_gcp/dataplex/datascan_iam_policy.py +2 -0
  162. pulumi_gcp/dataplex/entry_group_iam_binding.py +2 -0
  163. pulumi_gcp/dataplex/entry_group_iam_member.py +2 -0
  164. pulumi_gcp/dataplex/entry_group_iam_policy.py +2 -0
  165. pulumi_gcp/dataplex/entry_type_iam_binding.py +2 -0
  166. pulumi_gcp/dataplex/entry_type_iam_member.py +2 -0
  167. pulumi_gcp/dataplex/entry_type_iam_policy.py +2 -0
  168. pulumi_gcp/dataplex/glossary_iam_binding.py +2 -0
  169. pulumi_gcp/dataplex/glossary_iam_member.py +2 -0
  170. pulumi_gcp/dataplex/glossary_iam_policy.py +2 -0
  171. pulumi_gcp/dataplex/lake_iam_binding.py +2 -0
  172. pulumi_gcp/dataplex/lake_iam_member.py +2 -0
  173. pulumi_gcp/dataplex/lake_iam_policy.py +2 -0
  174. pulumi_gcp/dataplex/task_iam_binding.py +2 -0
  175. pulumi_gcp/dataplex/task_iam_member.py +2 -0
  176. pulumi_gcp/dataplex/task_iam_policy.py +2 -0
  177. pulumi_gcp/dataplex/zone_iam_binding.py +2 -0
  178. pulumi_gcp/dataplex/zone_iam_member.py +2 -0
  179. pulumi_gcp/dataplex/zone_iam_policy.py +2 -0
  180. pulumi_gcp/dataproc/autoscaling_policy_iam_binding.py +2 -0
  181. pulumi_gcp/dataproc/autoscaling_policy_iam_member.py +2 -0
  182. pulumi_gcp/dataproc/autoscaling_policy_iam_policy.py +2 -0
  183. pulumi_gcp/dataproc/metastore_database_iam_binding.py +2 -0
  184. pulumi_gcp/dataproc/metastore_database_iam_member.py +2 -0
  185. pulumi_gcp/dataproc/metastore_database_iam_policy.py +2 -0
  186. pulumi_gcp/dataproc/metastore_federation_iam_binding.py +2 -0
  187. pulumi_gcp/dataproc/metastore_federation_iam_member.py +2 -0
  188. pulumi_gcp/dataproc/metastore_federation_iam_policy.py +2 -0
  189. pulumi_gcp/dataproc/metastore_service_iam_binding.py +2 -0
  190. pulumi_gcp/dataproc/metastore_service_iam_member.py +2 -0
  191. pulumi_gcp/dataproc/metastore_service_iam_policy.py +2 -0
  192. pulumi_gcp/dataproc/metastore_table_iam_binding.py +2 -0
  193. pulumi_gcp/dataproc/metastore_table_iam_member.py +2 -0
  194. pulumi_gcp/dataproc/metastore_table_iam_policy.py +2 -0
  195. pulumi_gcp/diagflow/__init__.py +2 -0
  196. pulumi_gcp/diagflow/_inputs.py +2829 -0
  197. pulumi_gcp/diagflow/conversation_profile.py +959 -0
  198. pulumi_gcp/diagflow/cx_playbook.py +967 -0
  199. pulumi_gcp/diagflow/outputs.py +2330 -0
  200. pulumi_gcp/dns/dns_managed_zone_iam_binding.py +2 -0
  201. pulumi_gcp/dns/dns_managed_zone_iam_member.py +2 -0
  202. pulumi_gcp/dns/dns_managed_zone_iam_policy.py +2 -0
  203. pulumi_gcp/endpoints/service_iam_binding.py +2 -0
  204. pulumi_gcp/endpoints/service_iam_member.py +2 -0
  205. pulumi_gcp/endpoints/service_iam_policy.py +2 -0
  206. pulumi_gcp/gemini/repository_group_iam_binding.py +2 -0
  207. pulumi_gcp/gemini/repository_group_iam_member.py +2 -0
  208. pulumi_gcp/gemini/repository_group_iam_policy.py +2 -0
  209. pulumi_gcp/gkebackup/backup_plan_iam_binding.py +2 -0
  210. pulumi_gcp/gkebackup/backup_plan_iam_member.py +2 -0
  211. pulumi_gcp/gkebackup/backup_plan_iam_policy.py +2 -0
  212. pulumi_gcp/gkebackup/restore_plan_iam_binding.py +2 -0
  213. pulumi_gcp/gkebackup/restore_plan_iam_member.py +2 -0
  214. pulumi_gcp/gkebackup/restore_plan_iam_policy.py +2 -0
  215. pulumi_gcp/gkehub/feature_iam_binding.py +2 -0
  216. pulumi_gcp/gkehub/feature_iam_member.py +2 -0
  217. pulumi_gcp/gkehub/feature_iam_policy.py +2 -0
  218. pulumi_gcp/gkehub/membership_iam_binding.py +2 -0
  219. pulumi_gcp/gkehub/membership_iam_member.py +2 -0
  220. pulumi_gcp/gkehub/membership_iam_policy.py +2 -0
  221. pulumi_gcp/gkehub/scope_iam_binding.py +2 -0
  222. pulumi_gcp/gkehub/scope_iam_member.py +2 -0
  223. pulumi_gcp/gkehub/scope_iam_policy.py +2 -0
  224. pulumi_gcp/gkeonprem/vmware_admin_cluster.py +24 -3
  225. pulumi_gcp/healthcare/consent_store_iam_binding.py +2 -0
  226. pulumi_gcp/healthcare/consent_store_iam_member.py +2 -0
  227. pulumi_gcp/healthcare/consent_store_iam_policy.py +2 -0
  228. pulumi_gcp/iam/workforce_pool_iam_binding.py +2 -0
  229. pulumi_gcp/iam/workforce_pool_iam_member.py +2 -0
  230. pulumi_gcp/iam/workforce_pool_iam_policy.py +2 -0
  231. pulumi_gcp/iap/app_engine_service_iam_binding.py +2 -0
  232. pulumi_gcp/iap/app_engine_service_iam_member.py +2 -0
  233. pulumi_gcp/iap/app_engine_service_iam_policy.py +2 -0
  234. pulumi_gcp/iap/app_engine_version_iam_binding.py +2 -0
  235. pulumi_gcp/iap/app_engine_version_iam_member.py +2 -0
  236. pulumi_gcp/iap/app_engine_version_iam_policy.py +2 -0
  237. pulumi_gcp/iap/tunnel_dest_group_iam_binding.py +2 -0
  238. pulumi_gcp/iap/tunnel_dest_group_iam_member.py +2 -0
  239. pulumi_gcp/iap/tunnel_dest_group_iam_policy.py +2 -0
  240. pulumi_gcp/iap/tunnel_iam_binding.py +2 -0
  241. pulumi_gcp/iap/tunnel_iam_member.py +2 -0
  242. pulumi_gcp/iap/tunnel_iam_policy.py +2 -0
  243. pulumi_gcp/iap/tunnel_instance_iam_binding.py +2 -0
  244. pulumi_gcp/iap/tunnel_instance_iam_member.py +2 -0
  245. pulumi_gcp/iap/tunnel_instance_iam_policy.py +2 -0
  246. pulumi_gcp/iap/web_backend_service_iam_binding.py +2 -0
  247. pulumi_gcp/iap/web_backend_service_iam_member.py +2 -0
  248. pulumi_gcp/iap/web_backend_service_iam_policy.py +2 -0
  249. pulumi_gcp/iap/web_cloud_run_service_iam_binding.py +2 -0
  250. pulumi_gcp/iap/web_cloud_run_service_iam_member.py +2 -0
  251. pulumi_gcp/iap/web_cloud_run_service_iam_policy.py +2 -0
  252. pulumi_gcp/iap/web_iam_binding.py +2 -0
  253. pulumi_gcp/iap/web_iam_member.py +2 -0
  254. pulumi_gcp/iap/web_iam_policy.py +2 -0
  255. pulumi_gcp/iap/web_region_backend_service_iam_binding.py +2 -0
  256. pulumi_gcp/iap/web_region_backend_service_iam_member.py +2 -0
  257. pulumi_gcp/iap/web_region_backend_service_iam_policy.py +2 -0
  258. pulumi_gcp/iap/web_type_app_enging_iam_binding.py +2 -0
  259. pulumi_gcp/iap/web_type_app_enging_iam_member.py +2 -0
  260. pulumi_gcp/iap/web_type_app_enging_iam_policy.py +2 -0
  261. pulumi_gcp/iap/web_type_compute_iam_binding.py +2 -0
  262. pulumi_gcp/iap/web_type_compute_iam_member.py +2 -0
  263. pulumi_gcp/iap/web_type_compute_iam_policy.py +2 -0
  264. pulumi_gcp/kms/crypto_key.py +7 -0
  265. pulumi_gcp/kms/ekm_connection_iam_binding.py +2 -0
  266. pulumi_gcp/kms/ekm_connection_iam_member.py +2 -0
  267. pulumi_gcp/kms/ekm_connection_iam_policy.py +2 -0
  268. pulumi_gcp/kms/outputs.py +2 -0
  269. pulumi_gcp/logging/log_view_iam_binding.py +2 -0
  270. pulumi_gcp/logging/log_view_iam_member.py +2 -0
  271. pulumi_gcp/logging/log_view_iam_policy.py +2 -0
  272. pulumi_gcp/memorystore/get_instance.py +12 -1
  273. pulumi_gcp/memorystore/instance.py +70 -0
  274. pulumi_gcp/monitoring/_inputs.py +3 -3
  275. pulumi_gcp/monitoring/outputs.py +2 -2
  276. pulumi_gcp/networkmanagement/vpc_flow_logs_config.py +213 -168
  277. pulumi_gcp/notebooks/instance.py +18 -18
  278. pulumi_gcp/notebooks/instance_iam_binding.py +2 -0
  279. pulumi_gcp/notebooks/instance_iam_member.py +2 -0
  280. pulumi_gcp/notebooks/instance_iam_policy.py +2 -0
  281. pulumi_gcp/notebooks/runtime_iam_binding.py +2 -0
  282. pulumi_gcp/notebooks/runtime_iam_member.py +2 -0
  283. pulumi_gcp/notebooks/runtime_iam_policy.py +2 -0
  284. pulumi_gcp/organizations/folder.py +56 -0
  285. pulumi_gcp/organizations/get_folder.py +29 -1
  286. pulumi_gcp/projects/api_key.py +88 -1
  287. pulumi_gcp/provider.py +0 -40
  288. pulumi_gcp/pubsub/schema_iam_binding.py +2 -0
  289. pulumi_gcp/pubsub/schema_iam_member.py +2 -0
  290. pulumi_gcp/pubsub/schema_iam_policy.py +2 -0
  291. pulumi_gcp/pubsub/subscription.py +84 -0
  292. pulumi_gcp/pubsub/topic.py +80 -0
  293. pulumi_gcp/pubsub/topic_iam_binding.py +2 -0
  294. pulumi_gcp/pubsub/topic_iam_member.py +2 -0
  295. pulumi_gcp/pubsub/topic_iam_policy.py +2 -0
  296. pulumi_gcp/pulumi-plugin.json +1 -1
  297. pulumi_gcp/secretmanager/regional_secret_iam_binding.py +2 -0
  298. pulumi_gcp/secretmanager/regional_secret_iam_member.py +2 -0
  299. pulumi_gcp/secretmanager/regional_secret_iam_policy.py +2 -0
  300. pulumi_gcp/secretmanager/secret_iam_binding.py +2 -0
  301. pulumi_gcp/secretmanager/secret_iam_member.py +2 -0
  302. pulumi_gcp/secretmanager/secret_iam_policy.py +2 -0
  303. pulumi_gcp/secretmanager/secret_version.py +1 -48
  304. pulumi_gcp/securesourcemanager/repository_iam_binding.py +2 -0
  305. pulumi_gcp/securesourcemanager/repository_iam_member.py +2 -0
  306. pulumi_gcp/securesourcemanager/repository_iam_policy.py +2 -0
  307. pulumi_gcp/securitycenter/instance_iam_binding.py +18 -4
  308. pulumi_gcp/securitycenter/instance_iam_member.py +18 -4
  309. pulumi_gcp/securitycenter/instance_iam_policy.py +18 -4
  310. pulumi_gcp/securitycenter/v2_organization_source_iam_binding.py +2 -0
  311. pulumi_gcp/securitycenter/v2_organization_source_iam_member.py +2 -0
  312. pulumi_gcp/securitycenter/v2_organization_source_iam_policy.py +2 -0
  313. pulumi_gcp/servicedirectory/namespace_iam_binding.py +2 -0
  314. pulumi_gcp/servicedirectory/namespace_iam_member.py +2 -0
  315. pulumi_gcp/servicedirectory/namespace_iam_policy.py +2 -0
  316. pulumi_gcp/servicedirectory/service_iam_binding.py +2 -0
  317. pulumi_gcp/servicedirectory/service_iam_member.py +2 -0
  318. pulumi_gcp/servicedirectory/service_iam_policy.py +2 -0
  319. pulumi_gcp/sourcerepo/repository_iam_binding.py +2 -0
  320. pulumi_gcp/sourcerepo/repository_iam_member.py +2 -0
  321. pulumi_gcp/sourcerepo/repository_iam_policy.py +2 -0
  322. pulumi_gcp/sql/_inputs.py +82 -4
  323. pulumi_gcp/sql/database_instance.py +108 -7
  324. pulumi_gcp/sql/get_database_instance.py +12 -1
  325. pulumi_gcp/sql/outputs.py +154 -7
  326. pulumi_gcp/storage/_inputs.py +104 -12
  327. pulumi_gcp/storage/outputs.py +84 -7
  328. pulumi_gcp/tags/tag_key_iam_binding.py +2 -0
  329. pulumi_gcp/tags/tag_key_iam_member.py +2 -0
  330. pulumi_gcp/tags/tag_key_iam_policy.py +2 -0
  331. pulumi_gcp/tags/tag_value_iam_binding.py +2 -0
  332. pulumi_gcp/tags/tag_value_iam_member.py +2 -0
  333. pulumi_gcp/tags/tag_value_iam_policy.py +2 -0
  334. pulumi_gcp/tpu/get_tensorflow_versions.py +10 -0
  335. pulumi_gcp/vertex/__init__.py +1 -0
  336. pulumi_gcp/vertex/_inputs.py +122 -0
  337. pulumi_gcp/vertex/ai_index.py +21 -7
  338. pulumi_gcp/vertex/ai_rag_engine_config.py +354 -0
  339. pulumi_gcp/vertex/outputs.py +69 -0
  340. {pulumi_gcp-8.41.0a1755297349.dist-info → pulumi_gcp-8.42.0a1756095712.dist-info}/METADATA +1 -1
  341. {pulumi_gcp-8.41.0a1755297349.dist-info → pulumi_gcp-8.42.0a1756095712.dist-info}/RECORD +343 -335
  342. {pulumi_gcp-8.41.0a1755297349.dist-info → pulumi_gcp-8.42.0a1756095712.dist-info}/WHEEL +0 -0
  343. {pulumi_gcp-8.41.0a1755297349.dist-info → pulumi_gcp-8.42.0a1756095712.dist-info}/top_level.txt +0 -0
@@ -291,6 +291,8 @@ __all__ = [
291
291
  'ClusterNodeConfigArgsDict',
292
292
  'ClusterNodeConfigAdvancedMachineFeaturesArgs',
293
293
  'ClusterNodeConfigAdvancedMachineFeaturesArgsDict',
294
+ 'ClusterNodeConfigBootDiskArgs',
295
+ 'ClusterNodeConfigBootDiskArgsDict',
294
296
  'ClusterNodeConfigConfidentialNodesArgs',
295
297
  'ClusterNodeConfigConfidentialNodesArgsDict',
296
298
  'ClusterNodeConfigContainerdConfigArgs',
@@ -323,6 +325,12 @@ __all__ = [
323
325
  'ClusterNodeConfigHostMaintenancePolicyArgsDict',
324
326
  'ClusterNodeConfigKubeletConfigArgs',
325
327
  'ClusterNodeConfigKubeletConfigArgsDict',
328
+ 'ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs',
329
+ 'ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict',
330
+ 'ClusterNodeConfigKubeletConfigEvictionSoftArgs',
331
+ 'ClusterNodeConfigKubeletConfigEvictionSoftArgsDict',
332
+ 'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs',
333
+ 'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict',
326
334
  'ClusterNodeConfigLinuxNodeConfigArgs',
327
335
  'ClusterNodeConfigLinuxNodeConfigArgsDict',
328
336
  'ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs',
@@ -389,6 +397,8 @@ __all__ = [
389
397
  'ClusterNodePoolNodeConfigArgsDict',
390
398
  'ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs',
391
399
  'ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgsDict',
400
+ 'ClusterNodePoolNodeConfigBootDiskArgs',
401
+ 'ClusterNodePoolNodeConfigBootDiskArgsDict',
392
402
  'ClusterNodePoolNodeConfigConfidentialNodesArgs',
393
403
  'ClusterNodePoolNodeConfigConfidentialNodesArgsDict',
394
404
  'ClusterNodePoolNodeConfigContainerdConfigArgs',
@@ -421,6 +431,12 @@ __all__ = [
421
431
  'ClusterNodePoolNodeConfigHostMaintenancePolicyArgsDict',
422
432
  'ClusterNodePoolNodeConfigKubeletConfigArgs',
423
433
  'ClusterNodePoolNodeConfigKubeletConfigArgsDict',
434
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs',
435
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict',
436
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs',
437
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgsDict',
438
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs',
439
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict',
424
440
  'ClusterNodePoolNodeConfigLinuxNodeConfigArgs',
425
441
  'ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict',
426
442
  'ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs',
@@ -517,6 +533,8 @@ __all__ = [
517
533
  'NodePoolNodeConfigArgsDict',
518
534
  'NodePoolNodeConfigAdvancedMachineFeaturesArgs',
519
535
  'NodePoolNodeConfigAdvancedMachineFeaturesArgsDict',
536
+ 'NodePoolNodeConfigBootDiskArgs',
537
+ 'NodePoolNodeConfigBootDiskArgsDict',
520
538
  'NodePoolNodeConfigConfidentialNodesArgs',
521
539
  'NodePoolNodeConfigConfidentialNodesArgsDict',
522
540
  'NodePoolNodeConfigContainerdConfigArgs',
@@ -549,6 +567,12 @@ __all__ = [
549
567
  'NodePoolNodeConfigHostMaintenancePolicyArgsDict',
550
568
  'NodePoolNodeConfigKubeletConfigArgs',
551
569
  'NodePoolNodeConfigKubeletConfigArgsDict',
570
+ 'NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs',
571
+ 'NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict',
572
+ 'NodePoolNodeConfigKubeletConfigEvictionSoftArgs',
573
+ 'NodePoolNodeConfigKubeletConfigEvictionSoftArgsDict',
574
+ 'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs',
575
+ 'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict',
552
576
  'NodePoolNodeConfigLinuxNodeConfigArgs',
553
577
  'NodePoolNodeConfigLinuxNodeConfigArgsDict',
554
578
  'NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs',
@@ -8782,6 +8806,10 @@ if not MYPY:
8782
8806
  Specifies options for controlling
8783
8807
  advanced machine features. Structure is documented below.
8784
8808
  """
8809
+ boot_disk: NotRequired[pulumi.Input['ClusterNodeConfigBootDiskArgsDict']]
8810
+ """
8811
+ Configuration of the node pool boot disk. Structure is documented below
8812
+ """
8785
8813
  boot_disk_kms_key: NotRequired[pulumi.Input[_builtins.str]]
8786
8814
  """
8787
8815
  The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
@@ -8797,12 +8825,13 @@ if not MYPY:
8797
8825
  disk_size_gb: NotRequired[pulumi.Input[_builtins.int]]
8798
8826
  """
8799
8827
  Size of the disk attached to each node, specified
8800
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
8828
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
8829
+ Prefer configuring `boot_disk`.
8801
8830
  """
8802
8831
  disk_type: NotRequired[pulumi.Input[_builtins.str]]
8803
8832
  """
8804
8833
  Type of the disk attached to each node
8805
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
8834
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
8806
8835
  """
8807
8836
  effective_taints: NotRequired[pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigEffectiveTaintArgsDict']]]]
8808
8837
  """
@@ -8981,7 +9010,7 @@ if not MYPY:
8981
9010
  """
8982
9011
  sole_tenant_config: NotRequired[pulumi.Input['ClusterNodeConfigSoleTenantConfigArgsDict']]
8983
9012
  """
8984
- Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
9013
+ Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
8985
9014
  """
8986
9015
  spot: NotRequired[pulumi.Input[_builtins.bool]]
8987
9016
  """
@@ -9025,6 +9054,7 @@ elif False:
9025
9054
  class ClusterNodeConfigArgs:
9026
9055
  def __init__(__self__, *,
9027
9056
  advanced_machine_features: Optional[pulumi.Input['ClusterNodeConfigAdvancedMachineFeaturesArgs']] = None,
9057
+ boot_disk: Optional[pulumi.Input['ClusterNodeConfigBootDiskArgs']] = None,
9028
9058
  boot_disk_kms_key: Optional[pulumi.Input[_builtins.str]] = None,
9029
9059
  confidential_nodes: Optional[pulumi.Input['ClusterNodeConfigConfidentialNodesArgs']] = None,
9030
9060
  containerd_config: Optional[pulumi.Input['ClusterNodeConfigContainerdConfigArgs']] = None,
@@ -9072,13 +9102,15 @@ class ClusterNodeConfigArgs:
9072
9102
  """
9073
9103
  :param pulumi.Input['ClusterNodeConfigAdvancedMachineFeaturesArgs'] advanced_machine_features: Specifies options for controlling
9074
9104
  advanced machine features. Structure is documented below.
9105
+ :param pulumi.Input['ClusterNodeConfigBootDiskArgs'] boot_disk: Configuration of the node pool boot disk. Structure is documented below
9075
9106
  :param pulumi.Input[_builtins.str] boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
9076
9107
  :param pulumi.Input['ClusterNodeConfigConfidentialNodesArgs'] confidential_nodes: Configuration for Confidential Nodes feature. Structure is documented below.
9077
9108
  :param pulumi.Input['ClusterNodeConfigContainerdConfigArgs'] containerd_config: Parameters to customize containerd runtime. Structure is documented below.
9078
9109
  :param pulumi.Input[_builtins.int] disk_size_gb: Size of the disk attached to each node, specified
9079
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
9110
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
9111
+ Prefer configuring `boot_disk`.
9080
9112
  :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
9081
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
9113
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
9082
9114
  :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
9083
9115
  :param pulumi.Input[_builtins.bool] enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
9084
9116
  :param pulumi.Input['ClusterNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -9158,7 +9190,7 @@ class ClusterNodeConfigArgs:
9158
9190
  :param pulumi.Input[_builtins.str] service_account: The service account to be used by the Node VMs.
9159
9191
  If not specified, the "default" service account is used.
9160
9192
  :param pulumi.Input['ClusterNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options. Structure is documented below.
9161
- :param pulumi.Input['ClusterNodeConfigSoleTenantConfigArgs'] sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
9193
+ :param pulumi.Input['ClusterNodeConfigSoleTenantConfigArgs'] sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
9162
9194
  :param pulumi.Input[_builtins.bool] spot: A boolean that represents whether the underlying node VMs are spot.
9163
9195
  See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms)
9164
9196
  for more information. Defaults to false.
@@ -9179,6 +9211,8 @@ class ClusterNodeConfigArgs:
9179
9211
  """
9180
9212
  if advanced_machine_features is not None:
9181
9213
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
9214
+ if boot_disk is not None:
9215
+ pulumi.set(__self__, "boot_disk", boot_disk)
9182
9216
  if boot_disk_kms_key is not None:
9183
9217
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
9184
9218
  if confidential_nodes is not None:
@@ -9281,6 +9315,18 @@ class ClusterNodeConfigArgs:
9281
9315
  def advanced_machine_features(self, value: Optional[pulumi.Input['ClusterNodeConfigAdvancedMachineFeaturesArgs']]):
9282
9316
  pulumi.set(self, "advanced_machine_features", value)
9283
9317
 
9318
+ @_builtins.property
9319
+ @pulumi.getter(name="bootDisk")
9320
+ def boot_disk(self) -> Optional[pulumi.Input['ClusterNodeConfigBootDiskArgs']]:
9321
+ """
9322
+ Configuration of the node pool boot disk. Structure is documented below
9323
+ """
9324
+ return pulumi.get(self, "boot_disk")
9325
+
9326
+ @boot_disk.setter
9327
+ def boot_disk(self, value: Optional[pulumi.Input['ClusterNodeConfigBootDiskArgs']]):
9328
+ pulumi.set(self, "boot_disk", value)
9329
+
9284
9330
  @_builtins.property
9285
9331
  @pulumi.getter(name="bootDiskKmsKey")
9286
9332
  def boot_disk_kms_key(self) -> Optional[pulumi.Input[_builtins.str]]:
@@ -9322,7 +9368,8 @@ class ClusterNodeConfigArgs:
9322
9368
  def disk_size_gb(self) -> Optional[pulumi.Input[_builtins.int]]:
9323
9369
  """
9324
9370
  Size of the disk attached to each node, specified
9325
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
9371
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
9372
+ Prefer configuring `boot_disk`.
9326
9373
  """
9327
9374
  return pulumi.get(self, "disk_size_gb")
9328
9375
 
@@ -9335,7 +9382,7 @@ class ClusterNodeConfigArgs:
9335
9382
  def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
9336
9383
  """
9337
9384
  Type of the disk attached to each node
9338
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
9385
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
9339
9386
  """
9340
9387
  return pulumi.get(self, "disk_type")
9341
9388
 
@@ -9778,7 +9825,7 @@ class ClusterNodeConfigArgs:
9778
9825
  @pulumi.getter(name="soleTenantConfig")
9779
9826
  def sole_tenant_config(self) -> Optional[pulumi.Input['ClusterNodeConfigSoleTenantConfigArgs']]:
9780
9827
  """
9781
- Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
9828
+ Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
9782
9829
  """
9783
9830
  return pulumi.get(self, "sole_tenant_config")
9784
9831
 
@@ -9941,6 +9988,104 @@ class ClusterNodeConfigAdvancedMachineFeaturesArgs:
9941
9988
  pulumi.set(self, "performance_monitoring_unit", value)
9942
9989
 
9943
9990
 
9991
+ if not MYPY:
9992
+ class ClusterNodeConfigBootDiskArgsDict(TypedDict):
9993
+ disk_type: NotRequired[pulumi.Input[_builtins.str]]
9994
+ """
9995
+ Type of the disk attached to each node
9996
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
9997
+ """
9998
+ provisioned_iops: NotRequired[pulumi.Input[_builtins.int]]
9999
+ """
10000
+ Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
10001
+ """
10002
+ provisioned_throughput: NotRequired[pulumi.Input[_builtins.int]]
10003
+ """
10004
+ Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
10005
+ """
10006
+ size_gb: NotRequired[pulumi.Input[_builtins.int]]
10007
+ """
10008
+ Size of the disk attached to each node, specified
10009
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
10010
+ """
10011
+ elif False:
10012
+ ClusterNodeConfigBootDiskArgsDict: TypeAlias = Mapping[str, Any]
10013
+
10014
+ @pulumi.input_type
10015
+ class ClusterNodeConfigBootDiskArgs:
10016
+ def __init__(__self__, *,
10017
+ disk_type: Optional[pulumi.Input[_builtins.str]] = None,
10018
+ provisioned_iops: Optional[pulumi.Input[_builtins.int]] = None,
10019
+ provisioned_throughput: Optional[pulumi.Input[_builtins.int]] = None,
10020
+ size_gb: Optional[pulumi.Input[_builtins.int]] = None):
10021
+ """
10022
+ :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
10023
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
10024
+ :param pulumi.Input[_builtins.int] provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
10025
+ :param pulumi.Input[_builtins.int] provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
10026
+ :param pulumi.Input[_builtins.int] size_gb: Size of the disk attached to each node, specified
10027
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
10028
+ """
10029
+ if disk_type is not None:
10030
+ pulumi.set(__self__, "disk_type", disk_type)
10031
+ if provisioned_iops is not None:
10032
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
10033
+ if provisioned_throughput is not None:
10034
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
10035
+ if size_gb is not None:
10036
+ pulumi.set(__self__, "size_gb", size_gb)
10037
+
10038
+ @_builtins.property
10039
+ @pulumi.getter(name="diskType")
10040
+ def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
10041
+ """
10042
+ Type of the disk attached to each node
10043
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
10044
+ """
10045
+ return pulumi.get(self, "disk_type")
10046
+
10047
+ @disk_type.setter
10048
+ def disk_type(self, value: Optional[pulumi.Input[_builtins.str]]):
10049
+ pulumi.set(self, "disk_type", value)
10050
+
10051
+ @_builtins.property
10052
+ @pulumi.getter(name="provisionedIops")
10053
+ def provisioned_iops(self) -> Optional[pulumi.Input[_builtins.int]]:
10054
+ """
10055
+ Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
10056
+ """
10057
+ return pulumi.get(self, "provisioned_iops")
10058
+
10059
+ @provisioned_iops.setter
10060
+ def provisioned_iops(self, value: Optional[pulumi.Input[_builtins.int]]):
10061
+ pulumi.set(self, "provisioned_iops", value)
10062
+
10063
+ @_builtins.property
10064
+ @pulumi.getter(name="provisionedThroughput")
10065
+ def provisioned_throughput(self) -> Optional[pulumi.Input[_builtins.int]]:
10066
+ """
10067
+ Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
10068
+ """
10069
+ return pulumi.get(self, "provisioned_throughput")
10070
+
10071
+ @provisioned_throughput.setter
10072
+ def provisioned_throughput(self, value: Optional[pulumi.Input[_builtins.int]]):
10073
+ pulumi.set(self, "provisioned_throughput", value)
10074
+
10075
+ @_builtins.property
10076
+ @pulumi.getter(name="sizeGb")
10077
+ def size_gb(self) -> Optional[pulumi.Input[_builtins.int]]:
10078
+ """
10079
+ Size of the disk attached to each node, specified
10080
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
10081
+ """
10082
+ return pulumi.get(self, "size_gb")
10083
+
10084
+ @size_gb.setter
10085
+ def size_gb(self, value: Optional[pulumi.Input[_builtins.int]]):
10086
+ pulumi.set(self, "size_gb", value)
10087
+
10088
+
9944
10089
  if not MYPY:
9945
10090
  class ClusterNodeConfigConfidentialNodesArgsDict(TypedDict):
9946
10091
  enabled: pulumi.Input[_builtins.bool]
@@ -10693,6 +10838,22 @@ if not MYPY:
10693
10838
  Prior to the 6.4.0 this field was marked as required. The workaround for the required field
10694
10839
  is setting the empty string `""`, which will function identically to not setting this field.
10695
10840
  """
10841
+ eviction_max_pod_grace_period_seconds: NotRequired[pulumi.Input[_builtins.int]]
10842
+ """
10843
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
10844
+ """
10845
+ eviction_minimum_reclaim: NotRequired[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict']]
10846
+ """
10847
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
10848
+ """
10849
+ eviction_soft: NotRequired[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftArgsDict']]
10850
+ """
10851
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
10852
+ """
10853
+ eviction_soft_grace_period: NotRequired[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict']]
10854
+ """
10855
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
10856
+ """
10696
10857
  image_gc_high_threshold_percent: NotRequired[pulumi.Input[_builtins.int]]
10697
10858
  """
10698
10859
  Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
@@ -10713,10 +10874,18 @@ if not MYPY:
10713
10874
  """
10714
10875
  Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
10715
10876
  """
10877
+ max_parallel_image_pulls: NotRequired[pulumi.Input[_builtins.int]]
10878
+ """
10879
+ Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
10880
+ """
10716
10881
  pod_pids_limit: NotRequired[pulumi.Input[_builtins.int]]
10717
10882
  """
10718
10883
  Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
10719
10884
  """
10885
+ single_process_oom_kill: NotRequired[pulumi.Input[_builtins.bool]]
10886
+ """
10887
+ Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
10888
+ """
10720
10889
  elif False:
10721
10890
  ClusterNodeConfigKubeletConfigArgsDict: TypeAlias = Mapping[str, Any]
10722
10891
 
@@ -10729,12 +10898,18 @@ class ClusterNodeConfigKubeletConfigArgs:
10729
10898
  cpu_cfs_quota: Optional[pulumi.Input[_builtins.bool]] = None,
10730
10899
  cpu_cfs_quota_period: Optional[pulumi.Input[_builtins.str]] = None,
10731
10900
  cpu_manager_policy: Optional[pulumi.Input[_builtins.str]] = None,
10901
+ eviction_max_pod_grace_period_seconds: Optional[pulumi.Input[_builtins.int]] = None,
10902
+ eviction_minimum_reclaim: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs']] = None,
10903
+ eviction_soft: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftArgs']] = None,
10904
+ eviction_soft_grace_period: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']] = None,
10732
10905
  image_gc_high_threshold_percent: Optional[pulumi.Input[_builtins.int]] = None,
10733
10906
  image_gc_low_threshold_percent: Optional[pulumi.Input[_builtins.int]] = None,
10734
10907
  image_maximum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
10735
10908
  image_minimum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
10736
10909
  insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[_builtins.str]] = None,
10737
- pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None):
10910
+ max_parallel_image_pulls: Optional[pulumi.Input[_builtins.int]] = None,
10911
+ pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None,
10912
+ single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None):
10738
10913
  """
10739
10914
  :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
10740
10915
  :param pulumi.Input[_builtins.int] container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -10754,12 +10929,18 @@ class ClusterNodeConfigKubeletConfigArgs:
10754
10929
  One of `"none"` or `"static"`. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
10755
10930
  Prior to the 6.4.0 this field was marked as required. The workaround for the required field
10756
10931
  is setting the empty string `""`, which will function identically to not setting this field.
10932
+ :param pulumi.Input[_builtins.int] eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
10933
+ :param pulumi.Input['ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs'] eviction_minimum_reclaim: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
10934
+ :param pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftArgs'] eviction_soft: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
10935
+ :param pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs'] eviction_soft_grace_period: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
10757
10936
  :param pulumi.Input[_builtins.int] image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
10758
10937
  :param pulumi.Input[_builtins.int] image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive.
10759
10938
  :param pulumi.Input[_builtins.str] image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`, and `"2h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration.
10760
10939
  :param pulumi.Input[_builtins.str] image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
10761
10940
  :param pulumi.Input[_builtins.str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
10941
+ :param pulumi.Input[_builtins.int] max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
10762
10942
  :param pulumi.Input[_builtins.int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
10943
+ :param pulumi.Input[_builtins.bool] single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
10763
10944
  """
10764
10945
  if allowed_unsafe_sysctls is not None:
10765
10946
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -10773,6 +10954,14 @@ class ClusterNodeConfigKubeletConfigArgs:
10773
10954
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
10774
10955
  if cpu_manager_policy is not None:
10775
10956
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
10957
+ if eviction_max_pod_grace_period_seconds is not None:
10958
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
10959
+ if eviction_minimum_reclaim is not None:
10960
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
10961
+ if eviction_soft is not None:
10962
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
10963
+ if eviction_soft_grace_period is not None:
10964
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
10776
10965
  if image_gc_high_threshold_percent is not None:
10777
10966
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
10778
10967
  if image_gc_low_threshold_percent is not None:
@@ -10783,8 +10972,12 @@ class ClusterNodeConfigKubeletConfigArgs:
10783
10972
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
10784
10973
  if insecure_kubelet_readonly_port_enabled is not None:
10785
10974
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
10975
+ if max_parallel_image_pulls is not None:
10976
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
10786
10977
  if pod_pids_limit is not None:
10787
10978
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
10979
+ if single_process_oom_kill is not None:
10980
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
10788
10981
 
10789
10982
  @_builtins.property
10790
10983
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -10870,6 +11063,54 @@ class ClusterNodeConfigKubeletConfigArgs:
10870
11063
  def cpu_manager_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
10871
11064
  pulumi.set(self, "cpu_manager_policy", value)
10872
11065
 
11066
+ @_builtins.property
11067
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
11068
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[pulumi.Input[_builtins.int]]:
11069
+ """
11070
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
11071
+ """
11072
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
11073
+
11074
+ @eviction_max_pod_grace_period_seconds.setter
11075
+ def eviction_max_pod_grace_period_seconds(self, value: Optional[pulumi.Input[_builtins.int]]):
11076
+ pulumi.set(self, "eviction_max_pod_grace_period_seconds", value)
11077
+
11078
+ @_builtins.property
11079
+ @pulumi.getter(name="evictionMinimumReclaim")
11080
+ def eviction_minimum_reclaim(self) -> Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs']]:
11081
+ """
11082
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
11083
+ """
11084
+ return pulumi.get(self, "eviction_minimum_reclaim")
11085
+
11086
+ @eviction_minimum_reclaim.setter
11087
+ def eviction_minimum_reclaim(self, value: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs']]):
11088
+ pulumi.set(self, "eviction_minimum_reclaim", value)
11089
+
11090
+ @_builtins.property
11091
+ @pulumi.getter(name="evictionSoft")
11092
+ def eviction_soft(self) -> Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftArgs']]:
11093
+ """
11094
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
11095
+ """
11096
+ return pulumi.get(self, "eviction_soft")
11097
+
11098
+ @eviction_soft.setter
11099
+ def eviction_soft(self, value: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftArgs']]):
11100
+ pulumi.set(self, "eviction_soft", value)
11101
+
11102
+ @_builtins.property
11103
+ @pulumi.getter(name="evictionSoftGracePeriod")
11104
+ def eviction_soft_grace_period(self) -> Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']]:
11105
+ """
11106
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
11107
+ """
11108
+ return pulumi.get(self, "eviction_soft_grace_period")
11109
+
11110
+ @eviction_soft_grace_period.setter
11111
+ def eviction_soft_grace_period(self, value: Optional[pulumi.Input['ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']]):
11112
+ pulumi.set(self, "eviction_soft_grace_period", value)
11113
+
10873
11114
  @_builtins.property
10874
11115
  @pulumi.getter(name="imageGcHighThresholdPercent")
10875
11116
  def image_gc_high_threshold_percent(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -10930,6 +11171,18 @@ class ClusterNodeConfigKubeletConfigArgs:
10930
11171
  def insecure_kubelet_readonly_port_enabled(self, value: Optional[pulumi.Input[_builtins.str]]):
10931
11172
  pulumi.set(self, "insecure_kubelet_readonly_port_enabled", value)
10932
11173
 
11174
+ @_builtins.property
11175
+ @pulumi.getter(name="maxParallelImagePulls")
11176
+ def max_parallel_image_pulls(self) -> Optional[pulumi.Input[_builtins.int]]:
11177
+ """
11178
+ Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
11179
+ """
11180
+ return pulumi.get(self, "max_parallel_image_pulls")
11181
+
11182
+ @max_parallel_image_pulls.setter
11183
+ def max_parallel_image_pulls(self, value: Optional[pulumi.Input[_builtins.int]]):
11184
+ pulumi.set(self, "max_parallel_image_pulls", value)
11185
+
10933
11186
  @_builtins.property
10934
11187
  @pulumi.getter(name="podPidsLimit")
10935
11188
  def pod_pids_limit(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -10942,192 +11195,640 @@ class ClusterNodeConfigKubeletConfigArgs:
10942
11195
  def pod_pids_limit(self, value: Optional[pulumi.Input[_builtins.int]]):
10943
11196
  pulumi.set(self, "pod_pids_limit", value)
10944
11197
 
11198
+ @_builtins.property
11199
+ @pulumi.getter(name="singleProcessOomKill")
11200
+ def single_process_oom_kill(self) -> Optional[pulumi.Input[_builtins.bool]]:
11201
+ """
11202
+ Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
11203
+ """
11204
+ return pulumi.get(self, "single_process_oom_kill")
11205
+
11206
+ @single_process_oom_kill.setter
11207
+ def single_process_oom_kill(self, value: Optional[pulumi.Input[_builtins.bool]]):
11208
+ pulumi.set(self, "single_process_oom_kill", value)
11209
+
10945
11210
 
10946
11211
  if not MYPY:
10947
- class ClusterNodeConfigLinuxNodeConfigArgsDict(TypedDict):
10948
- cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
11212
+ class ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict(TypedDict):
11213
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
10949
11214
  """
10950
- Possible cgroup modes that can be used.
10951
- Accepted values are:
10952
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
10953
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
10954
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
11215
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
10955
11216
  """
10956
- hugepages_config: NotRequired[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgsDict']]
11217
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
10957
11218
  """
10958
- Amounts for 2M and 1G hugepages. Structure is documented below.
11219
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
10959
11220
  """
10960
- sysctls: NotRequired[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]
11221
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
10961
11222
  """
10962
- The Linux kernel parameters to be applied to the nodes
10963
- and all pods running on the nodes. Specified as a map from the key, such as
10964
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
10965
- Note that validations happen all server side. All attributes are optional.
11223
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11224
+ """
11225
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
11226
+ """
11227
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11228
+ """
11229
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
11230
+ """
11231
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11232
+ """
11233
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
11234
+ """
11235
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
10966
11236
  """
10967
11237
  elif False:
10968
- ClusterNodeConfigLinuxNodeConfigArgsDict: TypeAlias = Mapping[str, Any]
11238
+ ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict: TypeAlias = Mapping[str, Any]
10969
11239
 
10970
11240
  @pulumi.input_type
10971
- class ClusterNodeConfigLinuxNodeConfigArgs:
11241
+ class ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs:
10972
11242
  def __init__(__self__, *,
10973
- cgroup_mode: Optional[pulumi.Input[_builtins.str]] = None,
10974
- hugepages_config: Optional[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs']] = None,
10975
- sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None):
11243
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
11244
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
11245
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
11246
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
11247
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
11248
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
10976
11249
  """
10977
- :param pulumi.Input[_builtins.str] cgroup_mode: Possible cgroup modes that can be used.
10978
- Accepted values are:
10979
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
10980
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
10981
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
10982
- :param pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_config: Amounts for 2M and 1G hugepages. Structure is documented below.
10983
- :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] sysctls: The Linux kernel parameters to be applied to the nodes
10984
- and all pods running on the nodes. Specified as a map from the key, such as
10985
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
10986
- Note that validations happen all server side. All attributes are optional.
11250
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11251
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11252
+ :param pulumi.Input[_builtins.str] memory_available: Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11253
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11254
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11255
+ :param pulumi.Input[_builtins.str] pid_available: Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
10987
11256
  """
10988
- if cgroup_mode is not None:
10989
- pulumi.set(__self__, "cgroup_mode", cgroup_mode)
10990
- if hugepages_config is not None:
10991
- pulumi.set(__self__, "hugepages_config", hugepages_config)
10992
- if sysctls is not None:
10993
- pulumi.set(__self__, "sysctls", sysctls)
11257
+ if imagefs_available is not None:
11258
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
11259
+ if imagefs_inodes_free is not None:
11260
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
11261
+ if memory_available is not None:
11262
+ pulumi.set(__self__, "memory_available", memory_available)
11263
+ if nodefs_available is not None:
11264
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
11265
+ if nodefs_inodes_free is not None:
11266
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
11267
+ if pid_available is not None:
11268
+ pulumi.set(__self__, "pid_available", pid_available)
10994
11269
 
10995
11270
  @_builtins.property
10996
- @pulumi.getter(name="cgroupMode")
10997
- def cgroup_mode(self) -> Optional[pulumi.Input[_builtins.str]]:
11271
+ @pulumi.getter(name="imagefsAvailable")
11272
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
10998
11273
  """
10999
- Possible cgroup modes that can be used.
11000
- Accepted values are:
11001
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
11002
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
11003
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
11274
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11004
11275
  """
11005
- return pulumi.get(self, "cgroup_mode")
11276
+ return pulumi.get(self, "imagefs_available")
11006
11277
 
11007
- @cgroup_mode.setter
11008
- def cgroup_mode(self, value: Optional[pulumi.Input[_builtins.str]]):
11009
- pulumi.set(self, "cgroup_mode", value)
11278
+ @imagefs_available.setter
11279
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11280
+ pulumi.set(self, "imagefs_available", value)
11010
11281
 
11011
11282
  @_builtins.property
11012
- @pulumi.getter(name="hugepagesConfig")
11013
- def hugepages_config(self) -> Optional[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs']]:
11283
+ @pulumi.getter(name="imagefsInodesFree")
11284
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
11014
11285
  """
11015
- Amounts for 2M and 1G hugepages. Structure is documented below.
11286
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11016
11287
  """
11017
- return pulumi.get(self, "hugepages_config")
11288
+ return pulumi.get(self, "imagefs_inodes_free")
11018
11289
 
11019
- @hugepages_config.setter
11020
- def hugepages_config(self, value: Optional[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs']]):
11021
- pulumi.set(self, "hugepages_config", value)
11290
+ @imagefs_inodes_free.setter
11291
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
11292
+ pulumi.set(self, "imagefs_inodes_free", value)
11022
11293
 
11023
11294
  @_builtins.property
11024
- @pulumi.getter
11025
- def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
11295
+ @pulumi.getter(name="memoryAvailable")
11296
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11026
11297
  """
11027
- The Linux kernel parameters to be applied to the nodes
11028
- and all pods running on the nodes. Specified as a map from the key, such as
11029
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
11030
- Note that validations happen all server side. All attributes are optional.
11298
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11031
11299
  """
11032
- return pulumi.get(self, "sysctls")
11300
+ return pulumi.get(self, "memory_available")
11033
11301
 
11034
- @sysctls.setter
11035
- def sysctls(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
11036
- pulumi.set(self, "sysctls", value)
11302
+ @memory_available.setter
11303
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11304
+ pulumi.set(self, "memory_available", value)
11305
+
11306
+ @_builtins.property
11307
+ @pulumi.getter(name="nodefsAvailable")
11308
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11309
+ """
11310
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11311
+ """
11312
+ return pulumi.get(self, "nodefs_available")
11313
+
11314
+ @nodefs_available.setter
11315
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11316
+ pulumi.set(self, "nodefs_available", value)
11317
+
11318
+ @_builtins.property
11319
+ @pulumi.getter(name="nodefsInodesFree")
11320
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
11321
+ """
11322
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11323
+ """
11324
+ return pulumi.get(self, "nodefs_inodes_free")
11325
+
11326
+ @nodefs_inodes_free.setter
11327
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
11328
+ pulumi.set(self, "nodefs_inodes_free", value)
11329
+
11330
+ @_builtins.property
11331
+ @pulumi.getter(name="pidAvailable")
11332
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11333
+ """
11334
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
11335
+ """
11336
+ return pulumi.get(self, "pid_available")
11337
+
11338
+ @pid_available.setter
11339
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11340
+ pulumi.set(self, "pid_available", value)
11037
11341
 
11038
11342
 
11039
11343
  if not MYPY:
11040
- class ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgsDict(TypedDict):
11041
- hugepage_size1g: NotRequired[pulumi.Input[_builtins.int]]
11344
+ class ClusterNodeConfigKubeletConfigEvictionSoftArgsDict(TypedDict):
11345
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
11042
11346
  """
11043
- Amount of 1G hugepages.
11347
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
11044
11348
  """
11045
- hugepage_size2m: NotRequired[pulumi.Input[_builtins.int]]
11349
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
11046
11350
  """
11047
- Amount of 2M hugepages.
11351
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
11352
+ """
11353
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
11354
+ """
11355
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
11356
+ """
11357
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
11358
+ """
11359
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
11360
+ """
11361
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
11362
+ """
11363
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
11364
+ """
11365
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
11366
+ """
11367
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
11048
11368
  """
11049
11369
  elif False:
11050
- ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgsDict: TypeAlias = Mapping[str, Any]
11370
+ ClusterNodeConfigKubeletConfigEvictionSoftArgsDict: TypeAlias = Mapping[str, Any]
11051
11371
 
11052
11372
  @pulumi.input_type
11053
- class ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs:
11373
+ class ClusterNodeConfigKubeletConfigEvictionSoftArgs:
11054
11374
  def __init__(__self__, *,
11055
- hugepage_size1g: Optional[pulumi.Input[_builtins.int]] = None,
11056
- hugepage_size2m: Optional[pulumi.Input[_builtins.int]] = None):
11375
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
11376
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
11377
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
11378
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
11379
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
11380
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
11057
11381
  """
11058
- :param pulumi.Input[_builtins.int] hugepage_size1g: Amount of 1G hugepages.
11059
- :param pulumi.Input[_builtins.int] hugepage_size2m: Amount of 2M hugepages.
11382
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
11383
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
11384
+ :param pulumi.Input[_builtins.str] memory_available: Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
11385
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
11386
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
11387
+ :param pulumi.Input[_builtins.str] pid_available: Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
11060
11388
  """
11061
- if hugepage_size1g is not None:
11062
- pulumi.set(__self__, "hugepage_size1g", hugepage_size1g)
11063
- if hugepage_size2m is not None:
11064
- pulumi.set(__self__, "hugepage_size2m", hugepage_size2m)
11389
+ if imagefs_available is not None:
11390
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
11391
+ if imagefs_inodes_free is not None:
11392
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
11393
+ if memory_available is not None:
11394
+ pulumi.set(__self__, "memory_available", memory_available)
11395
+ if nodefs_available is not None:
11396
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
11397
+ if nodefs_inodes_free is not None:
11398
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
11399
+ if pid_available is not None:
11400
+ pulumi.set(__self__, "pid_available", pid_available)
11065
11401
 
11066
11402
  @_builtins.property
11067
- @pulumi.getter(name="hugepageSize1g")
11068
- def hugepage_size1g(self) -> Optional[pulumi.Input[_builtins.int]]:
11403
+ @pulumi.getter(name="imagefsAvailable")
11404
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11069
11405
  """
11070
- Amount of 1G hugepages.
11406
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
11071
11407
  """
11072
- return pulumi.get(self, "hugepage_size1g")
11408
+ return pulumi.get(self, "imagefs_available")
11073
11409
 
11074
- @hugepage_size1g.setter
11075
- def hugepage_size1g(self, value: Optional[pulumi.Input[_builtins.int]]):
11076
- pulumi.set(self, "hugepage_size1g", value)
11410
+ @imagefs_available.setter
11411
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11412
+ pulumi.set(self, "imagefs_available", value)
11077
11413
 
11078
11414
  @_builtins.property
11079
- @pulumi.getter(name="hugepageSize2m")
11080
- def hugepage_size2m(self) -> Optional[pulumi.Input[_builtins.int]]:
11415
+ @pulumi.getter(name="imagefsInodesFree")
11416
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
11081
11417
  """
11082
- Amount of 2M hugepages.
11418
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
11083
11419
  """
11084
- return pulumi.get(self, "hugepage_size2m")
11420
+ return pulumi.get(self, "imagefs_inodes_free")
11085
11421
 
11086
- @hugepage_size2m.setter
11087
- def hugepage_size2m(self, value: Optional[pulumi.Input[_builtins.int]]):
11088
- pulumi.set(self, "hugepage_size2m", value)
11422
+ @imagefs_inodes_free.setter
11423
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
11424
+ pulumi.set(self, "imagefs_inodes_free", value)
11425
+
11426
+ @_builtins.property
11427
+ @pulumi.getter(name="memoryAvailable")
11428
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11429
+ """
11430
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
11431
+ """
11432
+ return pulumi.get(self, "memory_available")
11089
11433
 
11434
+ @memory_available.setter
11435
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11436
+ pulumi.set(self, "memory_available", value)
11090
11437
 
11091
- if not MYPY:
11092
- class ClusterNodeConfigLocalNvmeSsdBlockConfigArgsDict(TypedDict):
11093
- local_ssd_count: pulumi.Input[_builtins.int]
11438
+ @_builtins.property
11439
+ @pulumi.getter(name="nodefsAvailable")
11440
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11094
11441
  """
11095
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
11096
- > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
11442
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
11097
11443
  """
11098
- elif False:
11099
- ClusterNodeConfigLocalNvmeSsdBlockConfigArgsDict: TypeAlias = Mapping[str, Any]
11444
+ return pulumi.get(self, "nodefs_available")
11100
11445
 
11101
- @pulumi.input_type
11102
- class ClusterNodeConfigLocalNvmeSsdBlockConfigArgs:
11103
- def __init__(__self__, *,
11104
- local_ssd_count: pulumi.Input[_builtins.int]):
11446
+ @nodefs_available.setter
11447
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11448
+ pulumi.set(self, "nodefs_available", value)
11449
+
11450
+ @_builtins.property
11451
+ @pulumi.getter(name="nodefsInodesFree")
11452
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
11105
11453
  """
11106
- :param pulumi.Input[_builtins.int] local_ssd_count: Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
11107
- > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
11454
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
11108
11455
  """
11109
- pulumi.set(__self__, "local_ssd_count", local_ssd_count)
11456
+ return pulumi.get(self, "nodefs_inodes_free")
11457
+
11458
+ @nodefs_inodes_free.setter
11459
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
11460
+ pulumi.set(self, "nodefs_inodes_free", value)
11110
11461
 
11111
11462
  @_builtins.property
11112
- @pulumi.getter(name="localSsdCount")
11113
- def local_ssd_count(self) -> pulumi.Input[_builtins.int]:
11463
+ @pulumi.getter(name="pidAvailable")
11464
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11114
11465
  """
11115
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
11116
- > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
11466
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
11117
11467
  """
11118
- return pulumi.get(self, "local_ssd_count")
11468
+ return pulumi.get(self, "pid_available")
11119
11469
 
11120
- @local_ssd_count.setter
11121
- def local_ssd_count(self, value: pulumi.Input[_builtins.int]):
11122
- pulumi.set(self, "local_ssd_count", value)
11470
+ @pid_available.setter
11471
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11472
+ pulumi.set(self, "pid_available", value)
11123
11473
 
11124
11474
 
11125
11475
  if not MYPY:
11126
- class ClusterNodeConfigReservationAffinityArgsDict(TypedDict):
11127
- consume_reservation_type: pulumi.Input[_builtins.str]
11476
+ class ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict(TypedDict):
11477
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
11128
11478
  """
11129
- The type of reservation consumption
11130
- Accepted values are:
11479
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11480
+ """
11481
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
11482
+ """
11483
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11484
+ """
11485
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
11486
+ """
11487
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
11488
+ """
11489
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
11490
+ """
11491
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11492
+ """
11493
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
11494
+ """
11495
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11496
+ """
11497
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
11498
+ """
11499
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11500
+ """
11501
+ elif False:
11502
+ ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict: TypeAlias = Mapping[str, Any]
11503
+
11504
+ @pulumi.input_type
11505
+ class ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs:
11506
+ def __init__(__self__, *,
11507
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
11508
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
11509
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
11510
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
11511
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
11512
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
11513
+ """
11514
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11515
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11516
+ :param pulumi.Input[_builtins.str] memory_available: Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
11517
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11518
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11519
+ :param pulumi.Input[_builtins.str] pid_available: Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11520
+ """
11521
+ if imagefs_available is not None:
11522
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
11523
+ if imagefs_inodes_free is not None:
11524
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
11525
+ if memory_available is not None:
11526
+ pulumi.set(__self__, "memory_available", memory_available)
11527
+ if nodefs_available is not None:
11528
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
11529
+ if nodefs_inodes_free is not None:
11530
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
11531
+ if pid_available is not None:
11532
+ pulumi.set(__self__, "pid_available", pid_available)
11533
+
11534
+ @_builtins.property
11535
+ @pulumi.getter(name="imagefsAvailable")
11536
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11537
+ """
11538
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11539
+ """
11540
+ return pulumi.get(self, "imagefs_available")
11541
+
11542
+ @imagefs_available.setter
11543
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11544
+ pulumi.set(self, "imagefs_available", value)
11545
+
11546
+ @_builtins.property
11547
+ @pulumi.getter(name="imagefsInodesFree")
11548
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
11549
+ """
11550
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11551
+ """
11552
+ return pulumi.get(self, "imagefs_inodes_free")
11553
+
11554
+ @imagefs_inodes_free.setter
11555
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
11556
+ pulumi.set(self, "imagefs_inodes_free", value)
11557
+
11558
+ @_builtins.property
11559
+ @pulumi.getter(name="memoryAvailable")
11560
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11561
+ """
11562
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
11563
+ """
11564
+ return pulumi.get(self, "memory_available")
11565
+
11566
+ @memory_available.setter
11567
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11568
+ pulumi.set(self, "memory_available", value)
11569
+
11570
+ @_builtins.property
11571
+ @pulumi.getter(name="nodefsAvailable")
11572
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11573
+ """
11574
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11575
+ """
11576
+ return pulumi.get(self, "nodefs_available")
11577
+
11578
+ @nodefs_available.setter
11579
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11580
+ pulumi.set(self, "nodefs_available", value)
11581
+
11582
+ @_builtins.property
11583
+ @pulumi.getter(name="nodefsInodesFree")
11584
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
11585
+ """
11586
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11587
+ """
11588
+ return pulumi.get(self, "nodefs_inodes_free")
11589
+
11590
+ @nodefs_inodes_free.setter
11591
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
11592
+ pulumi.set(self, "nodefs_inodes_free", value)
11593
+
11594
+ @_builtins.property
11595
+ @pulumi.getter(name="pidAvailable")
11596
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
11597
+ """
11598
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
11599
+ """
11600
+ return pulumi.get(self, "pid_available")
11601
+
11602
+ @pid_available.setter
11603
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
11604
+ pulumi.set(self, "pid_available", value)
11605
+
11606
+
11607
+ if not MYPY:
11608
+ class ClusterNodeConfigLinuxNodeConfigArgsDict(TypedDict):
11609
+ cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
11610
+ """
11611
+ Possible cgroup modes that can be used.
11612
+ Accepted values are:
11613
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
11614
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
11615
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
11616
+ """
11617
+ hugepages_config: NotRequired[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgsDict']]
11618
+ """
11619
+ Amounts for 2M and 1G hugepages. Structure is documented below.
11620
+ """
11621
+ sysctls: NotRequired[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]
11622
+ """
11623
+ The Linux kernel parameters to be applied to the nodes
11624
+ and all pods running on the nodes. Specified as a map from the key, such as
11625
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
11626
+ Note that validations happen all server side. All attributes are optional.
11627
+ """
11628
+ transparent_hugepage_defrag: NotRequired[pulumi.Input[_builtins.str]]
11629
+ """
11630
+ The Linux kernel transparent hugepage defrag setting.
11631
+ """
11632
+ transparent_hugepage_enabled: NotRequired[pulumi.Input[_builtins.str]]
11633
+ """
11634
+ The Linux kernel transparent hugepage setting.
11635
+ """
11636
+ elif False:
11637
+ ClusterNodeConfigLinuxNodeConfigArgsDict: TypeAlias = Mapping[str, Any]
11638
+
11639
+ @pulumi.input_type
11640
+ class ClusterNodeConfigLinuxNodeConfigArgs:
11641
+ def __init__(__self__, *,
11642
+ cgroup_mode: Optional[pulumi.Input[_builtins.str]] = None,
11643
+ hugepages_config: Optional[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs']] = None,
11644
+ sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
11645
+ transparent_hugepage_defrag: Optional[pulumi.Input[_builtins.str]] = None,
11646
+ transparent_hugepage_enabled: Optional[pulumi.Input[_builtins.str]] = None):
11647
+ """
11648
+ :param pulumi.Input[_builtins.str] cgroup_mode: Possible cgroup modes that can be used.
11649
+ Accepted values are:
11650
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
11651
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
11652
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
11653
+ :param pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_config: Amounts for 2M and 1G hugepages. Structure is documented below.
11654
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] sysctls: The Linux kernel parameters to be applied to the nodes
11655
+ and all pods running on the nodes. Specified as a map from the key, such as
11656
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
11657
+ Note that validations happen all server side. All attributes are optional.
11658
+ :param pulumi.Input[_builtins.str] transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
11659
+ :param pulumi.Input[_builtins.str] transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
11660
+ """
11661
+ if cgroup_mode is not None:
11662
+ pulumi.set(__self__, "cgroup_mode", cgroup_mode)
11663
+ if hugepages_config is not None:
11664
+ pulumi.set(__self__, "hugepages_config", hugepages_config)
11665
+ if sysctls is not None:
11666
+ pulumi.set(__self__, "sysctls", sysctls)
11667
+ if transparent_hugepage_defrag is not None:
11668
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
11669
+ if transparent_hugepage_enabled is not None:
11670
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
11671
+
11672
+ @_builtins.property
11673
+ @pulumi.getter(name="cgroupMode")
11674
+ def cgroup_mode(self) -> Optional[pulumi.Input[_builtins.str]]:
11675
+ """
11676
+ Possible cgroup modes that can be used.
11677
+ Accepted values are:
11678
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
11679
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
11680
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
11681
+ """
11682
+ return pulumi.get(self, "cgroup_mode")
11683
+
11684
+ @cgroup_mode.setter
11685
+ def cgroup_mode(self, value: Optional[pulumi.Input[_builtins.str]]):
11686
+ pulumi.set(self, "cgroup_mode", value)
11687
+
11688
+ @_builtins.property
11689
+ @pulumi.getter(name="hugepagesConfig")
11690
+ def hugepages_config(self) -> Optional[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs']]:
11691
+ """
11692
+ Amounts for 2M and 1G hugepages. Structure is documented below.
11693
+ """
11694
+ return pulumi.get(self, "hugepages_config")
11695
+
11696
+ @hugepages_config.setter
11697
+ def hugepages_config(self, value: Optional[pulumi.Input['ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs']]):
11698
+ pulumi.set(self, "hugepages_config", value)
11699
+
11700
+ @_builtins.property
11701
+ @pulumi.getter
11702
+ def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
11703
+ """
11704
+ The Linux kernel parameters to be applied to the nodes
11705
+ and all pods running on the nodes. Specified as a map from the key, such as
11706
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
11707
+ Note that validations happen all server side. All attributes are optional.
11708
+ """
11709
+ return pulumi.get(self, "sysctls")
11710
+
11711
+ @sysctls.setter
11712
+ def sysctls(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
11713
+ pulumi.set(self, "sysctls", value)
11714
+
11715
+ @_builtins.property
11716
+ @pulumi.getter(name="transparentHugepageDefrag")
11717
+ def transparent_hugepage_defrag(self) -> Optional[pulumi.Input[_builtins.str]]:
11718
+ """
11719
+ The Linux kernel transparent hugepage defrag setting.
11720
+ """
11721
+ return pulumi.get(self, "transparent_hugepage_defrag")
11722
+
11723
+ @transparent_hugepage_defrag.setter
11724
+ def transparent_hugepage_defrag(self, value: Optional[pulumi.Input[_builtins.str]]):
11725
+ pulumi.set(self, "transparent_hugepage_defrag", value)
11726
+
11727
+ @_builtins.property
11728
+ @pulumi.getter(name="transparentHugepageEnabled")
11729
+ def transparent_hugepage_enabled(self) -> Optional[pulumi.Input[_builtins.str]]:
11730
+ """
11731
+ The Linux kernel transparent hugepage setting.
11732
+ """
11733
+ return pulumi.get(self, "transparent_hugepage_enabled")
11734
+
11735
+ @transparent_hugepage_enabled.setter
11736
+ def transparent_hugepage_enabled(self, value: Optional[pulumi.Input[_builtins.str]]):
11737
+ pulumi.set(self, "transparent_hugepage_enabled", value)
11738
+
11739
+
11740
+ if not MYPY:
11741
+ class ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgsDict(TypedDict):
11742
+ hugepage_size1g: NotRequired[pulumi.Input[_builtins.int]]
11743
+ """
11744
+ Amount of 1G hugepages.
11745
+ """
11746
+ hugepage_size2m: NotRequired[pulumi.Input[_builtins.int]]
11747
+ """
11748
+ Amount of 2M hugepages.
11749
+ """
11750
+ elif False:
11751
+ ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgsDict: TypeAlias = Mapping[str, Any]
11752
+
11753
+ @pulumi.input_type
11754
+ class ClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs:
11755
+ def __init__(__self__, *,
11756
+ hugepage_size1g: Optional[pulumi.Input[_builtins.int]] = None,
11757
+ hugepage_size2m: Optional[pulumi.Input[_builtins.int]] = None):
11758
+ """
11759
+ :param pulumi.Input[_builtins.int] hugepage_size1g: Amount of 1G hugepages.
11760
+ :param pulumi.Input[_builtins.int] hugepage_size2m: Amount of 2M hugepages.
11761
+ """
11762
+ if hugepage_size1g is not None:
11763
+ pulumi.set(__self__, "hugepage_size1g", hugepage_size1g)
11764
+ if hugepage_size2m is not None:
11765
+ pulumi.set(__self__, "hugepage_size2m", hugepage_size2m)
11766
+
11767
+ @_builtins.property
11768
+ @pulumi.getter(name="hugepageSize1g")
11769
+ def hugepage_size1g(self) -> Optional[pulumi.Input[_builtins.int]]:
11770
+ """
11771
+ Amount of 1G hugepages.
11772
+ """
11773
+ return pulumi.get(self, "hugepage_size1g")
11774
+
11775
+ @hugepage_size1g.setter
11776
+ def hugepage_size1g(self, value: Optional[pulumi.Input[_builtins.int]]):
11777
+ pulumi.set(self, "hugepage_size1g", value)
11778
+
11779
+ @_builtins.property
11780
+ @pulumi.getter(name="hugepageSize2m")
11781
+ def hugepage_size2m(self) -> Optional[pulumi.Input[_builtins.int]]:
11782
+ """
11783
+ Amount of 2M hugepages.
11784
+ """
11785
+ return pulumi.get(self, "hugepage_size2m")
11786
+
11787
+ @hugepage_size2m.setter
11788
+ def hugepage_size2m(self, value: Optional[pulumi.Input[_builtins.int]]):
11789
+ pulumi.set(self, "hugepage_size2m", value)
11790
+
11791
+
11792
+ if not MYPY:
11793
+ class ClusterNodeConfigLocalNvmeSsdBlockConfigArgsDict(TypedDict):
11794
+ local_ssd_count: pulumi.Input[_builtins.int]
11795
+ """
11796
+ Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
11797
+ > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
11798
+ """
11799
+ elif False:
11800
+ ClusterNodeConfigLocalNvmeSsdBlockConfigArgsDict: TypeAlias = Mapping[str, Any]
11801
+
11802
+ @pulumi.input_type
11803
+ class ClusterNodeConfigLocalNvmeSsdBlockConfigArgs:
11804
+ def __init__(__self__, *,
11805
+ local_ssd_count: pulumi.Input[_builtins.int]):
11806
+ """
11807
+ :param pulumi.Input[_builtins.int] local_ssd_count: Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
11808
+ > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
11809
+ """
11810
+ pulumi.set(__self__, "local_ssd_count", local_ssd_count)
11811
+
11812
+ @_builtins.property
11813
+ @pulumi.getter(name="localSsdCount")
11814
+ def local_ssd_count(self) -> pulumi.Input[_builtins.int]:
11815
+ """
11816
+ Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
11817
+ > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
11818
+ """
11819
+ return pulumi.get(self, "local_ssd_count")
11820
+
11821
+ @local_ssd_count.setter
11822
+ def local_ssd_count(self, value: pulumi.Input[_builtins.int]):
11823
+ pulumi.set(self, "local_ssd_count", value)
11824
+
11825
+
11826
+ if not MYPY:
11827
+ class ClusterNodeConfigReservationAffinityArgsDict(TypedDict):
11828
+ consume_reservation_type: pulumi.Input[_builtins.str]
11829
+ """
11830
+ The type of reservation consumption
11831
+ Accepted values are:
11131
11832
 
11132
11833
  * `"UNSPECIFIED"`: Default value. This should not be used.
11133
11834
  * `"NO_RESERVATION"`: Do not consume from any reserved capacity.
@@ -11370,7 +12071,11 @@ if not MYPY:
11370
12071
  class ClusterNodeConfigSoleTenantConfigArgsDict(TypedDict):
11371
12072
  node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgsDict']]]
11372
12073
  """
11373
- .
12074
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
12075
+ """
12076
+ min_node_cpus: NotRequired[pulumi.Input[_builtins.int]]
12077
+ """
12078
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feeature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
11374
12079
  """
11375
12080
  elif False:
11376
12081
  ClusterNodeConfigSoleTenantConfigArgsDict: TypeAlias = Mapping[str, Any]
@@ -11378,17 +12083,21 @@ elif False:
11378
12083
  @pulumi.input_type
11379
12084
  class ClusterNodeConfigSoleTenantConfigArgs:
11380
12085
  def __init__(__self__, *,
11381
- node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
12086
+ node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]],
12087
+ min_node_cpus: Optional[pulumi.Input[_builtins.int]] = None):
11382
12088
  """
11383
- :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: .
12089
+ :param pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: The node affinity settings for the sole tenant node pool. Structure is documented below.
12090
+ :param pulumi.Input[_builtins.int] min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feeature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
11384
12091
  """
11385
12092
  pulumi.set(__self__, "node_affinities", node_affinities)
12093
+ if min_node_cpus is not None:
12094
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
11386
12095
 
11387
12096
  @_builtins.property
11388
12097
  @pulumi.getter(name="nodeAffinities")
11389
12098
  def node_affinities(self) -> pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]]:
11390
12099
  """
11391
- .
12100
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
11392
12101
  """
11393
12102
  return pulumi.get(self, "node_affinities")
11394
12103
 
@@ -11396,6 +12105,18 @@ class ClusterNodeConfigSoleTenantConfigArgs:
11396
12105
  def node_affinities(self, value: pulumi.Input[Sequence[pulumi.Input['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
11397
12106
  pulumi.set(self, "node_affinities", value)
11398
12107
 
12108
+ @_builtins.property
12109
+ @pulumi.getter(name="minNodeCpus")
12110
+ def min_node_cpus(self) -> Optional[pulumi.Input[_builtins.int]]:
12111
+ """
12112
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feeature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
12113
+ """
12114
+ return pulumi.get(self, "min_node_cpus")
12115
+
12116
+ @min_node_cpus.setter
12117
+ def min_node_cpus(self, value: Optional[pulumi.Input[_builtins.int]]):
12118
+ pulumi.set(self, "min_node_cpus", value)
12119
+
11399
12120
 
11400
12121
  if not MYPY:
11401
12122
  class ClusterNodeConfigSoleTenantConfigNodeAffinityArgsDict(TypedDict):
@@ -13102,6 +13823,10 @@ if not MYPY:
13102
13823
  Specifies options for controlling
13103
13824
  advanced machine features. Structure is documented below.
13104
13825
  """
13826
+ boot_disk: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigBootDiskArgsDict']]
13827
+ """
13828
+ Configuration of the node pool boot disk. Structure is documented below
13829
+ """
13105
13830
  boot_disk_kms_key: NotRequired[pulumi.Input[_builtins.str]]
13106
13831
  """
13107
13832
  The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
@@ -13117,12 +13842,13 @@ if not MYPY:
13117
13842
  disk_size_gb: NotRequired[pulumi.Input[_builtins.int]]
13118
13843
  """
13119
13844
  Size of the disk attached to each node, specified
13120
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
13845
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
13846
+ Prefer configuring `boot_disk`.
13121
13847
  """
13122
13848
  disk_type: NotRequired[pulumi.Input[_builtins.str]]
13123
13849
  """
13124
13850
  Type of the disk attached to each node
13125
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
13851
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
13126
13852
  """
13127
13853
  effective_taints: NotRequired[pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigEffectiveTaintArgsDict']]]]
13128
13854
  """
@@ -13301,7 +14027,7 @@ if not MYPY:
13301
14027
  """
13302
14028
  sole_tenant_config: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigArgsDict']]
13303
14029
  """
13304
- Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
14030
+ Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
13305
14031
  """
13306
14032
  spot: NotRequired[pulumi.Input[_builtins.bool]]
13307
14033
  """
@@ -13345,6 +14071,7 @@ elif False:
13345
14071
  class ClusterNodePoolNodeConfigArgs:
13346
14072
  def __init__(__self__, *,
13347
14073
  advanced_machine_features: Optional[pulumi.Input['ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs']] = None,
14074
+ boot_disk: Optional[pulumi.Input['ClusterNodePoolNodeConfigBootDiskArgs']] = None,
13348
14075
  boot_disk_kms_key: Optional[pulumi.Input[_builtins.str]] = None,
13349
14076
  confidential_nodes: Optional[pulumi.Input['ClusterNodePoolNodeConfigConfidentialNodesArgs']] = None,
13350
14077
  containerd_config: Optional[pulumi.Input['ClusterNodePoolNodeConfigContainerdConfigArgs']] = None,
@@ -13392,13 +14119,15 @@ class ClusterNodePoolNodeConfigArgs:
13392
14119
  """
13393
14120
  :param pulumi.Input['ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs'] advanced_machine_features: Specifies options for controlling
13394
14121
  advanced machine features. Structure is documented below.
14122
+ :param pulumi.Input['ClusterNodePoolNodeConfigBootDiskArgs'] boot_disk: Configuration of the node pool boot disk. Structure is documented below
13395
14123
  :param pulumi.Input[_builtins.str] boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool. This should be of the form projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. For more information about protecting resources with Cloud KMS Keys please see: <https://cloud.google.com/compute/docs/disks/customer-managed-encryption>
13396
14124
  :param pulumi.Input['ClusterNodePoolNodeConfigConfidentialNodesArgs'] confidential_nodes: Configuration for Confidential Nodes feature. Structure is documented below.
13397
14125
  :param pulumi.Input['ClusterNodePoolNodeConfigContainerdConfigArgs'] containerd_config: Parameters to customize containerd runtime. Structure is documented below.
13398
14126
  :param pulumi.Input[_builtins.int] disk_size_gb: Size of the disk attached to each node, specified
13399
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
14127
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
14128
+ Prefer configuring `boot_disk`.
13400
14129
  :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
13401
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
14130
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
13402
14131
  :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigEffectiveTaintArgs']]] effective_taints: List of kubernetes taints applied to each node. Structure is documented above.
13403
14132
  :param pulumi.Input[_builtins.bool] enable_confidential_storage: Enabling Confidential Storage will create boot disk with confidential mode. It is disabled by default.
13404
14133
  :param pulumi.Input['ClusterNodePoolNodeConfigEphemeralStorageConfigArgs'] ephemeral_storage_config: Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk. Structure is documented below.
@@ -13478,7 +14207,7 @@ class ClusterNodePoolNodeConfigArgs:
13478
14207
  :param pulumi.Input[_builtins.str] service_account: The service account to be used by the Node VMs.
13479
14208
  If not specified, the "default" service account is used.
13480
14209
  :param pulumi.Input['ClusterNodePoolNodeConfigShieldedInstanceConfigArgs'] shielded_instance_config: Shielded Instance options. Structure is documented below.
13481
- :param pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigArgs'] sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
14210
+ :param pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigArgs'] sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
13482
14211
  :param pulumi.Input[_builtins.bool] spot: A boolean that represents whether the underlying node VMs are spot.
13483
14212
  See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms)
13484
14213
  for more information. Defaults to false.
@@ -13499,6 +14228,8 @@ class ClusterNodePoolNodeConfigArgs:
13499
14228
  """
13500
14229
  if advanced_machine_features is not None:
13501
14230
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
14231
+ if boot_disk is not None:
14232
+ pulumi.set(__self__, "boot_disk", boot_disk)
13502
14233
  if boot_disk_kms_key is not None:
13503
14234
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
13504
14235
  if confidential_nodes is not None:
@@ -13601,6 +14332,18 @@ class ClusterNodePoolNodeConfigArgs:
13601
14332
  def advanced_machine_features(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs']]):
13602
14333
  pulumi.set(self, "advanced_machine_features", value)
13603
14334
 
14335
+ @_builtins.property
14336
+ @pulumi.getter(name="bootDisk")
14337
+ def boot_disk(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigBootDiskArgs']]:
14338
+ """
14339
+ Configuration of the node pool boot disk. Structure is documented below
14340
+ """
14341
+ return pulumi.get(self, "boot_disk")
14342
+
14343
+ @boot_disk.setter
14344
+ def boot_disk(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigBootDiskArgs']]):
14345
+ pulumi.set(self, "boot_disk", value)
14346
+
13604
14347
  @_builtins.property
13605
14348
  @pulumi.getter(name="bootDiskKmsKey")
13606
14349
  def boot_disk_kms_key(self) -> Optional[pulumi.Input[_builtins.str]]:
@@ -13642,7 +14385,8 @@ class ClusterNodePoolNodeConfigArgs:
13642
14385
  def disk_size_gb(self) -> Optional[pulumi.Input[_builtins.int]]:
13643
14386
  """
13644
14387
  Size of the disk attached to each node, specified
13645
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
14388
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated to `boot_disk.size_gb`, and must match if specified in both places.
14389
+ Prefer configuring `boot_disk`.
13646
14390
  """
13647
14391
  return pulumi.get(self, "disk_size_gb")
13648
14392
 
@@ -13655,7 +14399,7 @@ class ClusterNodePoolNodeConfigArgs:
13655
14399
  def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
13656
14400
  """
13657
14401
  Type of the disk attached to each node
13658
- (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced'
14402
+ (e.g. 'pd-standard', 'pd-balanced' or 'pd-ssd'). If unspecified, the default disk type is 'pd-balanced' This is being migrated to `boot_disk.disk_type`, and must match if specified in both places. Prefer configuring `boot_disk`.
13659
14403
  """
13660
14404
  return pulumi.get(self, "disk_type")
13661
14405
 
@@ -14098,7 +14842,7 @@ class ClusterNodePoolNodeConfigArgs:
14098
14842
  @pulumi.getter(name="soleTenantConfig")
14099
14843
  def sole_tenant_config(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigArgs']]:
14100
14844
  """
14101
- Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
14845
+ Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
14102
14846
  """
14103
14847
  return pulumi.get(self, "sole_tenant_config")
14104
14848
 
@@ -14261,6 +15005,104 @@ class ClusterNodePoolNodeConfigAdvancedMachineFeaturesArgs:
14261
15005
  pulumi.set(self, "performance_monitoring_unit", value)
14262
15006
 
14263
15007
 
15008
+ if not MYPY:
15009
+ class ClusterNodePoolNodeConfigBootDiskArgsDict(TypedDict):
15010
+ disk_type: NotRequired[pulumi.Input[_builtins.str]]
15011
+ """
15012
+ Type of the disk attached to each node
15013
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
15014
+ """
15015
+ provisioned_iops: NotRequired[pulumi.Input[_builtins.int]]
15016
+ """
15017
+ Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
15018
+ """
15019
+ provisioned_throughput: NotRequired[pulumi.Input[_builtins.int]]
15020
+ """
15021
+ Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
15022
+ """
15023
+ size_gb: NotRequired[pulumi.Input[_builtins.int]]
15024
+ """
15025
+ Size of the disk attached to each node, specified
15026
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
15027
+ """
15028
+ elif False:
15029
+ ClusterNodePoolNodeConfigBootDiskArgsDict: TypeAlias = Mapping[str, Any]
15030
+
15031
+ @pulumi.input_type
15032
+ class ClusterNodePoolNodeConfigBootDiskArgs:
15033
+ def __init__(__self__, *,
15034
+ disk_type: Optional[pulumi.Input[_builtins.str]] = None,
15035
+ provisioned_iops: Optional[pulumi.Input[_builtins.int]] = None,
15036
+ provisioned_throughput: Optional[pulumi.Input[_builtins.int]] = None,
15037
+ size_gb: Optional[pulumi.Input[_builtins.int]] = None):
15038
+ """
15039
+ :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node
15040
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
15041
+ :param pulumi.Input[_builtins.int] provisioned_iops: Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
15042
+ :param pulumi.Input[_builtins.int] provisioned_throughput: Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
15043
+ :param pulumi.Input[_builtins.int] size_gb: Size of the disk attached to each node, specified
15044
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
15045
+ """
15046
+ if disk_type is not None:
15047
+ pulumi.set(__self__, "disk_type", disk_type)
15048
+ if provisioned_iops is not None:
15049
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
15050
+ if provisioned_throughput is not None:
15051
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
15052
+ if size_gb is not None:
15053
+ pulumi.set(__self__, "size_gb", size_gb)
15054
+
15055
+ @_builtins.property
15056
+ @pulumi.getter(name="diskType")
15057
+ def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
15058
+ """
15059
+ Type of the disk attached to each node
15060
+ (e.g. 'pd-standard', 'pd-balanced', 'pd-ssd', 'hyperdisk-balanced'). If unspecified, the default disk type is 'pd-balanced' This is being migrated from `node_config.disk_type`, and must match if specified in both places. Prefer using this field.
15061
+ """
15062
+ return pulumi.get(self, "disk_type")
15063
+
15064
+ @disk_type.setter
15065
+ def disk_type(self, value: Optional[pulumi.Input[_builtins.str]]):
15066
+ pulumi.set(self, "disk_type", value)
15067
+
15068
+ @_builtins.property
15069
+ @pulumi.getter(name="provisionedIops")
15070
+ def provisioned_iops(self) -> Optional[pulumi.Input[_builtins.int]]:
15071
+ """
15072
+ Configure disk IOPs. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
15073
+ """
15074
+ return pulumi.get(self, "provisioned_iops")
15075
+
15076
+ @provisioned_iops.setter
15077
+ def provisioned_iops(self, value: Optional[pulumi.Input[_builtins.int]]):
15078
+ pulumi.set(self, "provisioned_iops", value)
15079
+
15080
+ @_builtins.property
15081
+ @pulumi.getter(name="provisionedThroughput")
15082
+ def provisioned_throughput(self) -> Optional[pulumi.Input[_builtins.int]]:
15083
+ """
15084
+ Configure disk throughput. This is only valid if the `disk_type` is 'hyperdisk-balanced'. See [performance limit documention](https://cloud.google.com/compute/docs/disks/hyperdisk-perf-limits) for more information about valid values.
15085
+ """
15086
+ return pulumi.get(self, "provisioned_throughput")
15087
+
15088
+ @provisioned_throughput.setter
15089
+ def provisioned_throughput(self, value: Optional[pulumi.Input[_builtins.int]]):
15090
+ pulumi.set(self, "provisioned_throughput", value)
15091
+
15092
+ @_builtins.property
15093
+ @pulumi.getter(name="sizeGb")
15094
+ def size_gb(self) -> Optional[pulumi.Input[_builtins.int]]:
15095
+ """
15096
+ Size of the disk attached to each node, specified
15097
+ in GB. The smallest allowed disk size is 10GB. Defaults to 100GB. This is being migrated from `node_config.disk_size_gb`, and must match if specified in both places. Prefer using this field.
15098
+ """
15099
+ return pulumi.get(self, "size_gb")
15100
+
15101
+ @size_gb.setter
15102
+ def size_gb(self, value: Optional[pulumi.Input[_builtins.int]]):
15103
+ pulumi.set(self, "size_gb", value)
15104
+
15105
+
14264
15106
  if not MYPY:
14265
15107
  class ClusterNodePoolNodeConfigConfidentialNodesArgsDict(TypedDict):
14266
15108
  enabled: pulumi.Input[_builtins.bool]
@@ -15013,6 +15855,22 @@ if not MYPY:
15013
15855
  Prior to the 6.4.0 this field was marked as required. The workaround for the required field
15014
15856
  is setting the empty string `""`, which will function identically to not setting this field.
15015
15857
  """
15858
+ eviction_max_pod_grace_period_seconds: NotRequired[pulumi.Input[_builtins.int]]
15859
+ """
15860
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
15861
+ """
15862
+ eviction_minimum_reclaim: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict']]
15863
+ """
15864
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
15865
+ """
15866
+ eviction_soft: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgsDict']]
15867
+ """
15868
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
15869
+ """
15870
+ eviction_soft_grace_period: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict']]
15871
+ """
15872
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
15873
+ """
15016
15874
  image_gc_high_threshold_percent: NotRequired[pulumi.Input[_builtins.int]]
15017
15875
  """
15018
15876
  Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
@@ -15033,10 +15891,18 @@ if not MYPY:
15033
15891
  """
15034
15892
  Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
15035
15893
  """
15894
+ max_parallel_image_pulls: NotRequired[pulumi.Input[_builtins.int]]
15895
+ """
15896
+ Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
15897
+ """
15036
15898
  pod_pids_limit: NotRequired[pulumi.Input[_builtins.int]]
15037
15899
  """
15038
15900
  Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
15039
15901
  """
15902
+ single_process_oom_kill: NotRequired[pulumi.Input[_builtins.bool]]
15903
+ """
15904
+ Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
15905
+ """
15040
15906
  elif False:
15041
15907
  ClusterNodePoolNodeConfigKubeletConfigArgsDict: TypeAlias = Mapping[str, Any]
15042
15908
 
@@ -15049,12 +15915,18 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
15049
15915
  cpu_cfs_quota: Optional[pulumi.Input[_builtins.bool]] = None,
15050
15916
  cpu_cfs_quota_period: Optional[pulumi.Input[_builtins.str]] = None,
15051
15917
  cpu_manager_policy: Optional[pulumi.Input[_builtins.str]] = None,
15918
+ eviction_max_pod_grace_period_seconds: Optional[pulumi.Input[_builtins.int]] = None,
15919
+ eviction_minimum_reclaim: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs']] = None,
15920
+ eviction_soft: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs']] = None,
15921
+ eviction_soft_grace_period: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']] = None,
15052
15922
  image_gc_high_threshold_percent: Optional[pulumi.Input[_builtins.int]] = None,
15053
15923
  image_gc_low_threshold_percent: Optional[pulumi.Input[_builtins.int]] = None,
15054
15924
  image_maximum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
15055
15925
  image_minimum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
15056
15926
  insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[_builtins.str]] = None,
15057
- pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None):
15927
+ max_parallel_image_pulls: Optional[pulumi.Input[_builtins.int]] = None,
15928
+ pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None,
15929
+ single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None):
15058
15930
  """
15059
15931
  :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods. The allowed sysctl groups are `kernel.shm*`, `kernel.msg*`, `kernel.sem`, `fs.mqueue.*`, and `net.*`.
15060
15932
  :param pulumi.Input[_builtins.int] container_log_max_files: Defines the maximum number of container log files that can be present for a container. The integer must be between 2 and 10, inclusive.
@@ -15074,12 +15946,18 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
15074
15946
  One of `"none"` or `"static"`. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
15075
15947
  Prior to the 6.4.0 this field was marked as required. The workaround for the required field
15076
15948
  is setting the empty string `""`, which will function identically to not setting this field.
15949
+ :param pulumi.Input[_builtins.int] eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
15950
+ :param pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs'] eviction_minimum_reclaim: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
15951
+ :param pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs'] eviction_soft: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
15952
+ :param pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs'] eviction_soft_grace_period: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
15077
15953
  :param pulumi.Input[_builtins.int] image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
15078
15954
  :param pulumi.Input[_builtins.int] image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive.
15079
15955
  :param pulumi.Input[_builtins.str] image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`, and `"2h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration.
15080
15956
  :param pulumi.Input[_builtins.str] image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
15081
15957
  :param pulumi.Input[_builtins.str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
15958
+ :param pulumi.Input[_builtins.int] max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
15082
15959
  :param pulumi.Input[_builtins.int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
15960
+ :param pulumi.Input[_builtins.bool] single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
15083
15961
  """
15084
15962
  if allowed_unsafe_sysctls is not None:
15085
15963
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -15093,6 +15971,14 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
15093
15971
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
15094
15972
  if cpu_manager_policy is not None:
15095
15973
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
15974
+ if eviction_max_pod_grace_period_seconds is not None:
15975
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
15976
+ if eviction_minimum_reclaim is not None:
15977
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
15978
+ if eviction_soft is not None:
15979
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
15980
+ if eviction_soft_grace_period is not None:
15981
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
15096
15982
  if image_gc_high_threshold_percent is not None:
15097
15983
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
15098
15984
  if image_gc_low_threshold_percent is not None:
@@ -15103,8 +15989,12 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
15103
15989
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
15104
15990
  if insecure_kubelet_readonly_port_enabled is not None:
15105
15991
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
15992
+ if max_parallel_image_pulls is not None:
15993
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
15106
15994
  if pod_pids_limit is not None:
15107
15995
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
15996
+ if single_process_oom_kill is not None:
15997
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
15108
15998
 
15109
15999
  @_builtins.property
15110
16000
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -15190,6 +16080,54 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
15190
16080
  def cpu_manager_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
15191
16081
  pulumi.set(self, "cpu_manager_policy", value)
15192
16082
 
16083
+ @_builtins.property
16084
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
16085
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[pulumi.Input[_builtins.int]]:
16086
+ """
16087
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
16088
+ """
16089
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
16090
+
16091
+ @eviction_max_pod_grace_period_seconds.setter
16092
+ def eviction_max_pod_grace_period_seconds(self, value: Optional[pulumi.Input[_builtins.int]]):
16093
+ pulumi.set(self, "eviction_max_pod_grace_period_seconds", value)
16094
+
16095
+ @_builtins.property
16096
+ @pulumi.getter(name="evictionMinimumReclaim")
16097
+ def eviction_minimum_reclaim(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs']]:
16098
+ """
16099
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
16100
+ """
16101
+ return pulumi.get(self, "eviction_minimum_reclaim")
16102
+
16103
+ @eviction_minimum_reclaim.setter
16104
+ def eviction_minimum_reclaim(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs']]):
16105
+ pulumi.set(self, "eviction_minimum_reclaim", value)
16106
+
16107
+ @_builtins.property
16108
+ @pulumi.getter(name="evictionSoft")
16109
+ def eviction_soft(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs']]:
16110
+ """
16111
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
16112
+ """
16113
+ return pulumi.get(self, "eviction_soft")
16114
+
16115
+ @eviction_soft.setter
16116
+ def eviction_soft(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs']]):
16117
+ pulumi.set(self, "eviction_soft", value)
16118
+
16119
+ @_builtins.property
16120
+ @pulumi.getter(name="evictionSoftGracePeriod")
16121
+ def eviction_soft_grace_period(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']]:
16122
+ """
16123
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
16124
+ """
16125
+ return pulumi.get(self, "eviction_soft_grace_period")
16126
+
16127
+ @eviction_soft_grace_period.setter
16128
+ def eviction_soft_grace_period(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']]):
16129
+ pulumi.set(self, "eviction_soft_grace_period", value)
16130
+
15193
16131
  @_builtins.property
15194
16132
  @pulumi.getter(name="imageGcHighThresholdPercent")
15195
16133
  def image_gc_high_threshold_percent(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -15250,6 +16188,18 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
15250
16188
  def insecure_kubelet_readonly_port_enabled(self, value: Optional[pulumi.Input[_builtins.str]]):
15251
16189
  pulumi.set(self, "insecure_kubelet_readonly_port_enabled", value)
15252
16190
 
16191
+ @_builtins.property
16192
+ @pulumi.getter(name="maxParallelImagePulls")
16193
+ def max_parallel_image_pulls(self) -> Optional[pulumi.Input[_builtins.int]]:
16194
+ """
16195
+ Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
16196
+ """
16197
+ return pulumi.get(self, "max_parallel_image_pulls")
16198
+
16199
+ @max_parallel_image_pulls.setter
16200
+ def max_parallel_image_pulls(self, value: Optional[pulumi.Input[_builtins.int]]):
16201
+ pulumi.set(self, "max_parallel_image_pulls", value)
16202
+
15253
16203
  @_builtins.property
15254
16204
  @pulumi.getter(name="podPidsLimit")
15255
16205
  def pod_pids_limit(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -15262,192 +16212,640 @@ class ClusterNodePoolNodeConfigKubeletConfigArgs:
15262
16212
  def pod_pids_limit(self, value: Optional[pulumi.Input[_builtins.int]]):
15263
16213
  pulumi.set(self, "pod_pids_limit", value)
15264
16214
 
16215
+ @_builtins.property
16216
+ @pulumi.getter(name="singleProcessOomKill")
16217
+ def single_process_oom_kill(self) -> Optional[pulumi.Input[_builtins.bool]]:
16218
+ """
16219
+ Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
16220
+ """
16221
+ return pulumi.get(self, "single_process_oom_kill")
16222
+
16223
+ @single_process_oom_kill.setter
16224
+ def single_process_oom_kill(self, value: Optional[pulumi.Input[_builtins.bool]]):
16225
+ pulumi.set(self, "single_process_oom_kill", value)
16226
+
15265
16227
 
15266
16228
  if not MYPY:
15267
- class ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict(TypedDict):
15268
- cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
16229
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict(TypedDict):
16230
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
15269
16231
  """
15270
- Possible cgroup modes that can be used.
15271
- Accepted values are:
15272
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
15273
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
15274
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
16232
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15275
16233
  """
15276
- hugepages_config: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict']]
16234
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
15277
16235
  """
15278
- Amounts for 2M and 1G hugepages. Structure is documented below.
16236
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15279
16237
  """
15280
- sysctls: NotRequired[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]
16238
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
15281
16239
  """
15282
- The Linux kernel parameters to be applied to the nodes
15283
- and all pods running on the nodes. Specified as a map from the key, such as
15284
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
15285
- Note that validations happen all server side. All attributes are optional.
16240
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16241
+ """
16242
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
16243
+ """
16244
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16245
+ """
16246
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
16247
+ """
16248
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16249
+ """
16250
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
16251
+ """
16252
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15286
16253
  """
15287
16254
  elif False:
15288
- ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict: TypeAlias = Mapping[str, Any]
16255
+ ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict: TypeAlias = Mapping[str, Any]
15289
16256
 
15290
16257
  @pulumi.input_type
15291
- class ClusterNodePoolNodeConfigLinuxNodeConfigArgs:
16258
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs:
15292
16259
  def __init__(__self__, *,
15293
- cgroup_mode: Optional[pulumi.Input[_builtins.str]] = None,
15294
- hugepages_config: Optional[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']] = None,
15295
- sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None):
16260
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
16261
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
16262
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
16263
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
16264
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
16265
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
15296
16266
  """
15297
- :param pulumi.Input[_builtins.str] cgroup_mode: Possible cgroup modes that can be used.
15298
- Accepted values are:
15299
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
15300
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
15301
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
15302
- :param pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_config: Amounts for 2M and 1G hugepages. Structure is documented below.
15303
- :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] sysctls: The Linux kernel parameters to be applied to the nodes
15304
- and all pods running on the nodes. Specified as a map from the key, such as
15305
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
15306
- Note that validations happen all server side. All attributes are optional.
16267
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16268
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16269
+ :param pulumi.Input[_builtins.str] memory_available: Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16270
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16271
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16272
+ :param pulumi.Input[_builtins.str] pid_available: Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15307
16273
  """
15308
- if cgroup_mode is not None:
15309
- pulumi.set(__self__, "cgroup_mode", cgroup_mode)
15310
- if hugepages_config is not None:
15311
- pulumi.set(__self__, "hugepages_config", hugepages_config)
15312
- if sysctls is not None:
15313
- pulumi.set(__self__, "sysctls", sysctls)
16274
+ if imagefs_available is not None:
16275
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
16276
+ if imagefs_inodes_free is not None:
16277
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
16278
+ if memory_available is not None:
16279
+ pulumi.set(__self__, "memory_available", memory_available)
16280
+ if nodefs_available is not None:
16281
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
16282
+ if nodefs_inodes_free is not None:
16283
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
16284
+ if pid_available is not None:
16285
+ pulumi.set(__self__, "pid_available", pid_available)
15314
16286
 
15315
16287
  @_builtins.property
15316
- @pulumi.getter(name="cgroupMode")
15317
- def cgroup_mode(self) -> Optional[pulumi.Input[_builtins.str]]:
16288
+ @pulumi.getter(name="imagefsAvailable")
16289
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
15318
16290
  """
15319
- Possible cgroup modes that can be used.
15320
- Accepted values are:
15321
- * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
15322
- * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
15323
- * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
16291
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15324
16292
  """
15325
- return pulumi.get(self, "cgroup_mode")
16293
+ return pulumi.get(self, "imagefs_available")
15326
16294
 
15327
- @cgroup_mode.setter
15328
- def cgroup_mode(self, value: Optional[pulumi.Input[_builtins.str]]):
15329
- pulumi.set(self, "cgroup_mode", value)
16295
+ @imagefs_available.setter
16296
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16297
+ pulumi.set(self, "imagefs_available", value)
15330
16298
 
15331
16299
  @_builtins.property
15332
- @pulumi.getter(name="hugepagesConfig")
15333
- def hugepages_config(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']]:
16300
+ @pulumi.getter(name="imagefsInodesFree")
16301
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
15334
16302
  """
15335
- Amounts for 2M and 1G hugepages. Structure is documented below.
16303
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15336
16304
  """
15337
- return pulumi.get(self, "hugepages_config")
16305
+ return pulumi.get(self, "imagefs_inodes_free")
15338
16306
 
15339
- @hugepages_config.setter
15340
- def hugepages_config(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']]):
15341
- pulumi.set(self, "hugepages_config", value)
16307
+ @imagefs_inodes_free.setter
16308
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
16309
+ pulumi.set(self, "imagefs_inodes_free", value)
15342
16310
 
15343
16311
  @_builtins.property
15344
- @pulumi.getter
15345
- def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
16312
+ @pulumi.getter(name="memoryAvailable")
16313
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
15346
16314
  """
15347
- The Linux kernel parameters to be applied to the nodes
15348
- and all pods running on the nodes. Specified as a map from the key, such as
15349
- `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
15350
- Note that validations happen all server side. All attributes are optional.
16315
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
15351
16316
  """
15352
- return pulumi.get(self, "sysctls")
16317
+ return pulumi.get(self, "memory_available")
15353
16318
 
15354
- @sysctls.setter
15355
- def sysctls(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
15356
- pulumi.set(self, "sysctls", value)
16319
+ @memory_available.setter
16320
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16321
+ pulumi.set(self, "memory_available", value)
16322
+
16323
+ @_builtins.property
16324
+ @pulumi.getter(name="nodefsAvailable")
16325
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16326
+ """
16327
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16328
+ """
16329
+ return pulumi.get(self, "nodefs_available")
16330
+
16331
+ @nodefs_available.setter
16332
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16333
+ pulumi.set(self, "nodefs_available", value)
16334
+
16335
+ @_builtins.property
16336
+ @pulumi.getter(name="nodefsInodesFree")
16337
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
16338
+ """
16339
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16340
+ """
16341
+ return pulumi.get(self, "nodefs_inodes_free")
16342
+
16343
+ @nodefs_inodes_free.setter
16344
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
16345
+ pulumi.set(self, "nodefs_inodes_free", value)
16346
+
16347
+ @_builtins.property
16348
+ @pulumi.getter(name="pidAvailable")
16349
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16350
+ """
16351
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
16352
+ """
16353
+ return pulumi.get(self, "pid_available")
16354
+
16355
+ @pid_available.setter
16356
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16357
+ pulumi.set(self, "pid_available", value)
15357
16358
 
15358
16359
 
15359
16360
  if not MYPY:
15360
- class ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict(TypedDict):
15361
- hugepage_size1g: NotRequired[pulumi.Input[_builtins.int]]
16361
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgsDict(TypedDict):
16362
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
15362
16363
  """
15363
- Amount of 1G hugepages.
16364
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
15364
16365
  """
15365
- hugepage_size2m: NotRequired[pulumi.Input[_builtins.int]]
16366
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
15366
16367
  """
15367
- Amount of 2M hugepages.
16368
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
16369
+ """
16370
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
16371
+ """
16372
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
16373
+ """
16374
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
16375
+ """
16376
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
16377
+ """
16378
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
16379
+ """
16380
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
16381
+ """
16382
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
16383
+ """
16384
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
15368
16385
  """
15369
16386
  elif False:
15370
- ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict: TypeAlias = Mapping[str, Any]
16387
+ ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgsDict: TypeAlias = Mapping[str, Any]
15371
16388
 
15372
16389
  @pulumi.input_type
15373
- class ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs:
16390
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs:
15374
16391
  def __init__(__self__, *,
15375
- hugepage_size1g: Optional[pulumi.Input[_builtins.int]] = None,
15376
- hugepage_size2m: Optional[pulumi.Input[_builtins.int]] = None):
16392
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
16393
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
16394
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
16395
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
16396
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
16397
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
15377
16398
  """
15378
- :param pulumi.Input[_builtins.int] hugepage_size1g: Amount of 1G hugepages.
15379
- :param pulumi.Input[_builtins.int] hugepage_size2m: Amount of 2M hugepages.
16399
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
16400
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
16401
+ :param pulumi.Input[_builtins.str] memory_available: Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
16402
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
16403
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
16404
+ :param pulumi.Input[_builtins.str] pid_available: Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
15380
16405
  """
15381
- if hugepage_size1g is not None:
15382
- pulumi.set(__self__, "hugepage_size1g", hugepage_size1g)
15383
- if hugepage_size2m is not None:
15384
- pulumi.set(__self__, "hugepage_size2m", hugepage_size2m)
16406
+ if imagefs_available is not None:
16407
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
16408
+ if imagefs_inodes_free is not None:
16409
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
16410
+ if memory_available is not None:
16411
+ pulumi.set(__self__, "memory_available", memory_available)
16412
+ if nodefs_available is not None:
16413
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
16414
+ if nodefs_inodes_free is not None:
16415
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
16416
+ if pid_available is not None:
16417
+ pulumi.set(__self__, "pid_available", pid_available)
15385
16418
 
15386
16419
  @_builtins.property
15387
- @pulumi.getter(name="hugepageSize1g")
15388
- def hugepage_size1g(self) -> Optional[pulumi.Input[_builtins.int]]:
16420
+ @pulumi.getter(name="imagefsAvailable")
16421
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
15389
16422
  """
15390
- Amount of 1G hugepages.
16423
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
15391
16424
  """
15392
- return pulumi.get(self, "hugepage_size1g")
16425
+ return pulumi.get(self, "imagefs_available")
15393
16426
 
15394
- @hugepage_size1g.setter
15395
- def hugepage_size1g(self, value: Optional[pulumi.Input[_builtins.int]]):
15396
- pulumi.set(self, "hugepage_size1g", value)
16427
+ @imagefs_available.setter
16428
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16429
+ pulumi.set(self, "imagefs_available", value)
15397
16430
 
15398
16431
  @_builtins.property
15399
- @pulumi.getter(name="hugepageSize2m")
15400
- def hugepage_size2m(self) -> Optional[pulumi.Input[_builtins.int]]:
16432
+ @pulumi.getter(name="imagefsInodesFree")
16433
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
15401
16434
  """
15402
- Amount of 2M hugepages.
16435
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
15403
16436
  """
15404
- return pulumi.get(self, "hugepage_size2m")
16437
+ return pulumi.get(self, "imagefs_inodes_free")
15405
16438
 
15406
- @hugepage_size2m.setter
15407
- def hugepage_size2m(self, value: Optional[pulumi.Input[_builtins.int]]):
15408
- pulumi.set(self, "hugepage_size2m", value)
16439
+ @imagefs_inodes_free.setter
16440
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
16441
+ pulumi.set(self, "imagefs_inodes_free", value)
16442
+
16443
+ @_builtins.property
16444
+ @pulumi.getter(name="memoryAvailable")
16445
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16446
+ """
16447
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
16448
+ """
16449
+ return pulumi.get(self, "memory_available")
15409
16450
 
16451
+ @memory_available.setter
16452
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16453
+ pulumi.set(self, "memory_available", value)
15410
16454
 
15411
- if not MYPY:
15412
- class ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgsDict(TypedDict):
15413
- local_ssd_count: pulumi.Input[_builtins.int]
16455
+ @_builtins.property
16456
+ @pulumi.getter(name="nodefsAvailable")
16457
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
15414
16458
  """
15415
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
15416
- > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
16459
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
15417
16460
  """
15418
- elif False:
15419
- ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgsDict: TypeAlias = Mapping[str, Any]
16461
+ return pulumi.get(self, "nodefs_available")
15420
16462
 
15421
- @pulumi.input_type
15422
- class ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgs:
15423
- def __init__(__self__, *,
15424
- local_ssd_count: pulumi.Input[_builtins.int]):
16463
+ @nodefs_available.setter
16464
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16465
+ pulumi.set(self, "nodefs_available", value)
16466
+
16467
+ @_builtins.property
16468
+ @pulumi.getter(name="nodefsInodesFree")
16469
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
15425
16470
  """
15426
- :param pulumi.Input[_builtins.int] local_ssd_count: Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
15427
- > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
16471
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
15428
16472
  """
15429
- pulumi.set(__self__, "local_ssd_count", local_ssd_count)
16473
+ return pulumi.get(self, "nodefs_inodes_free")
16474
+
16475
+ @nodefs_inodes_free.setter
16476
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
16477
+ pulumi.set(self, "nodefs_inodes_free", value)
15430
16478
 
15431
16479
  @_builtins.property
15432
- @pulumi.getter(name="localSsdCount")
15433
- def local_ssd_count(self) -> pulumi.Input[_builtins.int]:
16480
+ @pulumi.getter(name="pidAvailable")
16481
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
15434
16482
  """
15435
- Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
15436
- > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
16483
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
15437
16484
  """
15438
- return pulumi.get(self, "local_ssd_count")
16485
+ return pulumi.get(self, "pid_available")
15439
16486
 
15440
- @local_ssd_count.setter
15441
- def local_ssd_count(self, value: pulumi.Input[_builtins.int]):
15442
- pulumi.set(self, "local_ssd_count", value)
16487
+ @pid_available.setter
16488
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16489
+ pulumi.set(self, "pid_available", value)
15443
16490
 
15444
16491
 
15445
16492
  if not MYPY:
15446
- class ClusterNodePoolNodeConfigReservationAffinityArgsDict(TypedDict):
15447
- consume_reservation_type: pulumi.Input[_builtins.str]
16493
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict(TypedDict):
16494
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
15448
16495
  """
15449
- The type of reservation consumption
15450
- Accepted values are:
16496
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16497
+ """
16498
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
16499
+ """
16500
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16501
+ """
16502
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
16503
+ """
16504
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
16505
+ """
16506
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
16507
+ """
16508
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16509
+ """
16510
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
16511
+ """
16512
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16513
+ """
16514
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
16515
+ """
16516
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16517
+ """
16518
+ elif False:
16519
+ ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict: TypeAlias = Mapping[str, Any]
16520
+
16521
+ @pulumi.input_type
16522
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs:
16523
+ def __init__(__self__, *,
16524
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
16525
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
16526
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
16527
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
16528
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
16529
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
16530
+ """
16531
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16532
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16533
+ :param pulumi.Input[_builtins.str] memory_available: Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
16534
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16535
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16536
+ :param pulumi.Input[_builtins.str] pid_available: Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16537
+ """
16538
+ if imagefs_available is not None:
16539
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
16540
+ if imagefs_inodes_free is not None:
16541
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
16542
+ if memory_available is not None:
16543
+ pulumi.set(__self__, "memory_available", memory_available)
16544
+ if nodefs_available is not None:
16545
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
16546
+ if nodefs_inodes_free is not None:
16547
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
16548
+ if pid_available is not None:
16549
+ pulumi.set(__self__, "pid_available", pid_available)
16550
+
16551
+ @_builtins.property
16552
+ @pulumi.getter(name="imagefsAvailable")
16553
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16554
+ """
16555
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16556
+ """
16557
+ return pulumi.get(self, "imagefs_available")
16558
+
16559
+ @imagefs_available.setter
16560
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16561
+ pulumi.set(self, "imagefs_available", value)
16562
+
16563
+ @_builtins.property
16564
+ @pulumi.getter(name="imagefsInodesFree")
16565
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
16566
+ """
16567
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16568
+ """
16569
+ return pulumi.get(self, "imagefs_inodes_free")
16570
+
16571
+ @imagefs_inodes_free.setter
16572
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
16573
+ pulumi.set(self, "imagefs_inodes_free", value)
16574
+
16575
+ @_builtins.property
16576
+ @pulumi.getter(name="memoryAvailable")
16577
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16578
+ """
16579
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
16580
+ """
16581
+ return pulumi.get(self, "memory_available")
16582
+
16583
+ @memory_available.setter
16584
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16585
+ pulumi.set(self, "memory_available", value)
16586
+
16587
+ @_builtins.property
16588
+ @pulumi.getter(name="nodefsAvailable")
16589
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16590
+ """
16591
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16592
+ """
16593
+ return pulumi.get(self, "nodefs_available")
16594
+
16595
+ @nodefs_available.setter
16596
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16597
+ pulumi.set(self, "nodefs_available", value)
16598
+
16599
+ @_builtins.property
16600
+ @pulumi.getter(name="nodefsInodesFree")
16601
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
16602
+ """
16603
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16604
+ """
16605
+ return pulumi.get(self, "nodefs_inodes_free")
16606
+
16607
+ @nodefs_inodes_free.setter
16608
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
16609
+ pulumi.set(self, "nodefs_inodes_free", value)
16610
+
16611
+ @_builtins.property
16612
+ @pulumi.getter(name="pidAvailable")
16613
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
16614
+ """
16615
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
16616
+ """
16617
+ return pulumi.get(self, "pid_available")
16618
+
16619
+ @pid_available.setter
16620
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
16621
+ pulumi.set(self, "pid_available", value)
16622
+
16623
+
16624
+ if not MYPY:
16625
+ class ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict(TypedDict):
16626
+ cgroup_mode: NotRequired[pulumi.Input[_builtins.str]]
16627
+ """
16628
+ Possible cgroup modes that can be used.
16629
+ Accepted values are:
16630
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
16631
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
16632
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
16633
+ """
16634
+ hugepages_config: NotRequired[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict']]
16635
+ """
16636
+ Amounts for 2M and 1G hugepages. Structure is documented below.
16637
+ """
16638
+ sysctls: NotRequired[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]
16639
+ """
16640
+ The Linux kernel parameters to be applied to the nodes
16641
+ and all pods running on the nodes. Specified as a map from the key, such as
16642
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
16643
+ Note that validations happen all server side. All attributes are optional.
16644
+ """
16645
+ transparent_hugepage_defrag: NotRequired[pulumi.Input[_builtins.str]]
16646
+ """
16647
+ The Linux kernel transparent hugepage defrag setting.
16648
+ """
16649
+ transparent_hugepage_enabled: NotRequired[pulumi.Input[_builtins.str]]
16650
+ """
16651
+ The Linux kernel transparent hugepage setting.
16652
+ """
16653
+ elif False:
16654
+ ClusterNodePoolNodeConfigLinuxNodeConfigArgsDict: TypeAlias = Mapping[str, Any]
16655
+
16656
+ @pulumi.input_type
16657
+ class ClusterNodePoolNodeConfigLinuxNodeConfigArgs:
16658
+ def __init__(__self__, *,
16659
+ cgroup_mode: Optional[pulumi.Input[_builtins.str]] = None,
16660
+ hugepages_config: Optional[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']] = None,
16661
+ sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
16662
+ transparent_hugepage_defrag: Optional[pulumi.Input[_builtins.str]] = None,
16663
+ transparent_hugepage_enabled: Optional[pulumi.Input[_builtins.str]] = None):
16664
+ """
16665
+ :param pulumi.Input[_builtins.str] cgroup_mode: Possible cgroup modes that can be used.
16666
+ Accepted values are:
16667
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
16668
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
16669
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
16670
+ :param pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_config: Amounts for 2M and 1G hugepages. Structure is documented below.
16671
+ :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] sysctls: The Linux kernel parameters to be applied to the nodes
16672
+ and all pods running on the nodes. Specified as a map from the key, such as
16673
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
16674
+ Note that validations happen all server side. All attributes are optional.
16675
+ :param pulumi.Input[_builtins.str] transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
16676
+ :param pulumi.Input[_builtins.str] transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
16677
+ """
16678
+ if cgroup_mode is not None:
16679
+ pulumi.set(__self__, "cgroup_mode", cgroup_mode)
16680
+ if hugepages_config is not None:
16681
+ pulumi.set(__self__, "hugepages_config", hugepages_config)
16682
+ if sysctls is not None:
16683
+ pulumi.set(__self__, "sysctls", sysctls)
16684
+ if transparent_hugepage_defrag is not None:
16685
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
16686
+ if transparent_hugepage_enabled is not None:
16687
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
16688
+
16689
+ @_builtins.property
16690
+ @pulumi.getter(name="cgroupMode")
16691
+ def cgroup_mode(self) -> Optional[pulumi.Input[_builtins.str]]:
16692
+ """
16693
+ Possible cgroup modes that can be used.
16694
+ Accepted values are:
16695
+ * `CGROUP_MODE_UNSPECIFIED`: CGROUP_MODE_UNSPECIFIED is when unspecified cgroup configuration is used. The default for the GKE node OS image will be used.
16696
+ * `CGROUP_MODE_V1`: CGROUP_MODE_V1 specifies to use cgroupv1 for the cgroup configuration on the node image.
16697
+ * `CGROUP_MODE_V2`: CGROUP_MODE_V2 specifies to use cgroupv2 for the cgroup configuration on the node image.
16698
+ """
16699
+ return pulumi.get(self, "cgroup_mode")
16700
+
16701
+ @cgroup_mode.setter
16702
+ def cgroup_mode(self, value: Optional[pulumi.Input[_builtins.str]]):
16703
+ pulumi.set(self, "cgroup_mode", value)
16704
+
16705
+ @_builtins.property
16706
+ @pulumi.getter(name="hugepagesConfig")
16707
+ def hugepages_config(self) -> Optional[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']]:
16708
+ """
16709
+ Amounts for 2M and 1G hugepages. Structure is documented below.
16710
+ """
16711
+ return pulumi.get(self, "hugepages_config")
16712
+
16713
+ @hugepages_config.setter
16714
+ def hugepages_config(self, value: Optional[pulumi.Input['ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']]):
16715
+ pulumi.set(self, "hugepages_config", value)
16716
+
16717
+ @_builtins.property
16718
+ @pulumi.getter
16719
+ def sysctls(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]:
16720
+ """
16721
+ The Linux kernel parameters to be applied to the nodes
16722
+ and all pods running on the nodes. Specified as a map from the key, such as
16723
+ `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
16724
+ Note that validations happen all server side. All attributes are optional.
16725
+ """
16726
+ return pulumi.get(self, "sysctls")
16727
+
16728
+ @sysctls.setter
16729
+ def sysctls(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
16730
+ pulumi.set(self, "sysctls", value)
16731
+
16732
+ @_builtins.property
16733
+ @pulumi.getter(name="transparentHugepageDefrag")
16734
+ def transparent_hugepage_defrag(self) -> Optional[pulumi.Input[_builtins.str]]:
16735
+ """
16736
+ The Linux kernel transparent hugepage defrag setting.
16737
+ """
16738
+ return pulumi.get(self, "transparent_hugepage_defrag")
16739
+
16740
+ @transparent_hugepage_defrag.setter
16741
+ def transparent_hugepage_defrag(self, value: Optional[pulumi.Input[_builtins.str]]):
16742
+ pulumi.set(self, "transparent_hugepage_defrag", value)
16743
+
16744
+ @_builtins.property
16745
+ @pulumi.getter(name="transparentHugepageEnabled")
16746
+ def transparent_hugepage_enabled(self) -> Optional[pulumi.Input[_builtins.str]]:
16747
+ """
16748
+ The Linux kernel transparent hugepage setting.
16749
+ """
16750
+ return pulumi.get(self, "transparent_hugepage_enabled")
16751
+
16752
+ @transparent_hugepage_enabled.setter
16753
+ def transparent_hugepage_enabled(self, value: Optional[pulumi.Input[_builtins.str]]):
16754
+ pulumi.set(self, "transparent_hugepage_enabled", value)
16755
+
16756
+
16757
+ if not MYPY:
16758
+ class ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict(TypedDict):
16759
+ hugepage_size1g: NotRequired[pulumi.Input[_builtins.int]]
16760
+ """
16761
+ Amount of 1G hugepages.
16762
+ """
16763
+ hugepage_size2m: NotRequired[pulumi.Input[_builtins.int]]
16764
+ """
16765
+ Amount of 2M hugepages.
16766
+ """
16767
+ elif False:
16768
+ ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict: TypeAlias = Mapping[str, Any]
16769
+
16770
+ @pulumi.input_type
16771
+ class ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs:
16772
+ def __init__(__self__, *,
16773
+ hugepage_size1g: Optional[pulumi.Input[_builtins.int]] = None,
16774
+ hugepage_size2m: Optional[pulumi.Input[_builtins.int]] = None):
16775
+ """
16776
+ :param pulumi.Input[_builtins.int] hugepage_size1g: Amount of 1G hugepages.
16777
+ :param pulumi.Input[_builtins.int] hugepage_size2m: Amount of 2M hugepages.
16778
+ """
16779
+ if hugepage_size1g is not None:
16780
+ pulumi.set(__self__, "hugepage_size1g", hugepage_size1g)
16781
+ if hugepage_size2m is not None:
16782
+ pulumi.set(__self__, "hugepage_size2m", hugepage_size2m)
16783
+
16784
+ @_builtins.property
16785
+ @pulumi.getter(name="hugepageSize1g")
16786
+ def hugepage_size1g(self) -> Optional[pulumi.Input[_builtins.int]]:
16787
+ """
16788
+ Amount of 1G hugepages.
16789
+ """
16790
+ return pulumi.get(self, "hugepage_size1g")
16791
+
16792
+ @hugepage_size1g.setter
16793
+ def hugepage_size1g(self, value: Optional[pulumi.Input[_builtins.int]]):
16794
+ pulumi.set(self, "hugepage_size1g", value)
16795
+
16796
+ @_builtins.property
16797
+ @pulumi.getter(name="hugepageSize2m")
16798
+ def hugepage_size2m(self) -> Optional[pulumi.Input[_builtins.int]]:
16799
+ """
16800
+ Amount of 2M hugepages.
16801
+ """
16802
+ return pulumi.get(self, "hugepage_size2m")
16803
+
16804
+ @hugepage_size2m.setter
16805
+ def hugepage_size2m(self, value: Optional[pulumi.Input[_builtins.int]]):
16806
+ pulumi.set(self, "hugepage_size2m", value)
16807
+
16808
+
16809
+ if not MYPY:
16810
+ class ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgsDict(TypedDict):
16811
+ local_ssd_count: pulumi.Input[_builtins.int]
16812
+ """
16813
+ Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
16814
+ > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
16815
+ """
16816
+ elif False:
16817
+ ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgsDict: TypeAlias = Mapping[str, Any]
16818
+
16819
+ @pulumi.input_type
16820
+ class ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigArgs:
16821
+ def __init__(__self__, *,
16822
+ local_ssd_count: pulumi.Input[_builtins.int]):
16823
+ """
16824
+ :param pulumi.Input[_builtins.int] local_ssd_count: Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
16825
+ > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
16826
+ """
16827
+ pulumi.set(__self__, "local_ssd_count", local_ssd_count)
16828
+
16829
+ @_builtins.property
16830
+ @pulumi.getter(name="localSsdCount")
16831
+ def local_ssd_count(self) -> pulumi.Input[_builtins.int]:
16832
+ """
16833
+ Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size. If zero, it means no raw-block local NVMe SSD disks to be attached to the node.
16834
+ > Note: Local NVMe SSD storage available in GKE versions v1.25.3-gke.1800 and later.
16835
+ """
16836
+ return pulumi.get(self, "local_ssd_count")
16837
+
16838
+ @local_ssd_count.setter
16839
+ def local_ssd_count(self, value: pulumi.Input[_builtins.int]):
16840
+ pulumi.set(self, "local_ssd_count", value)
16841
+
16842
+
16843
+ if not MYPY:
16844
+ class ClusterNodePoolNodeConfigReservationAffinityArgsDict(TypedDict):
16845
+ consume_reservation_type: pulumi.Input[_builtins.str]
16846
+ """
16847
+ The type of reservation consumption
16848
+ Accepted values are:
15451
16849
 
15452
16850
  * `"UNSPECIFIED"`: Default value. This should not be used.
15453
16851
  * `"NO_RESERVATION"`: Do not consume from any reserved capacity.
@@ -15690,7 +17088,11 @@ if not MYPY:
15690
17088
  class ClusterNodePoolNodeConfigSoleTenantConfigArgsDict(TypedDict):
15691
17089
  node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgsDict']]]
15692
17090
  """
15693
- .
17091
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
17092
+ """
17093
+ min_node_cpus: NotRequired[pulumi.Input[_builtins.int]]
17094
+ """
17095
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
15694
17096
  """
15695
17097
  elif False:
15696
17098
  ClusterNodePoolNodeConfigSoleTenantConfigArgsDict: TypeAlias = Mapping[str, Any]
@@ -15698,17 +17100,21 @@ elif False:
15698
17100
  @pulumi.input_type
15699
17101
  class ClusterNodePoolNodeConfigSoleTenantConfigArgs:
15700
17102
  def __init__(__self__, *,
15701
- node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
17103
+ node_affinities: pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]],
17104
+ min_node_cpus: Optional[pulumi.Input[_builtins.int]] = None):
15702
17105
  """
15703
- :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: .
17106
+ :param pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: The node affinity settings for the sole tenant node pool. Structure is documented below.
17107
+ :param pulumi.Input[_builtins.int] min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
15704
17108
  """
15705
17109
  pulumi.set(__self__, "node_affinities", node_affinities)
17110
+ if min_node_cpus is not None:
17111
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
15706
17112
 
15707
17113
  @_builtins.property
15708
17114
  @pulumi.getter(name="nodeAffinities")
15709
17115
  def node_affinities(self) -> pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]:
15710
17116
  """
15711
- .
17117
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
15712
17118
  """
15713
17119
  return pulumi.get(self, "node_affinities")
15714
17120
 
@@ -15716,6 +17122,18 @@ class ClusterNodePoolNodeConfigSoleTenantConfigArgs:
15716
17122
  def node_affinities(self, value: pulumi.Input[Sequence[pulumi.Input['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
15717
17123
  pulumi.set(self, "node_affinities", value)
15718
17124
 
17125
+ @_builtins.property
17126
+ @pulumi.getter(name="minNodeCpus")
17127
+ def min_node_cpus(self) -> Optional[pulumi.Input[_builtins.int]]:
17128
+ """
17129
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
17130
+ """
17131
+ return pulumi.get(self, "min_node_cpus")
17132
+
17133
+ @min_node_cpus.setter
17134
+ def min_node_cpus(self, value: Optional[pulumi.Input[_builtins.int]]):
17135
+ pulumi.set(self, "min_node_cpus", value)
17136
+
15719
17137
 
15720
17138
  if not MYPY:
15721
17139
  class ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgsDict(TypedDict):
@@ -18133,6 +19551,10 @@ if not MYPY:
18133
19551
  """
18134
19552
  Specifies options for controlling advanced machine features.
18135
19553
  """
19554
+ boot_disk: NotRequired[pulumi.Input['NodePoolNodeConfigBootDiskArgsDict']]
19555
+ """
19556
+ Boot disk configuration for the node pool's nodes.
19557
+ """
18136
19558
  boot_disk_kms_key: NotRequired[pulumi.Input[_builtins.str]]
18137
19559
  """
18138
19560
  The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
@@ -18316,6 +19738,7 @@ elif False:
18316
19738
  class NodePoolNodeConfigArgs:
18317
19739
  def __init__(__self__, *,
18318
19740
  advanced_machine_features: Optional[pulumi.Input['NodePoolNodeConfigAdvancedMachineFeaturesArgs']] = None,
19741
+ boot_disk: Optional[pulumi.Input['NodePoolNodeConfigBootDiskArgs']] = None,
18319
19742
  boot_disk_kms_key: Optional[pulumi.Input[_builtins.str]] = None,
18320
19743
  confidential_nodes: Optional[pulumi.Input['NodePoolNodeConfigConfidentialNodesArgs']] = None,
18321
19744
  containerd_config: Optional[pulumi.Input['NodePoolNodeConfigContainerdConfigArgs']] = None,
@@ -18362,6 +19785,7 @@ class NodePoolNodeConfigArgs:
18362
19785
  workload_metadata_config: Optional[pulumi.Input['NodePoolNodeConfigWorkloadMetadataConfigArgs']] = None):
18363
19786
  """
18364
19787
  :param pulumi.Input['NodePoolNodeConfigAdvancedMachineFeaturesArgs'] advanced_machine_features: Specifies options for controlling advanced machine features.
19788
+ :param pulumi.Input['NodePoolNodeConfigBootDiskArgs'] boot_disk: Boot disk configuration for the node pool's nodes.
18365
19789
  :param pulumi.Input[_builtins.str] boot_disk_kms_key: The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.
18366
19790
  :param pulumi.Input['NodePoolNodeConfigConfidentialNodesArgs'] confidential_nodes: Configuration for the confidential nodes feature, which makes nodes run on confidential VMs.
18367
19791
  :param pulumi.Input['NodePoolNodeConfigContainerdConfigArgs'] containerd_config: Parameters for containerd configuration.
@@ -18409,6 +19833,8 @@ class NodePoolNodeConfigArgs:
18409
19833
  """
18410
19834
  if advanced_machine_features is not None:
18411
19835
  pulumi.set(__self__, "advanced_machine_features", advanced_machine_features)
19836
+ if boot_disk is not None:
19837
+ pulumi.set(__self__, "boot_disk", boot_disk)
18412
19838
  if boot_disk_kms_key is not None:
18413
19839
  pulumi.set(__self__, "boot_disk_kms_key", boot_disk_kms_key)
18414
19840
  if confidential_nodes is not None:
@@ -18510,6 +19936,18 @@ class NodePoolNodeConfigArgs:
18510
19936
  def advanced_machine_features(self, value: Optional[pulumi.Input['NodePoolNodeConfigAdvancedMachineFeaturesArgs']]):
18511
19937
  pulumi.set(self, "advanced_machine_features", value)
18512
19938
 
19939
+ @_builtins.property
19940
+ @pulumi.getter(name="bootDisk")
19941
+ def boot_disk(self) -> Optional[pulumi.Input['NodePoolNodeConfigBootDiskArgs']]:
19942
+ """
19943
+ Boot disk configuration for the node pool's nodes.
19944
+ """
19945
+ return pulumi.get(self, "boot_disk")
19946
+
19947
+ @boot_disk.setter
19948
+ def boot_disk(self, value: Optional[pulumi.Input['NodePoolNodeConfigBootDiskArgs']]):
19949
+ pulumi.set(self, "boot_disk", value)
19950
+
18513
19951
  @_builtins.property
18514
19952
  @pulumi.getter(name="bootDiskKmsKey")
18515
19953
  def boot_disk_kms_key(self) -> Optional[pulumi.Input[_builtins.str]]:
@@ -19110,6 +20548,98 @@ class NodePoolNodeConfigAdvancedMachineFeaturesArgs:
19110
20548
  pulumi.set(self, "performance_monitoring_unit", value)
19111
20549
 
19112
20550
 
20551
+ if not MYPY:
20552
+ class NodePoolNodeConfigBootDiskArgsDict(TypedDict):
20553
+ disk_type: NotRequired[pulumi.Input[_builtins.str]]
20554
+ """
20555
+ Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
20556
+ """
20557
+ provisioned_iops: NotRequired[pulumi.Input[_builtins.int]]
20558
+ """
20559
+ Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
20560
+ """
20561
+ provisioned_throughput: NotRequired[pulumi.Input[_builtins.int]]
20562
+ """
20563
+ Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
20564
+ """
20565
+ size_gb: NotRequired[pulumi.Input[_builtins.int]]
20566
+ """
20567
+ Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
20568
+ """
20569
+ elif False:
20570
+ NodePoolNodeConfigBootDiskArgsDict: TypeAlias = Mapping[str, Any]
20571
+
20572
+ @pulumi.input_type
20573
+ class NodePoolNodeConfigBootDiskArgs:
20574
+ def __init__(__self__, *,
20575
+ disk_type: Optional[pulumi.Input[_builtins.str]] = None,
20576
+ provisioned_iops: Optional[pulumi.Input[_builtins.int]] = None,
20577
+ provisioned_throughput: Optional[pulumi.Input[_builtins.int]] = None,
20578
+ size_gb: Optional[pulumi.Input[_builtins.int]] = None):
20579
+ """
20580
+ :param pulumi.Input[_builtins.str] disk_type: Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
20581
+ :param pulumi.Input[_builtins.int] provisioned_iops: Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
20582
+ :param pulumi.Input[_builtins.int] provisioned_throughput: Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
20583
+ :param pulumi.Input[_builtins.int] size_gb: Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
20584
+ """
20585
+ if disk_type is not None:
20586
+ pulumi.set(__self__, "disk_type", disk_type)
20587
+ if provisioned_iops is not None:
20588
+ pulumi.set(__self__, "provisioned_iops", provisioned_iops)
20589
+ if provisioned_throughput is not None:
20590
+ pulumi.set(__self__, "provisioned_throughput", provisioned_throughput)
20591
+ if size_gb is not None:
20592
+ pulumi.set(__self__, "size_gb", size_gb)
20593
+
20594
+ @_builtins.property
20595
+ @pulumi.getter(name="diskType")
20596
+ def disk_type(self) -> Optional[pulumi.Input[_builtins.str]]:
20597
+ """
20598
+ Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd
20599
+ """
20600
+ return pulumi.get(self, "disk_type")
20601
+
20602
+ @disk_type.setter
20603
+ def disk_type(self, value: Optional[pulumi.Input[_builtins.str]]):
20604
+ pulumi.set(self, "disk_type", value)
20605
+
20606
+ @_builtins.property
20607
+ @pulumi.getter(name="provisionedIops")
20608
+ def provisioned_iops(self) -> Optional[pulumi.Input[_builtins.int]]:
20609
+ """
20610
+ Configured IOPs provisioning. Only valid with disk type hyperdisk-balanced.
20611
+ """
20612
+ return pulumi.get(self, "provisioned_iops")
20613
+
20614
+ @provisioned_iops.setter
20615
+ def provisioned_iops(self, value: Optional[pulumi.Input[_builtins.int]]):
20616
+ pulumi.set(self, "provisioned_iops", value)
20617
+
20618
+ @_builtins.property
20619
+ @pulumi.getter(name="provisionedThroughput")
20620
+ def provisioned_throughput(self) -> Optional[pulumi.Input[_builtins.int]]:
20621
+ """
20622
+ Configured throughput provisioning. Only valid with disk type hyperdisk-balanced.
20623
+ """
20624
+ return pulumi.get(self, "provisioned_throughput")
20625
+
20626
+ @provisioned_throughput.setter
20627
+ def provisioned_throughput(self, value: Optional[pulumi.Input[_builtins.int]]):
20628
+ pulumi.set(self, "provisioned_throughput", value)
20629
+
20630
+ @_builtins.property
20631
+ @pulumi.getter(name="sizeGb")
20632
+ def size_gb(self) -> Optional[pulumi.Input[_builtins.int]]:
20633
+ """
20634
+ Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.
20635
+ """
20636
+ return pulumi.get(self, "size_gb")
20637
+
20638
+ @size_gb.setter
20639
+ def size_gb(self, value: Optional[pulumi.Input[_builtins.int]]):
20640
+ pulumi.set(self, "size_gb", value)
20641
+
20642
+
19113
20643
  if not MYPY:
19114
20644
  class NodePoolNodeConfigConfidentialNodesArgsDict(TypedDict):
19115
20645
  enabled: pulumi.Input[_builtins.bool]
@@ -19820,6 +21350,22 @@ if not MYPY:
19820
21350
  """
19821
21351
  Control the CPU management policy on the node.
19822
21352
  """
21353
+ eviction_max_pod_grace_period_seconds: NotRequired[pulumi.Input[_builtins.int]]
21354
+ """
21355
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
21356
+ """
21357
+ eviction_minimum_reclaim: NotRequired[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict']]
21358
+ """
21359
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
21360
+ """
21361
+ eviction_soft: NotRequired[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftArgsDict']]
21362
+ """
21363
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
21364
+ """
21365
+ eviction_soft_grace_period: NotRequired[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict']]
21366
+ """
21367
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
21368
+ """
19823
21369
  image_gc_high_threshold_percent: NotRequired[pulumi.Input[_builtins.int]]
19824
21370
  """
19825
21371
  Defines the percent of disk usage after which image garbage collection is always run.
@@ -19840,10 +21386,18 @@ if not MYPY:
19840
21386
  """
19841
21387
  Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
19842
21388
  """
21389
+ max_parallel_image_pulls: NotRequired[pulumi.Input[_builtins.int]]
21390
+ """
21391
+ Set the maximum number of image pulls in parallel.
21392
+ """
19843
21393
  pod_pids_limit: NotRequired[pulumi.Input[_builtins.int]]
19844
21394
  """
19845
21395
  Controls the maximum number of processes allowed to run in a pod.
19846
21396
  """
21397
+ single_process_oom_kill: NotRequired[pulumi.Input[_builtins.bool]]
21398
+ """
21399
+ Defines whether to enable single process OOM killer.
21400
+ """
19847
21401
  elif False:
19848
21402
  NodePoolNodeConfigKubeletConfigArgsDict: TypeAlias = Mapping[str, Any]
19849
21403
 
@@ -19856,12 +21410,18 @@ class NodePoolNodeConfigKubeletConfigArgs:
19856
21410
  cpu_cfs_quota: Optional[pulumi.Input[_builtins.bool]] = None,
19857
21411
  cpu_cfs_quota_period: Optional[pulumi.Input[_builtins.str]] = None,
19858
21412
  cpu_manager_policy: Optional[pulumi.Input[_builtins.str]] = None,
21413
+ eviction_max_pod_grace_period_seconds: Optional[pulumi.Input[_builtins.int]] = None,
21414
+ eviction_minimum_reclaim: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs']] = None,
21415
+ eviction_soft: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftArgs']] = None,
21416
+ eviction_soft_grace_period: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']] = None,
19859
21417
  image_gc_high_threshold_percent: Optional[pulumi.Input[_builtins.int]] = None,
19860
21418
  image_gc_low_threshold_percent: Optional[pulumi.Input[_builtins.int]] = None,
19861
21419
  image_maximum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
19862
21420
  image_minimum_gc_age: Optional[pulumi.Input[_builtins.str]] = None,
19863
21421
  insecure_kubelet_readonly_port_enabled: Optional[pulumi.Input[_builtins.str]] = None,
19864
- pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None):
21422
+ max_parallel_image_pulls: Optional[pulumi.Input[_builtins.int]] = None,
21423
+ pod_pids_limit: Optional[pulumi.Input[_builtins.int]] = None,
21424
+ single_process_oom_kill: Optional[pulumi.Input[_builtins.bool]] = None):
19865
21425
  """
19866
21426
  :param pulumi.Input[Sequence[pulumi.Input[_builtins.str]]] allowed_unsafe_sysctls: Defines a comma-separated allowlist of unsafe sysctls or sysctl patterns which can be set on the Pods.
19867
21427
  :param pulumi.Input[_builtins.int] container_log_max_files: Defines the maximum number of container log files that can be present for a container.
@@ -19869,12 +21429,18 @@ class NodePoolNodeConfigKubeletConfigArgs:
19869
21429
  :param pulumi.Input[_builtins.bool] cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
19870
21430
  :param pulumi.Input[_builtins.str] cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
19871
21431
  :param pulumi.Input[_builtins.str] cpu_manager_policy: Control the CPU management policy on the node.
21432
+ :param pulumi.Input[_builtins.int] eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
21433
+ :param pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs'] eviction_minimum_reclaim: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
21434
+ :param pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftArgs'] eviction_soft: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
21435
+ :param pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs'] eviction_soft_grace_period: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
19872
21436
  :param pulumi.Input[_builtins.int] image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run.
19873
21437
  :param pulumi.Input[_builtins.int] image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
19874
21438
  :param pulumi.Input[_builtins.str] image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected.
19875
21439
  :param pulumi.Input[_builtins.str] image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
19876
21440
  :param pulumi.Input[_builtins.str] insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
21441
+ :param pulumi.Input[_builtins.int] max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
19877
21442
  :param pulumi.Input[_builtins.int] pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
21443
+ :param pulumi.Input[_builtins.bool] single_process_oom_kill: Defines whether to enable single process OOM killer.
19878
21444
  """
19879
21445
  if allowed_unsafe_sysctls is not None:
19880
21446
  pulumi.set(__self__, "allowed_unsafe_sysctls", allowed_unsafe_sysctls)
@@ -19888,6 +21454,14 @@ class NodePoolNodeConfigKubeletConfigArgs:
19888
21454
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
19889
21455
  if cpu_manager_policy is not None:
19890
21456
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
21457
+ if eviction_max_pod_grace_period_seconds is not None:
21458
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
21459
+ if eviction_minimum_reclaim is not None:
21460
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
21461
+ if eviction_soft is not None:
21462
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
21463
+ if eviction_soft_grace_period is not None:
21464
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
19891
21465
  if image_gc_high_threshold_percent is not None:
19892
21466
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
19893
21467
  if image_gc_low_threshold_percent is not None:
@@ -19898,8 +21472,12 @@ class NodePoolNodeConfigKubeletConfigArgs:
19898
21472
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
19899
21473
  if insecure_kubelet_readonly_port_enabled is not None:
19900
21474
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
21475
+ if max_parallel_image_pulls is not None:
21476
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
19901
21477
  if pod_pids_limit is not None:
19902
21478
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
21479
+ if single_process_oom_kill is not None:
21480
+ pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
19903
21481
 
19904
21482
  @_builtins.property
19905
21483
  @pulumi.getter(name="allowedUnsafeSysctls")
@@ -19973,6 +21551,54 @@ class NodePoolNodeConfigKubeletConfigArgs:
19973
21551
  def cpu_manager_policy(self, value: Optional[pulumi.Input[_builtins.str]]):
19974
21552
  pulumi.set(self, "cpu_manager_policy", value)
19975
21553
 
21554
+ @_builtins.property
21555
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
21556
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[pulumi.Input[_builtins.int]]:
21557
+ """
21558
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
21559
+ """
21560
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
21561
+
21562
+ @eviction_max_pod_grace_period_seconds.setter
21563
+ def eviction_max_pod_grace_period_seconds(self, value: Optional[pulumi.Input[_builtins.int]]):
21564
+ pulumi.set(self, "eviction_max_pod_grace_period_seconds", value)
21565
+
21566
+ @_builtins.property
21567
+ @pulumi.getter(name="evictionMinimumReclaim")
21568
+ def eviction_minimum_reclaim(self) -> Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs']]:
21569
+ """
21570
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
21571
+ """
21572
+ return pulumi.get(self, "eviction_minimum_reclaim")
21573
+
21574
+ @eviction_minimum_reclaim.setter
21575
+ def eviction_minimum_reclaim(self, value: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs']]):
21576
+ pulumi.set(self, "eviction_minimum_reclaim", value)
21577
+
21578
+ @_builtins.property
21579
+ @pulumi.getter(name="evictionSoft")
21580
+ def eviction_soft(self) -> Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftArgs']]:
21581
+ """
21582
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
21583
+ """
21584
+ return pulumi.get(self, "eviction_soft")
21585
+
21586
+ @eviction_soft.setter
21587
+ def eviction_soft(self, value: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftArgs']]):
21588
+ pulumi.set(self, "eviction_soft", value)
21589
+
21590
+ @_builtins.property
21591
+ @pulumi.getter(name="evictionSoftGracePeriod")
21592
+ def eviction_soft_grace_period(self) -> Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']]:
21593
+ """
21594
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
21595
+ """
21596
+ return pulumi.get(self, "eviction_soft_grace_period")
21597
+
21598
+ @eviction_soft_grace_period.setter
21599
+ def eviction_soft_grace_period(self, value: Optional[pulumi.Input['NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs']]):
21600
+ pulumi.set(self, "eviction_soft_grace_period", value)
21601
+
19976
21602
  @_builtins.property
19977
21603
  @pulumi.getter(name="imageGcHighThresholdPercent")
19978
21604
  def image_gc_high_threshold_percent(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -20033,6 +21659,18 @@ class NodePoolNodeConfigKubeletConfigArgs:
20033
21659
  def insecure_kubelet_readonly_port_enabled(self, value: Optional[pulumi.Input[_builtins.str]]):
20034
21660
  pulumi.set(self, "insecure_kubelet_readonly_port_enabled", value)
20035
21661
 
21662
+ @_builtins.property
21663
+ @pulumi.getter(name="maxParallelImagePulls")
21664
+ def max_parallel_image_pulls(self) -> Optional[pulumi.Input[_builtins.int]]:
21665
+ """
21666
+ Set the maximum number of image pulls in parallel.
21667
+ """
21668
+ return pulumi.get(self, "max_parallel_image_pulls")
21669
+
21670
+ @max_parallel_image_pulls.setter
21671
+ def max_parallel_image_pulls(self, value: Optional[pulumi.Input[_builtins.int]]):
21672
+ pulumi.set(self, "max_parallel_image_pulls", value)
21673
+
20036
21674
  @_builtins.property
20037
21675
  @pulumi.getter(name="podPidsLimit")
20038
21676
  def pod_pids_limit(self) -> Optional[pulumi.Input[_builtins.int]]:
@@ -20045,6 +21683,414 @@ class NodePoolNodeConfigKubeletConfigArgs:
20045
21683
  def pod_pids_limit(self, value: Optional[pulumi.Input[_builtins.int]]):
20046
21684
  pulumi.set(self, "pod_pids_limit", value)
20047
21685
 
21686
+ @_builtins.property
21687
+ @pulumi.getter(name="singleProcessOomKill")
21688
+ def single_process_oom_kill(self) -> Optional[pulumi.Input[_builtins.bool]]:
21689
+ """
21690
+ Defines whether to enable single process OOM killer.
21691
+ """
21692
+ return pulumi.get(self, "single_process_oom_kill")
21693
+
21694
+ @single_process_oom_kill.setter
21695
+ def single_process_oom_kill(self, value: Optional[pulumi.Input[_builtins.bool]]):
21696
+ pulumi.set(self, "single_process_oom_kill", value)
21697
+
21698
+
21699
+ if not MYPY:
21700
+ class NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict(TypedDict):
21701
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
21702
+ """
21703
+ Defines percentage of minimum reclaim for imagefs.available.
21704
+ """
21705
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
21706
+ """
21707
+ Defines percentage of minimum reclaim for imagefs.inodesFree.
21708
+ """
21709
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
21710
+ """
21711
+ Defines percentage of minimum reclaim for memory.available.
21712
+ """
21713
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
21714
+ """
21715
+ Defines percentage of minimum reclaim for nodefs.available.
21716
+ """
21717
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
21718
+ """
21719
+ Defines percentage of minimum reclaim for nodefs.inodesFree.
21720
+ """
21721
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
21722
+ """
21723
+ Defines percentage of minimum reclaim for pid.available.
21724
+ """
21725
+ elif False:
21726
+ NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgsDict: TypeAlias = Mapping[str, Any]
21727
+
21728
+ @pulumi.input_type
21729
+ class NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs:
21730
+ def __init__(__self__, *,
21731
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
21732
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
21733
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
21734
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
21735
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
21736
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
21737
+ """
21738
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines percentage of minimum reclaim for imagefs.available.
21739
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree.
21740
+ :param pulumi.Input[_builtins.str] memory_available: Defines percentage of minimum reclaim for memory.available.
21741
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines percentage of minimum reclaim for nodefs.available.
21742
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree.
21743
+ :param pulumi.Input[_builtins.str] pid_available: Defines percentage of minimum reclaim for pid.available.
21744
+ """
21745
+ if imagefs_available is not None:
21746
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
21747
+ if imagefs_inodes_free is not None:
21748
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
21749
+ if memory_available is not None:
21750
+ pulumi.set(__self__, "memory_available", memory_available)
21751
+ if nodefs_available is not None:
21752
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
21753
+ if nodefs_inodes_free is not None:
21754
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
21755
+ if pid_available is not None:
21756
+ pulumi.set(__self__, "pid_available", pid_available)
21757
+
21758
+ @_builtins.property
21759
+ @pulumi.getter(name="imagefsAvailable")
21760
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21761
+ """
21762
+ Defines percentage of minimum reclaim for imagefs.available.
21763
+ """
21764
+ return pulumi.get(self, "imagefs_available")
21765
+
21766
+ @imagefs_available.setter
21767
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21768
+ pulumi.set(self, "imagefs_available", value)
21769
+
21770
+ @_builtins.property
21771
+ @pulumi.getter(name="imagefsInodesFree")
21772
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
21773
+ """
21774
+ Defines percentage of minimum reclaim for imagefs.inodesFree.
21775
+ """
21776
+ return pulumi.get(self, "imagefs_inodes_free")
21777
+
21778
+ @imagefs_inodes_free.setter
21779
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
21780
+ pulumi.set(self, "imagefs_inodes_free", value)
21781
+
21782
+ @_builtins.property
21783
+ @pulumi.getter(name="memoryAvailable")
21784
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21785
+ """
21786
+ Defines percentage of minimum reclaim for memory.available.
21787
+ """
21788
+ return pulumi.get(self, "memory_available")
21789
+
21790
+ @memory_available.setter
21791
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21792
+ pulumi.set(self, "memory_available", value)
21793
+
21794
+ @_builtins.property
21795
+ @pulumi.getter(name="nodefsAvailable")
21796
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21797
+ """
21798
+ Defines percentage of minimum reclaim for nodefs.available.
21799
+ """
21800
+ return pulumi.get(self, "nodefs_available")
21801
+
21802
+ @nodefs_available.setter
21803
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21804
+ pulumi.set(self, "nodefs_available", value)
21805
+
21806
+ @_builtins.property
21807
+ @pulumi.getter(name="nodefsInodesFree")
21808
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
21809
+ """
21810
+ Defines percentage of minimum reclaim for nodefs.inodesFree.
21811
+ """
21812
+ return pulumi.get(self, "nodefs_inodes_free")
21813
+
21814
+ @nodefs_inodes_free.setter
21815
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
21816
+ pulumi.set(self, "nodefs_inodes_free", value)
21817
+
21818
+ @_builtins.property
21819
+ @pulumi.getter(name="pidAvailable")
21820
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21821
+ """
21822
+ Defines percentage of minimum reclaim for pid.available.
21823
+ """
21824
+ return pulumi.get(self, "pid_available")
21825
+
21826
+ @pid_available.setter
21827
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21828
+ pulumi.set(self, "pid_available", value)
21829
+
21830
+
21831
+ if not MYPY:
21832
+ class NodePoolNodeConfigKubeletConfigEvictionSoftArgsDict(TypedDict):
21833
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
21834
+ """
21835
+ Defines percentage of soft eviction threshold for imagefs.available.
21836
+ """
21837
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
21838
+ """
21839
+ Defines percentage of soft eviction threshold for imagefs.inodesFree.
21840
+ """
21841
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
21842
+ """
21843
+ Defines quantity of soft eviction threshold for memory.available.
21844
+ """
21845
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
21846
+ """
21847
+ Defines percentage of soft eviction threshold for nodefs.available.
21848
+ """
21849
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
21850
+ """
21851
+ Defines percentage of soft eviction threshold for nodefs.inodesFree.
21852
+ """
21853
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
21854
+ """
21855
+ Defines percentage of soft eviction threshold for pid.available.
21856
+ """
21857
+ elif False:
21858
+ NodePoolNodeConfigKubeletConfigEvictionSoftArgsDict: TypeAlias = Mapping[str, Any]
21859
+
21860
+ @pulumi.input_type
21861
+ class NodePoolNodeConfigKubeletConfigEvictionSoftArgs:
21862
+ def __init__(__self__, *,
21863
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
21864
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
21865
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
21866
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
21867
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
21868
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
21869
+ """
21870
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines percentage of soft eviction threshold for imagefs.available.
21871
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree.
21872
+ :param pulumi.Input[_builtins.str] memory_available: Defines quantity of soft eviction threshold for memory.available.
21873
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines percentage of soft eviction threshold for nodefs.available.
21874
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree.
21875
+ :param pulumi.Input[_builtins.str] pid_available: Defines percentage of soft eviction threshold for pid.available.
21876
+ """
21877
+ if imagefs_available is not None:
21878
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
21879
+ if imagefs_inodes_free is not None:
21880
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
21881
+ if memory_available is not None:
21882
+ pulumi.set(__self__, "memory_available", memory_available)
21883
+ if nodefs_available is not None:
21884
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
21885
+ if nodefs_inodes_free is not None:
21886
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
21887
+ if pid_available is not None:
21888
+ pulumi.set(__self__, "pid_available", pid_available)
21889
+
21890
+ @_builtins.property
21891
+ @pulumi.getter(name="imagefsAvailable")
21892
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21893
+ """
21894
+ Defines percentage of soft eviction threshold for imagefs.available.
21895
+ """
21896
+ return pulumi.get(self, "imagefs_available")
21897
+
21898
+ @imagefs_available.setter
21899
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21900
+ pulumi.set(self, "imagefs_available", value)
21901
+
21902
+ @_builtins.property
21903
+ @pulumi.getter(name="imagefsInodesFree")
21904
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
21905
+ """
21906
+ Defines percentage of soft eviction threshold for imagefs.inodesFree.
21907
+ """
21908
+ return pulumi.get(self, "imagefs_inodes_free")
21909
+
21910
+ @imagefs_inodes_free.setter
21911
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
21912
+ pulumi.set(self, "imagefs_inodes_free", value)
21913
+
21914
+ @_builtins.property
21915
+ @pulumi.getter(name="memoryAvailable")
21916
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21917
+ """
21918
+ Defines quantity of soft eviction threshold for memory.available.
21919
+ """
21920
+ return pulumi.get(self, "memory_available")
21921
+
21922
+ @memory_available.setter
21923
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21924
+ pulumi.set(self, "memory_available", value)
21925
+
21926
+ @_builtins.property
21927
+ @pulumi.getter(name="nodefsAvailable")
21928
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21929
+ """
21930
+ Defines percentage of soft eviction threshold for nodefs.available.
21931
+ """
21932
+ return pulumi.get(self, "nodefs_available")
21933
+
21934
+ @nodefs_available.setter
21935
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21936
+ pulumi.set(self, "nodefs_available", value)
21937
+
21938
+ @_builtins.property
21939
+ @pulumi.getter(name="nodefsInodesFree")
21940
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
21941
+ """
21942
+ Defines percentage of soft eviction threshold for nodefs.inodesFree.
21943
+ """
21944
+ return pulumi.get(self, "nodefs_inodes_free")
21945
+
21946
+ @nodefs_inodes_free.setter
21947
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
21948
+ pulumi.set(self, "nodefs_inodes_free", value)
21949
+
21950
+ @_builtins.property
21951
+ @pulumi.getter(name="pidAvailable")
21952
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
21953
+ """
21954
+ Defines percentage of soft eviction threshold for pid.available.
21955
+ """
21956
+ return pulumi.get(self, "pid_available")
21957
+
21958
+ @pid_available.setter
21959
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
21960
+ pulumi.set(self, "pid_available", value)
21961
+
21962
+
21963
+ if not MYPY:
21964
+ class NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict(TypedDict):
21965
+ imagefs_available: NotRequired[pulumi.Input[_builtins.str]]
21966
+ """
21967
+ Defines grace period for the imagefs.available soft eviction threshold
21968
+ """
21969
+ imagefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
21970
+ """
21971
+ Defines grace period for the imagefs.inodesFree soft eviction threshold.
21972
+ """
21973
+ memory_available: NotRequired[pulumi.Input[_builtins.str]]
21974
+ """
21975
+ Defines grace period for the memory.available soft eviction threshold.
21976
+ """
21977
+ nodefs_available: NotRequired[pulumi.Input[_builtins.str]]
21978
+ """
21979
+ Defines grace period for the nodefs.available soft eviction threshold.
21980
+ """
21981
+ nodefs_inodes_free: NotRequired[pulumi.Input[_builtins.str]]
21982
+ """
21983
+ Defines grace period for the nodefs.inodesFree soft eviction threshold.
21984
+ """
21985
+ pid_available: NotRequired[pulumi.Input[_builtins.str]]
21986
+ """
21987
+ Defines grace period for the pid.available soft eviction threshold.
21988
+ """
21989
+ elif False:
21990
+ NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgsDict: TypeAlias = Mapping[str, Any]
21991
+
21992
+ @pulumi.input_type
21993
+ class NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs:
21994
+ def __init__(__self__, *,
21995
+ imagefs_available: Optional[pulumi.Input[_builtins.str]] = None,
21996
+ imagefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
21997
+ memory_available: Optional[pulumi.Input[_builtins.str]] = None,
21998
+ nodefs_available: Optional[pulumi.Input[_builtins.str]] = None,
21999
+ nodefs_inodes_free: Optional[pulumi.Input[_builtins.str]] = None,
22000
+ pid_available: Optional[pulumi.Input[_builtins.str]] = None):
22001
+ """
22002
+ :param pulumi.Input[_builtins.str] imagefs_available: Defines grace period for the imagefs.available soft eviction threshold
22003
+ :param pulumi.Input[_builtins.str] imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold.
22004
+ :param pulumi.Input[_builtins.str] memory_available: Defines grace period for the memory.available soft eviction threshold.
22005
+ :param pulumi.Input[_builtins.str] nodefs_available: Defines grace period for the nodefs.available soft eviction threshold.
22006
+ :param pulumi.Input[_builtins.str] nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold.
22007
+ :param pulumi.Input[_builtins.str] pid_available: Defines grace period for the pid.available soft eviction threshold.
22008
+ """
22009
+ if imagefs_available is not None:
22010
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
22011
+ if imagefs_inodes_free is not None:
22012
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
22013
+ if memory_available is not None:
22014
+ pulumi.set(__self__, "memory_available", memory_available)
22015
+ if nodefs_available is not None:
22016
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
22017
+ if nodefs_inodes_free is not None:
22018
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
22019
+ if pid_available is not None:
22020
+ pulumi.set(__self__, "pid_available", pid_available)
22021
+
22022
+ @_builtins.property
22023
+ @pulumi.getter(name="imagefsAvailable")
22024
+ def imagefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
22025
+ """
22026
+ Defines grace period for the imagefs.available soft eviction threshold
22027
+ """
22028
+ return pulumi.get(self, "imagefs_available")
22029
+
22030
+ @imagefs_available.setter
22031
+ def imagefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
22032
+ pulumi.set(self, "imagefs_available", value)
22033
+
22034
+ @_builtins.property
22035
+ @pulumi.getter(name="imagefsInodesFree")
22036
+ def imagefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
22037
+ """
22038
+ Defines grace period for the imagefs.inodesFree soft eviction threshold.
22039
+ """
22040
+ return pulumi.get(self, "imagefs_inodes_free")
22041
+
22042
+ @imagefs_inodes_free.setter
22043
+ def imagefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
22044
+ pulumi.set(self, "imagefs_inodes_free", value)
22045
+
22046
+ @_builtins.property
22047
+ @pulumi.getter(name="memoryAvailable")
22048
+ def memory_available(self) -> Optional[pulumi.Input[_builtins.str]]:
22049
+ """
22050
+ Defines grace period for the memory.available soft eviction threshold.
22051
+ """
22052
+ return pulumi.get(self, "memory_available")
22053
+
22054
+ @memory_available.setter
22055
+ def memory_available(self, value: Optional[pulumi.Input[_builtins.str]]):
22056
+ pulumi.set(self, "memory_available", value)
22057
+
22058
+ @_builtins.property
22059
+ @pulumi.getter(name="nodefsAvailable")
22060
+ def nodefs_available(self) -> Optional[pulumi.Input[_builtins.str]]:
22061
+ """
22062
+ Defines grace period for the nodefs.available soft eviction threshold.
22063
+ """
22064
+ return pulumi.get(self, "nodefs_available")
22065
+
22066
+ @nodefs_available.setter
22067
+ def nodefs_available(self, value: Optional[pulumi.Input[_builtins.str]]):
22068
+ pulumi.set(self, "nodefs_available", value)
22069
+
22070
+ @_builtins.property
22071
+ @pulumi.getter(name="nodefsInodesFree")
22072
+ def nodefs_inodes_free(self) -> Optional[pulumi.Input[_builtins.str]]:
22073
+ """
22074
+ Defines grace period for the nodefs.inodesFree soft eviction threshold.
22075
+ """
22076
+ return pulumi.get(self, "nodefs_inodes_free")
22077
+
22078
+ @nodefs_inodes_free.setter
22079
+ def nodefs_inodes_free(self, value: Optional[pulumi.Input[_builtins.str]]):
22080
+ pulumi.set(self, "nodefs_inodes_free", value)
22081
+
22082
+ @_builtins.property
22083
+ @pulumi.getter(name="pidAvailable")
22084
+ def pid_available(self) -> Optional[pulumi.Input[_builtins.str]]:
22085
+ """
22086
+ Defines grace period for the pid.available soft eviction threshold.
22087
+ """
22088
+ return pulumi.get(self, "pid_available")
22089
+
22090
+ @pid_available.setter
22091
+ def pid_available(self, value: Optional[pulumi.Input[_builtins.str]]):
22092
+ pulumi.set(self, "pid_available", value)
22093
+
20048
22094
 
20049
22095
  if not MYPY:
20050
22096
  class NodePoolNodeConfigLinuxNodeConfigArgsDict(TypedDict):
@@ -20060,6 +22106,14 @@ if not MYPY:
20060
22106
  """
20061
22107
  The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
20062
22108
  """
22109
+ transparent_hugepage_defrag: NotRequired[pulumi.Input[_builtins.str]]
22110
+ """
22111
+ The Linux kernel transparent hugepage defrag setting.
22112
+ """
22113
+ transparent_hugepage_enabled: NotRequired[pulumi.Input[_builtins.str]]
22114
+ """
22115
+ The Linux kernel transparent hugepage setting.
22116
+ """
20063
22117
  elif False:
20064
22118
  NodePoolNodeConfigLinuxNodeConfigArgsDict: TypeAlias = Mapping[str, Any]
20065
22119
 
@@ -20068,11 +22122,15 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
20068
22122
  def __init__(__self__, *,
20069
22123
  cgroup_mode: Optional[pulumi.Input[_builtins.str]] = None,
20070
22124
  hugepages_config: Optional[pulumi.Input['NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs']] = None,
20071
- sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None):
22125
+ sysctls: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]] = None,
22126
+ transparent_hugepage_defrag: Optional[pulumi.Input[_builtins.str]] = None,
22127
+ transparent_hugepage_enabled: Optional[pulumi.Input[_builtins.str]] = None):
20072
22128
  """
20073
22129
  :param pulumi.Input[_builtins.str] cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
20074
22130
  :param pulumi.Input['NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_config: Amounts for 2M and 1G hugepages.
20075
22131
  :param pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
22132
+ :param pulumi.Input[_builtins.str] transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
22133
+ :param pulumi.Input[_builtins.str] transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
20076
22134
  """
20077
22135
  if cgroup_mode is not None:
20078
22136
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
@@ -20080,6 +22138,10 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
20080
22138
  pulumi.set(__self__, "hugepages_config", hugepages_config)
20081
22139
  if sysctls is not None:
20082
22140
  pulumi.set(__self__, "sysctls", sysctls)
22141
+ if transparent_hugepage_defrag is not None:
22142
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
22143
+ if transparent_hugepage_enabled is not None:
22144
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
20083
22145
 
20084
22146
  @_builtins.property
20085
22147
  @pulumi.getter(name="cgroupMode")
@@ -20117,6 +22179,30 @@ class NodePoolNodeConfigLinuxNodeConfigArgs:
20117
22179
  def sysctls(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[_builtins.str]]]]):
20118
22180
  pulumi.set(self, "sysctls", value)
20119
22181
 
22182
+ @_builtins.property
22183
+ @pulumi.getter(name="transparentHugepageDefrag")
22184
+ def transparent_hugepage_defrag(self) -> Optional[pulumi.Input[_builtins.str]]:
22185
+ """
22186
+ The Linux kernel transparent hugepage defrag setting.
22187
+ """
22188
+ return pulumi.get(self, "transparent_hugepage_defrag")
22189
+
22190
+ @transparent_hugepage_defrag.setter
22191
+ def transparent_hugepage_defrag(self, value: Optional[pulumi.Input[_builtins.str]]):
22192
+ pulumi.set(self, "transparent_hugepage_defrag", value)
22193
+
22194
+ @_builtins.property
22195
+ @pulumi.getter(name="transparentHugepageEnabled")
22196
+ def transparent_hugepage_enabled(self) -> Optional[pulumi.Input[_builtins.str]]:
22197
+ """
22198
+ The Linux kernel transparent hugepage setting.
22199
+ """
22200
+ return pulumi.get(self, "transparent_hugepage_enabled")
22201
+
22202
+ @transparent_hugepage_enabled.setter
22203
+ def transparent_hugepage_enabled(self, value: Optional[pulumi.Input[_builtins.str]]):
22204
+ pulumi.set(self, "transparent_hugepage_enabled", value)
22205
+
20120
22206
 
20121
22207
  if not MYPY:
20122
22208
  class NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgsDict(TypedDict):
@@ -20430,17 +22516,25 @@ if not MYPY:
20430
22516
  """
20431
22517
  .
20432
22518
  """
22519
+ min_node_cpus: NotRequired[pulumi.Input[_builtins.int]]
22520
+ """
22521
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
22522
+ """
20433
22523
  elif False:
20434
22524
  NodePoolNodeConfigSoleTenantConfigArgsDict: TypeAlias = Mapping[str, Any]
20435
22525
 
20436
22526
  @pulumi.input_type
20437
22527
  class NodePoolNodeConfigSoleTenantConfigArgs:
20438
22528
  def __init__(__self__, *,
20439
- node_affinities: pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
22529
+ node_affinities: pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]],
22530
+ min_node_cpus: Optional[pulumi.Input[_builtins.int]] = None):
20440
22531
  """
20441
22532
  :param pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]] node_affinities: .
22533
+ :param pulumi.Input[_builtins.int] min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
20442
22534
  """
20443
22535
  pulumi.set(__self__, "node_affinities", node_affinities)
22536
+ if min_node_cpus is not None:
22537
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
20444
22538
 
20445
22539
  @_builtins.property
20446
22540
  @pulumi.getter(name="nodeAffinities")
@@ -20454,6 +22548,18 @@ class NodePoolNodeConfigSoleTenantConfigArgs:
20454
22548
  def node_affinities(self, value: pulumi.Input[Sequence[pulumi.Input['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs']]]):
20455
22549
  pulumi.set(self, "node_affinities", value)
20456
22550
 
22551
+ @_builtins.property
22552
+ @pulumi.getter(name="minNodeCpus")
22553
+ def min_node_cpus(self) -> Optional[pulumi.Input[_builtins.int]]:
22554
+ """
22555
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
22556
+ """
22557
+ return pulumi.get(self, "min_node_cpus")
22558
+
22559
+ @min_node_cpus.setter
22560
+ def min_node_cpus(self, value: Optional[pulumi.Input[_builtins.int]]):
22561
+ pulumi.set(self, "min_node_cpus", value)
22562
+
20457
22563
 
20458
22564
  if not MYPY:
20459
22565
  class NodePoolNodeConfigSoleTenantConfigNodeAffinityArgsDict(TypedDict):