pulumi-gcp 8.41.0a1755716203__py3-none-any.whl → 8.41.0a1755891135__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (314) hide show
  1. pulumi_gcp/__init__.py +8 -0
  2. pulumi_gcp/accesscontextmanager/access_policy_iam_binding.py +2 -0
  3. pulumi_gcp/accesscontextmanager/access_policy_iam_member.py +2 -0
  4. pulumi_gcp/accesscontextmanager/access_policy_iam_policy.py +2 -0
  5. pulumi_gcp/apigateway/api_config_iam_binding.py +2 -0
  6. pulumi_gcp/apigateway/api_config_iam_member.py +2 -0
  7. pulumi_gcp/apigateway/api_config_iam_policy.py +2 -0
  8. pulumi_gcp/apigateway/api_iam_binding.py +2 -0
  9. pulumi_gcp/apigateway/api_iam_member.py +2 -0
  10. pulumi_gcp/apigateway/api_iam_policy.py +2 -0
  11. pulumi_gcp/apigateway/gateway_iam_binding.py +2 -0
  12. pulumi_gcp/apigateway/gateway_iam_member.py +2 -0
  13. pulumi_gcp/apigateway/gateway_iam_policy.py +2 -0
  14. pulumi_gcp/apigee/environment_iam_binding.py +2 -0
  15. pulumi_gcp/apigee/environment_iam_member.py +2 -0
  16. pulumi_gcp/apigee/environment_iam_policy.py +2 -0
  17. pulumi_gcp/artifactregistry/__init__.py +2 -0
  18. pulumi_gcp/artifactregistry/get_tag.py +187 -0
  19. pulumi_gcp/artifactregistry/get_tags.py +200 -0
  20. pulumi_gcp/artifactregistry/outputs.py +30 -0
  21. pulumi_gcp/artifactregistry/repository_iam_binding.py +2 -0
  22. pulumi_gcp/artifactregistry/repository_iam_member.py +2 -0
  23. pulumi_gcp/artifactregistry/repository_iam_policy.py +2 -0
  24. pulumi_gcp/beyondcorp/application_iam_binding.py +8 -0
  25. pulumi_gcp/beyondcorp/application_iam_member.py +8 -0
  26. pulumi_gcp/beyondcorp/application_iam_policy.py +8 -0
  27. pulumi_gcp/beyondcorp/get_application_iam_policy.py +4 -0
  28. pulumi_gcp/beyondcorp/security_gateway_application_iam_binding.py +2 -0
  29. pulumi_gcp/beyondcorp/security_gateway_application_iam_member.py +2 -0
  30. pulumi_gcp/beyondcorp/security_gateway_application_iam_policy.py +2 -0
  31. pulumi_gcp/beyondcorp/security_gateway_iam_binding.py +2 -0
  32. pulumi_gcp/beyondcorp/security_gateway_iam_member.py +2 -0
  33. pulumi_gcp/beyondcorp/security_gateway_iam_policy.py +2 -0
  34. pulumi_gcp/bigquery/connection_iam_binding.py +2 -0
  35. pulumi_gcp/bigquery/connection_iam_member.py +2 -0
  36. pulumi_gcp/bigquery/connection_iam_policy.py +2 -0
  37. pulumi_gcp/bigquery/data_transfer_config.py +2 -0
  38. pulumi_gcp/bigquery/iam_binding.py +2 -0
  39. pulumi_gcp/bigquery/iam_member.py +2 -0
  40. pulumi_gcp/bigquery/iam_policy.py +2 -0
  41. pulumi_gcp/bigquery/reservation.py +535 -0
  42. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_binding.py +2 -0
  43. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_member.py +2 -0
  44. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_policy.py +2 -0
  45. pulumi_gcp/bigqueryanalyticshub/listing_iam_binding.py +2 -0
  46. pulumi_gcp/bigqueryanalyticshub/listing_iam_member.py +2 -0
  47. pulumi_gcp/bigqueryanalyticshub/listing_iam_policy.py +2 -0
  48. pulumi_gcp/bigquerydatapolicy/data_policy_iam_binding.py +2 -0
  49. pulumi_gcp/bigquerydatapolicy/data_policy_iam_member.py +2 -0
  50. pulumi_gcp/bigquerydatapolicy/data_policy_iam_policy.py +2 -0
  51. pulumi_gcp/binaryauthorization/attestor_iam_binding.py +2 -0
  52. pulumi_gcp/binaryauthorization/attestor_iam_member.py +2 -0
  53. pulumi_gcp/binaryauthorization/attestor_iam_policy.py +2 -0
  54. pulumi_gcp/certificateauthority/ca_pool_iam_binding.py +2 -0
  55. pulumi_gcp/certificateauthority/ca_pool_iam_member.py +2 -0
  56. pulumi_gcp/certificateauthority/ca_pool_iam_policy.py +2 -0
  57. pulumi_gcp/certificateauthority/certificate_template_iam_binding.py +2 -0
  58. pulumi_gcp/certificateauthority/certificate_template_iam_member.py +2 -0
  59. pulumi_gcp/certificateauthority/certificate_template_iam_policy.py +2 -0
  60. pulumi_gcp/cloudbuildv2/connection_iam_binding.py +2 -0
  61. pulumi_gcp/cloudbuildv2/connection_iam_member.py +2 -0
  62. pulumi_gcp/cloudbuildv2/connection_iam_policy.py +2 -0
  63. pulumi_gcp/clouddeploy/_inputs.py +48 -48
  64. pulumi_gcp/clouddeploy/deploy_policy.py +54 -74
  65. pulumi_gcp/clouddeploy/outputs.py +32 -32
  66. pulumi_gcp/cloudfunctions/function_iam_binding.py +2 -0
  67. pulumi_gcp/cloudfunctions/function_iam_member.py +2 -0
  68. pulumi_gcp/cloudfunctions/function_iam_policy.py +2 -0
  69. pulumi_gcp/cloudfunctionsv2/function_iam_binding.py +2 -0
  70. pulumi_gcp/cloudfunctionsv2/function_iam_member.py +2 -0
  71. pulumi_gcp/cloudfunctionsv2/function_iam_policy.py +2 -0
  72. pulumi_gcp/cloudrun/iam_binding.py +2 -0
  73. pulumi_gcp/cloudrun/iam_member.py +2 -0
  74. pulumi_gcp/cloudrun/iam_policy.py +2 -0
  75. pulumi_gcp/cloudrunv2/job_iam_binding.py +2 -0
  76. pulumi_gcp/cloudrunv2/job_iam_member.py +2 -0
  77. pulumi_gcp/cloudrunv2/job_iam_policy.py +2 -0
  78. pulumi_gcp/cloudrunv2/service_iam_binding.py +2 -0
  79. pulumi_gcp/cloudrunv2/service_iam_member.py +2 -0
  80. pulumi_gcp/cloudrunv2/service_iam_policy.py +2 -0
  81. pulumi_gcp/cloudrunv2/worker_pool_iam_binding.py +2 -0
  82. pulumi_gcp/cloudrunv2/worker_pool_iam_member.py +2 -0
  83. pulumi_gcp/cloudrunv2/worker_pool_iam_policy.py +2 -0
  84. pulumi_gcp/cloudtasks/queue_iam_binding.py +2 -0
  85. pulumi_gcp/cloudtasks/queue_iam_member.py +2 -0
  86. pulumi_gcp/cloudtasks/queue_iam_policy.py +2 -0
  87. pulumi_gcp/colab/runtime_template_iam_binding.py +2 -0
  88. pulumi_gcp/colab/runtime_template_iam_member.py +2 -0
  89. pulumi_gcp/colab/runtime_template_iam_policy.py +2 -0
  90. pulumi_gcp/compute/disk_iam_binding.py +2 -0
  91. pulumi_gcp/compute/disk_iam_member.py +2 -0
  92. pulumi_gcp/compute/disk_iam_policy.py +2 -0
  93. pulumi_gcp/compute/image_iam_binding.py +2 -0
  94. pulumi_gcp/compute/image_iam_member.py +2 -0
  95. pulumi_gcp/compute/image_iam_policy.py +2 -0
  96. pulumi_gcp/compute/instance_iam_binding.py +2 -0
  97. pulumi_gcp/compute/instance_iam_member.py +2 -0
  98. pulumi_gcp/compute/instance_iam_policy.py +2 -0
  99. pulumi_gcp/compute/instance_template_iam_binding.py +2 -0
  100. pulumi_gcp/compute/instance_template_iam_member.py +2 -0
  101. pulumi_gcp/compute/instance_template_iam_policy.py +2 -0
  102. pulumi_gcp/compute/instant_snapshot_iam_binding.py +2 -0
  103. pulumi_gcp/compute/instant_snapshot_iam_member.py +2 -0
  104. pulumi_gcp/compute/instant_snapshot_iam_policy.py +2 -0
  105. pulumi_gcp/compute/machine_image_iam_binding.py +2 -0
  106. pulumi_gcp/compute/machine_image_iam_member.py +2 -0
  107. pulumi_gcp/compute/machine_image_iam_policy.py +2 -0
  108. pulumi_gcp/compute/region_disk_iam_binding.py +2 -0
  109. pulumi_gcp/compute/region_disk_iam_member.py +2 -0
  110. pulumi_gcp/compute/region_disk_iam_policy.py +2 -0
  111. pulumi_gcp/compute/snapshot_iam_binding.py +2 -0
  112. pulumi_gcp/compute/snapshot_iam_member.py +2 -0
  113. pulumi_gcp/compute/snapshot_iam_policy.py +2 -0
  114. pulumi_gcp/compute/storage_pool_iam_binding.py +2 -0
  115. pulumi_gcp/compute/storage_pool_iam_member.py +2 -0
  116. pulumi_gcp/compute/storage_pool_iam_policy.py +2 -0
  117. pulumi_gcp/compute/subnetwork_iam_binding.py +2 -0
  118. pulumi_gcp/compute/subnetwork_iam_member.py +2 -0
  119. pulumi_gcp/compute/subnetwork_iam_policy.py +2 -0
  120. pulumi_gcp/config/__init__.pyi +0 -4
  121. pulumi_gcp/config/vars.py +0 -8
  122. pulumi_gcp/container/_inputs.py +1728 -42
  123. pulumi_gcp/container/outputs.py +1935 -16
  124. pulumi_gcp/containeranalysis/note_iam_binding.py +2 -0
  125. pulumi_gcp/containeranalysis/note_iam_member.py +2 -0
  126. pulumi_gcp/containeranalysis/note_iam_policy.py +2 -0
  127. pulumi_gcp/datacatalog/entry_group_iam_binding.py +2 -0
  128. pulumi_gcp/datacatalog/entry_group_iam_member.py +2 -0
  129. pulumi_gcp/datacatalog/entry_group_iam_policy.py +2 -0
  130. pulumi_gcp/datacatalog/policy_tag_iam_binding.py +2 -0
  131. pulumi_gcp/datacatalog/policy_tag_iam_member.py +2 -0
  132. pulumi_gcp/datacatalog/policy_tag_iam_policy.py +2 -0
  133. pulumi_gcp/datacatalog/tag_template_iam_binding.py +2 -0
  134. pulumi_gcp/datacatalog/tag_template_iam_member.py +2 -0
  135. pulumi_gcp/datacatalog/tag_template_iam_policy.py +2 -0
  136. pulumi_gcp/datacatalog/taxonomy_iam_binding.py +2 -0
  137. pulumi_gcp/datacatalog/taxonomy_iam_member.py +2 -0
  138. pulumi_gcp/datacatalog/taxonomy_iam_policy.py +2 -0
  139. pulumi_gcp/datafusion/instance.py +18 -4
  140. pulumi_gcp/dataplex/aspect_type_iam_binding.py +2 -0
  141. pulumi_gcp/dataplex/aspect_type_iam_member.py +2 -0
  142. pulumi_gcp/dataplex/aspect_type_iam_policy.py +2 -0
  143. pulumi_gcp/dataplex/asset_iam_binding.py +2 -0
  144. pulumi_gcp/dataplex/asset_iam_member.py +2 -0
  145. pulumi_gcp/dataplex/asset_iam_policy.py +2 -0
  146. pulumi_gcp/dataplex/datascan_iam_binding.py +2 -0
  147. pulumi_gcp/dataplex/datascan_iam_member.py +2 -0
  148. pulumi_gcp/dataplex/datascan_iam_policy.py +2 -0
  149. pulumi_gcp/dataplex/entry_group_iam_binding.py +2 -0
  150. pulumi_gcp/dataplex/entry_group_iam_member.py +2 -0
  151. pulumi_gcp/dataplex/entry_group_iam_policy.py +2 -0
  152. pulumi_gcp/dataplex/entry_type_iam_binding.py +2 -0
  153. pulumi_gcp/dataplex/entry_type_iam_member.py +2 -0
  154. pulumi_gcp/dataplex/entry_type_iam_policy.py +2 -0
  155. pulumi_gcp/dataplex/glossary_iam_binding.py +2 -0
  156. pulumi_gcp/dataplex/glossary_iam_member.py +2 -0
  157. pulumi_gcp/dataplex/glossary_iam_policy.py +2 -0
  158. pulumi_gcp/dataplex/lake_iam_binding.py +2 -0
  159. pulumi_gcp/dataplex/lake_iam_member.py +2 -0
  160. pulumi_gcp/dataplex/lake_iam_policy.py +2 -0
  161. pulumi_gcp/dataplex/task_iam_binding.py +2 -0
  162. pulumi_gcp/dataplex/task_iam_member.py +2 -0
  163. pulumi_gcp/dataplex/task_iam_policy.py +2 -0
  164. pulumi_gcp/dataplex/zone_iam_binding.py +2 -0
  165. pulumi_gcp/dataplex/zone_iam_member.py +2 -0
  166. pulumi_gcp/dataplex/zone_iam_policy.py +2 -0
  167. pulumi_gcp/dataproc/autoscaling_policy_iam_binding.py +2 -0
  168. pulumi_gcp/dataproc/autoscaling_policy_iam_member.py +2 -0
  169. pulumi_gcp/dataproc/autoscaling_policy_iam_policy.py +2 -0
  170. pulumi_gcp/dataproc/metastore_database_iam_binding.py +2 -0
  171. pulumi_gcp/dataproc/metastore_database_iam_member.py +2 -0
  172. pulumi_gcp/dataproc/metastore_database_iam_policy.py +2 -0
  173. pulumi_gcp/dataproc/metastore_federation_iam_binding.py +2 -0
  174. pulumi_gcp/dataproc/metastore_federation_iam_member.py +2 -0
  175. pulumi_gcp/dataproc/metastore_federation_iam_policy.py +2 -0
  176. pulumi_gcp/dataproc/metastore_service_iam_binding.py +2 -0
  177. pulumi_gcp/dataproc/metastore_service_iam_member.py +2 -0
  178. pulumi_gcp/dataproc/metastore_service_iam_policy.py +2 -0
  179. pulumi_gcp/dataproc/metastore_table_iam_binding.py +2 -0
  180. pulumi_gcp/dataproc/metastore_table_iam_member.py +2 -0
  181. pulumi_gcp/dataproc/metastore_table_iam_policy.py +2 -0
  182. pulumi_gcp/diagflow/__init__.py +1 -0
  183. pulumi_gcp/diagflow/_inputs.py +2661 -0
  184. pulumi_gcp/diagflow/conversation_profile.py +959 -0
  185. pulumi_gcp/diagflow/outputs.py +2213 -0
  186. pulumi_gcp/dns/dns_managed_zone_iam_binding.py +2 -0
  187. pulumi_gcp/dns/dns_managed_zone_iam_member.py +2 -0
  188. pulumi_gcp/dns/dns_managed_zone_iam_policy.py +2 -0
  189. pulumi_gcp/endpoints/service_iam_binding.py +2 -0
  190. pulumi_gcp/endpoints/service_iam_member.py +2 -0
  191. pulumi_gcp/endpoints/service_iam_policy.py +2 -0
  192. pulumi_gcp/gemini/repository_group_iam_binding.py +2 -0
  193. pulumi_gcp/gemini/repository_group_iam_member.py +2 -0
  194. pulumi_gcp/gemini/repository_group_iam_policy.py +2 -0
  195. pulumi_gcp/gkebackup/backup_plan_iam_binding.py +2 -0
  196. pulumi_gcp/gkebackup/backup_plan_iam_member.py +2 -0
  197. pulumi_gcp/gkebackup/backup_plan_iam_policy.py +2 -0
  198. pulumi_gcp/gkebackup/restore_plan_iam_binding.py +2 -0
  199. pulumi_gcp/gkebackup/restore_plan_iam_member.py +2 -0
  200. pulumi_gcp/gkebackup/restore_plan_iam_policy.py +2 -0
  201. pulumi_gcp/gkehub/feature_iam_binding.py +2 -0
  202. pulumi_gcp/gkehub/feature_iam_member.py +2 -0
  203. pulumi_gcp/gkehub/feature_iam_policy.py +2 -0
  204. pulumi_gcp/gkehub/membership_iam_binding.py +2 -0
  205. pulumi_gcp/gkehub/membership_iam_member.py +2 -0
  206. pulumi_gcp/gkehub/membership_iam_policy.py +2 -0
  207. pulumi_gcp/gkehub/scope_iam_binding.py +2 -0
  208. pulumi_gcp/gkehub/scope_iam_member.py +2 -0
  209. pulumi_gcp/gkehub/scope_iam_policy.py +2 -0
  210. pulumi_gcp/healthcare/consent_store_iam_binding.py +2 -0
  211. pulumi_gcp/healthcare/consent_store_iam_member.py +2 -0
  212. pulumi_gcp/healthcare/consent_store_iam_policy.py +2 -0
  213. pulumi_gcp/iam/workforce_pool_iam_binding.py +2 -0
  214. pulumi_gcp/iam/workforce_pool_iam_member.py +2 -0
  215. pulumi_gcp/iam/workforce_pool_iam_policy.py +2 -0
  216. pulumi_gcp/iap/app_engine_service_iam_binding.py +2 -0
  217. pulumi_gcp/iap/app_engine_service_iam_member.py +2 -0
  218. pulumi_gcp/iap/app_engine_service_iam_policy.py +2 -0
  219. pulumi_gcp/iap/app_engine_version_iam_binding.py +2 -0
  220. pulumi_gcp/iap/app_engine_version_iam_member.py +2 -0
  221. pulumi_gcp/iap/app_engine_version_iam_policy.py +2 -0
  222. pulumi_gcp/iap/tunnel_dest_group_iam_binding.py +2 -0
  223. pulumi_gcp/iap/tunnel_dest_group_iam_member.py +2 -0
  224. pulumi_gcp/iap/tunnel_dest_group_iam_policy.py +2 -0
  225. pulumi_gcp/iap/tunnel_iam_binding.py +2 -0
  226. pulumi_gcp/iap/tunnel_iam_member.py +2 -0
  227. pulumi_gcp/iap/tunnel_iam_policy.py +2 -0
  228. pulumi_gcp/iap/tunnel_instance_iam_binding.py +2 -0
  229. pulumi_gcp/iap/tunnel_instance_iam_member.py +2 -0
  230. pulumi_gcp/iap/tunnel_instance_iam_policy.py +2 -0
  231. pulumi_gcp/iap/web_backend_service_iam_binding.py +2 -0
  232. pulumi_gcp/iap/web_backend_service_iam_member.py +2 -0
  233. pulumi_gcp/iap/web_backend_service_iam_policy.py +2 -0
  234. pulumi_gcp/iap/web_cloud_run_service_iam_binding.py +2 -0
  235. pulumi_gcp/iap/web_cloud_run_service_iam_member.py +2 -0
  236. pulumi_gcp/iap/web_cloud_run_service_iam_policy.py +2 -0
  237. pulumi_gcp/iap/web_iam_binding.py +2 -0
  238. pulumi_gcp/iap/web_iam_member.py +2 -0
  239. pulumi_gcp/iap/web_iam_policy.py +2 -0
  240. pulumi_gcp/iap/web_region_backend_service_iam_binding.py +2 -0
  241. pulumi_gcp/iap/web_region_backend_service_iam_member.py +2 -0
  242. pulumi_gcp/iap/web_region_backend_service_iam_policy.py +2 -0
  243. pulumi_gcp/iap/web_type_app_enging_iam_binding.py +2 -0
  244. pulumi_gcp/iap/web_type_app_enging_iam_member.py +2 -0
  245. pulumi_gcp/iap/web_type_app_enging_iam_policy.py +2 -0
  246. pulumi_gcp/iap/web_type_compute_iam_binding.py +2 -0
  247. pulumi_gcp/iap/web_type_compute_iam_member.py +2 -0
  248. pulumi_gcp/iap/web_type_compute_iam_policy.py +2 -0
  249. pulumi_gcp/kms/crypto_key.py +7 -0
  250. pulumi_gcp/kms/ekm_connection_iam_binding.py +2 -0
  251. pulumi_gcp/kms/ekm_connection_iam_member.py +2 -0
  252. pulumi_gcp/kms/ekm_connection_iam_policy.py +2 -0
  253. pulumi_gcp/kms/outputs.py +2 -0
  254. pulumi_gcp/logging/log_view_iam_binding.py +2 -0
  255. pulumi_gcp/logging/log_view_iam_member.py +2 -0
  256. pulumi_gcp/logging/log_view_iam_policy.py +2 -0
  257. pulumi_gcp/monitoring/_inputs.py +3 -3
  258. pulumi_gcp/monitoring/outputs.py +2 -2
  259. pulumi_gcp/networkmanagement/vpc_flow_logs_config.py +213 -168
  260. pulumi_gcp/notebooks/instance.py +18 -18
  261. pulumi_gcp/notebooks/instance_iam_binding.py +2 -0
  262. pulumi_gcp/notebooks/instance_iam_member.py +2 -0
  263. pulumi_gcp/notebooks/instance_iam_policy.py +2 -0
  264. pulumi_gcp/notebooks/runtime_iam_binding.py +2 -0
  265. pulumi_gcp/notebooks/runtime_iam_member.py +2 -0
  266. pulumi_gcp/notebooks/runtime_iam_policy.py +2 -0
  267. pulumi_gcp/organizations/folder.py +56 -0
  268. pulumi_gcp/organizations/get_folder.py +29 -1
  269. pulumi_gcp/projects/api_key.py +88 -1
  270. pulumi_gcp/provider.py +0 -40
  271. pulumi_gcp/pubsub/schema_iam_binding.py +2 -0
  272. pulumi_gcp/pubsub/schema_iam_member.py +2 -0
  273. pulumi_gcp/pubsub/schema_iam_policy.py +2 -0
  274. pulumi_gcp/pubsub/topic_iam_binding.py +2 -0
  275. pulumi_gcp/pubsub/topic_iam_member.py +2 -0
  276. pulumi_gcp/pubsub/topic_iam_policy.py +2 -0
  277. pulumi_gcp/pulumi-plugin.json +1 -1
  278. pulumi_gcp/secretmanager/regional_secret_iam_binding.py +2 -0
  279. pulumi_gcp/secretmanager/regional_secret_iam_member.py +2 -0
  280. pulumi_gcp/secretmanager/regional_secret_iam_policy.py +2 -0
  281. pulumi_gcp/secretmanager/secret_iam_binding.py +2 -0
  282. pulumi_gcp/secretmanager/secret_iam_member.py +2 -0
  283. pulumi_gcp/secretmanager/secret_iam_policy.py +2 -0
  284. pulumi_gcp/secretmanager/secret_version.py +1 -48
  285. pulumi_gcp/securesourcemanager/repository_iam_binding.py +2 -0
  286. pulumi_gcp/securesourcemanager/repository_iam_member.py +2 -0
  287. pulumi_gcp/securesourcemanager/repository_iam_policy.py +2 -0
  288. pulumi_gcp/securitycenter/instance_iam_binding.py +18 -4
  289. pulumi_gcp/securitycenter/instance_iam_member.py +18 -4
  290. pulumi_gcp/securitycenter/instance_iam_policy.py +18 -4
  291. pulumi_gcp/securitycenter/v2_organization_source_iam_binding.py +2 -0
  292. pulumi_gcp/securitycenter/v2_organization_source_iam_member.py +2 -0
  293. pulumi_gcp/securitycenter/v2_organization_source_iam_policy.py +2 -0
  294. pulumi_gcp/servicedirectory/namespace_iam_binding.py +2 -0
  295. pulumi_gcp/servicedirectory/namespace_iam_member.py +2 -0
  296. pulumi_gcp/servicedirectory/namespace_iam_policy.py +2 -0
  297. pulumi_gcp/servicedirectory/service_iam_binding.py +2 -0
  298. pulumi_gcp/servicedirectory/service_iam_member.py +2 -0
  299. pulumi_gcp/servicedirectory/service_iam_policy.py +2 -0
  300. pulumi_gcp/sourcerepo/repository_iam_binding.py +2 -0
  301. pulumi_gcp/sourcerepo/repository_iam_member.py +2 -0
  302. pulumi_gcp/sourcerepo/repository_iam_policy.py +2 -0
  303. pulumi_gcp/tags/tag_key_iam_binding.py +2 -0
  304. pulumi_gcp/tags/tag_key_iam_member.py +2 -0
  305. pulumi_gcp/tags/tag_key_iam_policy.py +2 -0
  306. pulumi_gcp/tags/tag_value_iam_binding.py +2 -0
  307. pulumi_gcp/tags/tag_value_iam_member.py +2 -0
  308. pulumi_gcp/tags/tag_value_iam_policy.py +2 -0
  309. pulumi_gcp/tpu/get_tensorflow_versions.py +10 -0
  310. pulumi_gcp/vertex/ai_index.py +21 -7
  311. {pulumi_gcp-8.41.0a1755716203.dist-info → pulumi_gcp-8.41.0a1755891135.dist-info}/METADATA +1 -1
  312. {pulumi_gcp-8.41.0a1755716203.dist-info → pulumi_gcp-8.41.0a1755891135.dist-info}/RECORD +314 -311
  313. {pulumi_gcp-8.41.0a1755716203.dist-info → pulumi_gcp-8.41.0a1755891135.dist-info}/WHEEL +0 -0
  314. {pulumi_gcp-8.41.0a1755716203.dist-info → pulumi_gcp-8.41.0a1755891135.dist-info}/top_level.txt +0 -0
@@ -171,6 +171,9 @@ __all__ = [
171
171
  'ClusterNodeConfigGvnic',
172
172
  'ClusterNodeConfigHostMaintenancePolicy',
173
173
  'ClusterNodeConfigKubeletConfig',
174
+ 'ClusterNodeConfigKubeletConfigEvictionMinimumReclaim',
175
+ 'ClusterNodeConfigKubeletConfigEvictionSoft',
176
+ 'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod',
174
177
  'ClusterNodeConfigLinuxNodeConfig',
175
178
  'ClusterNodeConfigLinuxNodeConfigHugepagesConfig',
176
179
  'ClusterNodeConfigLocalNvmeSsdBlockConfig',
@@ -221,6 +224,9 @@ __all__ = [
221
224
  'ClusterNodePoolNodeConfigGvnic',
222
225
  'ClusterNodePoolNodeConfigHostMaintenancePolicy',
223
226
  'ClusterNodePoolNodeConfigKubeletConfig',
227
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim',
228
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoft',
229
+ 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod',
224
230
  'ClusterNodePoolNodeConfigLinuxNodeConfig',
225
231
  'ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig',
226
232
  'ClusterNodePoolNodeConfigLocalNvmeSsdBlockConfig',
@@ -286,6 +292,9 @@ __all__ = [
286
292
  'NodePoolNodeConfigGvnic',
287
293
  'NodePoolNodeConfigHostMaintenancePolicy',
288
294
  'NodePoolNodeConfigKubeletConfig',
295
+ 'NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim',
296
+ 'NodePoolNodeConfigKubeletConfigEvictionSoft',
297
+ 'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod',
289
298
  'NodePoolNodeConfigLinuxNodeConfig',
290
299
  'NodePoolNodeConfigLinuxNodeConfigHugepagesConfig',
291
300
  'NodePoolNodeConfigLocalNvmeSsdBlockConfig',
@@ -389,6 +398,9 @@ __all__ = [
389
398
  'GetClusterNodeConfigGvnicResult',
390
399
  'GetClusterNodeConfigHostMaintenancePolicyResult',
391
400
  'GetClusterNodeConfigKubeletConfigResult',
401
+ 'GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult',
402
+ 'GetClusterNodeConfigKubeletConfigEvictionSoftResult',
403
+ 'GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult',
392
404
  'GetClusterNodeConfigLinuxNodeConfigResult',
393
405
  'GetClusterNodeConfigLinuxNodeConfigHugepagesConfigResult',
394
406
  'GetClusterNodeConfigLocalNvmeSsdBlockConfigResult',
@@ -439,6 +451,9 @@ __all__ = [
439
451
  'GetClusterNodePoolNodeConfigGvnicResult',
440
452
  'GetClusterNodePoolNodeConfigHostMaintenancePolicyResult',
441
453
  'GetClusterNodePoolNodeConfigKubeletConfigResult',
454
+ 'GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult',
455
+ 'GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult',
456
+ 'GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult',
442
457
  'GetClusterNodePoolNodeConfigLinuxNodeConfigResult',
443
458
  'GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigResult',
444
459
  'GetClusterNodePoolNodeConfigLocalNvmeSsdBlockConfigResult',
@@ -7479,7 +7494,7 @@ class ClusterNodeConfig(dict):
7479
7494
  :param _builtins.str service_account: The service account to be used by the Node VMs.
7480
7495
  If not specified, the "default" service account is used.
7481
7496
  :param 'ClusterNodeConfigShieldedInstanceConfigArgs' shielded_instance_config: Shielded Instance options. Structure is documented below.
7482
- :param 'ClusterNodeConfigSoleTenantConfigArgs' sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
7497
+ :param 'ClusterNodeConfigSoleTenantConfigArgs' sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
7483
7498
  :param _builtins.bool spot: A boolean that represents whether the underlying node VMs are spot.
7484
7499
  See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms)
7485
7500
  for more information. Defaults to false.
@@ -7958,7 +7973,7 @@ class ClusterNodeConfig(dict):
7958
7973
  @pulumi.getter(name="soleTenantConfig")
7959
7974
  def sole_tenant_config(self) -> Optional['outputs.ClusterNodeConfigSoleTenantConfig']:
7960
7975
  """
7961
- Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
7976
+ Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
7962
7977
  """
7963
7978
  return pulumi.get(self, "sole_tenant_config")
7964
7979
 
@@ -8800,6 +8815,14 @@ class ClusterNodeConfigKubeletConfig(dict):
8800
8815
  suggest = "cpu_cfs_quota_period"
8801
8816
  elif key == "cpuManagerPolicy":
8802
8817
  suggest = "cpu_manager_policy"
8818
+ elif key == "evictionMaxPodGracePeriodSeconds":
8819
+ suggest = "eviction_max_pod_grace_period_seconds"
8820
+ elif key == "evictionMinimumReclaim":
8821
+ suggest = "eviction_minimum_reclaim"
8822
+ elif key == "evictionSoft":
8823
+ suggest = "eviction_soft"
8824
+ elif key == "evictionSoftGracePeriod":
8825
+ suggest = "eviction_soft_grace_period"
8803
8826
  elif key == "imageGcHighThresholdPercent":
8804
8827
  suggest = "image_gc_high_threshold_percent"
8805
8828
  elif key == "imageGcLowThresholdPercent":
@@ -8810,6 +8833,8 @@ class ClusterNodeConfigKubeletConfig(dict):
8810
8833
  suggest = "image_minimum_gc_age"
8811
8834
  elif key == "insecureKubeletReadonlyPortEnabled":
8812
8835
  suggest = "insecure_kubelet_readonly_port_enabled"
8836
+ elif key == "maxParallelImagePulls":
8837
+ suggest = "max_parallel_image_pulls"
8813
8838
  elif key == "podPidsLimit":
8814
8839
  suggest = "pod_pids_limit"
8815
8840
  elif key == "singleProcessOomKill":
@@ -8833,11 +8858,16 @@ class ClusterNodeConfigKubeletConfig(dict):
8833
8858
  cpu_cfs_quota: Optional[_builtins.bool] = None,
8834
8859
  cpu_cfs_quota_period: Optional[_builtins.str] = None,
8835
8860
  cpu_manager_policy: Optional[_builtins.str] = None,
8861
+ eviction_max_pod_grace_period_seconds: Optional[_builtins.int] = None,
8862
+ eviction_minimum_reclaim: Optional['outputs.ClusterNodeConfigKubeletConfigEvictionMinimumReclaim'] = None,
8863
+ eviction_soft: Optional['outputs.ClusterNodeConfigKubeletConfigEvictionSoft'] = None,
8864
+ eviction_soft_grace_period: Optional['outputs.ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod'] = None,
8836
8865
  image_gc_high_threshold_percent: Optional[_builtins.int] = None,
8837
8866
  image_gc_low_threshold_percent: Optional[_builtins.int] = None,
8838
8867
  image_maximum_gc_age: Optional[_builtins.str] = None,
8839
8868
  image_minimum_gc_age: Optional[_builtins.str] = None,
8840
8869
  insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
8870
+ max_parallel_image_pulls: Optional[_builtins.int] = None,
8841
8871
  pod_pids_limit: Optional[_builtins.int] = None,
8842
8872
  single_process_oom_kill: Optional[_builtins.bool] = None):
8843
8873
  """
@@ -8859,11 +8889,16 @@ class ClusterNodeConfigKubeletConfig(dict):
8859
8889
  One of `"none"` or `"static"`. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
8860
8890
  Prior to the 6.4.0 this field was marked as required. The workaround for the required field
8861
8891
  is setting the empty string `""`, which will function identically to not setting this field.
8892
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
8893
+ :param 'ClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs' eviction_minimum_reclaim: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
8894
+ :param 'ClusterNodeConfigKubeletConfigEvictionSoftArgs' eviction_soft: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
8895
+ :param 'ClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs' eviction_soft_grace_period: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
8862
8896
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
8863
8897
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive.
8864
8898
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`, and `"2h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration.
8865
8899
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
8866
8900
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
8901
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
8867
8902
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
8868
8903
  :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
8869
8904
  """
@@ -8879,6 +8914,14 @@ class ClusterNodeConfigKubeletConfig(dict):
8879
8914
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
8880
8915
  if cpu_manager_policy is not None:
8881
8916
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
8917
+ if eviction_max_pod_grace_period_seconds is not None:
8918
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
8919
+ if eviction_minimum_reclaim is not None:
8920
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
8921
+ if eviction_soft is not None:
8922
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
8923
+ if eviction_soft_grace_period is not None:
8924
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
8882
8925
  if image_gc_high_threshold_percent is not None:
8883
8926
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
8884
8927
  if image_gc_low_threshold_percent is not None:
@@ -8889,6 +8932,8 @@ class ClusterNodeConfigKubeletConfig(dict):
8889
8932
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
8890
8933
  if insecure_kubelet_readonly_port_enabled is not None:
8891
8934
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
8935
+ if max_parallel_image_pulls is not None:
8936
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
8892
8937
  if pod_pids_limit is not None:
8893
8938
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
8894
8939
  if single_process_oom_kill is not None:
@@ -8954,6 +8999,38 @@ class ClusterNodeConfigKubeletConfig(dict):
8954
8999
  """
8955
9000
  return pulumi.get(self, "cpu_manager_policy")
8956
9001
 
9002
+ @_builtins.property
9003
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
9004
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[_builtins.int]:
9005
+ """
9006
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
9007
+ """
9008
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
9009
+
9010
+ @_builtins.property
9011
+ @pulumi.getter(name="evictionMinimumReclaim")
9012
+ def eviction_minimum_reclaim(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigEvictionMinimumReclaim']:
9013
+ """
9014
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
9015
+ """
9016
+ return pulumi.get(self, "eviction_minimum_reclaim")
9017
+
9018
+ @_builtins.property
9019
+ @pulumi.getter(name="evictionSoft")
9020
+ def eviction_soft(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigEvictionSoft']:
9021
+ """
9022
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
9023
+ """
9024
+ return pulumi.get(self, "eviction_soft")
9025
+
9026
+ @_builtins.property
9027
+ @pulumi.getter(name="evictionSoftGracePeriod")
9028
+ def eviction_soft_grace_period(self) -> Optional['outputs.ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod']:
9029
+ """
9030
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
9031
+ """
9032
+ return pulumi.get(self, "eviction_soft_grace_period")
9033
+
8957
9034
  @_builtins.property
8958
9035
  @pulumi.getter(name="imageGcHighThresholdPercent")
8959
9036
  def image_gc_high_threshold_percent(self) -> Optional[_builtins.int]:
@@ -8994,6 +9071,14 @@ class ClusterNodeConfigKubeletConfig(dict):
8994
9071
  """
8995
9072
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
8996
9073
 
9074
+ @_builtins.property
9075
+ @pulumi.getter(name="maxParallelImagePulls")
9076
+ def max_parallel_image_pulls(self) -> Optional[_builtins.int]:
9077
+ """
9078
+ Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
9079
+ """
9080
+ return pulumi.get(self, "max_parallel_image_pulls")
9081
+
8997
9082
  @_builtins.property
8998
9083
  @pulumi.getter(name="podPidsLimit")
8999
9084
  def pod_pids_limit(self) -> Optional[_builtins.int]:
@@ -9011,6 +9096,324 @@ class ClusterNodeConfigKubeletConfig(dict):
9011
9096
  return pulumi.get(self, "single_process_oom_kill")
9012
9097
 
9013
9098
 
9099
+ @pulumi.output_type
9100
+ class ClusterNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
9101
+ @staticmethod
9102
+ def __key_warning(key: str):
9103
+ suggest = None
9104
+ if key == "imagefsAvailable":
9105
+ suggest = "imagefs_available"
9106
+ elif key == "imagefsInodesFree":
9107
+ suggest = "imagefs_inodes_free"
9108
+ elif key == "memoryAvailable":
9109
+ suggest = "memory_available"
9110
+ elif key == "nodefsAvailable":
9111
+ suggest = "nodefs_available"
9112
+ elif key == "nodefsInodesFree":
9113
+ suggest = "nodefs_inodes_free"
9114
+ elif key == "pidAvailable":
9115
+ suggest = "pid_available"
9116
+
9117
+ if suggest:
9118
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfigEvictionMinimumReclaim. Access the value via the '{suggest}' property getter instead.")
9119
+
9120
+ def __getitem__(self, key: str) -> Any:
9121
+ ClusterNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
9122
+ return super().__getitem__(key)
9123
+
9124
+ def get(self, key: str, default = None) -> Any:
9125
+ ClusterNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
9126
+ return super().get(key, default)
9127
+
9128
+ def __init__(__self__, *,
9129
+ imagefs_available: Optional[_builtins.str] = None,
9130
+ imagefs_inodes_free: Optional[_builtins.str] = None,
9131
+ memory_available: Optional[_builtins.str] = None,
9132
+ nodefs_available: Optional[_builtins.str] = None,
9133
+ nodefs_inodes_free: Optional[_builtins.str] = None,
9134
+ pid_available: Optional[_builtins.str] = None):
9135
+ """
9136
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9137
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9138
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9139
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9140
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9141
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9142
+ """
9143
+ if imagefs_available is not None:
9144
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
9145
+ if imagefs_inodes_free is not None:
9146
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
9147
+ if memory_available is not None:
9148
+ pulumi.set(__self__, "memory_available", memory_available)
9149
+ if nodefs_available is not None:
9150
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
9151
+ if nodefs_inodes_free is not None:
9152
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
9153
+ if pid_available is not None:
9154
+ pulumi.set(__self__, "pid_available", pid_available)
9155
+
9156
+ @_builtins.property
9157
+ @pulumi.getter(name="imagefsAvailable")
9158
+ def imagefs_available(self) -> Optional[_builtins.str]:
9159
+ """
9160
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9161
+ """
9162
+ return pulumi.get(self, "imagefs_available")
9163
+
9164
+ @_builtins.property
9165
+ @pulumi.getter(name="imagefsInodesFree")
9166
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
9167
+ """
9168
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9169
+ """
9170
+ return pulumi.get(self, "imagefs_inodes_free")
9171
+
9172
+ @_builtins.property
9173
+ @pulumi.getter(name="memoryAvailable")
9174
+ def memory_available(self) -> Optional[_builtins.str]:
9175
+ """
9176
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9177
+ """
9178
+ return pulumi.get(self, "memory_available")
9179
+
9180
+ @_builtins.property
9181
+ @pulumi.getter(name="nodefsAvailable")
9182
+ def nodefs_available(self) -> Optional[_builtins.str]:
9183
+ """
9184
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9185
+ """
9186
+ return pulumi.get(self, "nodefs_available")
9187
+
9188
+ @_builtins.property
9189
+ @pulumi.getter(name="nodefsInodesFree")
9190
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
9191
+ """
9192
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9193
+ """
9194
+ return pulumi.get(self, "nodefs_inodes_free")
9195
+
9196
+ @_builtins.property
9197
+ @pulumi.getter(name="pidAvailable")
9198
+ def pid_available(self) -> Optional[_builtins.str]:
9199
+ """
9200
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
9201
+ """
9202
+ return pulumi.get(self, "pid_available")
9203
+
9204
+
9205
+ @pulumi.output_type
9206
+ class ClusterNodeConfigKubeletConfigEvictionSoft(dict):
9207
+ @staticmethod
9208
+ def __key_warning(key: str):
9209
+ suggest = None
9210
+ if key == "imagefsAvailable":
9211
+ suggest = "imagefs_available"
9212
+ elif key == "imagefsInodesFree":
9213
+ suggest = "imagefs_inodes_free"
9214
+ elif key == "memoryAvailable":
9215
+ suggest = "memory_available"
9216
+ elif key == "nodefsAvailable":
9217
+ suggest = "nodefs_available"
9218
+ elif key == "nodefsInodesFree":
9219
+ suggest = "nodefs_inodes_free"
9220
+ elif key == "pidAvailable":
9221
+ suggest = "pid_available"
9222
+
9223
+ if suggest:
9224
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfigEvictionSoft. Access the value via the '{suggest}' property getter instead.")
9225
+
9226
+ def __getitem__(self, key: str) -> Any:
9227
+ ClusterNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
9228
+ return super().__getitem__(key)
9229
+
9230
+ def get(self, key: str, default = None) -> Any:
9231
+ ClusterNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
9232
+ return super().get(key, default)
9233
+
9234
+ def __init__(__self__, *,
9235
+ imagefs_available: Optional[_builtins.str] = None,
9236
+ imagefs_inodes_free: Optional[_builtins.str] = None,
9237
+ memory_available: Optional[_builtins.str] = None,
9238
+ nodefs_available: Optional[_builtins.str] = None,
9239
+ nodefs_inodes_free: Optional[_builtins.str] = None,
9240
+ pid_available: Optional[_builtins.str] = None):
9241
+ """
9242
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
9243
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
9244
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
9245
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
9246
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
9247
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
9248
+ """
9249
+ if imagefs_available is not None:
9250
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
9251
+ if imagefs_inodes_free is not None:
9252
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
9253
+ if memory_available is not None:
9254
+ pulumi.set(__self__, "memory_available", memory_available)
9255
+ if nodefs_available is not None:
9256
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
9257
+ if nodefs_inodes_free is not None:
9258
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
9259
+ if pid_available is not None:
9260
+ pulumi.set(__self__, "pid_available", pid_available)
9261
+
9262
+ @_builtins.property
9263
+ @pulumi.getter(name="imagefsAvailable")
9264
+ def imagefs_available(self) -> Optional[_builtins.str]:
9265
+ """
9266
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
9267
+ """
9268
+ return pulumi.get(self, "imagefs_available")
9269
+
9270
+ @_builtins.property
9271
+ @pulumi.getter(name="imagefsInodesFree")
9272
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
9273
+ """
9274
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
9275
+ """
9276
+ return pulumi.get(self, "imagefs_inodes_free")
9277
+
9278
+ @_builtins.property
9279
+ @pulumi.getter(name="memoryAvailable")
9280
+ def memory_available(self) -> Optional[_builtins.str]:
9281
+ """
9282
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
9283
+ """
9284
+ return pulumi.get(self, "memory_available")
9285
+
9286
+ @_builtins.property
9287
+ @pulumi.getter(name="nodefsAvailable")
9288
+ def nodefs_available(self) -> Optional[_builtins.str]:
9289
+ """
9290
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
9291
+ """
9292
+ return pulumi.get(self, "nodefs_available")
9293
+
9294
+ @_builtins.property
9295
+ @pulumi.getter(name="nodefsInodesFree")
9296
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
9297
+ """
9298
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
9299
+ """
9300
+ return pulumi.get(self, "nodefs_inodes_free")
9301
+
9302
+ @_builtins.property
9303
+ @pulumi.getter(name="pidAvailable")
9304
+ def pid_available(self) -> Optional[_builtins.str]:
9305
+ """
9306
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
9307
+ """
9308
+ return pulumi.get(self, "pid_available")
9309
+
9310
+
9311
+ @pulumi.output_type
9312
+ class ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
9313
+ @staticmethod
9314
+ def __key_warning(key: str):
9315
+ suggest = None
9316
+ if key == "imagefsAvailable":
9317
+ suggest = "imagefs_available"
9318
+ elif key == "imagefsInodesFree":
9319
+ suggest = "imagefs_inodes_free"
9320
+ elif key == "memoryAvailable":
9321
+ suggest = "memory_available"
9322
+ elif key == "nodefsAvailable":
9323
+ suggest = "nodefs_available"
9324
+ elif key == "nodefsInodesFree":
9325
+ suggest = "nodefs_inodes_free"
9326
+ elif key == "pidAvailable":
9327
+ suggest = "pid_available"
9328
+
9329
+ if suggest:
9330
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod. Access the value via the '{suggest}' property getter instead.")
9331
+
9332
+ def __getitem__(self, key: str) -> Any:
9333
+ ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
9334
+ return super().__getitem__(key)
9335
+
9336
+ def get(self, key: str, default = None) -> Any:
9337
+ ClusterNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
9338
+ return super().get(key, default)
9339
+
9340
+ def __init__(__self__, *,
9341
+ imagefs_available: Optional[_builtins.str] = None,
9342
+ imagefs_inodes_free: Optional[_builtins.str] = None,
9343
+ memory_available: Optional[_builtins.str] = None,
9344
+ nodefs_available: Optional[_builtins.str] = None,
9345
+ nodefs_inodes_free: Optional[_builtins.str] = None,
9346
+ pid_available: Optional[_builtins.str] = None):
9347
+ """
9348
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9349
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9350
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
9351
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9352
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9353
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9354
+ """
9355
+ if imagefs_available is not None:
9356
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
9357
+ if imagefs_inodes_free is not None:
9358
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
9359
+ if memory_available is not None:
9360
+ pulumi.set(__self__, "memory_available", memory_available)
9361
+ if nodefs_available is not None:
9362
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
9363
+ if nodefs_inodes_free is not None:
9364
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
9365
+ if pid_available is not None:
9366
+ pulumi.set(__self__, "pid_available", pid_available)
9367
+
9368
+ @_builtins.property
9369
+ @pulumi.getter(name="imagefsAvailable")
9370
+ def imagefs_available(self) -> Optional[_builtins.str]:
9371
+ """
9372
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9373
+ """
9374
+ return pulumi.get(self, "imagefs_available")
9375
+
9376
+ @_builtins.property
9377
+ @pulumi.getter(name="imagefsInodesFree")
9378
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
9379
+ """
9380
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9381
+ """
9382
+ return pulumi.get(self, "imagefs_inodes_free")
9383
+
9384
+ @_builtins.property
9385
+ @pulumi.getter(name="memoryAvailable")
9386
+ def memory_available(self) -> Optional[_builtins.str]:
9387
+ """
9388
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
9389
+ """
9390
+ return pulumi.get(self, "memory_available")
9391
+
9392
+ @_builtins.property
9393
+ @pulumi.getter(name="nodefsAvailable")
9394
+ def nodefs_available(self) -> Optional[_builtins.str]:
9395
+ """
9396
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9397
+ """
9398
+ return pulumi.get(self, "nodefs_available")
9399
+
9400
+ @_builtins.property
9401
+ @pulumi.getter(name="nodefsInodesFree")
9402
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
9403
+ """
9404
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9405
+ """
9406
+ return pulumi.get(self, "nodefs_inodes_free")
9407
+
9408
+ @_builtins.property
9409
+ @pulumi.getter(name="pidAvailable")
9410
+ def pid_available(self) -> Optional[_builtins.str]:
9411
+ """
9412
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
9413
+ """
9414
+ return pulumi.get(self, "pid_available")
9415
+
9416
+
9014
9417
  @pulumi.output_type
9015
9418
  class ClusterNodeConfigLinuxNodeConfig(dict):
9016
9419
  @staticmethod
@@ -9020,6 +9423,10 @@ class ClusterNodeConfigLinuxNodeConfig(dict):
9020
9423
  suggest = "cgroup_mode"
9021
9424
  elif key == "hugepagesConfig":
9022
9425
  suggest = "hugepages_config"
9426
+ elif key == "transparentHugepageDefrag":
9427
+ suggest = "transparent_hugepage_defrag"
9428
+ elif key == "transparentHugepageEnabled":
9429
+ suggest = "transparent_hugepage_enabled"
9023
9430
 
9024
9431
  if suggest:
9025
9432
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigLinuxNodeConfig. Access the value via the '{suggest}' property getter instead.")
@@ -9035,7 +9442,9 @@ class ClusterNodeConfigLinuxNodeConfig(dict):
9035
9442
  def __init__(__self__, *,
9036
9443
  cgroup_mode: Optional[_builtins.str] = None,
9037
9444
  hugepages_config: Optional['outputs.ClusterNodeConfigLinuxNodeConfigHugepagesConfig'] = None,
9038
- sysctls: Optional[Mapping[str, _builtins.str]] = None):
9445
+ sysctls: Optional[Mapping[str, _builtins.str]] = None,
9446
+ transparent_hugepage_defrag: Optional[_builtins.str] = None,
9447
+ transparent_hugepage_enabled: Optional[_builtins.str] = None):
9039
9448
  """
9040
9449
  :param _builtins.str cgroup_mode: Possible cgroup modes that can be used.
9041
9450
  Accepted values are:
@@ -9047,6 +9456,8 @@ class ClusterNodeConfigLinuxNodeConfig(dict):
9047
9456
  and all pods running on the nodes. Specified as a map from the key, such as
9048
9457
  `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
9049
9458
  Note that validations happen all server side. All attributes are optional.
9459
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
9460
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
9050
9461
  """
9051
9462
  if cgroup_mode is not None:
9052
9463
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
@@ -9054,6 +9465,10 @@ class ClusterNodeConfigLinuxNodeConfig(dict):
9054
9465
  pulumi.set(__self__, "hugepages_config", hugepages_config)
9055
9466
  if sysctls is not None:
9056
9467
  pulumi.set(__self__, "sysctls", sysctls)
9468
+ if transparent_hugepage_defrag is not None:
9469
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
9470
+ if transparent_hugepage_enabled is not None:
9471
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
9057
9472
 
9058
9473
  @_builtins.property
9059
9474
  @pulumi.getter(name="cgroupMode")
@@ -9086,6 +9501,22 @@ class ClusterNodeConfigLinuxNodeConfig(dict):
9086
9501
  """
9087
9502
  return pulumi.get(self, "sysctls")
9088
9503
 
9504
+ @_builtins.property
9505
+ @pulumi.getter(name="transparentHugepageDefrag")
9506
+ def transparent_hugepage_defrag(self) -> Optional[_builtins.str]:
9507
+ """
9508
+ The Linux kernel transparent hugepage defrag setting.
9509
+ """
9510
+ return pulumi.get(self, "transparent_hugepage_defrag")
9511
+
9512
+ @_builtins.property
9513
+ @pulumi.getter(name="transparentHugepageEnabled")
9514
+ def transparent_hugepage_enabled(self) -> Optional[_builtins.str]:
9515
+ """
9516
+ The Linux kernel transparent hugepage setting.
9517
+ """
9518
+ return pulumi.get(self, "transparent_hugepage_enabled")
9519
+
9089
9520
 
9090
9521
  @pulumi.output_type
9091
9522
  class ClusterNodeConfigLinuxNodeConfigHugepagesConfig(dict):
@@ -9398,6 +9829,8 @@ class ClusterNodeConfigSoleTenantConfig(dict):
9398
9829
  suggest = None
9399
9830
  if key == "nodeAffinities":
9400
9831
  suggest = "node_affinities"
9832
+ elif key == "minNodeCpus":
9833
+ suggest = "min_node_cpus"
9401
9834
 
9402
9835
  if suggest:
9403
9836
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodeConfigSoleTenantConfig. Access the value via the '{suggest}' property getter instead.")
@@ -9411,20 +9844,32 @@ class ClusterNodeConfigSoleTenantConfig(dict):
9411
9844
  return super().get(key, default)
9412
9845
 
9413
9846
  def __init__(__self__, *,
9414
- node_affinities: Sequence['outputs.ClusterNodeConfigSoleTenantConfigNodeAffinity']):
9847
+ node_affinities: Sequence['outputs.ClusterNodeConfigSoleTenantConfigNodeAffinity'],
9848
+ min_node_cpus: Optional[_builtins.int] = None):
9415
9849
  """
9416
- :param Sequence['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
9850
+ :param Sequence['ClusterNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: The node affinity settings for the sole tenant node pool. Structure is documented below.
9851
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
9417
9852
  """
9418
9853
  pulumi.set(__self__, "node_affinities", node_affinities)
9854
+ if min_node_cpus is not None:
9855
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
9419
9856
 
9420
9857
  @_builtins.property
9421
9858
  @pulumi.getter(name="nodeAffinities")
9422
9859
  def node_affinities(self) -> Sequence['outputs.ClusterNodeConfigSoleTenantConfigNodeAffinity']:
9423
9860
  """
9424
- .
9861
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
9425
9862
  """
9426
9863
  return pulumi.get(self, "node_affinities")
9427
9864
 
9865
+ @_builtins.property
9866
+ @pulumi.getter(name="minNodeCpus")
9867
+ def min_node_cpus(self) -> Optional[_builtins.int]:
9868
+ """
9869
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
9870
+ """
9871
+ return pulumi.get(self, "min_node_cpus")
9872
+
9428
9873
 
9429
9874
  @pulumi.output_type
9430
9875
  class ClusterNodeConfigSoleTenantConfigNodeAffinity(dict):
@@ -10991,7 +11436,7 @@ class ClusterNodePoolNodeConfig(dict):
10991
11436
  :param _builtins.str service_account: The service account to be used by the Node VMs.
10992
11437
  If not specified, the "default" service account is used.
10993
11438
  :param 'ClusterNodePoolNodeConfigShieldedInstanceConfigArgs' shielded_instance_config: Shielded Instance options. Structure is documented below.
10994
- :param 'ClusterNodePoolNodeConfigSoleTenantConfigArgs' sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
11439
+ :param 'ClusterNodePoolNodeConfigSoleTenantConfigArgs' sole_tenant_config: Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
10995
11440
  :param _builtins.bool spot: A boolean that represents whether the underlying node VMs are spot.
10996
11441
  See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms)
10997
11442
  for more information. Defaults to false.
@@ -11470,7 +11915,7 @@ class ClusterNodePoolNodeConfig(dict):
11470
11915
  @pulumi.getter(name="soleTenantConfig")
11471
11916
  def sole_tenant_config(self) -> Optional['outputs.ClusterNodePoolNodeConfigSoleTenantConfig']:
11472
11917
  """
11473
- Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). `node_affinity` structure is documented below.
11918
+ Allows specifying multiple [node affinities](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes#node_affinity_and_anti-affinity) useful for running workloads on [sole tenant nodes](https://cloud.google.com/kubernetes-engine/docs/how-to/sole-tenancy). Structure is documented below.
11474
11919
  """
11475
11920
  return pulumi.get(self, "sole_tenant_config")
11476
11921
 
@@ -12312,6 +12757,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
12312
12757
  suggest = "cpu_cfs_quota_period"
12313
12758
  elif key == "cpuManagerPolicy":
12314
12759
  suggest = "cpu_manager_policy"
12760
+ elif key == "evictionMaxPodGracePeriodSeconds":
12761
+ suggest = "eviction_max_pod_grace_period_seconds"
12762
+ elif key == "evictionMinimumReclaim":
12763
+ suggest = "eviction_minimum_reclaim"
12764
+ elif key == "evictionSoft":
12765
+ suggest = "eviction_soft"
12766
+ elif key == "evictionSoftGracePeriod":
12767
+ suggest = "eviction_soft_grace_period"
12315
12768
  elif key == "imageGcHighThresholdPercent":
12316
12769
  suggest = "image_gc_high_threshold_percent"
12317
12770
  elif key == "imageGcLowThresholdPercent":
@@ -12322,6 +12775,8 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
12322
12775
  suggest = "image_minimum_gc_age"
12323
12776
  elif key == "insecureKubeletReadonlyPortEnabled":
12324
12777
  suggest = "insecure_kubelet_readonly_port_enabled"
12778
+ elif key == "maxParallelImagePulls":
12779
+ suggest = "max_parallel_image_pulls"
12325
12780
  elif key == "podPidsLimit":
12326
12781
  suggest = "pod_pids_limit"
12327
12782
  elif key == "singleProcessOomKill":
@@ -12345,11 +12800,16 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
12345
12800
  cpu_cfs_quota: Optional[_builtins.bool] = None,
12346
12801
  cpu_cfs_quota_period: Optional[_builtins.str] = None,
12347
12802
  cpu_manager_policy: Optional[_builtins.str] = None,
12803
+ eviction_max_pod_grace_period_seconds: Optional[_builtins.int] = None,
12804
+ eviction_minimum_reclaim: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim'] = None,
12805
+ eviction_soft: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionSoft'] = None,
12806
+ eviction_soft_grace_period: Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod'] = None,
12348
12807
  image_gc_high_threshold_percent: Optional[_builtins.int] = None,
12349
12808
  image_gc_low_threshold_percent: Optional[_builtins.int] = None,
12350
12809
  image_maximum_gc_age: Optional[_builtins.str] = None,
12351
12810
  image_minimum_gc_age: Optional[_builtins.str] = None,
12352
12811
  insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
12812
+ max_parallel_image_pulls: Optional[_builtins.int] = None,
12353
12813
  pod_pids_limit: Optional[_builtins.int] = None,
12354
12814
  single_process_oom_kill: Optional[_builtins.bool] = None):
12355
12815
  """
@@ -12371,11 +12831,16 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
12371
12831
  One of `"none"` or `"static"`. If unset (or set to the empty string `""`), the API will treat the field as if set to "none".
12372
12832
  Prior to the 6.4.0 this field was marked as required. The workaround for the required field
12373
12833
  is setting the empty string `""`, which will function identically to not setting this field.
12834
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
12835
+ :param 'ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs' eviction_minimum_reclaim: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
12836
+ :param 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs' eviction_soft: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
12837
+ :param 'ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs' eviction_soft_grace_period: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
12374
12838
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run. The integer must be between 10 and 85, inclusive.
12375
12839
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The integer must be between 10 and 85, inclusive.
12376
12840
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`, and `"2h45m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". The value must be a positive duration.
12377
12841
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected. Specified as a sequence of decimal numbers, each with optional fraction and a unit suffix, such as `"300s"`, `"1.5m"`. The value cannot be greater than "2m".
12378
12842
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
12843
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
12379
12844
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod. The value must be greater than or equal to 1024 and less than 4194304.
12380
12845
  :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer. If true, the processes in the container will be OOM killed individually instead of as a group.
12381
12846
  """
@@ -12391,6 +12856,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
12391
12856
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
12392
12857
  if cpu_manager_policy is not None:
12393
12858
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
12859
+ if eviction_max_pod_grace_period_seconds is not None:
12860
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
12861
+ if eviction_minimum_reclaim is not None:
12862
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
12863
+ if eviction_soft is not None:
12864
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
12865
+ if eviction_soft_grace_period is not None:
12866
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
12394
12867
  if image_gc_high_threshold_percent is not None:
12395
12868
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
12396
12869
  if image_gc_low_threshold_percent is not None:
@@ -12401,6 +12874,8 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
12401
12874
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
12402
12875
  if insecure_kubelet_readonly_port_enabled is not None:
12403
12876
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
12877
+ if max_parallel_image_pulls is not None:
12878
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
12404
12879
  if pod_pids_limit is not None:
12405
12880
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
12406
12881
  if single_process_oom_kill is not None:
@@ -12466,6 +12941,38 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
12466
12941
  """
12467
12942
  return pulumi.get(self, "cpu_manager_policy")
12468
12943
 
12944
+ @_builtins.property
12945
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
12946
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[_builtins.int]:
12947
+ """
12948
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. The integer must be positive and not exceed 300.
12949
+ """
12950
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
12951
+
12952
+ @_builtins.property
12953
+ @pulumi.getter(name="evictionMinimumReclaim")
12954
+ def eviction_minimum_reclaim(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim']:
12955
+ """
12956
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction. Structure is documented below.
12957
+ """
12958
+ return pulumi.get(self, "eviction_minimum_reclaim")
12959
+
12960
+ @_builtins.property
12961
+ @pulumi.getter(name="evictionSoft")
12962
+ def eviction_soft(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionSoft']:
12963
+ """
12964
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds. Structure is documented below.
12965
+ """
12966
+ return pulumi.get(self, "eviction_soft")
12967
+
12968
+ @_builtins.property
12969
+ @pulumi.getter(name="evictionSoftGracePeriod")
12970
+ def eviction_soft_grace_period(self) -> Optional['outputs.ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod']:
12971
+ """
12972
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period. Structure is documented below.
12973
+ """
12974
+ return pulumi.get(self, "eviction_soft_grace_period")
12975
+
12469
12976
  @_builtins.property
12470
12977
  @pulumi.getter(name="imageGcHighThresholdPercent")
12471
12978
  def image_gc_high_threshold_percent(self) -> Optional[_builtins.int]:
@@ -12506,6 +13013,14 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
12506
13013
  """
12507
13014
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
12508
13015
 
13016
+ @_builtins.property
13017
+ @pulumi.getter(name="maxParallelImagePulls")
13018
+ def max_parallel_image_pulls(self) -> Optional[_builtins.int]:
13019
+ """
13020
+ Set the maximum number of image pulls in parallel. The integer must be between 2 and 5, inclusive.
13021
+ """
13022
+ return pulumi.get(self, "max_parallel_image_pulls")
13023
+
12509
13024
  @_builtins.property
12510
13025
  @pulumi.getter(name="podPidsLimit")
12511
13026
  def pod_pids_limit(self) -> Optional[_builtins.int]:
@@ -12523,6 +13038,324 @@ class ClusterNodePoolNodeConfigKubeletConfig(dict):
12523
13038
  return pulumi.get(self, "single_process_oom_kill")
12524
13039
 
12525
13040
 
13041
+ @pulumi.output_type
13042
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
13043
+ @staticmethod
13044
+ def __key_warning(key: str):
13045
+ suggest = None
13046
+ if key == "imagefsAvailable":
13047
+ suggest = "imagefs_available"
13048
+ elif key == "imagefsInodesFree":
13049
+ suggest = "imagefs_inodes_free"
13050
+ elif key == "memoryAvailable":
13051
+ suggest = "memory_available"
13052
+ elif key == "nodefsAvailable":
13053
+ suggest = "nodefs_available"
13054
+ elif key == "nodefsInodesFree":
13055
+ suggest = "nodefs_inodes_free"
13056
+ elif key == "pidAvailable":
13057
+ suggest = "pid_available"
13058
+
13059
+ if suggest:
13060
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim. Access the value via the '{suggest}' property getter instead.")
13061
+
13062
+ def __getitem__(self, key: str) -> Any:
13063
+ ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
13064
+ return super().__getitem__(key)
13065
+
13066
+ def get(self, key: str, default = None) -> Any:
13067
+ ClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
13068
+ return super().get(key, default)
13069
+
13070
+ def __init__(__self__, *,
13071
+ imagefs_available: Optional[_builtins.str] = None,
13072
+ imagefs_inodes_free: Optional[_builtins.str] = None,
13073
+ memory_available: Optional[_builtins.str] = None,
13074
+ nodefs_available: Optional[_builtins.str] = None,
13075
+ nodefs_inodes_free: Optional[_builtins.str] = None,
13076
+ pid_available: Optional[_builtins.str] = None):
13077
+ """
13078
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
13079
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
13080
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
13081
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
13082
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
13083
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
13084
+ """
13085
+ if imagefs_available is not None:
13086
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
13087
+ if imagefs_inodes_free is not None:
13088
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
13089
+ if memory_available is not None:
13090
+ pulumi.set(__self__, "memory_available", memory_available)
13091
+ if nodefs_available is not None:
13092
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
13093
+ if nodefs_inodes_free is not None:
13094
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
13095
+ if pid_available is not None:
13096
+ pulumi.set(__self__, "pid_available", pid_available)
13097
+
13098
+ @_builtins.property
13099
+ @pulumi.getter(name="imagefsAvailable")
13100
+ def imagefs_available(self) -> Optional[_builtins.str]:
13101
+ """
13102
+ Defines percentage of minimum reclaim for imagefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
13103
+ """
13104
+ return pulumi.get(self, "imagefs_available")
13105
+
13106
+ @_builtins.property
13107
+ @pulumi.getter(name="imagefsInodesFree")
13108
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
13109
+ """
13110
+ Defines percentage of minimum reclaim for imagefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
13111
+ """
13112
+ return pulumi.get(self, "imagefs_inodes_free")
13113
+
13114
+ @_builtins.property
13115
+ @pulumi.getter(name="memoryAvailable")
13116
+ def memory_available(self) -> Optional[_builtins.str]:
13117
+ """
13118
+ Defines percentage of minimum reclaim for memory.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
13119
+ """
13120
+ return pulumi.get(self, "memory_available")
13121
+
13122
+ @_builtins.property
13123
+ @pulumi.getter(name="nodefsAvailable")
13124
+ def nodefs_available(self) -> Optional[_builtins.str]:
13125
+ """
13126
+ Defines percentage of minimum reclaim for nodefs.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
13127
+ """
13128
+ return pulumi.get(self, "nodefs_available")
13129
+
13130
+ @_builtins.property
13131
+ @pulumi.getter(name="nodefsInodesFree")
13132
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
13133
+ """
13134
+ Defines percentage of minimum reclaim for nodefs.inodesFree. The value must be a percentage no more than `"10%"`, such as `"5%"`.
13135
+ """
13136
+ return pulumi.get(self, "nodefs_inodes_free")
13137
+
13138
+ @_builtins.property
13139
+ @pulumi.getter(name="pidAvailable")
13140
+ def pid_available(self) -> Optional[_builtins.str]:
13141
+ """
13142
+ Defines percentage of minimum reclaim for pid.available. The value must be a percentage no more than `"10%"`, such as `"5%"`.
13143
+ """
13144
+ return pulumi.get(self, "pid_available")
13145
+
13146
+
13147
+ @pulumi.output_type
13148
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoft(dict):
13149
+ @staticmethod
13150
+ def __key_warning(key: str):
13151
+ suggest = None
13152
+ if key == "imagefsAvailable":
13153
+ suggest = "imagefs_available"
13154
+ elif key == "imagefsInodesFree":
13155
+ suggest = "imagefs_inodes_free"
13156
+ elif key == "memoryAvailable":
13157
+ suggest = "memory_available"
13158
+ elif key == "nodefsAvailable":
13159
+ suggest = "nodefs_available"
13160
+ elif key == "nodefsInodesFree":
13161
+ suggest = "nodefs_inodes_free"
13162
+ elif key == "pidAvailable":
13163
+ suggest = "pid_available"
13164
+
13165
+ if suggest:
13166
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfigEvictionSoft. Access the value via the '{suggest}' property getter instead.")
13167
+
13168
+ def __getitem__(self, key: str) -> Any:
13169
+ ClusterNodePoolNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
13170
+ return super().__getitem__(key)
13171
+
13172
+ def get(self, key: str, default = None) -> Any:
13173
+ ClusterNodePoolNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
13174
+ return super().get(key, default)
13175
+
13176
+ def __init__(__self__, *,
13177
+ imagefs_available: Optional[_builtins.str] = None,
13178
+ imagefs_inodes_free: Optional[_builtins.str] = None,
13179
+ memory_available: Optional[_builtins.str] = None,
13180
+ nodefs_available: Optional[_builtins.str] = None,
13181
+ nodefs_inodes_free: Optional[_builtins.str] = None,
13182
+ pid_available: Optional[_builtins.str] = None):
13183
+ """
13184
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
13185
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
13186
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
13187
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
13188
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
13189
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
13190
+ """
13191
+ if imagefs_available is not None:
13192
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
13193
+ if imagefs_inodes_free is not None:
13194
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
13195
+ if memory_available is not None:
13196
+ pulumi.set(__self__, "memory_available", memory_available)
13197
+ if nodefs_available is not None:
13198
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
13199
+ if nodefs_inodes_free is not None:
13200
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
13201
+ if pid_available is not None:
13202
+ pulumi.set(__self__, "pid_available", pid_available)
13203
+
13204
+ @_builtins.property
13205
+ @pulumi.getter(name="imagefsAvailable")
13206
+ def imagefs_available(self) -> Optional[_builtins.str]:
13207
+ """
13208
+ Defines percentage of soft eviction threshold for imagefs.available. The value must be a percentage between `15%` and `50%`, such as `"20%"`.
13209
+ """
13210
+ return pulumi.get(self, "imagefs_available")
13211
+
13212
+ @_builtins.property
13213
+ @pulumi.getter(name="imagefsInodesFree")
13214
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
13215
+ """
13216
+ Defines percentage of soft eviction threshold for imagefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
13217
+ """
13218
+ return pulumi.get(self, "imagefs_inodes_free")
13219
+
13220
+ @_builtins.property
13221
+ @pulumi.getter(name="memoryAvailable")
13222
+ def memory_available(self) -> Optional[_builtins.str]:
13223
+ """
13224
+ Defines quantity of soft eviction threshold for memory.available. The value must be a quantity, such as `"100Mi"`. The value must be greater than or equal to the GKE default hard eviction threshold of `"100Mi"` and less than 50% of machine memory.
13225
+ """
13226
+ return pulumi.get(self, "memory_available")
13227
+
13228
+ @_builtins.property
13229
+ @pulumi.getter(name="nodefsAvailable")
13230
+ def nodefs_available(self) -> Optional[_builtins.str]:
13231
+ """
13232
+ Defines percentage of soft eviction threshold for nodefs.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
13233
+ """
13234
+ return pulumi.get(self, "nodefs_available")
13235
+
13236
+ @_builtins.property
13237
+ @pulumi.getter(name="nodefsInodesFree")
13238
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
13239
+ """
13240
+ Defines percentage of soft eviction threshold for nodefs.inodesFree. The value must be a percentage between `5%` and `50%`, such as `"20%"`.
13241
+ """
13242
+ return pulumi.get(self, "nodefs_inodes_free")
13243
+
13244
+ @_builtins.property
13245
+ @pulumi.getter(name="pidAvailable")
13246
+ def pid_available(self) -> Optional[_builtins.str]:
13247
+ """
13248
+ Defines percentage of soft eviction threshold for pid.available. The value must be a percentage between `10%` and `50%`, such as `"20%"`.
13249
+ """
13250
+ return pulumi.get(self, "pid_available")
13251
+
13252
+
13253
+ @pulumi.output_type
13254
+ class ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
13255
+ @staticmethod
13256
+ def __key_warning(key: str):
13257
+ suggest = None
13258
+ if key == "imagefsAvailable":
13259
+ suggest = "imagefs_available"
13260
+ elif key == "imagefsInodesFree":
13261
+ suggest = "imagefs_inodes_free"
13262
+ elif key == "memoryAvailable":
13263
+ suggest = "memory_available"
13264
+ elif key == "nodefsAvailable":
13265
+ suggest = "nodefs_available"
13266
+ elif key == "nodefsInodesFree":
13267
+ suggest = "nodefs_inodes_free"
13268
+ elif key == "pidAvailable":
13269
+ suggest = "pid_available"
13270
+
13271
+ if suggest:
13272
+ pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod. Access the value via the '{suggest}' property getter instead.")
13273
+
13274
+ def __getitem__(self, key: str) -> Any:
13275
+ ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
13276
+ return super().__getitem__(key)
13277
+
13278
+ def get(self, key: str, default = None) -> Any:
13279
+ ClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
13280
+ return super().get(key, default)
13281
+
13282
+ def __init__(__self__, *,
13283
+ imagefs_available: Optional[_builtins.str] = None,
13284
+ imagefs_inodes_free: Optional[_builtins.str] = None,
13285
+ memory_available: Optional[_builtins.str] = None,
13286
+ nodefs_available: Optional[_builtins.str] = None,
13287
+ nodefs_inodes_free: Optional[_builtins.str] = None,
13288
+ pid_available: Optional[_builtins.str] = None):
13289
+ """
13290
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13291
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13292
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
13293
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13294
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13295
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13296
+ """
13297
+ if imagefs_available is not None:
13298
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
13299
+ if imagefs_inodes_free is not None:
13300
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
13301
+ if memory_available is not None:
13302
+ pulumi.set(__self__, "memory_available", memory_available)
13303
+ if nodefs_available is not None:
13304
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
13305
+ if nodefs_inodes_free is not None:
13306
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
13307
+ if pid_available is not None:
13308
+ pulumi.set(__self__, "pid_available", pid_available)
13309
+
13310
+ @_builtins.property
13311
+ @pulumi.getter(name="imagefsAvailable")
13312
+ def imagefs_available(self) -> Optional[_builtins.str]:
13313
+ """
13314
+ Defines grace period for the imagefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13315
+ """
13316
+ return pulumi.get(self, "imagefs_available")
13317
+
13318
+ @_builtins.property
13319
+ @pulumi.getter(name="imagefsInodesFree")
13320
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
13321
+ """
13322
+ Defines grace period for the imagefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13323
+ """
13324
+ return pulumi.get(self, "imagefs_inodes_free")
13325
+
13326
+ @_builtins.property
13327
+ @pulumi.getter(name="memoryAvailable")
13328
+ def memory_available(self) -> Optional[_builtins.str]:
13329
+ """
13330
+ Defines grace period for the memory.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`, such as `"30s"`, `"1m30s"`, `"2.5m"`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
13331
+ """
13332
+ return pulumi.get(self, "memory_available")
13333
+
13334
+ @_builtins.property
13335
+ @pulumi.getter(name="nodefsAvailable")
13336
+ def nodefs_available(self) -> Optional[_builtins.str]:
13337
+ """
13338
+ Defines grace period for the nodefs.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13339
+ """
13340
+ return pulumi.get(self, "nodefs_available")
13341
+
13342
+ @_builtins.property
13343
+ @pulumi.getter(name="nodefsInodesFree")
13344
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
13345
+ """
13346
+ Defines grace period for the nodefs.inodesFree soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13347
+ """
13348
+ return pulumi.get(self, "nodefs_inodes_free")
13349
+
13350
+ @_builtins.property
13351
+ @pulumi.getter(name="pidAvailable")
13352
+ def pid_available(self) -> Optional[_builtins.str]:
13353
+ """
13354
+ Defines grace period for the pid.available soft eviction threshold. The value must be a positive duration string no more than `"5m"`.
13355
+ """
13356
+ return pulumi.get(self, "pid_available")
13357
+
13358
+
12526
13359
  @pulumi.output_type
12527
13360
  class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
12528
13361
  @staticmethod
@@ -12532,6 +13365,10 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
12532
13365
  suggest = "cgroup_mode"
12533
13366
  elif key == "hugepagesConfig":
12534
13367
  suggest = "hugepages_config"
13368
+ elif key == "transparentHugepageDefrag":
13369
+ suggest = "transparent_hugepage_defrag"
13370
+ elif key == "transparentHugepageEnabled":
13371
+ suggest = "transparent_hugepage_enabled"
12535
13372
 
12536
13373
  if suggest:
12537
13374
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigLinuxNodeConfig. Access the value via the '{suggest}' property getter instead.")
@@ -12547,7 +13384,9 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
12547
13384
  def __init__(__self__, *,
12548
13385
  cgroup_mode: Optional[_builtins.str] = None,
12549
13386
  hugepages_config: Optional['outputs.ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig'] = None,
12550
- sysctls: Optional[Mapping[str, _builtins.str]] = None):
13387
+ sysctls: Optional[Mapping[str, _builtins.str]] = None,
13388
+ transparent_hugepage_defrag: Optional[_builtins.str] = None,
13389
+ transparent_hugepage_enabled: Optional[_builtins.str] = None):
12551
13390
  """
12552
13391
  :param _builtins.str cgroup_mode: Possible cgroup modes that can be used.
12553
13392
  Accepted values are:
@@ -12559,6 +13398,8 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
12559
13398
  and all pods running on the nodes. Specified as a map from the key, such as
12560
13399
  `net.core.wmem_max`, to a string value. Currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
12561
13400
  Note that validations happen all server side. All attributes are optional.
13401
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
13402
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
12562
13403
  """
12563
13404
  if cgroup_mode is not None:
12564
13405
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
@@ -12566,6 +13407,10 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
12566
13407
  pulumi.set(__self__, "hugepages_config", hugepages_config)
12567
13408
  if sysctls is not None:
12568
13409
  pulumi.set(__self__, "sysctls", sysctls)
13410
+ if transparent_hugepage_defrag is not None:
13411
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
13412
+ if transparent_hugepage_enabled is not None:
13413
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
12569
13414
 
12570
13415
  @_builtins.property
12571
13416
  @pulumi.getter(name="cgroupMode")
@@ -12598,6 +13443,22 @@ class ClusterNodePoolNodeConfigLinuxNodeConfig(dict):
12598
13443
  """
12599
13444
  return pulumi.get(self, "sysctls")
12600
13445
 
13446
+ @_builtins.property
13447
+ @pulumi.getter(name="transparentHugepageDefrag")
13448
+ def transparent_hugepage_defrag(self) -> Optional[_builtins.str]:
13449
+ """
13450
+ The Linux kernel transparent hugepage defrag setting.
13451
+ """
13452
+ return pulumi.get(self, "transparent_hugepage_defrag")
13453
+
13454
+ @_builtins.property
13455
+ @pulumi.getter(name="transparentHugepageEnabled")
13456
+ def transparent_hugepage_enabled(self) -> Optional[_builtins.str]:
13457
+ """
13458
+ The Linux kernel transparent hugepage setting.
13459
+ """
13460
+ return pulumi.get(self, "transparent_hugepage_enabled")
13461
+
12601
13462
 
12602
13463
  @pulumi.output_type
12603
13464
  class ClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfig(dict):
@@ -12910,6 +13771,8 @@ class ClusterNodePoolNodeConfigSoleTenantConfig(dict):
12910
13771
  suggest = None
12911
13772
  if key == "nodeAffinities":
12912
13773
  suggest = "node_affinities"
13774
+ elif key == "minNodeCpus":
13775
+ suggest = "min_node_cpus"
12913
13776
 
12914
13777
  if suggest:
12915
13778
  pulumi.log.warn(f"Key '{key}' not found in ClusterNodePoolNodeConfigSoleTenantConfig. Access the value via the '{suggest}' property getter instead.")
@@ -12923,20 +13786,32 @@ class ClusterNodePoolNodeConfigSoleTenantConfig(dict):
12923
13786
  return super().get(key, default)
12924
13787
 
12925
13788
  def __init__(__self__, *,
12926
- node_affinities: Sequence['outputs.ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity']):
13789
+ node_affinities: Sequence['outputs.ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity'],
13790
+ min_node_cpus: Optional[_builtins.int] = None):
12927
13791
  """
12928
- :param Sequence['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
13792
+ :param Sequence['ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: The node affinity settings for the sole tenant node pool. Structure is documented below.
13793
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feeature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
12929
13794
  """
12930
13795
  pulumi.set(__self__, "node_affinities", node_affinities)
13796
+ if min_node_cpus is not None:
13797
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
12931
13798
 
12932
13799
  @_builtins.property
12933
13800
  @pulumi.getter(name="nodeAffinities")
12934
13801
  def node_affinities(self) -> Sequence['outputs.ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity']:
12935
13802
  """
12936
- .
13803
+ The node affinity settings for the sole tenant node pool. Structure is documented below.
12937
13804
  """
12938
13805
  return pulumi.get(self, "node_affinities")
12939
13806
 
13807
+ @_builtins.property
13808
+ @pulumi.getter(name="minNodeCpus")
13809
+ def min_node_cpus(self) -> Optional[_builtins.int]:
13810
+ """
13811
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feeature is disabled. The value should be greater than or equal to half of the machine type's CPU count.
13812
+ """
13813
+ return pulumi.get(self, "min_node_cpus")
13814
+
12940
13815
 
12941
13816
  @pulumi.output_type
12942
13817
  class ClusterNodePoolNodeConfigSoleTenantConfigNodeAffinity(dict):
@@ -16266,6 +17141,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
16266
17141
  suggest = "cpu_cfs_quota_period"
16267
17142
  elif key == "cpuManagerPolicy":
16268
17143
  suggest = "cpu_manager_policy"
17144
+ elif key == "evictionMaxPodGracePeriodSeconds":
17145
+ suggest = "eviction_max_pod_grace_period_seconds"
17146
+ elif key == "evictionMinimumReclaim":
17147
+ suggest = "eviction_minimum_reclaim"
17148
+ elif key == "evictionSoft":
17149
+ suggest = "eviction_soft"
17150
+ elif key == "evictionSoftGracePeriod":
17151
+ suggest = "eviction_soft_grace_period"
16269
17152
  elif key == "imageGcHighThresholdPercent":
16270
17153
  suggest = "image_gc_high_threshold_percent"
16271
17154
  elif key == "imageGcLowThresholdPercent":
@@ -16276,6 +17159,8 @@ class NodePoolNodeConfigKubeletConfig(dict):
16276
17159
  suggest = "image_minimum_gc_age"
16277
17160
  elif key == "insecureKubeletReadonlyPortEnabled":
16278
17161
  suggest = "insecure_kubelet_readonly_port_enabled"
17162
+ elif key == "maxParallelImagePulls":
17163
+ suggest = "max_parallel_image_pulls"
16279
17164
  elif key == "podPidsLimit":
16280
17165
  suggest = "pod_pids_limit"
16281
17166
  elif key == "singleProcessOomKill":
@@ -16299,11 +17184,16 @@ class NodePoolNodeConfigKubeletConfig(dict):
16299
17184
  cpu_cfs_quota: Optional[_builtins.bool] = None,
16300
17185
  cpu_cfs_quota_period: Optional[_builtins.str] = None,
16301
17186
  cpu_manager_policy: Optional[_builtins.str] = None,
17187
+ eviction_max_pod_grace_period_seconds: Optional[_builtins.int] = None,
17188
+ eviction_minimum_reclaim: Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim'] = None,
17189
+ eviction_soft: Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionSoft'] = None,
17190
+ eviction_soft_grace_period: Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod'] = None,
16302
17191
  image_gc_high_threshold_percent: Optional[_builtins.int] = None,
16303
17192
  image_gc_low_threshold_percent: Optional[_builtins.int] = None,
16304
17193
  image_maximum_gc_age: Optional[_builtins.str] = None,
16305
17194
  image_minimum_gc_age: Optional[_builtins.str] = None,
16306
17195
  insecure_kubelet_readonly_port_enabled: Optional[_builtins.str] = None,
17196
+ max_parallel_image_pulls: Optional[_builtins.int] = None,
16307
17197
  pod_pids_limit: Optional[_builtins.int] = None,
16308
17198
  single_process_oom_kill: Optional[_builtins.bool] = None):
16309
17199
  """
@@ -16313,11 +17203,16 @@ class NodePoolNodeConfigKubeletConfig(dict):
16313
17203
  :param _builtins.bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
16314
17204
  :param _builtins.str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
16315
17205
  :param _builtins.str cpu_manager_policy: Control the CPU management policy on the node.
17206
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
17207
+ :param 'NodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs' eviction_minimum_reclaim: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
17208
+ :param 'NodePoolNodeConfigKubeletConfigEvictionSoftArgs' eviction_soft: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
17209
+ :param 'NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs' eviction_soft_grace_period: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
16316
17210
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run.
16317
17211
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
16318
17212
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected.
16319
17213
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
16320
17214
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
17215
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
16321
17216
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
16322
17217
  :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer.
16323
17218
  """
@@ -16333,6 +17228,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
16333
17228
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
16334
17229
  if cpu_manager_policy is not None:
16335
17230
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
17231
+ if eviction_max_pod_grace_period_seconds is not None:
17232
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
17233
+ if eviction_minimum_reclaim is not None:
17234
+ pulumi.set(__self__, "eviction_minimum_reclaim", eviction_minimum_reclaim)
17235
+ if eviction_soft is not None:
17236
+ pulumi.set(__self__, "eviction_soft", eviction_soft)
17237
+ if eviction_soft_grace_period is not None:
17238
+ pulumi.set(__self__, "eviction_soft_grace_period", eviction_soft_grace_period)
16336
17239
  if image_gc_high_threshold_percent is not None:
16337
17240
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
16338
17241
  if image_gc_low_threshold_percent is not None:
@@ -16343,6 +17246,8 @@ class NodePoolNodeConfigKubeletConfig(dict):
16343
17246
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
16344
17247
  if insecure_kubelet_readonly_port_enabled is not None:
16345
17248
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
17249
+ if max_parallel_image_pulls is not None:
17250
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
16346
17251
  if pod_pids_limit is not None:
16347
17252
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
16348
17253
  if single_process_oom_kill is not None:
@@ -16396,6 +17301,38 @@ class NodePoolNodeConfigKubeletConfig(dict):
16396
17301
  """
16397
17302
  return pulumi.get(self, "cpu_manager_policy")
16398
17303
 
17304
+ @_builtins.property
17305
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
17306
+ def eviction_max_pod_grace_period_seconds(self) -> Optional[_builtins.int]:
17307
+ """
17308
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
17309
+ """
17310
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
17311
+
17312
+ @_builtins.property
17313
+ @pulumi.getter(name="evictionMinimumReclaim")
17314
+ def eviction_minimum_reclaim(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim']:
17315
+ """
17316
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
17317
+ """
17318
+ return pulumi.get(self, "eviction_minimum_reclaim")
17319
+
17320
+ @_builtins.property
17321
+ @pulumi.getter(name="evictionSoft")
17322
+ def eviction_soft(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionSoft']:
17323
+ """
17324
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
17325
+ """
17326
+ return pulumi.get(self, "eviction_soft")
17327
+
17328
+ @_builtins.property
17329
+ @pulumi.getter(name="evictionSoftGracePeriod")
17330
+ def eviction_soft_grace_period(self) -> Optional['outputs.NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod']:
17331
+ """
17332
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
17333
+ """
17334
+ return pulumi.get(self, "eviction_soft_grace_period")
17335
+
16399
17336
  @_builtins.property
16400
17337
  @pulumi.getter(name="imageGcHighThresholdPercent")
16401
17338
  def image_gc_high_threshold_percent(self) -> Optional[_builtins.int]:
@@ -16436,6 +17373,14 @@ class NodePoolNodeConfigKubeletConfig(dict):
16436
17373
  """
16437
17374
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
16438
17375
 
17376
+ @_builtins.property
17377
+ @pulumi.getter(name="maxParallelImagePulls")
17378
+ def max_parallel_image_pulls(self) -> Optional[_builtins.int]:
17379
+ """
17380
+ Set the maximum number of image pulls in parallel.
17381
+ """
17382
+ return pulumi.get(self, "max_parallel_image_pulls")
17383
+
16439
17384
  @_builtins.property
16440
17385
  @pulumi.getter(name="podPidsLimit")
16441
17386
  def pod_pids_limit(self) -> Optional[_builtins.int]:
@@ -16453,6 +17398,324 @@ class NodePoolNodeConfigKubeletConfig(dict):
16453
17398
  return pulumi.get(self, "single_process_oom_kill")
16454
17399
 
16455
17400
 
17401
+ @pulumi.output_type
17402
+ class NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim(dict):
17403
+ @staticmethod
17404
+ def __key_warning(key: str):
17405
+ suggest = None
17406
+ if key == "imagefsAvailable":
17407
+ suggest = "imagefs_available"
17408
+ elif key == "imagefsInodesFree":
17409
+ suggest = "imagefs_inodes_free"
17410
+ elif key == "memoryAvailable":
17411
+ suggest = "memory_available"
17412
+ elif key == "nodefsAvailable":
17413
+ suggest = "nodefs_available"
17414
+ elif key == "nodefsInodesFree":
17415
+ suggest = "nodefs_inodes_free"
17416
+ elif key == "pidAvailable":
17417
+ suggest = "pid_available"
17418
+
17419
+ if suggest:
17420
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim. Access the value via the '{suggest}' property getter instead.")
17421
+
17422
+ def __getitem__(self, key: str) -> Any:
17423
+ NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
17424
+ return super().__getitem__(key)
17425
+
17426
+ def get(self, key: str, default = None) -> Any:
17427
+ NodePoolNodeConfigKubeletConfigEvictionMinimumReclaim.__key_warning(key)
17428
+ return super().get(key, default)
17429
+
17430
+ def __init__(__self__, *,
17431
+ imagefs_available: Optional[_builtins.str] = None,
17432
+ imagefs_inodes_free: Optional[_builtins.str] = None,
17433
+ memory_available: Optional[_builtins.str] = None,
17434
+ nodefs_available: Optional[_builtins.str] = None,
17435
+ nodefs_inodes_free: Optional[_builtins.str] = None,
17436
+ pid_available: Optional[_builtins.str] = None):
17437
+ """
17438
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available.
17439
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree.
17440
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available.
17441
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available.
17442
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree.
17443
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available.
17444
+ """
17445
+ if imagefs_available is not None:
17446
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
17447
+ if imagefs_inodes_free is not None:
17448
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
17449
+ if memory_available is not None:
17450
+ pulumi.set(__self__, "memory_available", memory_available)
17451
+ if nodefs_available is not None:
17452
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
17453
+ if nodefs_inodes_free is not None:
17454
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
17455
+ if pid_available is not None:
17456
+ pulumi.set(__self__, "pid_available", pid_available)
17457
+
17458
+ @_builtins.property
17459
+ @pulumi.getter(name="imagefsAvailable")
17460
+ def imagefs_available(self) -> Optional[_builtins.str]:
17461
+ """
17462
+ Defines percentage of minimum reclaim for imagefs.available.
17463
+ """
17464
+ return pulumi.get(self, "imagefs_available")
17465
+
17466
+ @_builtins.property
17467
+ @pulumi.getter(name="imagefsInodesFree")
17468
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
17469
+ """
17470
+ Defines percentage of minimum reclaim for imagefs.inodesFree.
17471
+ """
17472
+ return pulumi.get(self, "imagefs_inodes_free")
17473
+
17474
+ @_builtins.property
17475
+ @pulumi.getter(name="memoryAvailable")
17476
+ def memory_available(self) -> Optional[_builtins.str]:
17477
+ """
17478
+ Defines percentage of minimum reclaim for memory.available.
17479
+ """
17480
+ return pulumi.get(self, "memory_available")
17481
+
17482
+ @_builtins.property
17483
+ @pulumi.getter(name="nodefsAvailable")
17484
+ def nodefs_available(self) -> Optional[_builtins.str]:
17485
+ """
17486
+ Defines percentage of minimum reclaim for nodefs.available.
17487
+ """
17488
+ return pulumi.get(self, "nodefs_available")
17489
+
17490
+ @_builtins.property
17491
+ @pulumi.getter(name="nodefsInodesFree")
17492
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
17493
+ """
17494
+ Defines percentage of minimum reclaim for nodefs.inodesFree.
17495
+ """
17496
+ return pulumi.get(self, "nodefs_inodes_free")
17497
+
17498
+ @_builtins.property
17499
+ @pulumi.getter(name="pidAvailable")
17500
+ def pid_available(self) -> Optional[_builtins.str]:
17501
+ """
17502
+ Defines percentage of minimum reclaim for pid.available.
17503
+ """
17504
+ return pulumi.get(self, "pid_available")
17505
+
17506
+
17507
+ @pulumi.output_type
17508
+ class NodePoolNodeConfigKubeletConfigEvictionSoft(dict):
17509
+ @staticmethod
17510
+ def __key_warning(key: str):
17511
+ suggest = None
17512
+ if key == "imagefsAvailable":
17513
+ suggest = "imagefs_available"
17514
+ elif key == "imagefsInodesFree":
17515
+ suggest = "imagefs_inodes_free"
17516
+ elif key == "memoryAvailable":
17517
+ suggest = "memory_available"
17518
+ elif key == "nodefsAvailable":
17519
+ suggest = "nodefs_available"
17520
+ elif key == "nodefsInodesFree":
17521
+ suggest = "nodefs_inodes_free"
17522
+ elif key == "pidAvailable":
17523
+ suggest = "pid_available"
17524
+
17525
+ if suggest:
17526
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfigEvictionSoft. Access the value via the '{suggest}' property getter instead.")
17527
+
17528
+ def __getitem__(self, key: str) -> Any:
17529
+ NodePoolNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
17530
+ return super().__getitem__(key)
17531
+
17532
+ def get(self, key: str, default = None) -> Any:
17533
+ NodePoolNodeConfigKubeletConfigEvictionSoft.__key_warning(key)
17534
+ return super().get(key, default)
17535
+
17536
+ def __init__(__self__, *,
17537
+ imagefs_available: Optional[_builtins.str] = None,
17538
+ imagefs_inodes_free: Optional[_builtins.str] = None,
17539
+ memory_available: Optional[_builtins.str] = None,
17540
+ nodefs_available: Optional[_builtins.str] = None,
17541
+ nodefs_inodes_free: Optional[_builtins.str] = None,
17542
+ pid_available: Optional[_builtins.str] = None):
17543
+ """
17544
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available.
17545
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree.
17546
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available.
17547
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available.
17548
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree.
17549
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available.
17550
+ """
17551
+ if imagefs_available is not None:
17552
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
17553
+ if imagefs_inodes_free is not None:
17554
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
17555
+ if memory_available is not None:
17556
+ pulumi.set(__self__, "memory_available", memory_available)
17557
+ if nodefs_available is not None:
17558
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
17559
+ if nodefs_inodes_free is not None:
17560
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
17561
+ if pid_available is not None:
17562
+ pulumi.set(__self__, "pid_available", pid_available)
17563
+
17564
+ @_builtins.property
17565
+ @pulumi.getter(name="imagefsAvailable")
17566
+ def imagefs_available(self) -> Optional[_builtins.str]:
17567
+ """
17568
+ Defines percentage of soft eviction threshold for imagefs.available.
17569
+ """
17570
+ return pulumi.get(self, "imagefs_available")
17571
+
17572
+ @_builtins.property
17573
+ @pulumi.getter(name="imagefsInodesFree")
17574
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
17575
+ """
17576
+ Defines percentage of soft eviction threshold for imagefs.inodesFree.
17577
+ """
17578
+ return pulumi.get(self, "imagefs_inodes_free")
17579
+
17580
+ @_builtins.property
17581
+ @pulumi.getter(name="memoryAvailable")
17582
+ def memory_available(self) -> Optional[_builtins.str]:
17583
+ """
17584
+ Defines quantity of soft eviction threshold for memory.available.
17585
+ """
17586
+ return pulumi.get(self, "memory_available")
17587
+
17588
+ @_builtins.property
17589
+ @pulumi.getter(name="nodefsAvailable")
17590
+ def nodefs_available(self) -> Optional[_builtins.str]:
17591
+ """
17592
+ Defines percentage of soft eviction threshold for nodefs.available.
17593
+ """
17594
+ return pulumi.get(self, "nodefs_available")
17595
+
17596
+ @_builtins.property
17597
+ @pulumi.getter(name="nodefsInodesFree")
17598
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
17599
+ """
17600
+ Defines percentage of soft eviction threshold for nodefs.inodesFree.
17601
+ """
17602
+ return pulumi.get(self, "nodefs_inodes_free")
17603
+
17604
+ @_builtins.property
17605
+ @pulumi.getter(name="pidAvailable")
17606
+ def pid_available(self) -> Optional[_builtins.str]:
17607
+ """
17608
+ Defines percentage of soft eviction threshold for pid.available.
17609
+ """
17610
+ return pulumi.get(self, "pid_available")
17611
+
17612
+
17613
+ @pulumi.output_type
17614
+ class NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod(dict):
17615
+ @staticmethod
17616
+ def __key_warning(key: str):
17617
+ suggest = None
17618
+ if key == "imagefsAvailable":
17619
+ suggest = "imagefs_available"
17620
+ elif key == "imagefsInodesFree":
17621
+ suggest = "imagefs_inodes_free"
17622
+ elif key == "memoryAvailable":
17623
+ suggest = "memory_available"
17624
+ elif key == "nodefsAvailable":
17625
+ suggest = "nodefs_available"
17626
+ elif key == "nodefsInodesFree":
17627
+ suggest = "nodefs_inodes_free"
17628
+ elif key == "pidAvailable":
17629
+ suggest = "pid_available"
17630
+
17631
+ if suggest:
17632
+ pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod. Access the value via the '{suggest}' property getter instead.")
17633
+
17634
+ def __getitem__(self, key: str) -> Any:
17635
+ NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
17636
+ return super().__getitem__(key)
17637
+
17638
+ def get(self, key: str, default = None) -> Any:
17639
+ NodePoolNodeConfigKubeletConfigEvictionSoftGracePeriod.__key_warning(key)
17640
+ return super().get(key, default)
17641
+
17642
+ def __init__(__self__, *,
17643
+ imagefs_available: Optional[_builtins.str] = None,
17644
+ imagefs_inodes_free: Optional[_builtins.str] = None,
17645
+ memory_available: Optional[_builtins.str] = None,
17646
+ nodefs_available: Optional[_builtins.str] = None,
17647
+ nodefs_inodes_free: Optional[_builtins.str] = None,
17648
+ pid_available: Optional[_builtins.str] = None):
17649
+ """
17650
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold
17651
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold.
17652
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold.
17653
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold.
17654
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold.
17655
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold.
17656
+ """
17657
+ if imagefs_available is not None:
17658
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
17659
+ if imagefs_inodes_free is not None:
17660
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
17661
+ if memory_available is not None:
17662
+ pulumi.set(__self__, "memory_available", memory_available)
17663
+ if nodefs_available is not None:
17664
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
17665
+ if nodefs_inodes_free is not None:
17666
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
17667
+ if pid_available is not None:
17668
+ pulumi.set(__self__, "pid_available", pid_available)
17669
+
17670
+ @_builtins.property
17671
+ @pulumi.getter(name="imagefsAvailable")
17672
+ def imagefs_available(self) -> Optional[_builtins.str]:
17673
+ """
17674
+ Defines grace period for the imagefs.available soft eviction threshold
17675
+ """
17676
+ return pulumi.get(self, "imagefs_available")
17677
+
17678
+ @_builtins.property
17679
+ @pulumi.getter(name="imagefsInodesFree")
17680
+ def imagefs_inodes_free(self) -> Optional[_builtins.str]:
17681
+ """
17682
+ Defines grace period for the imagefs.inodesFree soft eviction threshold.
17683
+ """
17684
+ return pulumi.get(self, "imagefs_inodes_free")
17685
+
17686
+ @_builtins.property
17687
+ @pulumi.getter(name="memoryAvailable")
17688
+ def memory_available(self) -> Optional[_builtins.str]:
17689
+ """
17690
+ Defines grace period for the memory.available soft eviction threshold.
17691
+ """
17692
+ return pulumi.get(self, "memory_available")
17693
+
17694
+ @_builtins.property
17695
+ @pulumi.getter(name="nodefsAvailable")
17696
+ def nodefs_available(self) -> Optional[_builtins.str]:
17697
+ """
17698
+ Defines grace period for the nodefs.available soft eviction threshold.
17699
+ """
17700
+ return pulumi.get(self, "nodefs_available")
17701
+
17702
+ @_builtins.property
17703
+ @pulumi.getter(name="nodefsInodesFree")
17704
+ def nodefs_inodes_free(self) -> Optional[_builtins.str]:
17705
+ """
17706
+ Defines grace period for the nodefs.inodesFree soft eviction threshold.
17707
+ """
17708
+ return pulumi.get(self, "nodefs_inodes_free")
17709
+
17710
+ @_builtins.property
17711
+ @pulumi.getter(name="pidAvailable")
17712
+ def pid_available(self) -> Optional[_builtins.str]:
17713
+ """
17714
+ Defines grace period for the pid.available soft eviction threshold.
17715
+ """
17716
+ return pulumi.get(self, "pid_available")
17717
+
17718
+
16456
17719
  @pulumi.output_type
16457
17720
  class NodePoolNodeConfigLinuxNodeConfig(dict):
16458
17721
  @staticmethod
@@ -16462,6 +17725,10 @@ class NodePoolNodeConfigLinuxNodeConfig(dict):
16462
17725
  suggest = "cgroup_mode"
16463
17726
  elif key == "hugepagesConfig":
16464
17727
  suggest = "hugepages_config"
17728
+ elif key == "transparentHugepageDefrag":
17729
+ suggest = "transparent_hugepage_defrag"
17730
+ elif key == "transparentHugepageEnabled":
17731
+ suggest = "transparent_hugepage_enabled"
16465
17732
 
16466
17733
  if suggest:
16467
17734
  pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigLinuxNodeConfig. Access the value via the '{suggest}' property getter instead.")
@@ -16477,11 +17744,15 @@ class NodePoolNodeConfigLinuxNodeConfig(dict):
16477
17744
  def __init__(__self__, *,
16478
17745
  cgroup_mode: Optional[_builtins.str] = None,
16479
17746
  hugepages_config: Optional['outputs.NodePoolNodeConfigLinuxNodeConfigHugepagesConfig'] = None,
16480
- sysctls: Optional[Mapping[str, _builtins.str]] = None):
17747
+ sysctls: Optional[Mapping[str, _builtins.str]] = None,
17748
+ transparent_hugepage_defrag: Optional[_builtins.str] = None,
17749
+ transparent_hugepage_enabled: Optional[_builtins.str] = None):
16481
17750
  """
16482
17751
  :param _builtins.str cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
16483
17752
  :param 'NodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs' hugepages_config: Amounts for 2M and 1G hugepages.
16484
17753
  :param Mapping[str, _builtins.str] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
17754
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
17755
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
16485
17756
  """
16486
17757
  if cgroup_mode is not None:
16487
17758
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
@@ -16489,6 +17760,10 @@ class NodePoolNodeConfigLinuxNodeConfig(dict):
16489
17760
  pulumi.set(__self__, "hugepages_config", hugepages_config)
16490
17761
  if sysctls is not None:
16491
17762
  pulumi.set(__self__, "sysctls", sysctls)
17763
+ if transparent_hugepage_defrag is not None:
17764
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
17765
+ if transparent_hugepage_enabled is not None:
17766
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
16492
17767
 
16493
17768
  @_builtins.property
16494
17769
  @pulumi.getter(name="cgroupMode")
@@ -16514,6 +17789,22 @@ class NodePoolNodeConfigLinuxNodeConfig(dict):
16514
17789
  """
16515
17790
  return pulumi.get(self, "sysctls")
16516
17791
 
17792
+ @_builtins.property
17793
+ @pulumi.getter(name="transparentHugepageDefrag")
17794
+ def transparent_hugepage_defrag(self) -> Optional[_builtins.str]:
17795
+ """
17796
+ The Linux kernel transparent hugepage defrag setting.
17797
+ """
17798
+ return pulumi.get(self, "transparent_hugepage_defrag")
17799
+
17800
+ @_builtins.property
17801
+ @pulumi.getter(name="transparentHugepageEnabled")
17802
+ def transparent_hugepage_enabled(self) -> Optional[_builtins.str]:
17803
+ """
17804
+ The Linux kernel transparent hugepage setting.
17805
+ """
17806
+ return pulumi.get(self, "transparent_hugepage_enabled")
17807
+
16517
17808
 
16518
17809
  @pulumi.output_type
16519
17810
  class NodePoolNodeConfigLinuxNodeConfigHugepagesConfig(dict):
@@ -16810,6 +18101,8 @@ class NodePoolNodeConfigSoleTenantConfig(dict):
16810
18101
  suggest = None
16811
18102
  if key == "nodeAffinities":
16812
18103
  suggest = "node_affinities"
18104
+ elif key == "minNodeCpus":
18105
+ suggest = "min_node_cpus"
16813
18106
 
16814
18107
  if suggest:
16815
18108
  pulumi.log.warn(f"Key '{key}' not found in NodePoolNodeConfigSoleTenantConfig. Access the value via the '{suggest}' property getter instead.")
@@ -16823,11 +18116,15 @@ class NodePoolNodeConfigSoleTenantConfig(dict):
16823
18116
  return super().get(key, default)
16824
18117
 
16825
18118
  def __init__(__self__, *,
16826
- node_affinities: Sequence['outputs.NodePoolNodeConfigSoleTenantConfigNodeAffinity']):
18119
+ node_affinities: Sequence['outputs.NodePoolNodeConfigSoleTenantConfigNodeAffinity'],
18120
+ min_node_cpus: Optional[_builtins.int] = None):
16827
18121
  """
16828
18122
  :param Sequence['NodePoolNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
18123
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
16829
18124
  """
16830
18125
  pulumi.set(__self__, "node_affinities", node_affinities)
18126
+ if min_node_cpus is not None:
18127
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
16831
18128
 
16832
18129
  @_builtins.property
16833
18130
  @pulumi.getter(name="nodeAffinities")
@@ -16837,6 +18134,14 @@ class NodePoolNodeConfigSoleTenantConfig(dict):
16837
18134
  """
16838
18135
  return pulumi.get(self, "node_affinities")
16839
18136
 
18137
+ @_builtins.property
18138
+ @pulumi.getter(name="minNodeCpus")
18139
+ def min_node_cpus(self) -> Optional[_builtins.int]:
18140
+ """
18141
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
18142
+ """
18143
+ return pulumi.get(self, "min_node_cpus")
18144
+
16840
18145
 
16841
18146
  @pulumi.output_type
16842
18147
  class NodePoolNodeConfigSoleTenantConfigNodeAffinity(dict):
@@ -20272,11 +21577,16 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
20272
21577
  cpu_cfs_quota: _builtins.bool,
20273
21578
  cpu_cfs_quota_period: _builtins.str,
20274
21579
  cpu_manager_policy: _builtins.str,
21580
+ eviction_max_pod_grace_period_seconds: _builtins.int,
21581
+ eviction_minimum_reclaims: Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult'],
21582
+ eviction_soft_grace_periods: Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult'],
21583
+ eviction_softs: Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionSoftResult'],
20275
21584
  image_gc_high_threshold_percent: _builtins.int,
20276
21585
  image_gc_low_threshold_percent: _builtins.int,
20277
21586
  image_maximum_gc_age: _builtins.str,
20278
21587
  image_minimum_gc_age: _builtins.str,
20279
21588
  insecure_kubelet_readonly_port_enabled: _builtins.str,
21589
+ max_parallel_image_pulls: _builtins.int,
20280
21590
  pod_pids_limit: _builtins.int,
20281
21591
  single_process_oom_kill: _builtins.bool):
20282
21592
  """
@@ -20286,11 +21596,16 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
20286
21596
  :param _builtins.bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
20287
21597
  :param _builtins.str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
20288
21598
  :param _builtins.str cpu_manager_policy: Control the CPU management policy on the node.
21599
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
21600
+ :param Sequence['GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimArgs'] eviction_minimum_reclaims: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
21601
+ :param Sequence['GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodArgs'] eviction_soft_grace_periods: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
21602
+ :param Sequence['GetClusterNodeConfigKubeletConfigEvictionSoftArgs'] eviction_softs: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
20289
21603
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run.
20290
21604
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
20291
21605
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected.
20292
21606
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
20293
21607
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
21608
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
20294
21609
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
20295
21610
  :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer.
20296
21611
  """
@@ -20300,11 +21615,16 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
20300
21615
  pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
20301
21616
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
20302
21617
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
21618
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
21619
+ pulumi.set(__self__, "eviction_minimum_reclaims", eviction_minimum_reclaims)
21620
+ pulumi.set(__self__, "eviction_soft_grace_periods", eviction_soft_grace_periods)
21621
+ pulumi.set(__self__, "eviction_softs", eviction_softs)
20303
21622
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
20304
21623
  pulumi.set(__self__, "image_gc_low_threshold_percent", image_gc_low_threshold_percent)
20305
21624
  pulumi.set(__self__, "image_maximum_gc_age", image_maximum_gc_age)
20306
21625
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
20307
21626
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
21627
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
20308
21628
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
20309
21629
  pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
20310
21630
 
@@ -20356,6 +21676,38 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
20356
21676
  """
20357
21677
  return pulumi.get(self, "cpu_manager_policy")
20358
21678
 
21679
+ @_builtins.property
21680
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
21681
+ def eviction_max_pod_grace_period_seconds(self) -> _builtins.int:
21682
+ """
21683
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
21684
+ """
21685
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
21686
+
21687
+ @_builtins.property
21688
+ @pulumi.getter(name="evictionMinimumReclaims")
21689
+ def eviction_minimum_reclaims(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult']:
21690
+ """
21691
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
21692
+ """
21693
+ return pulumi.get(self, "eviction_minimum_reclaims")
21694
+
21695
+ @_builtins.property
21696
+ @pulumi.getter(name="evictionSoftGracePeriods")
21697
+ def eviction_soft_grace_periods(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult']:
21698
+ """
21699
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
21700
+ """
21701
+ return pulumi.get(self, "eviction_soft_grace_periods")
21702
+
21703
+ @_builtins.property
21704
+ @pulumi.getter(name="evictionSofts")
21705
+ def eviction_softs(self) -> Sequence['outputs.GetClusterNodeConfigKubeletConfigEvictionSoftResult']:
21706
+ """
21707
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
21708
+ """
21709
+ return pulumi.get(self, "eviction_softs")
21710
+
20359
21711
  @_builtins.property
20360
21712
  @pulumi.getter(name="imageGcHighThresholdPercent")
20361
21713
  def image_gc_high_threshold_percent(self) -> _builtins.int:
@@ -20396,6 +21748,14 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
20396
21748
  """
20397
21749
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
20398
21750
 
21751
+ @_builtins.property
21752
+ @pulumi.getter(name="maxParallelImagePulls")
21753
+ def max_parallel_image_pulls(self) -> _builtins.int:
21754
+ """
21755
+ Set the maximum number of image pulls in parallel.
21756
+ """
21757
+ return pulumi.get(self, "max_parallel_image_pulls")
21758
+
20399
21759
  @_builtins.property
20400
21760
  @pulumi.getter(name="podPidsLimit")
20401
21761
  def pod_pids_limit(self) -> _builtins.int:
@@ -20413,20 +21773,245 @@ class GetClusterNodeConfigKubeletConfigResult(dict):
20413
21773
  return pulumi.get(self, "single_process_oom_kill")
20414
21774
 
20415
21775
 
21776
+ @pulumi.output_type
21777
+ class GetClusterNodeConfigKubeletConfigEvictionMinimumReclaimResult(dict):
21778
+ def __init__(__self__, *,
21779
+ imagefs_available: _builtins.str,
21780
+ imagefs_inodes_free: _builtins.str,
21781
+ memory_available: _builtins.str,
21782
+ nodefs_available: _builtins.str,
21783
+ nodefs_inodes_free: _builtins.str,
21784
+ pid_available: _builtins.str):
21785
+ """
21786
+ :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available.
21787
+ :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree.
21788
+ :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available.
21789
+ :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available.
21790
+ :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree.
21791
+ :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available.
21792
+ """
21793
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
21794
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
21795
+ pulumi.set(__self__, "memory_available", memory_available)
21796
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
21797
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
21798
+ pulumi.set(__self__, "pid_available", pid_available)
21799
+
21800
+ @_builtins.property
21801
+ @pulumi.getter(name="imagefsAvailable")
21802
+ def imagefs_available(self) -> _builtins.str:
21803
+ """
21804
+ Defines percentage of minimum reclaim for imagefs.available.
21805
+ """
21806
+ return pulumi.get(self, "imagefs_available")
21807
+
21808
+ @_builtins.property
21809
+ @pulumi.getter(name="imagefsInodesFree")
21810
+ def imagefs_inodes_free(self) -> _builtins.str:
21811
+ """
21812
+ Defines percentage of minimum reclaim for imagefs.inodesFree.
21813
+ """
21814
+ return pulumi.get(self, "imagefs_inodes_free")
21815
+
21816
+ @_builtins.property
21817
+ @pulumi.getter(name="memoryAvailable")
21818
+ def memory_available(self) -> _builtins.str:
21819
+ """
21820
+ Defines percentage of minimum reclaim for memory.available.
21821
+ """
21822
+ return pulumi.get(self, "memory_available")
21823
+
21824
+ @_builtins.property
21825
+ @pulumi.getter(name="nodefsAvailable")
21826
+ def nodefs_available(self) -> _builtins.str:
21827
+ """
21828
+ Defines percentage of minimum reclaim for nodefs.available.
21829
+ """
21830
+ return pulumi.get(self, "nodefs_available")
21831
+
21832
+ @_builtins.property
21833
+ @pulumi.getter(name="nodefsInodesFree")
21834
+ def nodefs_inodes_free(self) -> _builtins.str:
21835
+ """
21836
+ Defines percentage of minimum reclaim for nodefs.inodesFree.
21837
+ """
21838
+ return pulumi.get(self, "nodefs_inodes_free")
21839
+
21840
+ @_builtins.property
21841
+ @pulumi.getter(name="pidAvailable")
21842
+ def pid_available(self) -> _builtins.str:
21843
+ """
21844
+ Defines percentage of minimum reclaim for pid.available.
21845
+ """
21846
+ return pulumi.get(self, "pid_available")
21847
+
21848
+
21849
+ @pulumi.output_type
21850
+ class GetClusterNodeConfigKubeletConfigEvictionSoftResult(dict):
21851
+ def __init__(__self__, *,
21852
+ imagefs_available: _builtins.str,
21853
+ imagefs_inodes_free: _builtins.str,
21854
+ memory_available: _builtins.str,
21855
+ nodefs_available: _builtins.str,
21856
+ nodefs_inodes_free: _builtins.str,
21857
+ pid_available: _builtins.str):
21858
+ """
21859
+ :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available.
21860
+ :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree.
21861
+ :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available.
21862
+ :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available.
21863
+ :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree.
21864
+ :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available.
21865
+ """
21866
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
21867
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
21868
+ pulumi.set(__self__, "memory_available", memory_available)
21869
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
21870
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
21871
+ pulumi.set(__self__, "pid_available", pid_available)
21872
+
21873
+ @_builtins.property
21874
+ @pulumi.getter(name="imagefsAvailable")
21875
+ def imagefs_available(self) -> _builtins.str:
21876
+ """
21877
+ Defines percentage of soft eviction threshold for imagefs.available.
21878
+ """
21879
+ return pulumi.get(self, "imagefs_available")
21880
+
21881
+ @_builtins.property
21882
+ @pulumi.getter(name="imagefsInodesFree")
21883
+ def imagefs_inodes_free(self) -> _builtins.str:
21884
+ """
21885
+ Defines percentage of soft eviction threshold for imagefs.inodesFree.
21886
+ """
21887
+ return pulumi.get(self, "imagefs_inodes_free")
21888
+
21889
+ @_builtins.property
21890
+ @pulumi.getter(name="memoryAvailable")
21891
+ def memory_available(self) -> _builtins.str:
21892
+ """
21893
+ Defines quantity of soft eviction threshold for memory.available.
21894
+ """
21895
+ return pulumi.get(self, "memory_available")
21896
+
21897
+ @_builtins.property
21898
+ @pulumi.getter(name="nodefsAvailable")
21899
+ def nodefs_available(self) -> _builtins.str:
21900
+ """
21901
+ Defines percentage of soft eviction threshold for nodefs.available.
21902
+ """
21903
+ return pulumi.get(self, "nodefs_available")
21904
+
21905
+ @_builtins.property
21906
+ @pulumi.getter(name="nodefsInodesFree")
21907
+ def nodefs_inodes_free(self) -> _builtins.str:
21908
+ """
21909
+ Defines percentage of soft eviction threshold for nodefs.inodesFree.
21910
+ """
21911
+ return pulumi.get(self, "nodefs_inodes_free")
21912
+
21913
+ @_builtins.property
21914
+ @pulumi.getter(name="pidAvailable")
21915
+ def pid_available(self) -> _builtins.str:
21916
+ """
21917
+ Defines percentage of soft eviction threshold for pid.available.
21918
+ """
21919
+ return pulumi.get(self, "pid_available")
21920
+
21921
+
21922
+ @pulumi.output_type
21923
+ class GetClusterNodeConfigKubeletConfigEvictionSoftGracePeriodResult(dict):
21924
+ def __init__(__self__, *,
21925
+ imagefs_available: _builtins.str,
21926
+ imagefs_inodes_free: _builtins.str,
21927
+ memory_available: _builtins.str,
21928
+ nodefs_available: _builtins.str,
21929
+ nodefs_inodes_free: _builtins.str,
21930
+ pid_available: _builtins.str):
21931
+ """
21932
+ :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold
21933
+ :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold.
21934
+ :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold.
21935
+ :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold.
21936
+ :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold.
21937
+ :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold.
21938
+ """
21939
+ pulumi.set(__self__, "imagefs_available", imagefs_available)
21940
+ pulumi.set(__self__, "imagefs_inodes_free", imagefs_inodes_free)
21941
+ pulumi.set(__self__, "memory_available", memory_available)
21942
+ pulumi.set(__self__, "nodefs_available", nodefs_available)
21943
+ pulumi.set(__self__, "nodefs_inodes_free", nodefs_inodes_free)
21944
+ pulumi.set(__self__, "pid_available", pid_available)
21945
+
21946
+ @_builtins.property
21947
+ @pulumi.getter(name="imagefsAvailable")
21948
+ def imagefs_available(self) -> _builtins.str:
21949
+ """
21950
+ Defines grace period for the imagefs.available soft eviction threshold
21951
+ """
21952
+ return pulumi.get(self, "imagefs_available")
21953
+
21954
+ @_builtins.property
21955
+ @pulumi.getter(name="imagefsInodesFree")
21956
+ def imagefs_inodes_free(self) -> _builtins.str:
21957
+ """
21958
+ Defines grace period for the imagefs.inodesFree soft eviction threshold.
21959
+ """
21960
+ return pulumi.get(self, "imagefs_inodes_free")
21961
+
21962
+ @_builtins.property
21963
+ @pulumi.getter(name="memoryAvailable")
21964
+ def memory_available(self) -> _builtins.str:
21965
+ """
21966
+ Defines grace period for the memory.available soft eviction threshold.
21967
+ """
21968
+ return pulumi.get(self, "memory_available")
21969
+
21970
+ @_builtins.property
21971
+ @pulumi.getter(name="nodefsAvailable")
21972
+ def nodefs_available(self) -> _builtins.str:
21973
+ """
21974
+ Defines grace period for the nodefs.available soft eviction threshold.
21975
+ """
21976
+ return pulumi.get(self, "nodefs_available")
21977
+
21978
+ @_builtins.property
21979
+ @pulumi.getter(name="nodefsInodesFree")
21980
+ def nodefs_inodes_free(self) -> _builtins.str:
21981
+ """
21982
+ Defines grace period for the nodefs.inodesFree soft eviction threshold.
21983
+ """
21984
+ return pulumi.get(self, "nodefs_inodes_free")
21985
+
21986
+ @_builtins.property
21987
+ @pulumi.getter(name="pidAvailable")
21988
+ def pid_available(self) -> _builtins.str:
21989
+ """
21990
+ Defines grace period for the pid.available soft eviction threshold.
21991
+ """
21992
+ return pulumi.get(self, "pid_available")
21993
+
21994
+
20416
21995
  @pulumi.output_type
20417
21996
  class GetClusterNodeConfigLinuxNodeConfigResult(dict):
20418
21997
  def __init__(__self__, *,
20419
21998
  cgroup_mode: _builtins.str,
20420
21999
  hugepages_configs: Sequence['outputs.GetClusterNodeConfigLinuxNodeConfigHugepagesConfigResult'],
20421
- sysctls: Mapping[str, _builtins.str]):
22000
+ sysctls: Mapping[str, _builtins.str],
22001
+ transparent_hugepage_defrag: _builtins.str,
22002
+ transparent_hugepage_enabled: _builtins.str):
20422
22003
  """
20423
22004
  :param _builtins.str cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
20424
22005
  :param Sequence['GetClusterNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_configs: Amounts for 2M and 1G hugepages.
20425
22006
  :param Mapping[str, _builtins.str] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
22007
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
22008
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
20426
22009
  """
20427
22010
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
20428
22011
  pulumi.set(__self__, "hugepages_configs", hugepages_configs)
20429
22012
  pulumi.set(__self__, "sysctls", sysctls)
22013
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
22014
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
20430
22015
 
20431
22016
  @_builtins.property
20432
22017
  @pulumi.getter(name="cgroupMode")
@@ -20452,6 +22037,22 @@ class GetClusterNodeConfigLinuxNodeConfigResult(dict):
20452
22037
  """
20453
22038
  return pulumi.get(self, "sysctls")
20454
22039
 
22040
+ @_builtins.property
22041
+ @pulumi.getter(name="transparentHugepageDefrag")
22042
+ def transparent_hugepage_defrag(self) -> _builtins.str:
22043
+ """
22044
+ The Linux kernel transparent hugepage defrag setting.
22045
+ """
22046
+ return pulumi.get(self, "transparent_hugepage_defrag")
22047
+
22048
+ @_builtins.property
22049
+ @pulumi.getter(name="transparentHugepageEnabled")
22050
+ def transparent_hugepage_enabled(self) -> _builtins.str:
22051
+ """
22052
+ The Linux kernel transparent hugepage setting.
22053
+ """
22054
+ return pulumi.get(self, "transparent_hugepage_enabled")
22055
+
20455
22056
 
20456
22057
  @pulumi.output_type
20457
22058
  class GetClusterNodeConfigLinuxNodeConfigHugepagesConfigResult(dict):
@@ -20619,12 +22220,23 @@ class GetClusterNodeConfigShieldedInstanceConfigResult(dict):
20619
22220
  @pulumi.output_type
20620
22221
  class GetClusterNodeConfigSoleTenantConfigResult(dict):
20621
22222
  def __init__(__self__, *,
22223
+ min_node_cpus: _builtins.int,
20622
22224
  node_affinities: Sequence['outputs.GetClusterNodeConfigSoleTenantConfigNodeAffinityResult']):
20623
22225
  """
22226
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
20624
22227
  :param Sequence['GetClusterNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
20625
22228
  """
22229
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
20626
22230
  pulumi.set(__self__, "node_affinities", node_affinities)
20627
22231
 
22232
+ @_builtins.property
22233
+ @pulumi.getter(name="minNodeCpus")
22234
+ def min_node_cpus(self) -> _builtins.int:
22235
+ """
22236
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
22237
+ """
22238
+ return pulumi.get(self, "min_node_cpus")
22239
+
20628
22240
  @_builtins.property
20629
22241
  @pulumi.getter(name="nodeAffinities")
20630
22242
  def node_affinities(self) -> Sequence['outputs.GetClusterNodeConfigSoleTenantConfigNodeAffinityResult']:
@@ -22515,11 +24127,16 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
22515
24127
  cpu_cfs_quota: _builtins.bool,
22516
24128
  cpu_cfs_quota_period: _builtins.str,
22517
24129
  cpu_manager_policy: _builtins.str,
24130
+ eviction_max_pod_grace_period_seconds: _builtins.int,
24131
+ eviction_minimum_reclaims: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult'],
24132
+ eviction_soft_grace_periods: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult'],
24133
+ eviction_softs: Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult'],
22518
24134
  image_gc_high_threshold_percent: _builtins.int,
22519
24135
  image_gc_low_threshold_percent: _builtins.int,
22520
24136
  image_maximum_gc_age: _builtins.str,
22521
24137
  image_minimum_gc_age: _builtins.str,
22522
24138
  insecure_kubelet_readonly_port_enabled: _builtins.str,
24139
+ max_parallel_image_pulls: _builtins.int,
22523
24140
  pod_pids_limit: _builtins.int,
22524
24141
  single_process_oom_kill: _builtins.bool):
22525
24142
  """
@@ -22529,11 +24146,16 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
22529
24146
  :param _builtins.bool cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
22530
24147
  :param _builtins.str cpu_cfs_quota_period: Set the CPU CFS quota period value 'cpu.cfs_period_us'.
22531
24148
  :param _builtins.str cpu_manager_policy: Control the CPU management policy on the node.
24149
+ :param _builtins.int eviction_max_pod_grace_period_seconds: Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
24150
+ :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimArgs'] eviction_minimum_reclaims: Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
24151
+ :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodArgs'] eviction_soft_grace_periods: Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
24152
+ :param Sequence['GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftArgs'] eviction_softs: Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
22532
24153
  :param _builtins.int image_gc_high_threshold_percent: Defines the percent of disk usage after which image garbage collection is always run.
22533
24154
  :param _builtins.int image_gc_low_threshold_percent: Defines the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to.
22534
24155
  :param _builtins.str image_maximum_gc_age: Defines the maximum age an image can be unused before it is garbage collected.
22535
24156
  :param _builtins.str image_minimum_gc_age: Defines the minimum age for an unused image before it is garbage collected.
22536
24157
  :param _builtins.str insecure_kubelet_readonly_port_enabled: Controls whether the kubelet read-only port is enabled. It is strongly recommended to set this to `FALSE`. Possible values: `TRUE`, `FALSE`.
24158
+ :param _builtins.int max_parallel_image_pulls: Set the maximum number of image pulls in parallel.
22537
24159
  :param _builtins.int pod_pids_limit: Controls the maximum number of processes allowed to run in a pod.
22538
24160
  :param _builtins.bool single_process_oom_kill: Defines whether to enable single process OOM killer.
22539
24161
  """
@@ -22543,11 +24165,16 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
22543
24165
  pulumi.set(__self__, "cpu_cfs_quota", cpu_cfs_quota)
22544
24166
  pulumi.set(__self__, "cpu_cfs_quota_period", cpu_cfs_quota_period)
22545
24167
  pulumi.set(__self__, "cpu_manager_policy", cpu_manager_policy)
24168
+ pulumi.set(__self__, "eviction_max_pod_grace_period_seconds", eviction_max_pod_grace_period_seconds)
24169
+ pulumi.set(__self__, "eviction_minimum_reclaims", eviction_minimum_reclaims)
24170
+ pulumi.set(__self__, "eviction_soft_grace_periods", eviction_soft_grace_periods)
24171
+ pulumi.set(__self__, "eviction_softs", eviction_softs)
22546
24172
  pulumi.set(__self__, "image_gc_high_threshold_percent", image_gc_high_threshold_percent)
22547
24173
  pulumi.set(__self__, "image_gc_low_threshold_percent", image_gc_low_threshold_percent)
22548
24174
  pulumi.set(__self__, "image_maximum_gc_age", image_maximum_gc_age)
22549
24175
  pulumi.set(__self__, "image_minimum_gc_age", image_minimum_gc_age)
22550
24176
  pulumi.set(__self__, "insecure_kubelet_readonly_port_enabled", insecure_kubelet_readonly_port_enabled)
24177
+ pulumi.set(__self__, "max_parallel_image_pulls", max_parallel_image_pulls)
22551
24178
  pulumi.set(__self__, "pod_pids_limit", pod_pids_limit)
22552
24179
  pulumi.set(__self__, "single_process_oom_kill", single_process_oom_kill)
22553
24180
 
@@ -22599,6 +24226,38 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
22599
24226
  """
22600
24227
  return pulumi.get(self, "cpu_manager_policy")
22601
24228
 
24229
+ @_builtins.property
24230
+ @pulumi.getter(name="evictionMaxPodGracePeriodSeconds")
24231
+ def eviction_max_pod_grace_period_seconds(self) -> _builtins.int:
24232
+ """
24233
+ Defines the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
24234
+ """
24235
+ return pulumi.get(self, "eviction_max_pod_grace_period_seconds")
24236
+
24237
+ @_builtins.property
24238
+ @pulumi.getter(name="evictionMinimumReclaims")
24239
+ def eviction_minimum_reclaims(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult']:
24240
+ """
24241
+ Defines a map of signal names to percentage that defines minimum reclaims. It describes the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction.
24242
+ """
24243
+ return pulumi.get(self, "eviction_minimum_reclaims")
24244
+
24245
+ @_builtins.property
24246
+ @pulumi.getter(name="evictionSoftGracePeriods")
24247
+ def eviction_soft_grace_periods(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult']:
24248
+ """
24249
+ Defines a map of signal names to durations that defines grace periods for soft eviction thresholds. Each soft eviction threshold must have a corresponding grace period.
24250
+ """
24251
+ return pulumi.get(self, "eviction_soft_grace_periods")
24252
+
24253
+ @_builtins.property
24254
+ @pulumi.getter(name="evictionSofts")
24255
+ def eviction_softs(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult']:
24256
+ """
24257
+ Defines a map of signal names to quantities or percentage that defines soft eviction thresholds.
24258
+ """
24259
+ return pulumi.get(self, "eviction_softs")
24260
+
22602
24261
  @_builtins.property
22603
24262
  @pulumi.getter(name="imageGcHighThresholdPercent")
22604
24263
  def image_gc_high_threshold_percent(self) -> _builtins.int:
@@ -22639,6 +24298,14 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
22639
24298
  """
22640
24299
  return pulumi.get(self, "insecure_kubelet_readonly_port_enabled")
22641
24300
 
24301
+ @_builtins.property
24302
+ @pulumi.getter(name="maxParallelImagePulls")
24303
+ def max_parallel_image_pulls(self) -> _builtins.int:
24304
+ """
24305
+ Set the maximum number of image pulls in parallel.
24306
+ """
24307
+ return pulumi.get(self, "max_parallel_image_pulls")
24308
+
22642
24309
  @_builtins.property
22643
24310
  @pulumi.getter(name="podPidsLimit")
22644
24311
  def pod_pids_limit(self) -> _builtins.int:
@@ -22656,20 +24323,245 @@ class GetClusterNodePoolNodeConfigKubeletConfigResult(dict):
22656
24323
  return pulumi.get(self, "single_process_oom_kill")
22657
24324
 
22658
24325
 
24326
@pulumi.output_type
class GetClusterNodePoolNodeConfigKubeletConfigEvictionMinimumReclaimResult(dict):
    def __init__(__self__, *,
                 imagefs_available: _builtins.str,
                 imagefs_inodes_free: _builtins.str,
                 memory_available: _builtins.str,
                 nodefs_available: _builtins.str,
                 nodefs_inodes_free: _builtins.str,
                 pid_available: _builtins.str):
        """
        Minimum amounts, keyed by eviction signal, that the kubelet reclaims
        when it performs a pod eviction.

        :param _builtins.str imagefs_available: Defines percentage of minimum reclaim for imagefs.available.
        :param _builtins.str imagefs_inodes_free: Defines percentage of minimum reclaim for imagefs.inodesFree.
        :param _builtins.str memory_available: Defines percentage of minimum reclaim for memory.available.
        :param _builtins.str nodefs_available: Defines percentage of minimum reclaim for nodefs.available.
        :param _builtins.str nodefs_inodes_free: Defines percentage of minimum reclaim for nodefs.inodesFree.
        :param _builtins.str pid_available: Defines percentage of minimum reclaim for pid.available.
        """
        # Register every field under its snake_case key; the @pulumi.getter
        # properties below read the values back through pulumi.get.
        for _key, _value in (
            ("imagefs_available", imagefs_available),
            ("imagefs_inodes_free", imagefs_inodes_free),
            ("memory_available", memory_available),
            ("nodefs_available", nodefs_available),
            ("nodefs_inodes_free", nodefs_inodes_free),
            ("pid_available", pid_available),
        ):
            pulumi.set(__self__, _key, _value)

    @_builtins.property
    @pulumi.getter(name="imagefsAvailable")
    def imagefs_available(self) -> _builtins.str:
        """Defines percentage of minimum reclaim for imagefs.available."""
        return pulumi.get(self, "imagefs_available")

    @_builtins.property
    @pulumi.getter(name="imagefsInodesFree")
    def imagefs_inodes_free(self) -> _builtins.str:
        """Defines percentage of minimum reclaim for imagefs.inodesFree."""
        return pulumi.get(self, "imagefs_inodes_free")

    @_builtins.property
    @pulumi.getter(name="memoryAvailable")
    def memory_available(self) -> _builtins.str:
        """Defines percentage of minimum reclaim for memory.available."""
        return pulumi.get(self, "memory_available")

    @_builtins.property
    @pulumi.getter(name="nodefsAvailable")
    def nodefs_available(self) -> _builtins.str:
        """Defines percentage of minimum reclaim for nodefs.available."""
        return pulumi.get(self, "nodefs_available")

    @_builtins.property
    @pulumi.getter(name="nodefsInodesFree")
    def nodefs_inodes_free(self) -> _builtins.str:
        """Defines percentage of minimum reclaim for nodefs.inodesFree."""
        return pulumi.get(self, "nodefs_inodes_free")

    @_builtins.property
    @pulumi.getter(name="pidAvailable")
    def pid_available(self) -> _builtins.str:
        """Defines percentage of minimum reclaim for pid.available."""
        return pulumi.get(self, "pid_available")

24399
@pulumi.output_type
class GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftResult(dict):
    def __init__(__self__, *,
                 imagefs_available: _builtins.str,
                 imagefs_inodes_free: _builtins.str,
                 memory_available: _builtins.str,
                 nodefs_available: _builtins.str,
                 nodefs_inodes_free: _builtins.str,
                 pid_available: _builtins.str):
        """
        Soft eviction thresholds, keyed by eviction signal, expressed as
        quantities or percentages.

        :param _builtins.str imagefs_available: Defines percentage of soft eviction threshold for imagefs.available.
        :param _builtins.str imagefs_inodes_free: Defines percentage of soft eviction threshold for imagefs.inodesFree.
        :param _builtins.str memory_available: Defines quantity of soft eviction threshold for memory.available.
        :param _builtins.str nodefs_available: Defines percentage of soft eviction threshold for nodefs.available.
        :param _builtins.str nodefs_inodes_free: Defines percentage of soft eviction threshold for nodefs.inodesFree.
        :param _builtins.str pid_available: Defines percentage of soft eviction threshold for pid.available.
        """
        # Register every field under its snake_case key; the @pulumi.getter
        # properties below read the values back through pulumi.get.
        for _key, _value in (
            ("imagefs_available", imagefs_available),
            ("imagefs_inodes_free", imagefs_inodes_free),
            ("memory_available", memory_available),
            ("nodefs_available", nodefs_available),
            ("nodefs_inodes_free", nodefs_inodes_free),
            ("pid_available", pid_available),
        ):
            pulumi.set(__self__, _key, _value)

    @_builtins.property
    @pulumi.getter(name="imagefsAvailable")
    def imagefs_available(self) -> _builtins.str:
        """Defines percentage of soft eviction threshold for imagefs.available."""
        return pulumi.get(self, "imagefs_available")

    @_builtins.property
    @pulumi.getter(name="imagefsInodesFree")
    def imagefs_inodes_free(self) -> _builtins.str:
        """Defines percentage of soft eviction threshold for imagefs.inodesFree."""
        return pulumi.get(self, "imagefs_inodes_free")

    @_builtins.property
    @pulumi.getter(name="memoryAvailable")
    def memory_available(self) -> _builtins.str:
        """Defines quantity of soft eviction threshold for memory.available."""
        return pulumi.get(self, "memory_available")

    @_builtins.property
    @pulumi.getter(name="nodefsAvailable")
    def nodefs_available(self) -> _builtins.str:
        """Defines percentage of soft eviction threshold for nodefs.available."""
        return pulumi.get(self, "nodefs_available")

    @_builtins.property
    @pulumi.getter(name="nodefsInodesFree")
    def nodefs_inodes_free(self) -> _builtins.str:
        """Defines percentage of soft eviction threshold for nodefs.inodesFree."""
        return pulumi.get(self, "nodefs_inodes_free")

    @_builtins.property
    @pulumi.getter(name="pidAvailable")
    def pid_available(self) -> _builtins.str:
        """Defines percentage of soft eviction threshold for pid.available."""
        return pulumi.get(self, "pid_available")

24472
@pulumi.output_type
class GetClusterNodePoolNodeConfigKubeletConfigEvictionSoftGracePeriodResult(dict):
    def __init__(__self__, *,
                 imagefs_available: _builtins.str,
                 imagefs_inodes_free: _builtins.str,
                 memory_available: _builtins.str,
                 nodefs_available: _builtins.str,
                 nodefs_inodes_free: _builtins.str,
                 pid_available: _builtins.str):
        """
        Grace periods, keyed by eviction signal, for the corresponding soft
        eviction thresholds.

        :param _builtins.str imagefs_available: Defines grace period for the imagefs.available soft eviction threshold
        :param _builtins.str imagefs_inodes_free: Defines grace period for the imagefs.inodesFree soft eviction threshold.
        :param _builtins.str memory_available: Defines grace period for the memory.available soft eviction threshold.
        :param _builtins.str nodefs_available: Defines grace period for the nodefs.available soft eviction threshold.
        :param _builtins.str nodefs_inodes_free: Defines grace period for the nodefs.inodesFree soft eviction threshold.
        :param _builtins.str pid_available: Defines grace period for the pid.available soft eviction threshold.
        """
        # Register every field under its snake_case key; the @pulumi.getter
        # properties below read the values back through pulumi.get.
        for _key, _value in (
            ("imagefs_available", imagefs_available),
            ("imagefs_inodes_free", imagefs_inodes_free),
            ("memory_available", memory_available),
            ("nodefs_available", nodefs_available),
            ("nodefs_inodes_free", nodefs_inodes_free),
            ("pid_available", pid_available),
        ):
            pulumi.set(__self__, _key, _value)

    @_builtins.property
    @pulumi.getter(name="imagefsAvailable")
    def imagefs_available(self) -> _builtins.str:
        """Defines grace period for the imagefs.available soft eviction threshold"""
        return pulumi.get(self, "imagefs_available")

    @_builtins.property
    @pulumi.getter(name="imagefsInodesFree")
    def imagefs_inodes_free(self) -> _builtins.str:
        """Defines grace period for the imagefs.inodesFree soft eviction threshold."""
        return pulumi.get(self, "imagefs_inodes_free")

    @_builtins.property
    @pulumi.getter(name="memoryAvailable")
    def memory_available(self) -> _builtins.str:
        """Defines grace period for the memory.available soft eviction threshold."""
        return pulumi.get(self, "memory_available")

    @_builtins.property
    @pulumi.getter(name="nodefsAvailable")
    def nodefs_available(self) -> _builtins.str:
        """Defines grace period for the nodefs.available soft eviction threshold."""
        return pulumi.get(self, "nodefs_available")

    @_builtins.property
    @pulumi.getter(name="nodefsInodesFree")
    def nodefs_inodes_free(self) -> _builtins.str:
        """Defines grace period for the nodefs.inodesFree soft eviction threshold."""
        return pulumi.get(self, "nodefs_inodes_free")

    @_builtins.property
    @pulumi.getter(name="pidAvailable")
    def pid_available(self) -> _builtins.str:
        """Defines grace period for the pid.available soft eviction threshold."""
        return pulumi.get(self, "pid_available")

22659
24545
  @pulumi.output_type
22660
24546
  class GetClusterNodePoolNodeConfigLinuxNodeConfigResult(dict):
22661
24547
  def __init__(__self__, *,
22662
24548
  cgroup_mode: _builtins.str,
22663
24549
  hugepages_configs: Sequence['outputs.GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigResult'],
22664
- sysctls: Mapping[str, _builtins.str]):
24550
+ sysctls: Mapping[str, _builtins.str],
24551
+ transparent_hugepage_defrag: _builtins.str,
24552
+ transparent_hugepage_enabled: _builtins.str):
22665
24553
  """
22666
24554
  :param _builtins.str cgroup_mode: cgroupMode specifies the cgroup mode to be used on the node.
22667
24555
  :param Sequence['GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigArgs'] hugepages_configs: Amounts for 2M and 1G hugepages.
22668
24556
  :param Mapping[str, _builtins.str] sysctls: The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.
24557
+ :param _builtins.str transparent_hugepage_defrag: The Linux kernel transparent hugepage defrag setting.
24558
+ :param _builtins.str transparent_hugepage_enabled: The Linux kernel transparent hugepage setting.
22669
24559
  """
22670
24560
  pulumi.set(__self__, "cgroup_mode", cgroup_mode)
22671
24561
  pulumi.set(__self__, "hugepages_configs", hugepages_configs)
22672
24562
  pulumi.set(__self__, "sysctls", sysctls)
24563
+ pulumi.set(__self__, "transparent_hugepage_defrag", transparent_hugepage_defrag)
24564
+ pulumi.set(__self__, "transparent_hugepage_enabled", transparent_hugepage_enabled)
22673
24565
 
22674
24566
  @_builtins.property
22675
24567
  @pulumi.getter(name="cgroupMode")
@@ -22695,6 +24587,22 @@ class GetClusterNodePoolNodeConfigLinuxNodeConfigResult(dict):
22695
24587
  """
22696
24588
  return pulumi.get(self, "sysctls")
22697
24589
 
24590
+ @_builtins.property
24591
+ @pulumi.getter(name="transparentHugepageDefrag")
24592
+ def transparent_hugepage_defrag(self) -> _builtins.str:
24593
+ """
24594
+ The Linux kernel transparent hugepage defrag setting.
24595
+ """
24596
+ return pulumi.get(self, "transparent_hugepage_defrag")
24597
+
24598
+ @_builtins.property
24599
+ @pulumi.getter(name="transparentHugepageEnabled")
24600
+ def transparent_hugepage_enabled(self) -> _builtins.str:
24601
+ """
24602
+ The Linux kernel transparent hugepage setting.
24603
+ """
24604
+ return pulumi.get(self, "transparent_hugepage_enabled")
24605
+
22698
24606
 
22699
24607
  @pulumi.output_type
22700
24608
  class GetClusterNodePoolNodeConfigLinuxNodeConfigHugepagesConfigResult(dict):
@@ -22862,12 +24770,23 @@ class GetClusterNodePoolNodeConfigShieldedInstanceConfigResult(dict):
22862
24770
  @pulumi.output_type
22863
24771
  class GetClusterNodePoolNodeConfigSoleTenantConfigResult(dict):
22864
24772
  def __init__(__self__, *,
24773
+ min_node_cpus: _builtins.int,
22865
24774
  node_affinities: Sequence['outputs.GetClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityResult']):
22866
24775
  """
24776
+ :param _builtins.int min_node_cpus: Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
22867
24777
  :param Sequence['GetClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityArgs'] node_affinities: .
22868
24778
  """
24779
+ pulumi.set(__self__, "min_node_cpus", min_node_cpus)
22869
24780
  pulumi.set(__self__, "node_affinities", node_affinities)
22870
24781
 
24782
+ @_builtins.property
24783
+ @pulumi.getter(name="minNodeCpus")
24784
+ def min_node_cpus(self) -> _builtins.int:
24785
+ """
24786
+ Specifies the minimum number of vCPUs that each sole tenant node must have to use CPU overcommit. If not specified, the CPU overcommit feature is disabled.
24787
+ """
24788
+ return pulumi.get(self, "min_node_cpus")
24789
+
22871
24790
  @_builtins.property
22872
24791
  @pulumi.getter(name="nodeAffinities")
22873
24792
  def node_affinities(self) -> Sequence['outputs.GetClusterNodePoolNodeConfigSoleTenantConfigNodeAffinityResult']: