pulumi-gcp 8.40.0a1754721948__py3-none-any.whl → 8.41.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (469)
  1. pulumi_gcp/__init__.py +152 -0
  2. pulumi_gcp/accesscontextmanager/_inputs.py +24 -4
  3. pulumi_gcp/accesscontextmanager/access_policy_iam_binding.py +2 -0
  4. pulumi_gcp/accesscontextmanager/access_policy_iam_member.py +2 -0
  5. pulumi_gcp/accesscontextmanager/access_policy_iam_policy.py +2 -0
  6. pulumi_gcp/accesscontextmanager/outputs.py +15 -3
  7. pulumi_gcp/apigateway/api_config_iam_binding.py +2 -0
  8. pulumi_gcp/apigateway/api_config_iam_member.py +2 -0
  9. pulumi_gcp/apigateway/api_config_iam_policy.py +2 -0
  10. pulumi_gcp/apigateway/api_iam_binding.py +2 -0
  11. pulumi_gcp/apigateway/api_iam_member.py +2 -0
  12. pulumi_gcp/apigateway/api_iam_policy.py +2 -0
  13. pulumi_gcp/apigateway/gateway_iam_binding.py +2 -0
  14. pulumi_gcp/apigateway/gateway_iam_member.py +2 -0
  15. pulumi_gcp/apigateway/gateway_iam_policy.py +2 -0
  16. pulumi_gcp/apigee/__init__.py +2 -0
  17. pulumi_gcp/apigee/_inputs.py +1435 -0
  18. pulumi_gcp/apigee/api_product.py +1698 -0
  19. pulumi_gcp/apigee/environment_iam_binding.py +2 -0
  20. pulumi_gcp/apigee/environment_iam_member.py +2 -0
  21. pulumi_gcp/apigee/environment_iam_policy.py +2 -0
  22. pulumi_gcp/apigee/outputs.py +1081 -0
  23. pulumi_gcp/apigee/security_action.py +1010 -0
  24. pulumi_gcp/artifactregistry/__init__.py +6 -0
  25. pulumi_gcp/artifactregistry/get_docker_images.py +164 -0
  26. pulumi_gcp/artifactregistry/get_package.py +220 -0
  27. pulumi_gcp/artifactregistry/get_repositories.py +160 -0
  28. pulumi_gcp/artifactregistry/get_tag.py +187 -0
  29. pulumi_gcp/artifactregistry/get_tags.py +200 -0
  30. pulumi_gcp/artifactregistry/get_version.py +261 -0
  31. pulumi_gcp/artifactregistry/outputs.py +239 -2
  32. pulumi_gcp/artifactregistry/repository.py +6 -6
  33. pulumi_gcp/artifactregistry/repository_iam_binding.py +2 -0
  34. pulumi_gcp/artifactregistry/repository_iam_member.py +2 -0
  35. pulumi_gcp/artifactregistry/repository_iam_policy.py +2 -0
  36. pulumi_gcp/backupdisasterrecovery/backup_plan.py +114 -7
  37. pulumi_gcp/backupdisasterrecovery/backup_vault.py +56 -0
  38. pulumi_gcp/backupdisasterrecovery/get_backup_plan.py +12 -1
  39. pulumi_gcp/backupdisasterrecovery/get_backup_vault.py +12 -1
  40. pulumi_gcp/beyondcorp/application_iam_binding.py +8 -0
  41. pulumi_gcp/beyondcorp/application_iam_member.py +8 -0
  42. pulumi_gcp/beyondcorp/application_iam_policy.py +8 -0
  43. pulumi_gcp/beyondcorp/get_application_iam_policy.py +4 -0
  44. pulumi_gcp/beyondcorp/security_gateway_application_iam_binding.py +2 -0
  45. pulumi_gcp/beyondcorp/security_gateway_application_iam_member.py +2 -0
  46. pulumi_gcp/beyondcorp/security_gateway_application_iam_policy.py +2 -0
  47. pulumi_gcp/beyondcorp/security_gateway_iam_binding.py +2 -0
  48. pulumi_gcp/beyondcorp/security_gateway_iam_member.py +2 -0
  49. pulumi_gcp/beyondcorp/security_gateway_iam_policy.py +2 -0
  50. pulumi_gcp/bigquery/_inputs.py +6 -0
  51. pulumi_gcp/bigquery/connection_iam_binding.py +2 -0
  52. pulumi_gcp/bigquery/connection_iam_member.py +2 -0
  53. pulumi_gcp/bigquery/connection_iam_policy.py +2 -0
  54. pulumi_gcp/bigquery/data_transfer_config.py +2 -0
  55. pulumi_gcp/bigquery/dataset.py +2 -2
  56. pulumi_gcp/bigquery/get_table.py +23 -1
  57. pulumi_gcp/bigquery/iam_binding.py +2 -0
  58. pulumi_gcp/bigquery/iam_member.py +2 -0
  59. pulumi_gcp/bigquery/iam_policy.py +2 -0
  60. pulumi_gcp/bigquery/outputs.py +4 -0
  61. pulumi_gcp/bigquery/reservation.py +535 -0
  62. pulumi_gcp/bigquery/table.py +62 -0
  63. pulumi_gcp/bigqueryanalyticshub/_inputs.py +180 -0
  64. pulumi_gcp/bigqueryanalyticshub/data_exchange.py +80 -0
  65. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_binding.py +2 -0
  66. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_member.py +2 -0
  67. pulumi_gcp/bigqueryanalyticshub/data_exchange_iam_policy.py +2 -0
  68. pulumi_gcp/bigqueryanalyticshub/listing.py +322 -2
  69. pulumi_gcp/bigqueryanalyticshub/listing_iam_binding.py +2 -0
  70. pulumi_gcp/bigqueryanalyticshub/listing_iam_member.py +2 -0
  71. pulumi_gcp/bigqueryanalyticshub/listing_iam_policy.py +2 -0
  72. pulumi_gcp/bigqueryanalyticshub/listing_subscription.py +32 -0
  73. pulumi_gcp/bigqueryanalyticshub/outputs.py +159 -0
  74. pulumi_gcp/bigquerydatapolicy/data_policy_iam_binding.py +2 -0
  75. pulumi_gcp/bigquerydatapolicy/data_policy_iam_member.py +2 -0
  76. pulumi_gcp/bigquerydatapolicy/data_policy_iam_policy.py +2 -0
  77. pulumi_gcp/bigtable/__init__.py +1 -0
  78. pulumi_gcp/bigtable/_inputs.py +33 -0
  79. pulumi_gcp/bigtable/outputs.py +36 -0
  80. pulumi_gcp/bigtable/schema_bundle.py +568 -0
  81. pulumi_gcp/binaryauthorization/attestor_iam_binding.py +2 -0
  82. pulumi_gcp/binaryauthorization/attestor_iam_member.py +2 -0
  83. pulumi_gcp/binaryauthorization/attestor_iam_policy.py +2 -0
  84. pulumi_gcp/certificateauthority/ca_pool_iam_binding.py +2 -0
  85. pulumi_gcp/certificateauthority/ca_pool_iam_member.py +2 -0
  86. pulumi_gcp/certificateauthority/ca_pool_iam_policy.py +2 -0
  87. pulumi_gcp/certificateauthority/certificate_template_iam_binding.py +2 -0
  88. pulumi_gcp/certificateauthority/certificate_template_iam_member.py +2 -0
  89. pulumi_gcp/certificateauthority/certificate_template_iam_policy.py +2 -0
  90. pulumi_gcp/cloudbuildv2/connection_iam_binding.py +2 -0
  91. pulumi_gcp/cloudbuildv2/connection_iam_member.py +2 -0
  92. pulumi_gcp/cloudbuildv2/connection_iam_policy.py +2 -0
  93. pulumi_gcp/clouddeploy/_inputs.py +48 -48
  94. pulumi_gcp/clouddeploy/deploy_policy.py +54 -74
  95. pulumi_gcp/clouddeploy/outputs.py +32 -32
  96. pulumi_gcp/cloudfunctions/_inputs.py +48 -0
  97. pulumi_gcp/cloudfunctions/function.py +94 -0
  98. pulumi_gcp/cloudfunctions/function_iam_binding.py +2 -0
  99. pulumi_gcp/cloudfunctions/function_iam_member.py +2 -0
  100. pulumi_gcp/cloudfunctions/function_iam_policy.py +2 -0
  101. pulumi_gcp/cloudfunctions/get_function.py +23 -1
  102. pulumi_gcp/cloudfunctions/outputs.py +70 -0
  103. pulumi_gcp/cloudfunctionsv2/function_iam_binding.py +2 -0
  104. pulumi_gcp/cloudfunctionsv2/function_iam_member.py +2 -0
  105. pulumi_gcp/cloudfunctionsv2/function_iam_policy.py +2 -0
  106. pulumi_gcp/cloudrun/iam_binding.py +2 -0
  107. pulumi_gcp/cloudrun/iam_member.py +2 -0
  108. pulumi_gcp/cloudrun/iam_policy.py +2 -0
  109. pulumi_gcp/cloudrunv2/_inputs.py +20 -0
  110. pulumi_gcp/cloudrunv2/job.py +2 -0
  111. pulumi_gcp/cloudrunv2/job_iam_binding.py +2 -0
  112. pulumi_gcp/cloudrunv2/job_iam_member.py +2 -0
  113. pulumi_gcp/cloudrunv2/job_iam_policy.py +2 -0
  114. pulumi_gcp/cloudrunv2/outputs.py +25 -0
  115. pulumi_gcp/cloudrunv2/service_iam_binding.py +2 -0
  116. pulumi_gcp/cloudrunv2/service_iam_member.py +2 -0
  117. pulumi_gcp/cloudrunv2/service_iam_policy.py +2 -0
  118. pulumi_gcp/cloudrunv2/worker_pool.py +2 -0
  119. pulumi_gcp/cloudrunv2/worker_pool_iam_binding.py +2 -0
  120. pulumi_gcp/cloudrunv2/worker_pool_iam_member.py +2 -0
  121. pulumi_gcp/cloudrunv2/worker_pool_iam_policy.py +2 -0
  122. pulumi_gcp/cloudtasks/queue_iam_binding.py +2 -0
  123. pulumi_gcp/cloudtasks/queue_iam_member.py +2 -0
  124. pulumi_gcp/cloudtasks/queue_iam_policy.py +2 -0
  125. pulumi_gcp/colab/runtime_template_iam_binding.py +2 -0
  126. pulumi_gcp/colab/runtime_template_iam_member.py +2 -0
  127. pulumi_gcp/colab/runtime_template_iam_policy.py +2 -0
  128. pulumi_gcp/composer/user_workloads_config_map.py +26 -2
  129. pulumi_gcp/compute/__init__.py +1 -0
  130. pulumi_gcp/compute/_inputs.py +1068 -22
  131. pulumi_gcp/compute/disk_iam_binding.py +2 -0
  132. pulumi_gcp/compute/disk_iam_member.py +2 -0
  133. pulumi_gcp/compute/disk_iam_policy.py +2 -0
  134. pulumi_gcp/compute/firewall_policy_with_rules.py +66 -0
  135. pulumi_gcp/compute/forwarding_rule.py +0 -21
  136. pulumi_gcp/compute/get_region_backend_service.py +12 -1
  137. pulumi_gcp/compute/get_router.py +12 -1
  138. pulumi_gcp/compute/image_iam_binding.py +2 -0
  139. pulumi_gcp/compute/image_iam_member.py +2 -0
  140. pulumi_gcp/compute/image_iam_policy.py +2 -0
  141. pulumi_gcp/compute/instance_iam_binding.py +2 -0
  142. pulumi_gcp/compute/instance_iam_member.py +2 -0
  143. pulumi_gcp/compute/instance_iam_policy.py +2 -0
  144. pulumi_gcp/compute/instance_template_iam_binding.py +2 -0
  145. pulumi_gcp/compute/instance_template_iam_member.py +2 -0
  146. pulumi_gcp/compute/instance_template_iam_policy.py +2 -0
  147. pulumi_gcp/compute/instant_snapshot_iam_binding.py +2 -0
  148. pulumi_gcp/compute/instant_snapshot_iam_member.py +2 -0
  149. pulumi_gcp/compute/instant_snapshot_iam_policy.py +2 -0
  150. pulumi_gcp/compute/machine_image_iam_binding.py +2 -0
  151. pulumi_gcp/compute/machine_image_iam_member.py +2 -0
  152. pulumi_gcp/compute/machine_image_iam_policy.py +2 -0
  153. pulumi_gcp/compute/outputs.py +966 -22
  154. pulumi_gcp/compute/preview_feature.py +396 -0
  155. pulumi_gcp/compute/region_backend_service.py +257 -0
  156. pulumi_gcp/compute/region_disk_iam_binding.py +2 -0
  157. pulumi_gcp/compute/region_disk_iam_member.py +2 -0
  158. pulumi_gcp/compute/region_disk_iam_policy.py +2 -0
  159. pulumi_gcp/compute/region_security_policy.py +54 -0
  160. pulumi_gcp/compute/region_url_map.py +392 -0
  161. pulumi_gcp/compute/reservation.py +4 -4
  162. pulumi_gcp/compute/router.py +54 -0
  163. pulumi_gcp/compute/service_attachment.py +126 -0
  164. pulumi_gcp/compute/snapshot_iam_binding.py +2 -0
  165. pulumi_gcp/compute/snapshot_iam_member.py +2 -0
  166. pulumi_gcp/compute/snapshot_iam_policy.py +2 -0
  167. pulumi_gcp/compute/storage_pool.py +154 -0
  168. pulumi_gcp/compute/storage_pool_iam_binding.py +2 -0
  169. pulumi_gcp/compute/storage_pool_iam_member.py +2 -0
  170. pulumi_gcp/compute/storage_pool_iam_policy.py +2 -0
  171. pulumi_gcp/compute/subnetwork.py +54 -0
  172. pulumi_gcp/compute/subnetwork_iam_binding.py +2 -0
  173. pulumi_gcp/compute/subnetwork_iam_member.py +2 -0
  174. pulumi_gcp/compute/subnetwork_iam_policy.py +2 -0
  175. pulumi_gcp/config/__init__.pyi +2 -4
  176. pulumi_gcp/config/vars.py +4 -8
  177. pulumi_gcp/container/_inputs.py +2622 -246
  178. pulumi_gcp/container/cluster.py +61 -21
  179. pulumi_gcp/container/get_cluster.py +12 -1
  180. pulumi_gcp/container/outputs.py +2877 -133
  181. pulumi_gcp/containeranalysis/note_iam_binding.py +2 -0
  182. pulumi_gcp/containeranalysis/note_iam_member.py +2 -0
  183. pulumi_gcp/containeranalysis/note_iam_policy.py +2 -0
  184. pulumi_gcp/datacatalog/entry_group_iam_binding.py +2 -0
  185. pulumi_gcp/datacatalog/entry_group_iam_member.py +2 -0
  186. pulumi_gcp/datacatalog/entry_group_iam_policy.py +2 -0
  187. pulumi_gcp/datacatalog/policy_tag_iam_binding.py +2 -0
  188. pulumi_gcp/datacatalog/policy_tag_iam_member.py +2 -0
  189. pulumi_gcp/datacatalog/policy_tag_iam_policy.py +2 -0
  190. pulumi_gcp/datacatalog/tag_template_iam_binding.py +2 -0
  191. pulumi_gcp/datacatalog/tag_template_iam_member.py +2 -0
  192. pulumi_gcp/datacatalog/tag_template_iam_policy.py +2 -0
  193. pulumi_gcp/datacatalog/taxonomy_iam_binding.py +2 -0
  194. pulumi_gcp/datacatalog/taxonomy_iam_member.py +2 -0
  195. pulumi_gcp/datacatalog/taxonomy_iam_policy.py +2 -0
  196. pulumi_gcp/datafusion/instance.py +18 -4
  197. pulumi_gcp/dataplex/aspect_type_iam_binding.py +2 -0
  198. pulumi_gcp/dataplex/aspect_type_iam_member.py +2 -0
  199. pulumi_gcp/dataplex/aspect_type_iam_policy.py +2 -0
  200. pulumi_gcp/dataplex/asset_iam_binding.py +2 -0
  201. pulumi_gcp/dataplex/asset_iam_member.py +2 -0
  202. pulumi_gcp/dataplex/asset_iam_policy.py +2 -0
  203. pulumi_gcp/dataplex/datascan_iam_binding.py +2 -0
  204. pulumi_gcp/dataplex/datascan_iam_member.py +2 -0
  205. pulumi_gcp/dataplex/datascan_iam_policy.py +2 -0
  206. pulumi_gcp/dataplex/entry_group_iam_binding.py +2 -0
  207. pulumi_gcp/dataplex/entry_group_iam_member.py +2 -0
  208. pulumi_gcp/dataplex/entry_group_iam_policy.py +2 -0
  209. pulumi_gcp/dataplex/entry_type_iam_binding.py +2 -0
  210. pulumi_gcp/dataplex/entry_type_iam_member.py +2 -0
  211. pulumi_gcp/dataplex/entry_type_iam_policy.py +2 -0
  212. pulumi_gcp/dataplex/glossary_iam_binding.py +2 -0
  213. pulumi_gcp/dataplex/glossary_iam_member.py +2 -0
  214. pulumi_gcp/dataplex/glossary_iam_policy.py +2 -0
  215. pulumi_gcp/dataplex/lake_iam_binding.py +2 -0
  216. pulumi_gcp/dataplex/lake_iam_member.py +2 -0
  217. pulumi_gcp/dataplex/lake_iam_policy.py +2 -0
  218. pulumi_gcp/dataplex/task_iam_binding.py +2 -0
  219. pulumi_gcp/dataplex/task_iam_member.py +2 -0
  220. pulumi_gcp/dataplex/task_iam_policy.py +2 -0
  221. pulumi_gcp/dataplex/zone_iam_binding.py +2 -0
  222. pulumi_gcp/dataplex/zone_iam_member.py +2 -0
  223. pulumi_gcp/dataplex/zone_iam_policy.py +2 -0
  224. pulumi_gcp/dataproc/_inputs.py +249 -14
  225. pulumi_gcp/dataproc/autoscaling_policy_iam_binding.py +2 -0
  226. pulumi_gcp/dataproc/autoscaling_policy_iam_member.py +2 -0
  227. pulumi_gcp/dataproc/autoscaling_policy_iam_policy.py +2 -0
  228. pulumi_gcp/dataproc/batch.py +6 -0
  229. pulumi_gcp/dataproc/cluster.py +2 -0
  230. pulumi_gcp/dataproc/metastore_database_iam_binding.py +2 -0
  231. pulumi_gcp/dataproc/metastore_database_iam_member.py +2 -0
  232. pulumi_gcp/dataproc/metastore_database_iam_policy.py +2 -0
  233. pulumi_gcp/dataproc/metastore_federation_iam_binding.py +2 -0
  234. pulumi_gcp/dataproc/metastore_federation_iam_member.py +2 -0
  235. pulumi_gcp/dataproc/metastore_federation_iam_policy.py +2 -0
  236. pulumi_gcp/dataproc/metastore_service_iam_binding.py +2 -0
  237. pulumi_gcp/dataproc/metastore_service_iam_member.py +2 -0
  238. pulumi_gcp/dataproc/metastore_service_iam_policy.py +2 -0
  239. pulumi_gcp/dataproc/metastore_table_iam_binding.py +2 -0
  240. pulumi_gcp/dataproc/metastore_table_iam_member.py +2 -0
  241. pulumi_gcp/dataproc/metastore_table_iam_policy.py +2 -0
  242. pulumi_gcp/dataproc/outputs.py +215 -12
  243. pulumi_gcp/dataproc/session_template.py +14 -2
  244. pulumi_gcp/developerconnect/__init__.py +1 -0
  245. pulumi_gcp/developerconnect/_inputs.py +583 -0
  246. pulumi_gcp/developerconnect/insights_config.py +895 -0
  247. pulumi_gcp/developerconnect/outputs.py +442 -0
  248. pulumi_gcp/diagflow/__init__.py +3 -0
  249. pulumi_gcp/diagflow/_inputs.py +11899 -7963
  250. pulumi_gcp/diagflow/conversation_profile.py +959 -0
  251. pulumi_gcp/diagflow/cx_generator.py +636 -0
  252. pulumi_gcp/diagflow/cx_playbook.py +967 -0
  253. pulumi_gcp/diagflow/cx_tool.py +2 -2
  254. pulumi_gcp/diagflow/cx_webhook.py +380 -36
  255. pulumi_gcp/diagflow/outputs.py +9099 -5946
  256. pulumi_gcp/discoveryengine/__init__.py +2 -0
  257. pulumi_gcp/discoveryengine/_inputs.py +465 -0
  258. pulumi_gcp/discoveryengine/cmek_config.py +707 -0
  259. pulumi_gcp/discoveryengine/outputs.py +412 -0
  260. pulumi_gcp/discoveryengine/recommendation_engine.py +813 -0
  261. pulumi_gcp/dns/dns_managed_zone_iam_binding.py +2 -0
  262. pulumi_gcp/dns/dns_managed_zone_iam_member.py +2 -0
  263. pulumi_gcp/dns/dns_managed_zone_iam_policy.py +2 -0
  264. pulumi_gcp/endpoints/service_iam_binding.py +2 -0
  265. pulumi_gcp/endpoints/service_iam_member.py +2 -0
  266. pulumi_gcp/endpoints/service_iam_policy.py +2 -0
  267. pulumi_gcp/firestore/field.py +6 -6
  268. pulumi_gcp/gemini/gemini_gcp_enablement_setting.py +107 -9
  269. pulumi_gcp/gemini/gemini_gcp_enablement_setting_binding.py +2 -2
  270. pulumi_gcp/gemini/repository_group_iam_binding.py +2 -0
  271. pulumi_gcp/gemini/repository_group_iam_member.py +2 -0
  272. pulumi_gcp/gemini/repository_group_iam_policy.py +2 -0
  273. pulumi_gcp/gkebackup/backup_plan_iam_binding.py +2 -0
  274. pulumi_gcp/gkebackup/backup_plan_iam_member.py +2 -0
  275. pulumi_gcp/gkebackup/backup_plan_iam_policy.py +2 -0
  276. pulumi_gcp/gkebackup/restore_plan_iam_binding.py +2 -0
  277. pulumi_gcp/gkebackup/restore_plan_iam_member.py +2 -0
  278. pulumi_gcp/gkebackup/restore_plan_iam_policy.py +2 -0
  279. pulumi_gcp/gkehub/feature_iam_binding.py +2 -0
  280. pulumi_gcp/gkehub/feature_iam_member.py +2 -0
  281. pulumi_gcp/gkehub/feature_iam_policy.py +2 -0
  282. pulumi_gcp/gkehub/membership_binding.py +6 -6
  283. pulumi_gcp/gkehub/membership_iam_binding.py +2 -0
  284. pulumi_gcp/gkehub/membership_iam_member.py +2 -0
  285. pulumi_gcp/gkehub/membership_iam_policy.py +2 -0
  286. pulumi_gcp/gkehub/membership_rbac_role_binding.py +4 -4
  287. pulumi_gcp/gkehub/namespace.py +4 -4
  288. pulumi_gcp/gkehub/scope_iam_binding.py +2 -0
  289. pulumi_gcp/gkehub/scope_iam_member.py +2 -0
  290. pulumi_gcp/gkehub/scope_iam_policy.py +2 -0
  291. pulumi_gcp/gkehub/scope_rbac_role_binding.py +8 -8
  292. pulumi_gcp/gkeonprem/vmware_admin_cluster.py +24 -3
  293. pulumi_gcp/healthcare/consent_store_iam_binding.py +2 -0
  294. pulumi_gcp/healthcare/consent_store_iam_member.py +2 -0
  295. pulumi_gcp/healthcare/consent_store_iam_policy.py +2 -0
  296. pulumi_gcp/iam/__init__.py +4 -0
  297. pulumi_gcp/iam/_inputs.py +98 -0
  298. pulumi_gcp/iam/get_workforce_pool_iam_policy.py +161 -0
  299. pulumi_gcp/iam/outputs.py +56 -0
  300. pulumi_gcp/iam/workforce_pool_iam_binding.py +763 -0
  301. pulumi_gcp/iam/workforce_pool_iam_member.py +763 -0
  302. pulumi_gcp/iam/workforce_pool_iam_policy.py +602 -0
  303. pulumi_gcp/iap/app_engine_service_iam_binding.py +2 -0
  304. pulumi_gcp/iap/app_engine_service_iam_member.py +2 -0
  305. pulumi_gcp/iap/app_engine_service_iam_policy.py +2 -0
  306. pulumi_gcp/iap/app_engine_version_iam_binding.py +2 -0
  307. pulumi_gcp/iap/app_engine_version_iam_member.py +2 -0
  308. pulumi_gcp/iap/app_engine_version_iam_policy.py +2 -0
  309. pulumi_gcp/iap/tunnel_dest_group.py +2 -2
  310. pulumi_gcp/iap/tunnel_dest_group_iam_binding.py +2 -0
  311. pulumi_gcp/iap/tunnel_dest_group_iam_member.py +2 -0
  312. pulumi_gcp/iap/tunnel_dest_group_iam_policy.py +2 -0
  313. pulumi_gcp/iap/tunnel_iam_binding.py +2 -0
  314. pulumi_gcp/iap/tunnel_iam_member.py +2 -0
  315. pulumi_gcp/iap/tunnel_iam_policy.py +2 -0
  316. pulumi_gcp/iap/tunnel_instance_iam_binding.py +2 -0
  317. pulumi_gcp/iap/tunnel_instance_iam_member.py +2 -0
  318. pulumi_gcp/iap/tunnel_instance_iam_policy.py +2 -0
  319. pulumi_gcp/iap/web_backend_service_iam_binding.py +2 -0
  320. pulumi_gcp/iap/web_backend_service_iam_member.py +2 -0
  321. pulumi_gcp/iap/web_backend_service_iam_policy.py +2 -0
  322. pulumi_gcp/iap/web_cloud_run_service_iam_binding.py +2 -0
  323. pulumi_gcp/iap/web_cloud_run_service_iam_member.py +2 -0
  324. pulumi_gcp/iap/web_cloud_run_service_iam_policy.py +2 -0
  325. pulumi_gcp/iap/web_iam_binding.py +2 -0
  326. pulumi_gcp/iap/web_iam_member.py +2 -0
  327. pulumi_gcp/iap/web_iam_policy.py +2 -0
  328. pulumi_gcp/iap/web_region_backend_service_iam_binding.py +2 -0
  329. pulumi_gcp/iap/web_region_backend_service_iam_member.py +2 -0
  330. pulumi_gcp/iap/web_region_backend_service_iam_policy.py +2 -0
  331. pulumi_gcp/iap/web_type_app_enging_iam_binding.py +2 -0
  332. pulumi_gcp/iap/web_type_app_enging_iam_member.py +2 -0
  333. pulumi_gcp/iap/web_type_app_enging_iam_policy.py +2 -0
  334. pulumi_gcp/iap/web_type_compute_iam_binding.py +2 -0
  335. pulumi_gcp/iap/web_type_compute_iam_member.py +2 -0
  336. pulumi_gcp/iap/web_type_compute_iam_policy.py +2 -0
  337. pulumi_gcp/integrationconnectors/managed_zone.py +8 -8
  338. pulumi_gcp/kms/crypto_key.py +7 -0
  339. pulumi_gcp/kms/ekm_connection_iam_binding.py +2 -0
  340. pulumi_gcp/kms/ekm_connection_iam_member.py +2 -0
  341. pulumi_gcp/kms/ekm_connection_iam_policy.py +2 -0
  342. pulumi_gcp/kms/outputs.py +2 -0
  343. pulumi_gcp/logging/log_view_iam_binding.py +2 -0
  344. pulumi_gcp/logging/log_view_iam_member.py +2 -0
  345. pulumi_gcp/logging/log_view_iam_policy.py +2 -0
  346. pulumi_gcp/looker/instance.py +28 -7
  347. pulumi_gcp/managedkafka/_inputs.py +127 -0
  348. pulumi_gcp/managedkafka/cluster.py +131 -1
  349. pulumi_gcp/managedkafka/connect_cluster.py +4 -4
  350. pulumi_gcp/managedkafka/connector.py +4 -4
  351. pulumi_gcp/managedkafka/outputs.py +128 -0
  352. pulumi_gcp/memorystore/get_instance.py +12 -1
  353. pulumi_gcp/memorystore/instance.py +78 -12
  354. pulumi_gcp/modelarmor/__init__.py +1 -0
  355. pulumi_gcp/modelarmor/_inputs.py +683 -0
  356. pulumi_gcp/modelarmor/floorsetting.py +736 -0
  357. pulumi_gcp/modelarmor/outputs.py +618 -0
  358. pulumi_gcp/monitoring/_inputs.py +3 -3
  359. pulumi_gcp/monitoring/outputs.py +2 -2
  360. pulumi_gcp/networkconnectivity/_inputs.py +60 -0
  361. pulumi_gcp/networkconnectivity/internal_range.py +136 -0
  362. pulumi_gcp/networkconnectivity/outputs.py +55 -0
  363. pulumi_gcp/networkconnectivity/spoke.py +14 -14
  364. pulumi_gcp/networkmanagement/vpc_flow_logs_config.py +213 -168
  365. pulumi_gcp/notebooks/instance.py +18 -18
  366. pulumi_gcp/notebooks/instance_iam_binding.py +2 -0
  367. pulumi_gcp/notebooks/instance_iam_member.py +2 -0
  368. pulumi_gcp/notebooks/instance_iam_policy.py +2 -0
  369. pulumi_gcp/notebooks/runtime_iam_binding.py +2 -0
  370. pulumi_gcp/notebooks/runtime_iam_member.py +2 -0
  371. pulumi_gcp/notebooks/runtime_iam_policy.py +2 -0
  372. pulumi_gcp/oracledatabase/__init__.py +2 -0
  373. pulumi_gcp/oracledatabase/autonomous_database.py +262 -38
  374. pulumi_gcp/oracledatabase/cloud_vm_cluster.py +314 -50
  375. pulumi_gcp/oracledatabase/get_autonomous_database.py +23 -1
  376. pulumi_gcp/oracledatabase/get_cloud_vm_cluster.py +34 -1
  377. pulumi_gcp/oracledatabase/odb_network.py +721 -0
  378. pulumi_gcp/oracledatabase/odb_subnet.py +803 -0
  379. pulumi_gcp/oracledatabase/outputs.py +83 -0
  380. pulumi_gcp/organizations/folder.py +56 -0
  381. pulumi_gcp/organizations/get_folder.py +29 -1
  382. pulumi_gcp/orgpolicy/policy.py +2 -2
  383. pulumi_gcp/parametermanager/parameter_version.py +62 -0
  384. pulumi_gcp/parametermanager/regional_parameter_version.py +64 -0
  385. pulumi_gcp/projects/api_key.py +88 -1
  386. pulumi_gcp/provider.py +20 -40
  387. pulumi_gcp/pubsub/schema_iam_binding.py +2 -0
  388. pulumi_gcp/pubsub/schema_iam_member.py +2 -0
  389. pulumi_gcp/pubsub/schema_iam_policy.py +2 -0
  390. pulumi_gcp/pubsub/subscription.py +130 -6
  391. pulumi_gcp/pubsub/topic.py +116 -0
  392. pulumi_gcp/pubsub/topic_iam_binding.py +2 -0
  393. pulumi_gcp/pubsub/topic_iam_member.py +2 -0
  394. pulumi_gcp/pubsub/topic_iam_policy.py +2 -0
  395. pulumi_gcp/pulumi-plugin.json +1 -1
  396. pulumi_gcp/redis/cluster.py +70 -0
  397. pulumi_gcp/redis/get_cluster.py +12 -1
  398. pulumi_gcp/redis/instance.py +8 -12
  399. pulumi_gcp/secretmanager/get_regional_secret.py +12 -1
  400. pulumi_gcp/secretmanager/get_secret.py +12 -1
  401. pulumi_gcp/secretmanager/outputs.py +30 -0
  402. pulumi_gcp/secretmanager/regional_secret.py +61 -0
  403. pulumi_gcp/secretmanager/regional_secret_iam_binding.py +2 -0
  404. pulumi_gcp/secretmanager/regional_secret_iam_member.py +2 -0
  405. pulumi_gcp/secretmanager/regional_secret_iam_policy.py +2 -0
  406. pulumi_gcp/secretmanager/secret.py +61 -0
  407. pulumi_gcp/secretmanager/secret_iam_binding.py +2 -0
  408. pulumi_gcp/secretmanager/secret_iam_member.py +2 -0
  409. pulumi_gcp/secretmanager/secret_iam_policy.py +2 -0
  410. pulumi_gcp/secretmanager/secret_version.py +1 -48
  411. pulumi_gcp/securesourcemanager/branch_rule.py +16 -8
  412. pulumi_gcp/securesourcemanager/instance.py +112 -4
  413. pulumi_gcp/securesourcemanager/repository.py +112 -8
  414. pulumi_gcp/securesourcemanager/repository_iam_binding.py +2 -0
  415. pulumi_gcp/securesourcemanager/repository_iam_member.py +2 -0
  416. pulumi_gcp/securesourcemanager/repository_iam_policy.py +2 -0
  417. pulumi_gcp/securitycenter/instance_iam_binding.py +18 -4
  418. pulumi_gcp/securitycenter/instance_iam_member.py +18 -4
  419. pulumi_gcp/securitycenter/instance_iam_policy.py +18 -4
  420. pulumi_gcp/securitycenter/v2_organization_source_iam_binding.py +2 -0
  421. pulumi_gcp/securitycenter/v2_organization_source_iam_member.py +2 -0
  422. pulumi_gcp/securitycenter/v2_organization_source_iam_policy.py +2 -0
  423. pulumi_gcp/serviceaccount/get_account_key.py +1 -0
  424. pulumi_gcp/servicedirectory/namespace_iam_binding.py +2 -0
  425. pulumi_gcp/servicedirectory/namespace_iam_member.py +2 -0
  426. pulumi_gcp/servicedirectory/namespace_iam_policy.py +2 -0
  427. pulumi_gcp/servicedirectory/service_iam_binding.py +2 -0
  428. pulumi_gcp/servicedirectory/service_iam_member.py +2 -0
  429. pulumi_gcp/servicedirectory/service_iam_policy.py +2 -0
  430. pulumi_gcp/sourcerepo/repository_iam_binding.py +2 -0
  431. pulumi_gcp/sourcerepo/repository_iam_member.py +2 -0
  432. pulumi_gcp/sourcerepo/repository_iam_policy.py +2 -0
  433. pulumi_gcp/sql/_inputs.py +88 -10
  434. pulumi_gcp/sql/database.py +0 -12
  435. pulumi_gcp/sql/database_instance.py +108 -7
  436. pulumi_gcp/sql/get_database_instance.py +12 -1
  437. pulumi_gcp/sql/outputs.py +158 -11
  438. pulumi_gcp/storage/__init__.py +2 -0
  439. pulumi_gcp/storage/_inputs.py +555 -12
  440. pulumi_gcp/storage/bucket.py +7 -7
  441. pulumi_gcp/storage/bucket_object.py +34 -0
  442. pulumi_gcp/storage/get_bucket_object.py +12 -1
  443. pulumi_gcp/storage/get_bucket_object_content.py +12 -1
  444. pulumi_gcp/storage/get_insights_dataset_config.py +363 -0
  445. pulumi_gcp/storage/insights_dataset_config.py +1280 -0
  446. pulumi_gcp/storage/outputs.py +703 -7
  447. pulumi_gcp/tags/tag_key_iam_binding.py +2 -0
  448. pulumi_gcp/tags/tag_key_iam_member.py +2 -0
  449. pulumi_gcp/tags/tag_key_iam_policy.py +2 -0
  450. pulumi_gcp/tags/tag_value_iam_binding.py +2 -0
  451. pulumi_gcp/tags/tag_value_iam_member.py +2 -0
  452. pulumi_gcp/tags/tag_value_iam_policy.py +2 -0
  453. pulumi_gcp/tpu/get_tensorflow_versions.py +10 -0
  454. pulumi_gcp/vertex/__init__.py +2 -0
  455. pulumi_gcp/vertex/_inputs.py +3768 -3
  456. pulumi_gcp/vertex/ai_endpoint.py +4 -4
  457. pulumi_gcp/vertex/ai_endpoint_with_model_garden_deployment.py +940 -0
  458. pulumi_gcp/vertex/ai_feature_online_store_featureview.py +4 -4
  459. pulumi_gcp/vertex/ai_index.py +21 -7
  460. pulumi_gcp/vertex/ai_rag_engine_config.py +354 -0
  461. pulumi_gcp/vertex/outputs.py +2678 -2
  462. pulumi_gcp/vmwareengine/network_peering.py +7 -7
  463. pulumi_gcp/workbench/_inputs.py +118 -0
  464. pulumi_gcp/workbench/instance.py +171 -2
  465. pulumi_gcp/workbench/outputs.py +91 -0
  466. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.41.0.dist-info}/METADATA +1 -1
  467. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.41.0.dist-info}/RECORD +469 -442
  468. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.41.0.dist-info}/WHEEL +0 -0
  469. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.41.0.dist-info}/top_level.txt +0 -0
@@ -32,6 +32,35 @@ __all__ = [
32
32
  'AiEndpointPredictRequestResponseLoggingConfig',
33
33
  'AiEndpointPredictRequestResponseLoggingConfigBigqueryDestination',
34
34
  'AiEndpointPrivateServiceConnectConfig',
35
+ 'AiEndpointWithModelGardenDeploymentDeployConfig',
36
+ 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources',
37
+ 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec',
38
+ 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec',
39
+ 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity',
40
+ 'AiEndpointWithModelGardenDeploymentEndpointConfig',
41
+ 'AiEndpointWithModelGardenDeploymentModelConfig',
42
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpec',
43
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecEnv',
44
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort',
45
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe',
46
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeExec',
47
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeGrpc',
48
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet',
49
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetHttpHeader',
50
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeTcpSocket',
51
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe',
52
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeExec',
53
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeGrpc',
54
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet',
55
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetHttpHeader',
56
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeTcpSocket',
57
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort',
58
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe',
59
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeExec',
60
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeGrpc',
61
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet',
62
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetHttpHeader',
63
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeTcpSocket',
35
64
  'AiFeatureGroupBigQuery',
36
65
  'AiFeatureGroupBigQueryBigQuerySource',
37
66
  'AiFeatureGroupIamBindingCondition',
@@ -82,6 +111,10 @@ __all__ = [
82
111
  'AiIndexMetadataConfigAlgorithmConfigTreeAhConfig',
83
112
  'AiMetadataStoreEncryptionSpec',
84
113
  'AiMetadataStoreState',
114
+ 'AiRagEngineConfigRagManagedDbConfig',
115
+ 'AiRagEngineConfigRagManagedDbConfigBasic',
116
+ 'AiRagEngineConfigRagManagedDbConfigScaled',
117
+ 'AiRagEngineConfigRagManagedDbConfigUnprovisioned',
85
118
  'AiTensorboardEncryptionSpec',
86
119
  'GetAiIndexDeployedIndexResult',
87
120
  'GetAiIndexIndexStatResult',
@@ -764,7 +797,7 @@ class AiEndpointDeployedModelDedicatedResourceMachineSpec(dict):
764
797
  :param _builtins.str accelerator_type: (Output)
765
798
  The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
766
799
  :param _builtins.str machine_type: (Output)
767
- The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. TODO(rsurowka): Try to better unify the required vs optional.
800
+ The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. TODO: Try to better unify the required vs optional.
768
801
  """
769
802
  if accelerator_count is not None:
770
803
  pulumi.set(__self__, "accelerator_count", accelerator_count)
@@ -796,7 +829,7 @@ class AiEndpointDeployedModelDedicatedResourceMachineSpec(dict):
796
829
  def machine_type(self) -> Optional[_builtins.str]:
797
830
  """
798
831
  (Output)
799
- The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. TODO(rsurowka): Try to better unify the required vs optional.
832
+ The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. TODO: Try to better unify the required vs optional.
800
833
  """
801
834
  return pulumi.get(self, "machine_type")
802
835
 
@@ -1139,6 +1172,2584 @@ class AiEndpointPrivateServiceConnectConfig(dict):
1139
1172
  return pulumi.get(self, "project_allowlists")
1140
1173
 
1141
1174
 
1175
+ @pulumi.output_type
1176
+ class AiEndpointWithModelGardenDeploymentDeployConfig(dict):
1177
+ @staticmethod
1178
+ def __key_warning(key: str):
1179
+ suggest = None
1180
+ if key == "dedicatedResources":
1181
+ suggest = "dedicated_resources"
1182
+ elif key == "fastTryoutEnabled":
1183
+ suggest = "fast_tryout_enabled"
1184
+ elif key == "systemLabels":
1185
+ suggest = "system_labels"
1186
+
1187
+ if suggest:
1188
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentDeployConfig. Access the value via the '{suggest}' property getter instead.")
1189
+
1190
+ def __getitem__(self, key: str) -> Any:
1191
+ AiEndpointWithModelGardenDeploymentDeployConfig.__key_warning(key)
1192
+ return super().__getitem__(key)
1193
+
1194
+ def get(self, key: str, default = None) -> Any:
1195
+ AiEndpointWithModelGardenDeploymentDeployConfig.__key_warning(key)
1196
+ return super().get(key, default)
1197
+
1198
+ def __init__(__self__, *,
1199
+ dedicated_resources: Optional['outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources'] = None,
1200
+ fast_tryout_enabled: Optional[_builtins.bool] = None,
1201
+ system_labels: Optional[Mapping[str, _builtins.str]] = None):
1202
+ """
1203
+ :param 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesArgs' dedicated_resources: A description of resources that are dedicated to a DeployedModel or
1204
+ DeployedIndex, and that need a higher degree of manual configuration.
1205
+ Structure is documented below.
1206
+ :param _builtins.bool fast_tryout_enabled: If true, enable the QMT fast tryout feature for this model if possible.
1207
+ :param Mapping[str, _builtins.str] system_labels: System labels for Model Garden deployments.
1208
+ These labels are managed by Google and for tracking purposes only.
1209
+ """
1210
+ if dedicated_resources is not None:
1211
+ pulumi.set(__self__, "dedicated_resources", dedicated_resources)
1212
+ if fast_tryout_enabled is not None:
1213
+ pulumi.set(__self__, "fast_tryout_enabled", fast_tryout_enabled)
1214
+ if system_labels is not None:
1215
+ pulumi.set(__self__, "system_labels", system_labels)
1216
+
1217
+ @_builtins.property
1218
+ @pulumi.getter(name="dedicatedResources")
1219
+ def dedicated_resources(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources']:
1220
+ """
1221
+ A description of resources that are dedicated to a DeployedModel or
1222
+ DeployedIndex, and that need a higher degree of manual configuration.
1223
+ Structure is documented below.
1224
+ """
1225
+ return pulumi.get(self, "dedicated_resources")
1226
+
1227
+ @_builtins.property
1228
+ @pulumi.getter(name="fastTryoutEnabled")
1229
+ def fast_tryout_enabled(self) -> Optional[_builtins.bool]:
1230
+ """
1231
+ If true, enable the QMT fast tryout feature for this model if possible.
1232
+ """
1233
+ return pulumi.get(self, "fast_tryout_enabled")
1234
+
1235
+ @_builtins.property
1236
+ @pulumi.getter(name="systemLabels")
1237
+ def system_labels(self) -> Optional[Mapping[str, _builtins.str]]:
1238
+ """
1239
+ System labels for Model Garden deployments.
1240
+ These labels are managed by Google and for tracking purposes only.
1241
+ """
1242
+ return pulumi.get(self, "system_labels")
1243
+
1244
+
1245
+ @pulumi.output_type
1246
+ class AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources(dict):
1247
+ @staticmethod
1248
+ def __key_warning(key: str):
1249
+ suggest = None
1250
+ if key == "machineSpec":
1251
+ suggest = "machine_spec"
1252
+ elif key == "minReplicaCount":
1253
+ suggest = "min_replica_count"
1254
+ elif key == "autoscalingMetricSpecs":
1255
+ suggest = "autoscaling_metric_specs"
1256
+ elif key == "maxReplicaCount":
1257
+ suggest = "max_replica_count"
1258
+ elif key == "requiredReplicaCount":
1259
+ suggest = "required_replica_count"
1260
+
1261
+ if suggest:
1262
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources. Access the value via the '{suggest}' property getter instead.")
1263
+
1264
+ def __getitem__(self, key: str) -> Any:
1265
+ AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources.__key_warning(key)
1266
+ return super().__getitem__(key)
1267
+
1268
+ def get(self, key: str, default = None) -> Any:
1269
+ AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources.__key_warning(key)
1270
+ return super().get(key, default)
1271
+
1272
+ def __init__(__self__, *,
1273
+ machine_spec: 'outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec',
1274
+ min_replica_count: _builtins.int,
1275
+ autoscaling_metric_specs: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec']] = None,
1276
+ max_replica_count: Optional[_builtins.int] = None,
1277
+ required_replica_count: Optional[_builtins.int] = None,
1278
+ spot: Optional[_builtins.bool] = None):
1279
+ """
1280
+ :param 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecArgs' machine_spec: Specification of a single machine.
1281
+ Structure is documented below.
1282
+ :param _builtins.int min_replica_count: The minimum number of machine replicas that will be always deployed on.
1283
+ This value must be greater than or equal to 1.
1284
+ If traffic increases, it may dynamically be deployed onto more replicas,
1285
+ and as traffic decreases, some of these extra replicas may be freed.
1286
+ :param Sequence['AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpecArgs'] autoscaling_metric_specs: The metric specifications that overrides a resource
1287
+ utilization metric (CPU utilization, accelerator's duty cycle, and so on)
1288
+ target value (default to 60 if not set). At most one entry is allowed per
1289
+ metric.
1290
+ If machine_spec.accelerator_count is
1291
+ above 0, the autoscaling will be based on both CPU utilization and
1292
+ accelerator's duty cycle metrics and scale up when either metrics exceeds
1293
+ its target value while scale down if both metrics are under their target
1294
+ value. The default target value is 60 for both metrics.
1295
+ If machine_spec.accelerator_count is
1296
+ 0, the autoscaling will be based on CPU utilization metric only with
1297
+ default target value 60 if not explicitly set.
1298
+ For example, in the case of Online Prediction, if you want to override
1299
+ target CPU utilization to 80, you should set
1300
+ autoscaling_metric_specs.metric_name
1301
+ to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and
1302
+ autoscaling_metric_specs.target to `80`.
1303
+ Structure is documented below.
1304
+ :param _builtins.int max_replica_count: The maximum number of replicas that may be deployed on when the traffic
1305
+ against it increases. If the requested value is too large, the deployment
1306
+ will error, but if deployment succeeds then the ability to scale to that
1307
+ many replicas is guaranteed (barring service outages). If traffic increases
1308
+ beyond what its replicas at maximum may handle, a portion of the traffic
1309
+ will be dropped. If this value is not provided, will use
1310
+ min_replica_count as the default value.
1311
+ The value of this field impacts the charge against Vertex CPU and GPU
1312
+ quotas. Specifically, you will be charged for (max_replica_count *
1313
+ number of cores in the selected machine type) and (max_replica_count *
1314
+ number of GPUs per replica in the selected machine type).
1315
+ :param _builtins.int required_replica_count: Number of required available replicas for the deployment to succeed.
1316
+ This field is only needed when partial deployment/mutation is
1317
+ desired. If set, the deploy/mutate operation will succeed once
1318
+ available_replica_count reaches required_replica_count, and the rest of
1319
+ the replicas will be retried. If not set, the default
1320
+ required_replica_count will be min_replica_count.
1321
+ :param _builtins.bool spot: If true, schedule the deployment workload on [spot
1322
+ VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms).
1323
+ """
1324
+ pulumi.set(__self__, "machine_spec", machine_spec)
1325
+ pulumi.set(__self__, "min_replica_count", min_replica_count)
1326
+ if autoscaling_metric_specs is not None:
1327
+ pulumi.set(__self__, "autoscaling_metric_specs", autoscaling_metric_specs)
1328
+ if max_replica_count is not None:
1329
+ pulumi.set(__self__, "max_replica_count", max_replica_count)
1330
+ if required_replica_count is not None:
1331
+ pulumi.set(__self__, "required_replica_count", required_replica_count)
1332
+ if spot is not None:
1333
+ pulumi.set(__self__, "spot", spot)
1334
+
1335
+ @_builtins.property
1336
+ @pulumi.getter(name="machineSpec")
1337
+ def machine_spec(self) -> 'outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec':
1338
+ """
1339
+ Specification of a single machine.
1340
+ Structure is documented below.
1341
+ """
1342
+ return pulumi.get(self, "machine_spec")
1343
+
1344
+ @_builtins.property
1345
+ @pulumi.getter(name="minReplicaCount")
1346
+ def min_replica_count(self) -> _builtins.int:
1347
+ """
1348
+ The minimum number of machine replicas that will be always deployed on.
1349
+ This value must be greater than or equal to 1.
1350
+ If traffic increases, it may dynamically be deployed onto more replicas,
1351
+ and as traffic decreases, some of these extra replicas may be freed.
1352
+ """
1353
+ return pulumi.get(self, "min_replica_count")
1354
+
1355
+ @_builtins.property
1356
+ @pulumi.getter(name="autoscalingMetricSpecs")
1357
+ def autoscaling_metric_specs(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec']]:
1358
+ """
1359
+ The metric specifications that overrides a resource
1360
+ utilization metric (CPU utilization, accelerator's duty cycle, and so on)
1361
+ target value (default to 60 if not set). At most one entry is allowed per
1362
+ metric.
1363
+ If machine_spec.accelerator_count is
1364
+ above 0, the autoscaling will be based on both CPU utilization and
1365
+ accelerator's duty cycle metrics and scale up when either metrics exceeds
1366
+ its target value while scale down if both metrics are under their target
1367
+ value. The default target value is 60 for both metrics.
1368
+ If machine_spec.accelerator_count is
1369
+ 0, the autoscaling will be based on CPU utilization metric only with
1370
+ default target value 60 if not explicitly set.
1371
+ For example, in the case of Online Prediction, if you want to override
1372
+ target CPU utilization to 80, you should set
1373
+ autoscaling_metric_specs.metric_name
1374
+ to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and
1375
+ autoscaling_metric_specs.target to `80`.
1376
+ Structure is documented below.
1377
+ """
1378
+ return pulumi.get(self, "autoscaling_metric_specs")
1379
+
1380
+ @_builtins.property
1381
+ @pulumi.getter(name="maxReplicaCount")
1382
+ def max_replica_count(self) -> Optional[_builtins.int]:
1383
+ """
1384
+ The maximum number of replicas that may be deployed on when the traffic
1385
+ against it increases. If the requested value is too large, the deployment
1386
+ will error, but if deployment succeeds then the ability to scale to that
1387
+ many replicas is guaranteed (barring service outages). If traffic increases
1388
+ beyond what its replicas at maximum may handle, a portion of the traffic
1389
+ will be dropped. If this value is not provided, will use
1390
+ min_replica_count as the default value.
1391
+ The value of this field impacts the charge against Vertex CPU and GPU
1392
+ quotas. Specifically, you will be charged for (max_replica_count *
1393
+ number of cores in the selected machine type) and (max_replica_count *
1394
+ number of GPUs per replica in the selected machine type).
1395
+ """
1396
+ return pulumi.get(self, "max_replica_count")
1397
+
1398
+ @_builtins.property
1399
+ @pulumi.getter(name="requiredReplicaCount")
1400
+ def required_replica_count(self) -> Optional[_builtins.int]:
1401
+ """
1402
+ Number of required available replicas for the deployment to succeed.
1403
+ This field is only needed when partial deployment/mutation is
1404
+ desired. If set, the deploy/mutate operation will succeed once
1405
+ available_replica_count reaches required_replica_count, and the rest of
1406
+ the replicas will be retried. If not set, the default
1407
+ required_replica_count will be min_replica_count.
1408
+ """
1409
+ return pulumi.get(self, "required_replica_count")
1410
+
1411
+ @_builtins.property
1412
+ @pulumi.getter
1413
+ def spot(self) -> Optional[_builtins.bool]:
1414
+ """
1415
+ If true, schedule the deployment workload on [spot
1416
+ VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms).
1417
+ """
1418
+ return pulumi.get(self, "spot")
1419
+
1420
+
1421
+ @pulumi.output_type
1422
+ class AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec(dict):
1423
+ @staticmethod
1424
+ def __key_warning(key: str):
1425
+ suggest = None
1426
+ if key == "metricName":
1427
+ suggest = "metric_name"
1428
+
1429
+ if suggest:
1430
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec. Access the value via the '{suggest}' property getter instead.")
1431
+
1432
+ def __getitem__(self, key: str) -> Any:
1433
+ AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec.__key_warning(key)
1434
+ return super().__getitem__(key)
1435
+
1436
+ def get(self, key: str, default = None) -> Any:
1437
+ AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec.__key_warning(key)
1438
+ return super().get(key, default)
1439
+
1440
+ def __init__(__self__, *,
1441
+ metric_name: _builtins.str,
1442
+ target: Optional[_builtins.int] = None):
1443
+ """
1444
+ :param _builtins.str metric_name: The resource metric name.
1445
+ Supported metrics:
1446
+ * For Online Prediction:
1447
+ * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`
1448
+ * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
1449
+ :param _builtins.int target: The target resource utilization in percentage (1% - 100%) for the given
1450
+ metric; once the real usage deviates from the target by a certain
1451
+ percentage, the machine replicas change. The default value is 60
1452
+ (representing 60%) if not provided.
1453
+ """
1454
+ pulumi.set(__self__, "metric_name", metric_name)
1455
+ if target is not None:
1456
+ pulumi.set(__self__, "target", target)
1457
+
1458
+ @_builtins.property
1459
+ @pulumi.getter(name="metricName")
1460
+ def metric_name(self) -> _builtins.str:
1461
+ """
1462
+ The resource metric name.
1463
+ Supported metrics:
1464
+ * For Online Prediction:
1465
+ * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`
1466
+ * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
1467
+ """
1468
+ return pulumi.get(self, "metric_name")
1469
+
1470
+ @_builtins.property
1471
+ @pulumi.getter
1472
+ def target(self) -> Optional[_builtins.int]:
1473
+ """
1474
+ The target resource utilization in percentage (1% - 100%) for the given
1475
+ metric; once the real usage deviates from the target by a certain
1476
+ percentage, the machine replicas change. The default value is 60
1477
+ (representing 60%) if not provided.
1478
+ """
1479
+ return pulumi.get(self, "target")
1480
+
1481
+
1482
+ @pulumi.output_type
1483
+ class AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec(dict):
1484
+ @staticmethod
1485
+ def __key_warning(key: str):
1486
+ suggest = None
1487
+ if key == "acceleratorCount":
1488
+ suggest = "accelerator_count"
1489
+ elif key == "acceleratorType":
1490
+ suggest = "accelerator_type"
1491
+ elif key == "machineType":
1492
+ suggest = "machine_type"
1493
+ elif key == "multihostGpuNodeCount":
1494
+ suggest = "multihost_gpu_node_count"
1495
+ elif key == "reservationAffinity":
1496
+ suggest = "reservation_affinity"
1497
+ elif key == "tpuTopology":
1498
+ suggest = "tpu_topology"
1499
+
1500
+ if suggest:
1501
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec. Access the value via the '{suggest}' property getter instead.")
1502
+
1503
+ def __getitem__(self, key: str) -> Any:
1504
+ AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec.__key_warning(key)
1505
+ return super().__getitem__(key)
1506
+
1507
+ def get(self, key: str, default = None) -> Any:
1508
+ AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec.__key_warning(key)
1509
+ return super().get(key, default)
1510
+
1511
+ def __init__(__self__, *,
1512
+ accelerator_count: Optional[_builtins.int] = None,
1513
+ accelerator_type: Optional[_builtins.str] = None,
1514
+ machine_type: Optional[_builtins.str] = None,
1515
+ multihost_gpu_node_count: Optional[_builtins.int] = None,
1516
+ reservation_affinity: Optional['outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity'] = None,
1517
+ tpu_topology: Optional[_builtins.str] = None):
1518
+ """
1519
+ :param _builtins.int accelerator_count: The number of accelerators to attach to the machine.
1520
+ :param _builtins.str accelerator_type: Possible values:
1521
+ ACCELERATOR_TYPE_UNSPECIFIED
1522
+ NVIDIA_TESLA_K80
1523
+ NVIDIA_TESLA_P100
1524
+ NVIDIA_TESLA_V100
1525
+ NVIDIA_TESLA_P4
1526
+ NVIDIA_TESLA_T4
1527
+ NVIDIA_TESLA_A100
1528
+ NVIDIA_A100_80GB
1529
+ NVIDIA_L4
1530
+ NVIDIA_H100_80GB
1531
+ NVIDIA_H100_MEGA_80GB
1532
+ NVIDIA_H200_141GB
1533
+ NVIDIA_B200
1534
+ TPU_V2
1535
+ TPU_V3
1536
+ TPU_V4_POD
1537
+ TPU_V5_LITEPOD
1538
+ :param _builtins.str machine_type: The type of the machine.
1539
+ See the [list of machine types supported for
1540
+ prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types)
1541
+ See the [list of machine types supported for custom
1542
+ training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types).
1543
+ For DeployedModel this field is optional, and the default
1544
+ value is `n1-standard-2`. For BatchPredictionJob or as part of
1545
+ WorkerPoolSpec this field is required.
1546
+ :param _builtins.int multihost_gpu_node_count: The number of nodes per replica for multihost GPU deployments.
1547
+ :param 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinityArgs' reservation_affinity: A ReservationAffinity can be used to configure a Vertex AI resource (e.g., a
1548
+ DeployedModel) to draw its Compute Engine resources from a Shared
1549
+ Reservation, or exclusively from on-demand capacity.
1550
+ Structure is documented below.
1551
+ :param _builtins.str tpu_topology: The topology of the TPUs. Corresponds to the TPU topologies available from
1552
+ GKE. (Example: tpu_topology: "2x2x1").
1553
+ """
1554
+ if accelerator_count is not None:
1555
+ pulumi.set(__self__, "accelerator_count", accelerator_count)
1556
+ if accelerator_type is not None:
1557
+ pulumi.set(__self__, "accelerator_type", accelerator_type)
1558
+ if machine_type is not None:
1559
+ pulumi.set(__self__, "machine_type", machine_type)
1560
+ if multihost_gpu_node_count is not None:
1561
+ pulumi.set(__self__, "multihost_gpu_node_count", multihost_gpu_node_count)
1562
+ if reservation_affinity is not None:
1563
+ pulumi.set(__self__, "reservation_affinity", reservation_affinity)
1564
+ if tpu_topology is not None:
1565
+ pulumi.set(__self__, "tpu_topology", tpu_topology)
1566
+
1567
+ @_builtins.property
1568
+ @pulumi.getter(name="acceleratorCount")
1569
+ def accelerator_count(self) -> Optional[_builtins.int]:
1570
+ """
1571
+ The number of accelerators to attach to the machine.
1572
+ """
1573
+ return pulumi.get(self, "accelerator_count")
1574
+
1575
+ @_builtins.property
1576
+ @pulumi.getter(name="acceleratorType")
1577
+ def accelerator_type(self) -> Optional[_builtins.str]:
1578
+ """
1579
+ Possible values:
1580
+ ACCELERATOR_TYPE_UNSPECIFIED
1581
+ NVIDIA_TESLA_K80
1582
+ NVIDIA_TESLA_P100
1583
+ NVIDIA_TESLA_V100
1584
+ NVIDIA_TESLA_P4
1585
+ NVIDIA_TESLA_T4
1586
+ NVIDIA_TESLA_A100
1587
+ NVIDIA_A100_80GB
1588
+ NVIDIA_L4
1589
+ NVIDIA_H100_80GB
1590
+ NVIDIA_H100_MEGA_80GB
1591
+ NVIDIA_H200_141GB
1592
+ NVIDIA_B200
1593
+ TPU_V2
1594
+ TPU_V3
1595
+ TPU_V4_POD
1596
+ TPU_V5_LITEPOD
1597
+ """
1598
+ return pulumi.get(self, "accelerator_type")
1599
+
1600
+ @_builtins.property
1601
+ @pulumi.getter(name="machineType")
1602
+ def machine_type(self) -> Optional[_builtins.str]:
1603
+ """
1604
+ The type of the machine.
1605
+ See the [list of machine types supported for
1606
+ prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types)
1607
+ See the [list of machine types supported for custom
1608
+ training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types).
1609
+ For DeployedModel this field is optional, and the default
1610
+ value is `n1-standard-2`. For BatchPredictionJob or as part of
1611
+ WorkerPoolSpec this field is required.
1612
+ """
1613
+ return pulumi.get(self, "machine_type")
1614
+
1615
+ @_builtins.property
1616
+ @pulumi.getter(name="multihostGpuNodeCount")
1617
+ def multihost_gpu_node_count(self) -> Optional[_builtins.int]:
1618
+ """
1619
+ The number of nodes per replica for multihost GPU deployments.
1620
+ """
1621
+ return pulumi.get(self, "multihost_gpu_node_count")
1622
+
1623
+ @_builtins.property
1624
+ @pulumi.getter(name="reservationAffinity")
1625
+ def reservation_affinity(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity']:
1626
+ """
1627
+ A ReservationAffinity can be used to configure a Vertex AI resource (e.g., a
1628
+ DeployedModel) to draw its Compute Engine resources from a Shared
1629
+ Reservation, or exclusively from on-demand capacity.
1630
+ Structure is documented below.
1631
+ """
1632
+ return pulumi.get(self, "reservation_affinity")
1633
+
1634
+ @_builtins.property
1635
+ @pulumi.getter(name="tpuTopology")
1636
+ def tpu_topology(self) -> Optional[_builtins.str]:
1637
+ """
1638
+ The topology of the TPUs. Corresponds to the TPU topologies available from
1639
+ GKE. (Example: tpu_topology: "2x2x1").
1640
+ """
1641
+ return pulumi.get(self, "tpu_topology")
1642
+
1643
+
1644
+ @pulumi.output_type
1645
+ class AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity(dict):
1646
+ @staticmethod
1647
+ def __key_warning(key: str):
1648
+ suggest = None
1649
+ if key == "reservationAffinityType":
1650
+ suggest = "reservation_affinity_type"
1651
+
1652
+ if suggest:
1653
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity. Access the value via the '{suggest}' property getter instead.")
1654
+
1655
+ def __getitem__(self, key: str) -> Any:
1656
+ AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity.__key_warning(key)
1657
+ return super().__getitem__(key)
1658
+
1659
+ def get(self, key: str, default = None) -> Any:
1660
+ AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity.__key_warning(key)
1661
+ return super().get(key, default)
1662
+
1663
+ def __init__(__self__, *,
1664
+ reservation_affinity_type: _builtins.str,
1665
+ key: Optional[_builtins.str] = None,
1666
+ values: Optional[Sequence[_builtins.str]] = None):
1667
+ """
1668
+ :param _builtins.str reservation_affinity_type: Specifies the reservation affinity type.
1669
+ Possible values:
1670
+ TYPE_UNSPECIFIED
1671
+ NO_RESERVATION
1672
+ ANY_RESERVATION
1673
+ SPECIFIC_RESERVATION
1674
+ :param _builtins.str key: Corresponds to the label key of a reservation resource. To target a
1675
+ SPECIFIC_RESERVATION by name, use `compute.googleapis.com/reservation-name`
1676
+ as the key and specify the name of your reservation as its value.
1677
+ :param Sequence[_builtins.str] values: Corresponds to the label values of a reservation resource. This must be the
1678
+ full resource name of the reservation or reservation block.
1679
+ """
1680
+ pulumi.set(__self__, "reservation_affinity_type", reservation_affinity_type)
1681
+ if key is not None:
1682
+ pulumi.set(__self__, "key", key)
1683
+ if values is not None:
1684
+ pulumi.set(__self__, "values", values)
1685
+
1686
+ @_builtins.property
1687
+ @pulumi.getter(name="reservationAffinityType")
1688
+ def reservation_affinity_type(self) -> _builtins.str:
1689
+ """
1690
+ Specifies the reservation affinity type.
1691
+ Possible values:
1692
+ TYPE_UNSPECIFIED
1693
+ NO_RESERVATION
1694
+ ANY_RESERVATION
1695
+ SPECIFIC_RESERVATION
1696
+ """
1697
+ return pulumi.get(self, "reservation_affinity_type")
1698
+
1699
+ @_builtins.property
1700
+ @pulumi.getter
1701
+ def key(self) -> Optional[_builtins.str]:
1702
+ """
1703
+ Corresponds to the label key of a reservation resource. To target a
1704
+ SPECIFIC_RESERVATION by name, use `compute.googleapis.com/reservation-name`
1705
+ as the key and specify the name of your reservation as its value.
1706
+ """
1707
+ return pulumi.get(self, "key")
1708
+
1709
+ @_builtins.property
1710
+ @pulumi.getter
1711
+ def values(self) -> Optional[Sequence[_builtins.str]]:
1712
+ """
1713
+ Corresponds to the label values of a reservation resource. This must be the
1714
+ full resource name of the reservation or reservation block.
1715
+ """
1716
+ return pulumi.get(self, "values")
1717
+
1718
+
1719
+ @pulumi.output_type
1720
+ class AiEndpointWithModelGardenDeploymentEndpointConfig(dict):
1721
+ @staticmethod
1722
+ def __key_warning(key: str):
1723
+ suggest = None
1724
+ if key == "dedicatedEndpointEnabled":
1725
+ suggest = "dedicated_endpoint_enabled"
1726
+ elif key == "endpointDisplayName":
1727
+ suggest = "endpoint_display_name"
1728
+
1729
+ if suggest:
1730
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentEndpointConfig. Access the value via the '{suggest}' property getter instead.")
1731
+
1732
+ def __getitem__(self, key: str) -> Any:
1733
+ AiEndpointWithModelGardenDeploymentEndpointConfig.__key_warning(key)
1734
+ return super().__getitem__(key)
1735
+
1736
+ def get(self, key: str, default = None) -> Any:
1737
+ AiEndpointWithModelGardenDeploymentEndpointConfig.__key_warning(key)
1738
+ return super().get(key, default)
1739
+
1740
+ def __init__(__self__, *,
1741
+ dedicated_endpoint_enabled: Optional[_builtins.bool] = None,
1742
+ endpoint_display_name: Optional[_builtins.str] = None):
1743
+ """
1744
+ :param _builtins.bool dedicated_endpoint_enabled: If true, the endpoint will be exposed through a dedicated
1745
+ DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS
1746
+ will be isolated from other users' traffic and will have better
1747
+ performance and reliability. Note: Once you enabled dedicated endpoint,
1748
+ you won't be able to send request to the shared DNS
1749
+ {region}-aiplatform.googleapis.com. The limitations will be removed soon.
1750
+ :param _builtins.str endpoint_display_name: The user-specified display name of the endpoint. If not set, a
1751
+ default name will be used.
1752
+ """
1753
+ if dedicated_endpoint_enabled is not None:
1754
+ pulumi.set(__self__, "dedicated_endpoint_enabled", dedicated_endpoint_enabled)
1755
+ if endpoint_display_name is not None:
1756
+ pulumi.set(__self__, "endpoint_display_name", endpoint_display_name)
1757
+
1758
+ @_builtins.property
1759
+ @pulumi.getter(name="dedicatedEndpointEnabled")
1760
+ def dedicated_endpoint_enabled(self) -> Optional[_builtins.bool]:
1761
+ """
1762
+ If true, the endpoint will be exposed through a dedicated
1763
+ DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS
1764
+ will be isolated from other users' traffic and will have better
1765
+ performance and reliability. Note: Once you enabled dedicated endpoint,
1766
+ you won't be able to send request to the shared DNS
1767
+ {region}-aiplatform.googleapis.com. The limitations will be removed soon.
1768
+ """
1769
+ return pulumi.get(self, "dedicated_endpoint_enabled")
1770
+
1771
+ @_builtins.property
1772
+ @pulumi.getter(name="endpointDisplayName")
1773
+ def endpoint_display_name(self) -> Optional[_builtins.str]:
1774
+ """
1775
+ The user-specified display name of the endpoint. If not set, a
1776
+ default name will be used.
1777
+ """
1778
+ return pulumi.get(self, "endpoint_display_name")
1779
+
1780
+
1781
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfig(dict):
    """Model configuration of a Model Garden deployment: EULA acceptance,
    serving container specification, Hugging Face access/caching options and
    the display name of the uploaded model.
    """

    @staticmethod
    def __key_warning(key: str):
        # Wire-format (camelCase) keys mapped to their snake_case property
        # getters; direct dict access with a camelCase key is deprecated.
        camel_to_snake = {
            "acceptEula": "accept_eula",
            "containerSpec": "container_spec",
            "huggingFaceAccessToken": "hugging_face_access_token",
            "huggingFaceCacheEnabled": "hugging_face_cache_enabled",
            "modelDisplayName": "model_display_name",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 accept_eula: Optional[_builtins.bool] = None,
                 container_spec: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpec'] = None,
                 hugging_face_access_token: Optional[_builtins.str] = None,
                 hugging_face_cache_enabled: Optional[_builtins.bool] = None,
                 model_display_name: Optional[_builtins.str] = None):
        """
        :param _builtins.bool accept_eula: Whether the user accepts the End User
               License Agreement (EULA) for the model.
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecArgs' container_spec:
               Specification of a container for serving predictions; some fields
               mirror the Kubernetes Container v1 core specification.
               Structure is documented below.
        :param _builtins.str hugging_face_access_token: Hugging Face read access
               token used to access the model artifacts of gated models.
        :param _builtins.bool hugging_face_cache_enabled: If true, deploy from a
               cached copy instead of downloading artifacts from Hugging Face
               (suitable for VPC-SC users with limited internet access).
        :param _builtins.str model_display_name: The user-specified display name
               of the uploaded model; a default name is used when unset.
        """
        # Only the fields that were actually supplied are stored.
        for prop, value in (
            ("accept_eula", accept_eula),
            ("container_spec", container_spec),
            ("hugging_face_access_token", hugging_face_access_token),
            ("hugging_face_cache_enabled", hugging_face_cache_enabled),
            ("model_display_name", model_display_name),
        ):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @_builtins.property
    @pulumi.getter(name="acceptEula")
    def accept_eula(self) -> Optional[_builtins.bool]:
        """
        Whether the user accepts the End User License Agreement (EULA)
        for the model.
        """
        return pulumi.get(self, "accept_eula")

    @_builtins.property
    @pulumi.getter(name="containerSpec")
    def container_spec(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpec']:
        """
        Specification of a container for serving predictions. Some fields in
        this message correspond to fields in the [Kubernetes Container v1 core
        specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
        Structure is documented below.
        """
        return pulumi.get(self, "container_spec")

    @_builtins.property
    @pulumi.getter(name="huggingFaceAccessToken")
    def hugging_face_access_token(self) -> Optional[_builtins.str]:
        """
        The Hugging Face read access token used to access the model
        artifacts of gated models.
        """
        return pulumi.get(self, "hugging_face_access_token")

    @_builtins.property
    @pulumi.getter(name="huggingFaceCacheEnabled")
    def hugging_face_cache_enabled(self) -> Optional[_builtins.bool]:
        """
        If true, the model will deploy with a cached version instead of
        directly downloading the model artifacts from Hugging Face. This is
        suitable for VPC-SC users with limited internet access.
        """
        return pulumi.get(self, "hugging_face_cache_enabled")

    @_builtins.property
    @pulumi.getter(name="modelDisplayName")
    def model_display_name(self) -> Optional[_builtins.str]:
        """
        The user-specified display name of the uploaded model. If not
        set, a default name will be used.
        """
        return pulumi.get(self, "model_display_name")
1889
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpec(dict):
    """Specification of the custom serving container: image, entrypoint/args,
    environment, exposed ports, probes and routing paths. Several fields
    mirror the Kubernetes Container v1 core API.
    """

    @staticmethod
    def __key_warning(key: str):
        # Wire-format (camelCase) keys mapped to their snake_case property
        # getters; direct dict access with a camelCase key is deprecated.
        camel_to_snake = {
            "imageUri": "image_uri",
            "deploymentTimeout": "deployment_timeout",
            "grpcPorts": "grpc_ports",
            "healthProbe": "health_probe",
            "healthRoute": "health_route",
            "livenessProbe": "liveness_probe",
            "predictRoute": "predict_route",
            "sharedMemorySizeMb": "shared_memory_size_mb",
            "startupProbe": "startup_probe",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpec. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfigContainerSpec.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfigContainerSpec.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 image_uri: _builtins.str,
                 args: Optional[Sequence[_builtins.str]] = None,
                 commands: Optional[Sequence[_builtins.str]] = None,
                 deployment_timeout: Optional[_builtins.str] = None,
                 envs: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecEnv']] = None,
                 grpc_ports: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort']] = None,
                 health_probe: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe'] = None,
                 health_route: Optional[_builtins.str] = None,
                 liveness_probe: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe'] = None,
                 ports: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort']] = None,
                 predict_route: Optional[_builtins.str] = None,
                 shared_memory_size_mb: Optional[_builtins.str] = None,
                 startup_probe: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe'] = None):
        """
        :param _builtins.str image_uri: URI of the Docker image used as the custom
               serving container; must identify an image in Artifact Registry or
               Container Registry (see the Vertex AI custom-container publishing
               requirements). Required.
        :param Sequence[_builtins.str] args: Arguments for the command run at
               container start; overrides the image `CMD`. Corresponds to the
               `args` field of the Kubernetes Containers v1 core API; supports
               `$(VARIABLE_NAME)` expansion of Vertex AI / `env` variables
               (escape with `$$`).
        :param Sequence[_builtins.str] commands: Command run at container start;
               overrides the image `ENTRYPOINT`. Corresponds to the `command`
               field of the Kubernetes Containers v1 core API; supports the same
               `$(VARIABLE_NAME)` expansion rules as `args`.
        :param _builtins.str deployment_timeout: Deployment timeout; limited to
               2 hours.
        :param Sequence['AiEndpointWithModelGardenDeploymentModelConfigContainerSpecEnvArgs'] envs:
               Environment variables to set in the container; later entries may
               reference earlier ones via `$(VAR)` expansion. Structure is
               documented below.
        :param Sequence['AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPortArgs'] grpc_ports:
               Ports to expose for gRPC; Vertex AI only uses the first port
               listed, and gRPC is disabled when unset. Structure is documented
               below.
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeArgs' health_probe:
               Health check probe for the container. Structure is documented
               below.
        :param _builtins.str health_route: HTTP path on the container that
               Vertex AI sends health-check GET requests to; defaults to
               `/v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict`
               on deployment.
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeArgs' liveness_probe:
               Liveness probe for the container. Structure is documented below.
        :param Sequence['AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPortArgs'] ports:
               Ports to expose from the container; Vertex AI sends predictions
               and health checks to the first port listed, defaulting to
               `[{"containerPort": 8080}]`. Structure is documented below.
        :param _builtins.str predict_route: HTTP path on the container that
               prediction requests are forwarded to; defaults to
               `/v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict`
               on deployment.
        :param _builtins.str shared_memory_size_mb: Amount of VM memory, in
               megabytes, to reserve as shared memory for the model.
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeArgs' startup_probe:
               Startup probe for the container. Structure is documented below.
        """
        # image_uri is the only required field; everything else is stored
        # only when actually supplied.
        pulumi.set(__self__, "image_uri", image_uri)
        for prop, value in (
            ("args", args),
            ("commands", commands),
            ("deployment_timeout", deployment_timeout),
            ("envs", envs),
            ("grpc_ports", grpc_ports),
            ("health_probe", health_probe),
            ("health_route", health_route),
            ("liveness_probe", liveness_probe),
            ("ports", ports),
            ("predict_route", predict_route),
            ("shared_memory_size_mb", shared_memory_size_mb),
            ("startup_probe", startup_probe),
        ):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @_builtins.property
    @pulumi.getter(name="imageUri")
    def image_uri(self) -> _builtins.str:
        """
        URI of the Docker image to be used as the custom container for serving
        predictions. Must identify an image in Artifact Registry or Container
        Registry; see the Vertex AI [container publishing
        requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing).
        The image is ingested upon ModelService.UploadModel and this original
        path is afterwards not used. Pre-built Vertex AI prediction container
        URIs may also be used here.
        """
        return pulumi.get(self, "image_uri")

    @_builtins.property
    @pulumi.getter
    def args(self) -> Optional[Sequence[_builtins.str]]:
        """
        Arguments for the command that runs when the container starts;
        overrides the image's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd)
        ("default parameters" form). Interacts with `command`/`ENTRYPOINT` per
        the Kubernetes rules. Supports `$(VARIABLE_NAME)` expansion of Vertex
        AI and `env` variables (escape with `$$`); unresolved references are
        left unchanged. Corresponds to the `args` field of the Kubernetes
        Containers [v1 core
        API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
        """
        return pulumi.get(self, "args")

    @_builtins.property
    @pulumi.getter
    def commands(self) -> Optional[Sequence[_builtins.str]]:
        """
        Command that runs when the container starts; overrides the image's
        [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint)
        ("exec" form, not "shell" form). When set, the image's `CMD` is
        ignored and `args` supplies additional arguments. Supports
        `$(VARIABLE_NAME)` expansion of Vertex AI and `env` variables (escape
        with `$$`); unresolved references are left unchanged. Corresponds to
        the `command` field of the Kubernetes Containers [v1 core
        API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
        """
        return pulumi.get(self, "commands")

    @_builtins.property
    @pulumi.getter(name="deploymentTimeout")
    def deployment_timeout(self) -> Optional[_builtins.str]:
        """
        Deployment timeout.
        Limit for deployment timeout is 2 hours.
        """
        return pulumi.get(self, "deployment_timeout")

    @_builtins.property
    @pulumi.getter
    def envs(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecEnv']]:
        """
        Environment variables to set in the container, readable by code
        running inside it and referenceable from `command` and `args`. Later
        entries may reference earlier ones, e.g. `VAR_2` with value
        `"$(VAR_1) bar"` expands against an earlier `VAR_1`; reversing the
        order prevents expansion. Corresponds to the `env` field of the
        Kubernetes Containers [v1 core
        API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
        Structure is documented below.
        """
        return pulumi.get(self, "envs")

    @_builtins.property
    @pulumi.getter(name="grpcPorts")
    def grpc_ports(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort']]:
        """
        Ports to expose for gRPC. Vertex AI sends gRPC prediction requests and
        liveness/health checks to the first port listed and ignores the rest;
        gRPC requests are disabled when unset. Corresponds to the `ports`
        field of the Kubernetes Containers v1 core API.
        Structure is documented below.
        """
        return pulumi.get(self, "grpc_ports")

    @_builtins.property
    @pulumi.getter(name="healthProbe")
    def health_probe(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe']:
        """
        Probe describes a health check to be performed against a container to
        determine whether it is alive or ready to receive traffic.
        Structure is documented below.
        """
        return pulumi.get(self, "health_probe")

    @_builtins.property
    @pulumi.getter(name="healthRoute")
    def health_route(self) -> Optional[_builtins.str]:
        """
        HTTP path on the container that Vertex AI intermittently sends
        [health-check](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health)
        GET requests to, on the first port in `ports`. Defaults on deployment
        to `/v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict`,
        where ENDPOINT is the last segment of the Endpoint name (exposed to
        the container as `AIP_ENDPOINT_ID`) and DEPLOYED_MODEL is the
        DeployedModel.id (exposed as `AIP_DEPLOYED_MODEL_ID`).
        """
        return pulumi.get(self, "health_route")

    @_builtins.property
    @pulumi.getter(name="livenessProbe")
    def liveness_probe(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe']:
        """
        Probe describes a health check to be performed against a container to
        determine whether it is alive or ready to receive traffic.
        Structure is documented below.
        """
        return pulumi.get(self, "liveness_probe")

    @_builtins.property
    @pulumi.getter
    def ports(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort']]:
        """
        Ports to expose from the container. Vertex AI sends prediction
        requests and [liveness and health
        checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness)
        to the first port listed and ignores the rest; defaults to
        `[{"containerPort": 8080}]` when unset. Corresponds to the `ports`
        field of the Kubernetes Containers [v1 core
        API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
        Structure is documented below.
        """
        return pulumi.get(self, "ports")

    @_builtins.property
    @pulumi.getter(name="predictRoute")
    def predict_route(self) -> Optional[_builtins.str]:
        """
        HTTP path on the container that Vertex AI forwards
        projects.locations.endpoints.predict requests to (as POST, on the
        first port in `ports`), returning the container's response in the API
        response. Defaults on deployment to
        `/v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict`, where
        ENDPOINT is the last segment of the Endpoint name (exposed to the
        container as `AIP_ENDPOINT_ID`) and DEPLOYED_MODEL is the
        DeployedModel.id (exposed as `AIP_DEPLOYED_MODEL_ID`).
        """
        return pulumi.get(self, "predict_route")

    @_builtins.property
    @pulumi.getter(name="sharedMemorySizeMb")
    def shared_memory_size_mb(self) -> Optional[_builtins.str]:
        """
        The amount of the VM memory to reserve as the shared memory for the
        model in megabytes.
        """
        return pulumi.get(self, "shared_memory_size_mb")

    @_builtins.property
    @pulumi.getter(name="startupProbe")
    def startup_probe(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe']:
        """
        Probe describes a health check to be performed against a container to
        determine whether it is alive or ready to receive traffic.
        Structure is documented below.
        """
        return pulumi.get(self, "startup_probe")
2424
+ @pulumi.output_type
2425
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecEnv(dict):
2426
+ def __init__(__self__, *,
2427
+ name: _builtins.str,
2428
+ value: _builtins.str):
2429
+ """
2430
+ :param _builtins.str name: Name of the environment variable. Must be a valid C identifier.
2431
+ :param _builtins.str value: Variables that reference a $(VAR_NAME) are expanded
2432
+ using the previous defined environment variables in the container and
2433
+ any service environment variables. If a variable cannot be resolved,
2434
+ the reference in the input string will be unchanged. The $(VAR_NAME)
2435
+ syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
2436
+ references will never be expanded, regardless of whether the variable
2437
+ exists or not.
2438
+ """
2439
+ pulumi.set(__self__, "name", name)
2440
+ pulumi.set(__self__, "value", value)
2441
+
2442
+ @_builtins.property
2443
+ @pulumi.getter
2444
+ def name(self) -> _builtins.str:
2445
+ """
2446
+ Name of the environment variable. Must be a valid C identifier.
2447
+ """
2448
+ return pulumi.get(self, "name")
2449
+
2450
+ @_builtins.property
2451
+ @pulumi.getter
2452
+ def value(self) -> _builtins.str:
2453
+ """
2454
+ Variables that reference a $(VAR_NAME) are expanded
2455
+ using the previous defined environment variables in the container and
2456
+ any service environment variables. If a variable cannot be resolved,
2457
+ the reference in the input string will be unchanged. The $(VAR_NAME)
2458
+ syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
2459
+ references will never be expanded, regardless of whether the variable
2460
+ exists or not.
2461
+ """
2462
+ return pulumi.get(self, "value")
2463
+
2464
+
2465
+ @pulumi.output_type
2466
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort(dict):
2467
+ @staticmethod
2468
+ def __key_warning(key: str):
2469
+ suggest = None
2470
+ if key == "containerPort":
2471
+ suggest = "container_port"
2472
+
2473
+ if suggest:
2474
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort. Access the value via the '{suggest}' property getter instead.")
2475
+
2476
+ def __getitem__(self, key: str) -> Any:
2477
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort.__key_warning(key)
2478
+ return super().__getitem__(key)
2479
+
2480
+ def get(self, key: str, default = None) -> Any:
2481
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort.__key_warning(key)
2482
+ return super().get(key, default)
2483
+
2484
+ def __init__(__self__, *,
2485
+ container_port: Optional[_builtins.int] = None):
2486
+ """
2487
+ :param _builtins.int container_port: The number of the port to expose on the pod's IP address.
2488
+ Must be a valid port number, between 1 and 65535 inclusive.
2489
+ """
2490
+ if container_port is not None:
2491
+ pulumi.set(__self__, "container_port", container_port)
2492
+
2493
+ @_builtins.property
2494
+ @pulumi.getter(name="containerPort")
2495
+ def container_port(self) -> Optional[_builtins.int]:
2496
+ """
2497
+ The number of the port to expose on the pod's IP address.
2498
+ Must be a valid port number, between 1 and 65535 inclusive.
2499
+ """
2500
+ return pulumi.get(self, "container_port")
2501
+
2502
+
2503
+ @pulumi.output_type
2504
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe(dict):
2505
+ @staticmethod
2506
+ def __key_warning(key: str):
2507
+ suggest = None
2508
+ if key == "exec":
2509
+ suggest = "exec_"
2510
+ elif key == "failureThreshold":
2511
+ suggest = "failure_threshold"
2512
+ elif key == "httpGet":
2513
+ suggest = "http_get"
2514
+ elif key == "initialDelaySeconds":
2515
+ suggest = "initial_delay_seconds"
2516
+ elif key == "periodSeconds":
2517
+ suggest = "period_seconds"
2518
+ elif key == "successThreshold":
2519
+ suggest = "success_threshold"
2520
+ elif key == "tcpSocket":
2521
+ suggest = "tcp_socket"
2522
+ elif key == "timeoutSeconds":
2523
+ suggest = "timeout_seconds"
2524
+
2525
+ if suggest:
2526
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe. Access the value via the '{suggest}' property getter instead.")
2527
+
2528
+ def __getitem__(self, key: str) -> Any:
2529
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe.__key_warning(key)
2530
+ return super().__getitem__(key)
2531
+
2532
+ def get(self, key: str, default = None) -> Any:
2533
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe.__key_warning(key)
2534
+ return super().get(key, default)
2535
+
2536
+ def __init__(__self__, *,
2537
+ exec_: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeExec'] = None,
2538
+ failure_threshold: Optional[_builtins.int] = None,
2539
+ grpc: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeGrpc'] = None,
2540
+ http_get: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet'] = None,
2541
+ initial_delay_seconds: Optional[_builtins.int] = None,
2542
+ period_seconds: Optional[_builtins.int] = None,
2543
+ success_threshold: Optional[_builtins.int] = None,
2544
+ tcp_socket: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeTcpSocket'] = None,
2545
+ timeout_seconds: Optional[_builtins.int] = None):
2546
+ """
2547
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeExecArgs' exec_: ExecAction specifies a command to execute.
2548
+ Structure is documented below.
2549
+ :param _builtins.int failure_threshold: Number of consecutive failures before the probe is considered failed.
2550
+ Defaults to 3. Minimum value is 1.
2551
+ Maps to Kubernetes probe argument 'failureThreshold'.
2552
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeGrpcArgs' grpc: GrpcAction checks the health of a container using a gRPC service.
2553
+ Structure is documented below.
2554
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetArgs' http_get: HttpGetAction describes an action based on HTTP Get requests.
2555
+ Structure is documented below.
2556
+ :param _builtins.int initial_delay_seconds: Number of seconds to wait before starting the probe. Defaults to 0.
2557
+ Minimum value is 0.
2558
+ Maps to Kubernetes probe argument 'initialDelaySeconds'.
2559
+ :param _builtins.int period_seconds: How often (in seconds) to perform the probe. Default to 10 seconds.
2560
+ Minimum value is 1. Must be less than timeout_seconds.
2561
+ Maps to Kubernetes probe argument 'periodSeconds'.
2562
+ :param _builtins.int success_threshold: Number of consecutive successes before the probe is considered successful.
2563
+ Defaults to 1. Minimum value is 1.
2564
+ Maps to Kubernetes probe argument 'successThreshold'.
2565
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeTcpSocketArgs' tcp_socket: TcpSocketAction probes the health of a container by opening a TCP socket
2566
+ connection.
2567
+ Structure is documented below.
2568
+ :param _builtins.int timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second.
2569
+ Minimum value is 1. Must be greater or equal to period_seconds.
2570
+ Maps to Kubernetes probe argument 'timeoutSeconds'.
2571
+ """
2572
+ if exec_ is not None:
2573
+ pulumi.set(__self__, "exec_", exec_)
2574
+ if failure_threshold is not None:
2575
+ pulumi.set(__self__, "failure_threshold", failure_threshold)
2576
+ if grpc is not None:
2577
+ pulumi.set(__self__, "grpc", grpc)
2578
+ if http_get is not None:
2579
+ pulumi.set(__self__, "http_get", http_get)
2580
+ if initial_delay_seconds is not None:
2581
+ pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
2582
+ if period_seconds is not None:
2583
+ pulumi.set(__self__, "period_seconds", period_seconds)
2584
+ if success_threshold is not None:
2585
+ pulumi.set(__self__, "success_threshold", success_threshold)
2586
+ if tcp_socket is not None:
2587
+ pulumi.set(__self__, "tcp_socket", tcp_socket)
2588
+ if timeout_seconds is not None:
2589
+ pulumi.set(__self__, "timeout_seconds", timeout_seconds)
2590
+
2591
+ @_builtins.property
2592
+ @pulumi.getter(name="exec")
2593
+ def exec_(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeExec']:
2594
+ """
2595
+ ExecAction specifies a command to execute.
2596
+ Structure is documented below.
2597
+ """
2598
+ return pulumi.get(self, "exec_")
2599
+
2600
+ @_builtins.property
2601
+ @pulumi.getter(name="failureThreshold")
2602
+ def failure_threshold(self) -> Optional[_builtins.int]:
2603
+ """
2604
+ Number of consecutive failures before the probe is considered failed.
2605
+ Defaults to 3. Minimum value is 1.
2606
+ Maps to Kubernetes probe argument 'failureThreshold'.
2607
+ """
2608
+ return pulumi.get(self, "failure_threshold")
2609
+
2610
+ @_builtins.property
2611
+ @pulumi.getter
2612
+ def grpc(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeGrpc']:
2613
+ """
2614
+ GrpcAction checks the health of a container using a gRPC service.
2615
+ Structure is documented below.
2616
+ """
2617
+ return pulumi.get(self, "grpc")
2618
+
2619
+ @_builtins.property
2620
+ @pulumi.getter(name="httpGet")
2621
+ def http_get(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet']:
2622
+ """
2623
+ HttpGetAction describes an action based on HTTP Get requests.
2624
+ Structure is documented below.
2625
+ """
2626
+ return pulumi.get(self, "http_get")
2627
+
2628
+ @_builtins.property
2629
+ @pulumi.getter(name="initialDelaySeconds")
2630
+ def initial_delay_seconds(self) -> Optional[_builtins.int]:
2631
+ """
2632
+ Number of seconds to wait before starting the probe. Defaults to 0.
2633
+ Minimum value is 0.
2634
+ Maps to Kubernetes probe argument 'initialDelaySeconds'.
2635
+ """
2636
+ return pulumi.get(self, "initial_delay_seconds")
2637
+
2638
+ @_builtins.property
2639
+ @pulumi.getter(name="periodSeconds")
2640
+ def period_seconds(self) -> Optional[_builtins.int]:
2641
+ """
2642
+ How often (in seconds) to perform the probe. Default to 10 seconds.
2643
+ Minimum value is 1. Must be less than timeout_seconds.
2644
+ Maps to Kubernetes probe argument 'periodSeconds'.
2645
+ """
2646
+ return pulumi.get(self, "period_seconds")
2647
+
2648
+ @_builtins.property
2649
+ @pulumi.getter(name="successThreshold")
2650
+ def success_threshold(self) -> Optional[_builtins.int]:
2651
+ """
2652
+ Number of consecutive successes before the probe is considered successful.
2653
+ Defaults to 1. Minimum value is 1.
2654
+ Maps to Kubernetes probe argument 'successThreshold'.
2655
+ """
2656
+ return pulumi.get(self, "success_threshold")
2657
+
2658
+ @_builtins.property
2659
+ @pulumi.getter(name="tcpSocket")
2660
+ def tcp_socket(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeTcpSocket']:
2661
+ """
2662
+ TcpSocketAction probes the health of a container by opening a TCP socket
2663
+ connection.
2664
+ Structure is documented below.
2665
+ """
2666
+ return pulumi.get(self, "tcp_socket")
2667
+
2668
+ @_builtins.property
2669
+ @pulumi.getter(name="timeoutSeconds")
2670
+ def timeout_seconds(self) -> Optional[_builtins.int]:
2671
+ """
2672
+ Number of seconds after which the probe times out. Defaults to 1 second.
2673
+ Minimum value is 1. Must be greater or equal to period_seconds.
2674
+ Maps to Kubernetes probe argument 'timeoutSeconds'.
2675
+ """
2676
+ return pulumi.get(self, "timeout_seconds")
2677
+
2678
+
2679
+ @pulumi.output_type
2680
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeExec(dict):
2681
+ def __init__(__self__, *,
2682
+ commands: Optional[Sequence[_builtins.str]] = None):
2683
+ """
2684
+ :param Sequence[_builtins.str] commands: Command is the command line to execute inside the container, the working
2685
+ directory for the command is root ('/') in the container's filesystem.
2686
+ The command is simply exec'd, it is not run inside a shell, so
2687
+ traditional shell instructions ('|', etc) won't work. To use a shell, you
2688
+ need to explicitly call out to that shell. Exit status of 0 is treated as
2689
+ live/healthy and non-zero is unhealthy.
2690
+ """
2691
+ if commands is not None:
2692
+ pulumi.set(__self__, "commands", commands)
2693
+
2694
+ @_builtins.property
2695
+ @pulumi.getter
2696
+ def commands(self) -> Optional[Sequence[_builtins.str]]:
2697
+ """
2698
+ Command is the command line to execute inside the container, the working
2699
+ directory for the command is root ('/') in the container's filesystem.
2700
+ The command is simply exec'd, it is not run inside a shell, so
2701
+ traditional shell instructions ('|', etc) won't work. To use a shell, you
2702
+ need to explicitly call out to that shell. Exit status of 0 is treated as
2703
+ live/healthy and non-zero is unhealthy.
2704
+ """
2705
+ return pulumi.get(self, "commands")
2706
+
2707
+
2708
+ @pulumi.output_type
2709
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeGrpc(dict):
2710
+ def __init__(__self__, *,
2711
+ port: Optional[_builtins.int] = None,
2712
+ service: Optional[_builtins.str] = None):
2713
+ """
2714
+ :param _builtins.int port: Port number of the gRPC service. Number must be in the range 1 to 65535.
2715
+ :param _builtins.str service: Service is the name of the service to place in the gRPC
2716
+ HealthCheckRequest. See
2717
+ https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
2718
+ If this is not specified, the default behavior is defined by gRPC.
2719
+ """
2720
+ if port is not None:
2721
+ pulumi.set(__self__, "port", port)
2722
+ if service is not None:
2723
+ pulumi.set(__self__, "service", service)
2724
+
2725
+ @_builtins.property
2726
+ @pulumi.getter
2727
+ def port(self) -> Optional[_builtins.int]:
2728
+ """
2729
+ Port number of the gRPC service. Number must be in the range 1 to 65535.
2730
+ """
2731
+ return pulumi.get(self, "port")
2732
+
2733
+ @_builtins.property
2734
+ @pulumi.getter
2735
+ def service(self) -> Optional[_builtins.str]:
2736
+ """
2737
+ Service is the name of the service to place in the gRPC
2738
+ HealthCheckRequest. See
2739
+ https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
2740
+ If this is not specified, the default behavior is defined by gRPC.
2741
+ """
2742
+ return pulumi.get(self, "service")
2743
+
2744
+
2745
+ @pulumi.output_type
2746
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet(dict):
2747
+ @staticmethod
2748
+ def __key_warning(key: str):
2749
+ suggest = None
2750
+ if key == "httpHeaders":
2751
+ suggest = "http_headers"
2752
+
2753
+ if suggest:
2754
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet. Access the value via the '{suggest}' property getter instead.")
2755
+
2756
+ def __getitem__(self, key: str) -> Any:
2757
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet.__key_warning(key)
2758
+ return super().__getitem__(key)
2759
+
2760
+ def get(self, key: str, default = None) -> Any:
2761
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet.__key_warning(key)
2762
+ return super().get(key, default)
2763
+
2764
+ def __init__(__self__, *,
2765
+ host: Optional[_builtins.str] = None,
2766
+ http_headers: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetHttpHeader']] = None,
2767
+ path: Optional[_builtins.str] = None,
2768
+ port: Optional[_builtins.int] = None,
2769
+ scheme: Optional[_builtins.str] = None):
2770
+ """
2771
+ :param _builtins.str host: Host name to connect to, defaults to the model serving container's IP.
2772
+ You probably want to set "Host" in httpHeaders instead.
2773
+ :param Sequence['AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetHttpHeaderArgs'] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
2774
+ Structure is documented below.
2775
+ :param _builtins.str path: Path to access on the HTTP server.
2776
+ :param _builtins.int port: Number of the port to access on the container.
2777
+ Number must be in the range 1 to 65535.
2778
+ :param _builtins.str scheme: Scheme to use for connecting to the host.
2779
+ Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS".
2780
+ """
2781
+ if host is not None:
2782
+ pulumi.set(__self__, "host", host)
2783
+ if http_headers is not None:
2784
+ pulumi.set(__self__, "http_headers", http_headers)
2785
+ if path is not None:
2786
+ pulumi.set(__self__, "path", path)
2787
+ if port is not None:
2788
+ pulumi.set(__self__, "port", port)
2789
+ if scheme is not None:
2790
+ pulumi.set(__self__, "scheme", scheme)
2791
+
2792
+ @_builtins.property
2793
+ @pulumi.getter
2794
+ def host(self) -> Optional[_builtins.str]:
2795
+ """
2796
+ Host name to connect to, defaults to the model serving container's IP.
2797
+ You probably want to set "Host" in httpHeaders instead.
2798
+ """
2799
+ return pulumi.get(self, "host")
2800
+
2801
+ @_builtins.property
2802
+ @pulumi.getter(name="httpHeaders")
2803
+ def http_headers(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetHttpHeader']]:
2804
+ """
2805
+ Custom headers to set in the request. HTTP allows repeated headers.
2806
+ Structure is documented below.
2807
+ """
2808
+ return pulumi.get(self, "http_headers")
2809
+
2810
+ @_builtins.property
2811
+ @pulumi.getter
2812
+ def path(self) -> Optional[_builtins.str]:
2813
+ """
2814
+ Path to access on the HTTP server.
2815
+ """
2816
+ return pulumi.get(self, "path")
2817
+
2818
+ @_builtins.property
2819
+ @pulumi.getter
2820
+ def port(self) -> Optional[_builtins.int]:
2821
+ """
2822
+ Number of the port to access on the container.
2823
+ Number must be in the range 1 to 65535.
2824
+ """
2825
+ return pulumi.get(self, "port")
2826
+
2827
+ @_builtins.property
2828
+ @pulumi.getter
2829
+ def scheme(self) -> Optional[_builtins.str]:
2830
+ """
2831
+ Scheme to use for connecting to the host.
2832
+ Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS".
2833
+ """
2834
+ return pulumi.get(self, "scheme")
2835
+
2836
+
2837
+ @pulumi.output_type
2838
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetHttpHeader(dict):
2839
+ def __init__(__self__, *,
2840
+ name: Optional[_builtins.str] = None,
2841
+ value: Optional[_builtins.str] = None):
2842
+ """
2843
+ :param _builtins.str name: The header field name.
2844
+ This will be canonicalized upon output, so case-variant names will be
2845
+ understood as the same header.
2846
+ :param _builtins.str value: The header field value
2847
+ """
2848
+ if name is not None:
2849
+ pulumi.set(__self__, "name", name)
2850
+ if value is not None:
2851
+ pulumi.set(__self__, "value", value)
2852
+
2853
+ @_builtins.property
2854
+ @pulumi.getter
2855
+ def name(self) -> Optional[_builtins.str]:
2856
+ """
2857
+ The header field name.
2858
+ This will be canonicalized upon output, so case-variant names will be
2859
+ understood as the same header.
2860
+ """
2861
+ return pulumi.get(self, "name")
2862
+
2863
+ @_builtins.property
2864
+ @pulumi.getter
2865
+ def value(self) -> Optional[_builtins.str]:
2866
+ """
2867
+ The header field value
2868
+ """
2869
+ return pulumi.get(self, "value")
2870
+
2871
+
2872
+ @pulumi.output_type
2873
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeTcpSocket(dict):
2874
+ def __init__(__self__, *,
2875
+ host: Optional[_builtins.str] = None,
2876
+ port: Optional[_builtins.int] = None):
2877
+ """
2878
+ :param _builtins.str host: Optional: Host name to connect to, defaults to the model serving
2879
+ container's IP.
2880
+ :param _builtins.int port: Number of the port to access on the container.
2881
+ Number must be in the range 1 to 65535.
2882
+ """
2883
+ if host is not None:
2884
+ pulumi.set(__self__, "host", host)
2885
+ if port is not None:
2886
+ pulumi.set(__self__, "port", port)
2887
+
2888
+ @_builtins.property
2889
+ @pulumi.getter
2890
+ def host(self) -> Optional[_builtins.str]:
2891
+ """
2892
+ Optional: Host name to connect to, defaults to the model serving
2893
+ container's IP.
2894
+ """
2895
+ return pulumi.get(self, "host")
2896
+
2897
+ @_builtins.property
2898
+ @pulumi.getter
2899
+ def port(self) -> Optional[_builtins.int]:
2900
+ """
2901
+ Number of the port to access on the container.
2902
+ Number must be in the range 1 to 65535.
2903
+ """
2904
+ return pulumi.get(self, "port")
2905
+
2906
+
2907
+ @pulumi.output_type
2908
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe(dict):
2909
+ @staticmethod
2910
+ def __key_warning(key: str):
2911
+ suggest = None
2912
+ if key == "exec":
2913
+ suggest = "exec_"
2914
+ elif key == "failureThreshold":
2915
+ suggest = "failure_threshold"
2916
+ elif key == "httpGet":
2917
+ suggest = "http_get"
2918
+ elif key == "initialDelaySeconds":
2919
+ suggest = "initial_delay_seconds"
2920
+ elif key == "periodSeconds":
2921
+ suggest = "period_seconds"
2922
+ elif key == "successThreshold":
2923
+ suggest = "success_threshold"
2924
+ elif key == "tcpSocket":
2925
+ suggest = "tcp_socket"
2926
+ elif key == "timeoutSeconds":
2927
+ suggest = "timeout_seconds"
2928
+
2929
+ if suggest:
2930
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe. Access the value via the '{suggest}' property getter instead.")
2931
+
2932
+ def __getitem__(self, key: str) -> Any:
2933
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe.__key_warning(key)
2934
+ return super().__getitem__(key)
2935
+
2936
+ def get(self, key: str, default = None) -> Any:
2937
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe.__key_warning(key)
2938
+ return super().get(key, default)
2939
+
2940
+ def __init__(__self__, *,
2941
+ exec_: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeExec'] = None,
2942
+ failure_threshold: Optional[_builtins.int] = None,
2943
+ grpc: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeGrpc'] = None,
2944
+ http_get: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet'] = None,
2945
+ initial_delay_seconds: Optional[_builtins.int] = None,
2946
+ period_seconds: Optional[_builtins.int] = None,
2947
+ success_threshold: Optional[_builtins.int] = None,
2948
+ tcp_socket: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeTcpSocket'] = None,
2949
+ timeout_seconds: Optional[_builtins.int] = None):
2950
+ """
2951
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeExecArgs' exec_: ExecAction specifies a command to execute.
2952
+ Structure is documented below.
2953
+ :param _builtins.int failure_threshold: Number of consecutive failures before the probe is considered failed.
2954
+ Defaults to 3. Minimum value is 1.
2955
+ Maps to Kubernetes probe argument 'failureThreshold'.
2956
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeGrpcArgs' grpc: GrpcAction checks the health of a container using a gRPC service.
2957
+ Structure is documented below.
2958
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetArgs' http_get: HttpGetAction describes an action based on HTTP Get requests.
2959
+ Structure is documented below.
2960
+ :param _builtins.int initial_delay_seconds: Number of seconds to wait before starting the probe. Defaults to 0.
2961
+ Minimum value is 0.
2962
+ Maps to Kubernetes probe argument 'initialDelaySeconds'.
2963
+ :param _builtins.int period_seconds: How often (in seconds) to perform the probe. Default to 10 seconds.
2964
+ Minimum value is 1. Must be less than timeout_seconds.
2965
+ Maps to Kubernetes probe argument 'periodSeconds'.
2966
+ :param _builtins.int success_threshold: Number of consecutive successes before the probe is considered successful.
2967
+ Defaults to 1. Minimum value is 1.
2968
+ Maps to Kubernetes probe argument 'successThreshold'.
2969
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeTcpSocketArgs' tcp_socket: TcpSocketAction probes the health of a container by opening a TCP socket
2970
+ connection.
2971
+ Structure is documented below.
2972
+ :param _builtins.int timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second.
2973
+ Minimum value is 1. Must be greater or equal to period_seconds.
2974
+ Maps to Kubernetes probe argument 'timeoutSeconds'.
2975
+ """
2976
+ if exec_ is not None:
2977
+ pulumi.set(__self__, "exec_", exec_)
2978
+ if failure_threshold is not None:
2979
+ pulumi.set(__self__, "failure_threshold", failure_threshold)
2980
+ if grpc is not None:
2981
+ pulumi.set(__self__, "grpc", grpc)
2982
+ if http_get is not None:
2983
+ pulumi.set(__self__, "http_get", http_get)
2984
+ if initial_delay_seconds is not None:
2985
+ pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
2986
+ if period_seconds is not None:
2987
+ pulumi.set(__self__, "period_seconds", period_seconds)
2988
+ if success_threshold is not None:
2989
+ pulumi.set(__self__, "success_threshold", success_threshold)
2990
+ if tcp_socket is not None:
2991
+ pulumi.set(__self__, "tcp_socket", tcp_socket)
2992
+ if timeout_seconds is not None:
2993
+ pulumi.set(__self__, "timeout_seconds", timeout_seconds)
2994
+
2995
+ @_builtins.property
2996
+ @pulumi.getter(name="exec")
2997
+ def exec_(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeExec']:
2998
+ """
2999
+ ExecAction specifies a command to execute.
3000
+ Structure is documented below.
3001
+ """
3002
+ return pulumi.get(self, "exec_")
3003
+
3004
+ @_builtins.property
3005
+ @pulumi.getter(name="failureThreshold")
3006
+ def failure_threshold(self) -> Optional[_builtins.int]:
3007
+ """
3008
+ Number of consecutive failures before the probe is considered failed.
3009
+ Defaults to 3. Minimum value is 1.
3010
+ Maps to Kubernetes probe argument 'failureThreshold'.
3011
+ """
3012
+ return pulumi.get(self, "failure_threshold")
3013
+
3014
+ @_builtins.property
3015
+ @pulumi.getter
3016
+ def grpc(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeGrpc']:
3017
+ """
3018
+ GrpcAction checks the health of a container using a gRPC service.
3019
+ Structure is documented below.
3020
+ """
3021
+ return pulumi.get(self, "grpc")
3022
+
3023
+ @_builtins.property
3024
+ @pulumi.getter(name="httpGet")
3025
+ def http_get(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet']:
3026
+ """
3027
+ HttpGetAction describes an action based on HTTP Get requests.
3028
+ Structure is documented below.
3029
+ """
3030
+ return pulumi.get(self, "http_get")
3031
+
3032
+ @_builtins.property
3033
+ @pulumi.getter(name="initialDelaySeconds")
3034
+ def initial_delay_seconds(self) -> Optional[_builtins.int]:
3035
+ """
3036
+ Number of seconds to wait before starting the probe. Defaults to 0.
3037
+ Minimum value is 0.
3038
+ Maps to Kubernetes probe argument 'initialDelaySeconds'.
3039
+ """
3040
+ return pulumi.get(self, "initial_delay_seconds")
3041
+
3042
+ @_builtins.property
3043
+ @pulumi.getter(name="periodSeconds")
3044
+ def period_seconds(self) -> Optional[_builtins.int]:
3045
+ """
3046
+ How often (in seconds) to perform the probe. Default to 10 seconds.
3047
+ Minimum value is 1. Must be less than timeout_seconds.
3048
+ Maps to Kubernetes probe argument 'periodSeconds'.
3049
+ """
3050
+ return pulumi.get(self, "period_seconds")
3051
+
3052
+ @_builtins.property
3053
+ @pulumi.getter(name="successThreshold")
3054
+ def success_threshold(self) -> Optional[_builtins.int]:
3055
+ """
3056
+ Number of consecutive successes before the probe is considered successful.
3057
+ Defaults to 1. Minimum value is 1.
3058
+ Maps to Kubernetes probe argument 'successThreshold'.
3059
+ """
3060
+ return pulumi.get(self, "success_threshold")
3061
+
3062
+ @_builtins.property
3063
+ @pulumi.getter(name="tcpSocket")
3064
+ def tcp_socket(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeTcpSocket']:
3065
+ """
3066
+ TcpSocketAction probes the health of a container by opening a TCP socket
3067
+ connection.
3068
+ Structure is documented below.
3069
+ """
3070
+ return pulumi.get(self, "tcp_socket")
3071
+
3072
+ @_builtins.property
3073
+ @pulumi.getter(name="timeoutSeconds")
3074
+ def timeout_seconds(self) -> Optional[_builtins.int]:
3075
+ """
3076
+ Number of seconds after which the probe times out. Defaults to 1 second.
3077
+ Minimum value is 1. Must be greater or equal to period_seconds.
3078
+ Maps to Kubernetes probe argument 'timeoutSeconds'.
3079
+ """
3080
+ return pulumi.get(self, "timeout_seconds")
3081
+
3082
+
3083
+ @pulumi.output_type
3084
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeExec(dict):
3085
+ def __init__(__self__, *,
3086
+ commands: Optional[Sequence[_builtins.str]] = None):
3087
+ """
3088
+ :param Sequence[_builtins.str] commands: Command is the command line to execute inside the container, the working
3089
+ directory for the command is root ('/') in the container's filesystem.
3090
+ The command is simply exec'd, it is not run inside a shell, so
3091
+ traditional shell instructions ('|', etc) won't work. To use a shell, you
3092
+ need to explicitly call out to that shell. Exit status of 0 is treated as
3093
+ live/healthy and non-zero is unhealthy.
3094
+ """
3095
+ if commands is not None:
3096
+ pulumi.set(__self__, "commands", commands)
3097
+
3098
+ @_builtins.property
3099
+ @pulumi.getter
3100
+ def commands(self) -> Optional[Sequence[_builtins.str]]:
3101
+ """
3102
+ Command is the command line to execute inside the container, the working
3103
+ directory for the command is root ('/') in the container's filesystem.
3104
+ The command is simply exec'd, it is not run inside a shell, so
3105
+ traditional shell instructions ('|', etc) won't work. To use a shell, you
3106
+ need to explicitly call out to that shell. Exit status of 0 is treated as
3107
+ live/healthy and non-zero is unhealthy.
3108
+ """
3109
+ return pulumi.get(self, "commands")
3110
+
3111
+
3112
+ @pulumi.output_type
3113
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeGrpc(dict):
3114
+ def __init__(__self__, *,
3115
+ port: Optional[_builtins.int] = None,
3116
+ service: Optional[_builtins.str] = None):
3117
+ """
3118
+ :param _builtins.int port: Port number of the gRPC service. Number must be in the range 1 to 65535.
3119
+ :param _builtins.str service: Service is the name of the service to place in the gRPC
3120
+ HealthCheckRequest. See
3121
+ https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
3122
+ If this is not specified, the default behavior is defined by gRPC.
3123
+ """
3124
+ if port is not None:
3125
+ pulumi.set(__self__, "port", port)
3126
+ if service is not None:
3127
+ pulumi.set(__self__, "service", service)
3128
+
3129
+ @_builtins.property
3130
+ @pulumi.getter
3131
+ def port(self) -> Optional[_builtins.int]:
3132
+ """
3133
+ Port number of the gRPC service. Number must be in the range 1 to 65535.
3134
+ """
3135
+ return pulumi.get(self, "port")
3136
+
3137
+ @_builtins.property
3138
+ @pulumi.getter
3139
+ def service(self) -> Optional[_builtins.str]:
3140
+ """
3141
+ Service is the name of the service to place in the gRPC
3142
+ HealthCheckRequest. See
3143
+ https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
3144
+ If this is not specified, the default behavior is defined by gRPC.
3145
+ """
3146
+ return pulumi.get(self, "service")
3147
+
3148
+
3149
+ @pulumi.output_type
3150
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet(dict):
3151
+ @staticmethod
3152
+ def __key_warning(key: str):
3153
+ suggest = None
3154
+ if key == "httpHeaders":
3155
+ suggest = "http_headers"
3156
+
3157
+ if suggest:
3158
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet. Access the value via the '{suggest}' property getter instead.")
3159
+
3160
+ def __getitem__(self, key: str) -> Any:
3161
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet.__key_warning(key)
3162
+ return super().__getitem__(key)
3163
+
3164
+ def get(self, key: str, default = None) -> Any:
3165
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet.__key_warning(key)
3166
+ return super().get(key, default)
3167
+
3168
+ def __init__(__self__, *,
3169
+ host: Optional[_builtins.str] = None,
3170
+ http_headers: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetHttpHeader']] = None,
3171
+ path: Optional[_builtins.str] = None,
3172
+ port: Optional[_builtins.int] = None,
3173
+ scheme: Optional[_builtins.str] = None):
3174
+ """
3175
+ :param _builtins.str host: Host name to connect to, defaults to the model serving container's IP.
3176
+ You probably want to set "Host" in httpHeaders instead.
3177
+ :param Sequence['AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetHttpHeaderArgs'] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
3178
+ Structure is documented below.
3179
+ :param _builtins.str path: Path to access on the HTTP server.
3180
+ :param _builtins.int port: Number of the port to access on the container.
3181
+ Number must be in the range 1 to 65535.
3182
+ :param _builtins.str scheme: Scheme to use for connecting to the host.
3183
+ Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS".
3184
+ """
3185
+ if host is not None:
3186
+ pulumi.set(__self__, "host", host)
3187
+ if http_headers is not None:
3188
+ pulumi.set(__self__, "http_headers", http_headers)
3189
+ if path is not None:
3190
+ pulumi.set(__self__, "path", path)
3191
+ if port is not None:
3192
+ pulumi.set(__self__, "port", port)
3193
+ if scheme is not None:
3194
+ pulumi.set(__self__, "scheme", scheme)
3195
+
3196
+ @_builtins.property
3197
+ @pulumi.getter
3198
+ def host(self) -> Optional[_builtins.str]:
3199
+ """
3200
+ Host name to connect to, defaults to the model serving container's IP.
3201
+ You probably want to set "Host" in httpHeaders instead.
3202
+ """
3203
+ return pulumi.get(self, "host")
3204
+
3205
+ @_builtins.property
3206
+ @pulumi.getter(name="httpHeaders")
3207
+ def http_headers(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetHttpHeader']]:
3208
+ """
3209
+ Custom headers to set in the request. HTTP allows repeated headers.
3210
+ Structure is documented below.
3211
+ """
3212
+ return pulumi.get(self, "http_headers")
3213
+
3214
+ @_builtins.property
3215
+ @pulumi.getter
3216
+ def path(self) -> Optional[_builtins.str]:
3217
+ """
3218
+ Path to access on the HTTP server.
3219
+ """
3220
+ return pulumi.get(self, "path")
3221
+
3222
+ @_builtins.property
3223
+ @pulumi.getter
3224
+ def port(self) -> Optional[_builtins.int]:
3225
+ """
3226
+ Number of the port to access on the container.
3227
+ Number must be in the range 1 to 65535.
3228
+ """
3229
+ return pulumi.get(self, "port")
3230
+
3231
+ @_builtins.property
3232
+ @pulumi.getter
3233
+ def scheme(self) -> Optional[_builtins.str]:
3234
+ """
3235
+ Scheme to use for connecting to the host.
3236
+ Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS".
3237
+ """
3238
+ return pulumi.get(self, "scheme")
3239
+
3240
+
3241
+ @pulumi.output_type
3242
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetHttpHeader(dict):
3243
+ def __init__(__self__, *,
3244
+ name: Optional[_builtins.str] = None,
3245
+ value: Optional[_builtins.str] = None):
3246
+ """
3247
+ :param _builtins.str name: The header field name.
3248
+ This will be canonicalized upon output, so case-variant names will be
3249
+ understood as the same header.
3250
+ :param _builtins.str value: The header field value
3251
+ """
3252
+ if name is not None:
3253
+ pulumi.set(__self__, "name", name)
3254
+ if value is not None:
3255
+ pulumi.set(__self__, "value", value)
3256
+
3257
+ @_builtins.property
3258
+ @pulumi.getter
3259
+ def name(self) -> Optional[_builtins.str]:
3260
+ """
3261
+ The header field name.
3262
+ This will be canonicalized upon output, so case-variant names will be
3263
+ understood as the same header.
3264
+ """
3265
+ return pulumi.get(self, "name")
3266
+
3267
+ @_builtins.property
3268
+ @pulumi.getter
3269
+ def value(self) -> Optional[_builtins.str]:
3270
+ """
3271
+ The header field value
3272
+ """
3273
+ return pulumi.get(self, "value")
3274
+
3275
+
3276
+ @pulumi.output_type
3277
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeTcpSocket(dict):
3278
+ def __init__(__self__, *,
3279
+ host: Optional[_builtins.str] = None,
3280
+ port: Optional[_builtins.int] = None):
3281
+ """
3282
+ :param _builtins.str host: Optional: Host name to connect to, defaults to the model serving
3283
+ container's IP.
3284
+ :param _builtins.int port: Number of the port to access on the container.
3285
+ Number must be in the range 1 to 65535.
3286
+ """
3287
+ if host is not None:
3288
+ pulumi.set(__self__, "host", host)
3289
+ if port is not None:
3290
+ pulumi.set(__self__, "port", port)
3291
+
3292
+ @_builtins.property
3293
+ @pulumi.getter
3294
+ def host(self) -> Optional[_builtins.str]:
3295
+ """
3296
+ Optional: Host name to connect to, defaults to the model serving
3297
+ container's IP.
3298
+ """
3299
+ return pulumi.get(self, "host")
3300
+
3301
+ @_builtins.property
3302
+ @pulumi.getter
3303
+ def port(self) -> Optional[_builtins.int]:
3304
+ """
3305
+ Number of the port to access on the container.
3306
+ Number must be in the range 1 to 65535.
3307
+ """
3308
+ return pulumi.get(self, "port")
3309
+
3310
+
3311
+ @pulumi.output_type
3312
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort(dict):
3313
+ @staticmethod
3314
+ def __key_warning(key: str):
3315
+ suggest = None
3316
+ if key == "containerPort":
3317
+ suggest = "container_port"
3318
+
3319
+ if suggest:
3320
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort. Access the value via the '{suggest}' property getter instead.")
3321
+
3322
+ def __getitem__(self, key: str) -> Any:
3323
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort.__key_warning(key)
3324
+ return super().__getitem__(key)
3325
+
3326
+ def get(self, key: str, default = None) -> Any:
3327
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort.__key_warning(key)
3328
+ return super().get(key, default)
3329
+
3330
+ def __init__(__self__, *,
3331
+ container_port: Optional[_builtins.int] = None):
3332
+ """
3333
+ :param _builtins.int container_port: The number of the port to expose on the pod's IP address.
3334
+ Must be a valid port number, between 1 and 65535 inclusive.
3335
+ """
3336
+ if container_port is not None:
3337
+ pulumi.set(__self__, "container_port", container_port)
3338
+
3339
+ @_builtins.property
3340
+ @pulumi.getter(name="containerPort")
3341
+ def container_port(self) -> Optional[_builtins.int]:
3342
+ """
3343
+ The number of the port to expose on the pod's IP address.
3344
+ Must be a valid port number, between 1 and 65535 inclusive.
3345
+ """
3346
+ return pulumi.get(self, "container_port")
3347
+
3348
+
3349
+ @pulumi.output_type
3350
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe(dict):
3351
+ @staticmethod
3352
+ def __key_warning(key: str):
3353
+ suggest = None
3354
+ if key == "exec":
3355
+ suggest = "exec_"
3356
+ elif key == "failureThreshold":
3357
+ suggest = "failure_threshold"
3358
+ elif key == "httpGet":
3359
+ suggest = "http_get"
3360
+ elif key == "initialDelaySeconds":
3361
+ suggest = "initial_delay_seconds"
3362
+ elif key == "periodSeconds":
3363
+ suggest = "period_seconds"
3364
+ elif key == "successThreshold":
3365
+ suggest = "success_threshold"
3366
+ elif key == "tcpSocket":
3367
+ suggest = "tcp_socket"
3368
+ elif key == "timeoutSeconds":
3369
+ suggest = "timeout_seconds"
3370
+
3371
+ if suggest:
3372
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe. Access the value via the '{suggest}' property getter instead.")
3373
+
3374
+ def __getitem__(self, key: str) -> Any:
3375
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe.__key_warning(key)
3376
+ return super().__getitem__(key)
3377
+
3378
+ def get(self, key: str, default = None) -> Any:
3379
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe.__key_warning(key)
3380
+ return super().get(key, default)
3381
+
3382
+ def __init__(__self__, *,
3383
+ exec_: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeExec'] = None,
3384
+ failure_threshold: Optional[_builtins.int] = None,
3385
+ grpc: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeGrpc'] = None,
3386
+ http_get: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet'] = None,
3387
+ initial_delay_seconds: Optional[_builtins.int] = None,
3388
+ period_seconds: Optional[_builtins.int] = None,
3389
+ success_threshold: Optional[_builtins.int] = None,
3390
+ tcp_socket: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeTcpSocket'] = None,
3391
+ timeout_seconds: Optional[_builtins.int] = None):
3392
+ """
3393
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeExecArgs' exec_: ExecAction specifies a command to execute.
3394
+ Structure is documented below.
3395
+ :param _builtins.int failure_threshold: Number of consecutive failures before the probe is considered failed.
3396
+ Defaults to 3. Minimum value is 1.
3397
+ Maps to Kubernetes probe argument 'failureThreshold'.
3398
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeGrpcArgs' grpc: GrpcAction checks the health of a container using a gRPC service.
3399
+ Structure is documented below.
3400
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetArgs' http_get: HttpGetAction describes an action based on HTTP Get requests.
3401
+ Structure is documented below.
3402
+ :param _builtins.int initial_delay_seconds: Number of seconds to wait before starting the probe. Defaults to 0.
3403
+ Minimum value is 0.
3404
+ Maps to Kubernetes probe argument 'initialDelaySeconds'.
3405
+ :param _builtins.int period_seconds: How often (in seconds) to perform the probe. Default to 10 seconds.
3406
+ Minimum value is 1. Must be less than timeout_seconds.
3407
+ Maps to Kubernetes probe argument 'periodSeconds'.
3408
+ :param _builtins.int success_threshold: Number of consecutive successes before the probe is considered successful.
3409
+ Defaults to 1. Minimum value is 1.
3410
+ Maps to Kubernetes probe argument 'successThreshold'.
3411
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeTcpSocketArgs' tcp_socket: TcpSocketAction probes the health of a container by opening a TCP socket
3412
+ connection.
3413
+ Structure is documented below.
3414
+ :param _builtins.int timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second.
3415
+ Minimum value is 1. Must be greater or equal to period_seconds.
3416
+ Maps to Kubernetes probe argument 'timeoutSeconds'.
3417
+ """
3418
+ if exec_ is not None:
3419
+ pulumi.set(__self__, "exec_", exec_)
3420
+ if failure_threshold is not None:
3421
+ pulumi.set(__self__, "failure_threshold", failure_threshold)
3422
+ if grpc is not None:
3423
+ pulumi.set(__self__, "grpc", grpc)
3424
+ if http_get is not None:
3425
+ pulumi.set(__self__, "http_get", http_get)
3426
+ if initial_delay_seconds is not None:
3427
+ pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
3428
+ if period_seconds is not None:
3429
+ pulumi.set(__self__, "period_seconds", period_seconds)
3430
+ if success_threshold is not None:
3431
+ pulumi.set(__self__, "success_threshold", success_threshold)
3432
+ if tcp_socket is not None:
3433
+ pulumi.set(__self__, "tcp_socket", tcp_socket)
3434
+ if timeout_seconds is not None:
3435
+ pulumi.set(__self__, "timeout_seconds", timeout_seconds)
3436
+
3437
+ @_builtins.property
3438
+ @pulumi.getter(name="exec")
3439
+ def exec_(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeExec']:
3440
+ """
3441
+ ExecAction specifies a command to execute.
3442
+ Structure is documented below.
3443
+ """
3444
+ return pulumi.get(self, "exec_")
3445
+
3446
+ @_builtins.property
3447
+ @pulumi.getter(name="failureThreshold")
3448
+ def failure_threshold(self) -> Optional[_builtins.int]:
3449
+ """
3450
+ Number of consecutive failures before the probe is considered failed.
3451
+ Defaults to 3. Minimum value is 1.
3452
+ Maps to Kubernetes probe argument 'failureThreshold'.
3453
+ """
3454
+ return pulumi.get(self, "failure_threshold")
3455
+
3456
+ @_builtins.property
3457
+ @pulumi.getter
3458
+ def grpc(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeGrpc']:
3459
+ """
3460
+ GrpcAction checks the health of a container using a gRPC service.
3461
+ Structure is documented below.
3462
+ """
3463
+ return pulumi.get(self, "grpc")
3464
+
3465
+ @_builtins.property
3466
+ @pulumi.getter(name="httpGet")
3467
+ def http_get(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet']:
3468
+ """
3469
+ HttpGetAction describes an action based on HTTP Get requests.
3470
+ Structure is documented below.
3471
+ """
3472
+ return pulumi.get(self, "http_get")
3473
+
3474
+ @_builtins.property
3475
+ @pulumi.getter(name="initialDelaySeconds")
3476
+ def initial_delay_seconds(self) -> Optional[_builtins.int]:
3477
+ """
3478
+ Number of seconds to wait before starting the probe. Defaults to 0.
3479
+ Minimum value is 0.
3480
+ Maps to Kubernetes probe argument 'initialDelaySeconds'.
3481
+ """
3482
+ return pulumi.get(self, "initial_delay_seconds")
3483
+
3484
+ @_builtins.property
3485
+ @pulumi.getter(name="periodSeconds")
3486
+ def period_seconds(self) -> Optional[_builtins.int]:
3487
+ """
3488
+ How often (in seconds) to perform the probe. Default to 10 seconds.
3489
+ Minimum value is 1. Must be less than timeout_seconds.
3490
+ Maps to Kubernetes probe argument 'periodSeconds'.
3491
+ """
3492
+ return pulumi.get(self, "period_seconds")
3493
+
3494
+ @_builtins.property
3495
+ @pulumi.getter(name="successThreshold")
3496
+ def success_threshold(self) -> Optional[_builtins.int]:
3497
+ """
3498
+ Number of consecutive successes before the probe is considered successful.
3499
+ Defaults to 1. Minimum value is 1.
3500
+ Maps to Kubernetes probe argument 'successThreshold'.
3501
+ """
3502
+ return pulumi.get(self, "success_threshold")
3503
+
3504
+ @_builtins.property
3505
+ @pulumi.getter(name="tcpSocket")
3506
+ def tcp_socket(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeTcpSocket']:
3507
+ """
3508
+ TcpSocketAction probes the health of a container by opening a TCP socket
3509
+ connection.
3510
+ Structure is documented below.
3511
+ """
3512
+ return pulumi.get(self, "tcp_socket")
3513
+
3514
+ @_builtins.property
3515
+ @pulumi.getter(name="timeoutSeconds")
3516
+ def timeout_seconds(self) -> Optional[_builtins.int]:
3517
+ """
3518
+ Number of seconds after which the probe times out. Defaults to 1 second.
3519
+ Minimum value is 1. Must be greater or equal to period_seconds.
3520
+ Maps to Kubernetes probe argument 'timeoutSeconds'.
3521
+ """
3522
+ return pulumi.get(self, "timeout_seconds")
3523
+
3524
+
3525
+ @pulumi.output_type
3526
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeExec(dict):
3527
+ def __init__(__self__, *,
3528
+ commands: Optional[Sequence[_builtins.str]] = None):
3529
+ """
3530
+ :param Sequence[_builtins.str] commands: Command is the command line to execute inside the container, the working
3531
+ directory for the command is root ('/') in the container's filesystem.
3532
+ The command is simply exec'd, it is not run inside a shell, so
3533
+ traditional shell instructions ('|', etc) won't work. To use a shell, you
3534
+ need to explicitly call out to that shell. Exit status of 0 is treated as
3535
+ live/healthy and non-zero is unhealthy.
3536
+ """
3537
+ if commands is not None:
3538
+ pulumi.set(__self__, "commands", commands)
3539
+
3540
+ @_builtins.property
3541
+ @pulumi.getter
3542
+ def commands(self) -> Optional[Sequence[_builtins.str]]:
3543
+ """
3544
+ Command is the command line to execute inside the container, the working
3545
+ directory for the command is root ('/') in the container's filesystem.
3546
+ The command is simply exec'd, it is not run inside a shell, so
3547
+ traditional shell instructions ('|', etc) won't work. To use a shell, you
3548
+ need to explicitly call out to that shell. Exit status of 0 is treated as
3549
+ live/healthy and non-zero is unhealthy.
3550
+ """
3551
+ return pulumi.get(self, "commands")
3552
+
3553
+
3554
+ @pulumi.output_type
3555
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeGrpc(dict):
3556
+ def __init__(__self__, *,
3557
+ port: Optional[_builtins.int] = None,
3558
+ service: Optional[_builtins.str] = None):
3559
+ """
3560
+ :param _builtins.int port: Port number of the gRPC service. Number must be in the range 1 to 65535.
3561
+ :param _builtins.str service: Service is the name of the service to place in the gRPC
3562
+ HealthCheckRequest. See
3563
+ https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
3564
+ If this is not specified, the default behavior is defined by gRPC.
3565
+ """
3566
+ if port is not None:
3567
+ pulumi.set(__self__, "port", port)
3568
+ if service is not None:
3569
+ pulumi.set(__self__, "service", service)
3570
+
3571
+ @_builtins.property
3572
+ @pulumi.getter
3573
+ def port(self) -> Optional[_builtins.int]:
3574
+ """
3575
+ Port number of the gRPC service. Number must be in the range 1 to 65535.
3576
+ """
3577
+ return pulumi.get(self, "port")
3578
+
3579
+ @_builtins.property
3580
+ @pulumi.getter
3581
+ def service(self) -> Optional[_builtins.str]:
3582
+ """
3583
+ Service is the name of the service to place in the gRPC
3584
+ HealthCheckRequest. See
3585
+ https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
3586
+ If this is not specified, the default behavior is defined by gRPC.
3587
+ """
3588
+ return pulumi.get(self, "service")
3589
+
3590
+
3591
+ @pulumi.output_type
3592
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet(dict):
3593
+ @staticmethod
3594
+ def __key_warning(key: str):
3595
+ suggest = None
3596
+ if key == "httpHeaders":
3597
+ suggest = "http_headers"
3598
+
3599
+ if suggest:
3600
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet. Access the value via the '{suggest}' property getter instead.")
3601
+
3602
+ def __getitem__(self, key: str) -> Any:
3603
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet.__key_warning(key)
3604
+ return super().__getitem__(key)
3605
+
3606
+ def get(self, key: str, default = None) -> Any:
3607
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet.__key_warning(key)
3608
+ return super().get(key, default)
3609
+
3610
+ def __init__(__self__, *,
3611
+ host: Optional[_builtins.str] = None,
3612
+ http_headers: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetHttpHeader']] = None,
3613
+ path: Optional[_builtins.str] = None,
3614
+ port: Optional[_builtins.int] = None,
3615
+ scheme: Optional[_builtins.str] = None):
3616
+ """
3617
+ :param _builtins.str host: Host name to connect to, defaults to the model serving container's IP.
3618
+ You probably want to set "Host" in httpHeaders instead.
3619
+ :param Sequence['AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetHttpHeaderArgs'] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
3620
+ Structure is documented below.
3621
+ :param _builtins.str path: Path to access on the HTTP server.
3622
+ :param _builtins.int port: Number of the port to access on the container.
3623
+ Number must be in the range 1 to 65535.
3624
+ :param _builtins.str scheme: Scheme to use for connecting to the host.
3625
+ Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS".
3626
+ """
3627
+ if host is not None:
3628
+ pulumi.set(__self__, "host", host)
3629
+ if http_headers is not None:
3630
+ pulumi.set(__self__, "http_headers", http_headers)
3631
+ if path is not None:
3632
+ pulumi.set(__self__, "path", path)
3633
+ if port is not None:
3634
+ pulumi.set(__self__, "port", port)
3635
+ if scheme is not None:
3636
+ pulumi.set(__self__, "scheme", scheme)
3637
+
3638
+ @_builtins.property
3639
+ @pulumi.getter
3640
+ def host(self) -> Optional[_builtins.str]:
3641
+ """
3642
+ Host name to connect to, defaults to the model serving container's IP.
3643
+ You probably want to set "Host" in httpHeaders instead.
3644
+ """
3645
+ return pulumi.get(self, "host")
3646
+
3647
+ @_builtins.property
3648
+ @pulumi.getter(name="httpHeaders")
3649
+ def http_headers(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetHttpHeader']]:
3650
+ """
3651
+ Custom headers to set in the request. HTTP allows repeated headers.
3652
+ Structure is documented below.
3653
+ """
3654
+ return pulumi.get(self, "http_headers")
3655
+
3656
+ @_builtins.property
3657
+ @pulumi.getter
3658
+ def path(self) -> Optional[_builtins.str]:
3659
+ """
3660
+ Path to access on the HTTP server.
3661
+ """
3662
+ return pulumi.get(self, "path")
3663
+
3664
+ @_builtins.property
3665
+ @pulumi.getter
3666
+ def port(self) -> Optional[_builtins.int]:
3667
+ """
3668
+ Number of the port to access on the container.
3669
+ Number must be in the range 1 to 65535.
3670
+ """
3671
+ return pulumi.get(self, "port")
3672
+
3673
+ @_builtins.property
3674
+ @pulumi.getter
3675
+ def scheme(self) -> Optional[_builtins.str]:
3676
+ """
3677
+ Scheme to use for connecting to the host.
3678
+ Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS".
3679
+ """
3680
+ return pulumi.get(self, "scheme")
3681
+
3682
+
3683
+ @pulumi.output_type
3684
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetHttpHeader(dict):
3685
+ def __init__(__self__, *,
3686
+ name: Optional[_builtins.str] = None,
3687
+ value: Optional[_builtins.str] = None):
3688
+ """
3689
+ :param _builtins.str name: The header field name.
3690
+ This will be canonicalized upon output, so case-variant names will be
3691
+ understood as the same header.
3692
+ :param _builtins.str value: The header field value
3693
+ """
3694
+ if name is not None:
3695
+ pulumi.set(__self__, "name", name)
3696
+ if value is not None:
3697
+ pulumi.set(__self__, "value", value)
3698
+
3699
+ @_builtins.property
3700
+ @pulumi.getter
3701
+ def name(self) -> Optional[_builtins.str]:
3702
+ """
3703
+ The header field name.
3704
+ This will be canonicalized upon output, so case-variant names will be
3705
+ understood as the same header.
3706
+ """
3707
+ return pulumi.get(self, "name")
3708
+
3709
+ @_builtins.property
3710
+ @pulumi.getter
3711
+ def value(self) -> Optional[_builtins.str]:
3712
+ """
3713
+ The header field value
3714
+ """
3715
+ return pulumi.get(self, "value")
3716
+
3717
+
3718
+ @pulumi.output_type
3719
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeTcpSocket(dict):
3720
+ def __init__(__self__, *,
3721
+ host: Optional[_builtins.str] = None,
3722
+ port: Optional[_builtins.int] = None):
3723
+ """
3724
+ :param _builtins.str host: Optional: Host name to connect to, defaults to the model serving
3725
+ container's IP.
3726
+ :param _builtins.int port: Number of the port to access on the container.
3727
+ Number must be in the range 1 to 65535.
3728
+ """
3729
+ if host is not None:
3730
+ pulumi.set(__self__, "host", host)
3731
+ if port is not None:
3732
+ pulumi.set(__self__, "port", port)
3733
+
3734
+ @_builtins.property
3735
+ @pulumi.getter
3736
+ def host(self) -> Optional[_builtins.str]:
3737
+ """
3738
+ Optional: Host name to connect to, defaults to the model serving
3739
+ container's IP.
3740
+ """
3741
+ return pulumi.get(self, "host")
3742
+
3743
+ @_builtins.property
3744
+ @pulumi.getter
3745
+ def port(self) -> Optional[_builtins.int]:
3746
+ """
3747
+ Number of the port to access on the container.
3748
+ Number must be in the range 1 to 65535.
3749
+ """
3750
+ return pulumi.get(self, "port")
3751
+
3752
+
1142
3753
  @pulumi.output_type
1143
3754
  class AiFeatureGroupBigQuery(dict):
1144
3755
  @staticmethod
@@ -3411,6 +6022,71 @@ class AiMetadataStoreState(dict):
3411
6022
  return pulumi.get(self, "disk_utilization_bytes")
3412
6023
 
3413
6024
 
6025
+ @pulumi.output_type
6026
+ class AiRagEngineConfigRagManagedDbConfig(dict):
6027
+ def __init__(__self__, *,
6028
+ basic: Optional['outputs.AiRagEngineConfigRagManagedDbConfigBasic'] = None,
6029
+ scaled: Optional['outputs.AiRagEngineConfigRagManagedDbConfigScaled'] = None,
6030
+ unprovisioned: Optional['outputs.AiRagEngineConfigRagManagedDbConfigUnprovisioned'] = None):
6031
+ """
6032
+ :param 'AiRagEngineConfigRagManagedDbConfigBasicArgs' basic: Basic tier is a cost-effective and low compute tier suitable for the following cases: Experimenting with RagManagedDb, Small data size, Latency insensitive workload, Only using RAG Engine with external vector DBs.
6033
+ NOTE: This is the default tier if not explicitly chosen.
6034
+ :param 'AiRagEngineConfigRagManagedDbConfigScaledArgs' scaled: Scaled tier offers production grade performance along with autoscaling functionality. It is suitable for customers with large amounts of data or performance sensitive workloads.
6035
+ :param 'AiRagEngineConfigRagManagedDbConfigUnprovisionedArgs' unprovisioned: Disables the RAG Engine service and deletes all your data held within this service. This will halt the billing of the service.
6036
+ NOTE: Once deleted the data cannot be recovered. To start using RAG Engine again, you will need to update the tier by calling the UpdateRagEngineConfig API.
6037
+ """
6038
+ if basic is not None:
6039
+ pulumi.set(__self__, "basic", basic)
6040
+ if scaled is not None:
6041
+ pulumi.set(__self__, "scaled", scaled)
6042
+ if unprovisioned is not None:
6043
+ pulumi.set(__self__, "unprovisioned", unprovisioned)
6044
+
6045
+ @_builtins.property
6046
+ @pulumi.getter
6047
+ def basic(self) -> Optional['outputs.AiRagEngineConfigRagManagedDbConfigBasic']:
6048
+ """
6049
+ Basic tier is a cost-effective and low compute tier suitable for the following cases: Experimenting with RagManagedDb, Small data size, Latency insensitive workload, Only using RAG Engine with external vector DBs.
6050
+ NOTE: This is the default tier if not explicitly chosen.
6051
+ """
6052
+ return pulumi.get(self, "basic")
6053
+
6054
+ @_builtins.property
6055
+ @pulumi.getter
6056
+ def scaled(self) -> Optional['outputs.AiRagEngineConfigRagManagedDbConfigScaled']:
6057
+ """
6058
+ Scaled tier offers production grade performance along with autoscaling functionality. It is suitable for customers with large amounts of data or performance sensitive workloads.
6059
+ """
6060
+ return pulumi.get(self, "scaled")
6061
+
6062
+ @_builtins.property
6063
+ @pulumi.getter
6064
+ def unprovisioned(self) -> Optional['outputs.AiRagEngineConfigRagManagedDbConfigUnprovisioned']:
6065
+ """
6066
+ Disables the RAG Engine service and deletes all your data held within this service. This will halt the billing of the service.
6067
+ NOTE: Once deleted the data cannot be recovered. To start using RAG Engine again, you will need to update the tier by calling the UpdateRagEngineConfig API.
6068
+ """
6069
+ return pulumi.get(self, "unprovisioned")
6070
+
6071
+
6072
+ @pulumi.output_type
6073
+ class AiRagEngineConfigRagManagedDbConfigBasic(dict):
6074
+ def __init__(__self__):
6075
+ pass
6076
+
6077
+
6078
+ @pulumi.output_type
6079
+ class AiRagEngineConfigRagManagedDbConfigScaled(dict):
6080
+ def __init__(__self__):
6081
+ pass
6082
+
6083
+
6084
+ @pulumi.output_type
6085
+ class AiRagEngineConfigRagManagedDbConfigUnprovisioned(dict):
6086
+ def __init__(__self__):
6087
+ pass
6088
+
6089
+
3414
6090
  @pulumi.output_type
3415
6091
  class AiTensorboardEncryptionSpec(dict):
3416
6092
  @staticmethod