pulumi-gcp 8.40.0a1754721948__py3-none-any.whl → 8.40.0a1754951145__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (157) hide show
  1. pulumi_gcp/__init__.py +128 -0
  2. pulumi_gcp/accesscontextmanager/_inputs.py +24 -4
  3. pulumi_gcp/accesscontextmanager/outputs.py +15 -3
  4. pulumi_gcp/apigee/__init__.py +2 -0
  5. pulumi_gcp/apigee/_inputs.py +1435 -0
  6. pulumi_gcp/apigee/api_product.py +1698 -0
  7. pulumi_gcp/apigee/outputs.py +1081 -0
  8. pulumi_gcp/apigee/security_action.py +1010 -0
  9. pulumi_gcp/artifactregistry/__init__.py +1 -0
  10. pulumi_gcp/artifactregistry/get_docker_images.py +164 -0
  11. pulumi_gcp/artifactregistry/outputs.py +109 -2
  12. pulumi_gcp/artifactregistry/repository.py +6 -6
  13. pulumi_gcp/backupdisasterrecovery/backup_vault.py +56 -0
  14. pulumi_gcp/backupdisasterrecovery/get_backup_vault.py +12 -1
  15. pulumi_gcp/bigquery/_inputs.py +6 -0
  16. pulumi_gcp/bigquery/get_table.py +23 -1
  17. pulumi_gcp/bigquery/outputs.py +4 -0
  18. pulumi_gcp/bigquery/table.py +62 -0
  19. pulumi_gcp/bigqueryanalyticshub/_inputs.py +180 -0
  20. pulumi_gcp/bigqueryanalyticshub/data_exchange.py +80 -0
  21. pulumi_gcp/bigqueryanalyticshub/listing.py +322 -2
  22. pulumi_gcp/bigqueryanalyticshub/listing_subscription.py +32 -0
  23. pulumi_gcp/bigqueryanalyticshub/outputs.py +159 -0
  24. pulumi_gcp/bigtable/__init__.py +1 -0
  25. pulumi_gcp/bigtable/_inputs.py +33 -0
  26. pulumi_gcp/bigtable/outputs.py +36 -0
  27. pulumi_gcp/bigtable/schema_bundle.py +568 -0
  28. pulumi_gcp/cloudfunctions/_inputs.py +48 -0
  29. pulumi_gcp/cloudfunctions/function.py +94 -0
  30. pulumi_gcp/cloudfunctions/get_function.py +23 -1
  31. pulumi_gcp/cloudfunctions/outputs.py +70 -0
  32. pulumi_gcp/cloudrunv2/_inputs.py +20 -0
  33. pulumi_gcp/cloudrunv2/job.py +2 -0
  34. pulumi_gcp/cloudrunv2/outputs.py +25 -0
  35. pulumi_gcp/cloudrunv2/worker_pool.py +2 -0
  36. pulumi_gcp/compute/__init__.py +1 -0
  37. pulumi_gcp/compute/_inputs.py +713 -22
  38. pulumi_gcp/compute/firewall_policy_with_rules.py +66 -0
  39. pulumi_gcp/compute/forwarding_rule.py +0 -21
  40. pulumi_gcp/compute/get_router.py +12 -1
  41. pulumi_gcp/compute/outputs.py +562 -22
  42. pulumi_gcp/compute/preview_feature.py +396 -0
  43. pulumi_gcp/compute/region_url_map.py +392 -0
  44. pulumi_gcp/compute/reservation.py +4 -4
  45. pulumi_gcp/compute/router.py +54 -0
  46. pulumi_gcp/compute/storage_pool.py +154 -0
  47. pulumi_gcp/compute/subnetwork.py +54 -0
  48. pulumi_gcp/config/__init__.pyi +2 -0
  49. pulumi_gcp/config/vars.py +4 -0
  50. pulumi_gcp/container/_inputs.py +278 -8
  51. pulumi_gcp/container/cluster.py +61 -21
  52. pulumi_gcp/container/get_cluster.py +12 -1
  53. pulumi_gcp/container/outputs.py +352 -8
  54. pulumi_gcp/dataproc/_inputs.py +249 -14
  55. pulumi_gcp/dataproc/batch.py +6 -0
  56. pulumi_gcp/dataproc/cluster.py +2 -0
  57. pulumi_gcp/dataproc/outputs.py +215 -12
  58. pulumi_gcp/dataproc/session_template.py +14 -2
  59. pulumi_gcp/developerconnect/__init__.py +1 -0
  60. pulumi_gcp/developerconnect/_inputs.py +583 -0
  61. pulumi_gcp/developerconnect/insights_config.py +895 -0
  62. pulumi_gcp/developerconnect/outputs.py +442 -0
  63. pulumi_gcp/diagflow/__init__.py +1 -0
  64. pulumi_gcp/diagflow/_inputs.py +1165 -58
  65. pulumi_gcp/diagflow/cx_generator.py +636 -0
  66. pulumi_gcp/diagflow/cx_tool.py +2 -2
  67. pulumi_gcp/diagflow/cx_webhook.py +380 -36
  68. pulumi_gcp/diagflow/outputs.py +848 -25
  69. pulumi_gcp/discoveryengine/__init__.py +2 -0
  70. pulumi_gcp/discoveryengine/_inputs.py +465 -0
  71. pulumi_gcp/discoveryengine/cmek_config.py +707 -0
  72. pulumi_gcp/discoveryengine/outputs.py +412 -0
  73. pulumi_gcp/discoveryengine/recommendation_engine.py +813 -0
  74. pulumi_gcp/firestore/field.py +6 -6
  75. pulumi_gcp/gemini/gemini_gcp_enablement_setting.py +107 -9
  76. pulumi_gcp/gemini/gemini_gcp_enablement_setting_binding.py +2 -2
  77. pulumi_gcp/gkehub/membership_binding.py +6 -6
  78. pulumi_gcp/gkehub/membership_rbac_role_binding.py +4 -4
  79. pulumi_gcp/gkehub/namespace.py +4 -4
  80. pulumi_gcp/gkehub/scope_rbac_role_binding.py +8 -8
  81. pulumi_gcp/iam/__init__.py +4 -0
  82. pulumi_gcp/iam/_inputs.py +98 -0
  83. pulumi_gcp/iam/get_workforce_pool_iam_policy.py +161 -0
  84. pulumi_gcp/iam/outputs.py +56 -0
  85. pulumi_gcp/iam/workforce_pool_iam_binding.py +761 -0
  86. pulumi_gcp/iam/workforce_pool_iam_member.py +761 -0
  87. pulumi_gcp/iam/workforce_pool_iam_policy.py +600 -0
  88. pulumi_gcp/iap/tunnel_dest_group.py +2 -2
  89. pulumi_gcp/integrationconnectors/managed_zone.py +8 -8
  90. pulumi_gcp/looker/instance.py +28 -7
  91. pulumi_gcp/managedkafka/_inputs.py +127 -0
  92. pulumi_gcp/managedkafka/cluster.py +131 -1
  93. pulumi_gcp/managedkafka/connect_cluster.py +4 -4
  94. pulumi_gcp/managedkafka/connector.py +4 -4
  95. pulumi_gcp/managedkafka/outputs.py +128 -0
  96. pulumi_gcp/memorystore/instance.py +8 -12
  97. pulumi_gcp/modelarmor/__init__.py +1 -0
  98. pulumi_gcp/modelarmor/_inputs.py +683 -0
  99. pulumi_gcp/modelarmor/floorsetting.py +736 -0
  100. pulumi_gcp/modelarmor/outputs.py +618 -0
  101. pulumi_gcp/networkconnectivity/_inputs.py +60 -0
  102. pulumi_gcp/networkconnectivity/internal_range.py +136 -0
  103. pulumi_gcp/networkconnectivity/outputs.py +55 -0
  104. pulumi_gcp/networkconnectivity/spoke.py +14 -14
  105. pulumi_gcp/oracledatabase/__init__.py +2 -0
  106. pulumi_gcp/oracledatabase/autonomous_database.py +262 -38
  107. pulumi_gcp/oracledatabase/cloud_vm_cluster.py +314 -50
  108. pulumi_gcp/oracledatabase/get_autonomous_database.py +23 -1
  109. pulumi_gcp/oracledatabase/get_cloud_vm_cluster.py +34 -1
  110. pulumi_gcp/oracledatabase/odb_network.py +721 -0
  111. pulumi_gcp/oracledatabase/odb_subnet.py +803 -0
  112. pulumi_gcp/oracledatabase/outputs.py +83 -0
  113. pulumi_gcp/orgpolicy/policy.py +2 -2
  114. pulumi_gcp/parametermanager/parameter_version.py +62 -0
  115. pulumi_gcp/parametermanager/regional_parameter_version.py +64 -0
  116. pulumi_gcp/provider.py +20 -0
  117. pulumi_gcp/pubsub/subscription.py +46 -6
  118. pulumi_gcp/pubsub/topic.py +36 -0
  119. pulumi_gcp/pulumi-plugin.json +1 -1
  120. pulumi_gcp/redis/cluster.py +70 -0
  121. pulumi_gcp/redis/get_cluster.py +12 -1
  122. pulumi_gcp/redis/instance.py +8 -12
  123. pulumi_gcp/secretmanager/get_regional_secret.py +12 -1
  124. pulumi_gcp/secretmanager/get_secret.py +12 -1
  125. pulumi_gcp/secretmanager/outputs.py +30 -0
  126. pulumi_gcp/secretmanager/regional_secret.py +61 -0
  127. pulumi_gcp/secretmanager/secret.py +61 -0
  128. pulumi_gcp/securesourcemanager/branch_rule.py +16 -8
  129. pulumi_gcp/securesourcemanager/instance.py +112 -4
  130. pulumi_gcp/securesourcemanager/repository.py +112 -8
  131. pulumi_gcp/serviceaccount/get_account_key.py +1 -0
  132. pulumi_gcp/sql/_inputs.py +6 -6
  133. pulumi_gcp/sql/database.py +0 -12
  134. pulumi_gcp/sql/outputs.py +4 -4
  135. pulumi_gcp/storage/__init__.py +2 -0
  136. pulumi_gcp/storage/_inputs.py +451 -0
  137. pulumi_gcp/storage/bucket.py +7 -7
  138. pulumi_gcp/storage/bucket_object.py +34 -0
  139. pulumi_gcp/storage/get_bucket_object.py +12 -1
  140. pulumi_gcp/storage/get_bucket_object_content.py +12 -1
  141. pulumi_gcp/storage/get_insights_dataset_config.py +363 -0
  142. pulumi_gcp/storage/insights_dataset_config.py +1280 -0
  143. pulumi_gcp/storage/outputs.py +619 -0
  144. pulumi_gcp/vertex/__init__.py +1 -0
  145. pulumi_gcp/vertex/_inputs.py +3646 -3
  146. pulumi_gcp/vertex/ai_endpoint.py +4 -4
  147. pulumi_gcp/vertex/ai_endpoint_with_model_garden_deployment.py +940 -0
  148. pulumi_gcp/vertex/ai_feature_online_store_featureview.py +4 -4
  149. pulumi_gcp/vertex/outputs.py +2609 -2
  150. pulumi_gcp/vmwareengine/network_peering.py +7 -7
  151. pulumi_gcp/workbench/_inputs.py +118 -0
  152. pulumi_gcp/workbench/instance.py +171 -2
  153. pulumi_gcp/workbench/outputs.py +91 -0
  154. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.40.0a1754951145.dist-info}/METADATA +1 -1
  155. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.40.0a1754951145.dist-info}/RECORD +157 -138
  156. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.40.0a1754951145.dist-info}/WHEEL +0 -0
  157. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.40.0a1754951145.dist-info}/top_level.txt +0 -0
@@ -32,6 +32,35 @@ __all__ = [
32
32
  'AiEndpointPredictRequestResponseLoggingConfig',
33
33
  'AiEndpointPredictRequestResponseLoggingConfigBigqueryDestination',
34
34
  'AiEndpointPrivateServiceConnectConfig',
35
+ 'AiEndpointWithModelGardenDeploymentDeployConfig',
36
+ 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources',
37
+ 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec',
38
+ 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec',
39
+ 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity',
40
+ 'AiEndpointWithModelGardenDeploymentEndpointConfig',
41
+ 'AiEndpointWithModelGardenDeploymentModelConfig',
42
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpec',
43
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecEnv',
44
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort',
45
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe',
46
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeExec',
47
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeGrpc',
48
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet',
49
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetHttpHeader',
50
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeTcpSocket',
51
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe',
52
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeExec',
53
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeGrpc',
54
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet',
55
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetHttpHeader',
56
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeTcpSocket',
57
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort',
58
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe',
59
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeExec',
60
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeGrpc',
61
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet',
62
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetHttpHeader',
63
+ 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeTcpSocket',
35
64
  'AiFeatureGroupBigQuery',
36
65
  'AiFeatureGroupBigQueryBigQuerySource',
37
66
  'AiFeatureGroupIamBindingCondition',
@@ -764,7 +793,7 @@ class AiEndpointDeployedModelDedicatedResourceMachineSpec(dict):
764
793
  :param _builtins.str accelerator_type: (Output)
765
794
  The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
766
795
  :param _builtins.str machine_type: (Output)
767
- The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. TODO(rsurowka): Try to better unify the required vs optional.
796
+ The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. TODO: Try to better unify the required vs optional.
768
797
  """
769
798
  if accelerator_count is not None:
770
799
  pulumi.set(__self__, "accelerator_count", accelerator_count)
@@ -796,7 +825,7 @@ class AiEndpointDeployedModelDedicatedResourceMachineSpec(dict):
796
825
  def machine_type(self) -> Optional[_builtins.str]:
797
826
  """
798
827
  (Output)
799
- The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. TODO(rsurowka): Try to better unify the required vs optional.
828
+ The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) See the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. TODO: Try to better unify the required vs optional.
800
829
  """
801
830
  return pulumi.get(self, "machine_type")
802
831
 
@@ -1139,6 +1168,2584 @@ class AiEndpointPrivateServiceConnectConfig(dict):
1139
1168
  return pulumi.get(self, "project_allowlists")
1140
1169
 
1141
1170
 
1171
+ @pulumi.output_type
1172
+ class AiEndpointWithModelGardenDeploymentDeployConfig(dict):
1173
+ @staticmethod
1174
+ def __key_warning(key: str):
1175
+ suggest = None
1176
+ if key == "dedicatedResources":
1177
+ suggest = "dedicated_resources"
1178
+ elif key == "fastTryoutEnabled":
1179
+ suggest = "fast_tryout_enabled"
1180
+ elif key == "systemLabels":
1181
+ suggest = "system_labels"
1182
+
1183
+ if suggest:
1184
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentDeployConfig. Access the value via the '{suggest}' property getter instead.")
1185
+
1186
+ def __getitem__(self, key: str) -> Any:
1187
+ AiEndpointWithModelGardenDeploymentDeployConfig.__key_warning(key)
1188
+ return super().__getitem__(key)
1189
+
1190
+ def get(self, key: str, default = None) -> Any:
1191
+ AiEndpointWithModelGardenDeploymentDeployConfig.__key_warning(key)
1192
+ return super().get(key, default)
1193
+
1194
+ def __init__(__self__, *,
1195
+ dedicated_resources: Optional['outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources'] = None,
1196
+ fast_tryout_enabled: Optional[_builtins.bool] = None,
1197
+ system_labels: Optional[Mapping[str, _builtins.str]] = None):
1198
+ """
1199
+ :param 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesArgs' dedicated_resources: A description of resources that are dedicated to a DeployedModel or
1200
+ DeployedIndex, and that need a higher degree of manual configuration.
1201
+ Structure is documented below.
1202
+ :param _builtins.bool fast_tryout_enabled: If true, enable the QMT fast tryout feature for this model if possible.
1203
+ :param Mapping[str, _builtins.str] system_labels: System labels for Model Garden deployments.
1204
+ These labels are managed by Google and for tracking purposes only.
1205
+ """
1206
+ if dedicated_resources is not None:
1207
+ pulumi.set(__self__, "dedicated_resources", dedicated_resources)
1208
+ if fast_tryout_enabled is not None:
1209
+ pulumi.set(__self__, "fast_tryout_enabled", fast_tryout_enabled)
1210
+ if system_labels is not None:
1211
+ pulumi.set(__self__, "system_labels", system_labels)
1212
+
1213
+ @_builtins.property
1214
+ @pulumi.getter(name="dedicatedResources")
1215
+ def dedicated_resources(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources']:
1216
+ """
1217
+ A description of resources that are dedicated to a DeployedModel or
1218
+ DeployedIndex, and that need a higher degree of manual configuration.
1219
+ Structure is documented below.
1220
+ """
1221
+ return pulumi.get(self, "dedicated_resources")
1222
+
1223
+ @_builtins.property
1224
+ @pulumi.getter(name="fastTryoutEnabled")
1225
+ def fast_tryout_enabled(self) -> Optional[_builtins.bool]:
1226
+ """
1227
+ If true, enable the QMT fast tryout feature for this model if possible.
1228
+ """
1229
+ return pulumi.get(self, "fast_tryout_enabled")
1230
+
1231
+ @_builtins.property
1232
+ @pulumi.getter(name="systemLabels")
1233
+ def system_labels(self) -> Optional[Mapping[str, _builtins.str]]:
1234
+ """
1235
+ System labels for Model Garden deployments.
1236
+ These labels are managed by Google and for tracking purposes only.
1237
+ """
1238
+ return pulumi.get(self, "system_labels")
1239
+
1240
+
1241
+ @pulumi.output_type
1242
+ class AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources(dict):
1243
+ @staticmethod
1244
+ def __key_warning(key: str):
1245
+ suggest = None
1246
+ if key == "machineSpec":
1247
+ suggest = "machine_spec"
1248
+ elif key == "minReplicaCount":
1249
+ suggest = "min_replica_count"
1250
+ elif key == "autoscalingMetricSpecs":
1251
+ suggest = "autoscaling_metric_specs"
1252
+ elif key == "maxReplicaCount":
1253
+ suggest = "max_replica_count"
1254
+ elif key == "requiredReplicaCount":
1255
+ suggest = "required_replica_count"
1256
+
1257
+ if suggest:
1258
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources. Access the value via the '{suggest}' property getter instead.")
1259
+
1260
+ def __getitem__(self, key: str) -> Any:
1261
+ AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources.__key_warning(key)
1262
+ return super().__getitem__(key)
1263
+
1264
+ def get(self, key: str, default = None) -> Any:
1265
+ AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResources.__key_warning(key)
1266
+ return super().get(key, default)
1267
+
1268
+ def __init__(__self__, *,
1269
+ machine_spec: 'outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec',
1270
+ min_replica_count: _builtins.int,
1271
+ autoscaling_metric_specs: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec']] = None,
1272
+ max_replica_count: Optional[_builtins.int] = None,
1273
+ required_replica_count: Optional[_builtins.int] = None,
1274
+ spot: Optional[_builtins.bool] = None):
1275
+ """
1276
+ :param 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecArgs' machine_spec: Specification of a single machine.
1277
+ Structure is documented below.
1278
+ :param _builtins.int min_replica_count: The minimum number of machine replicas that will be always deployed on.
1279
+ This value must be greater than or equal to 1.
1280
+ If traffic increases, it may dynamically be deployed onto more replicas,
1281
+ and as traffic decreases, some of these extra replicas may be freed.
1282
+ :param Sequence['AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpecArgs'] autoscaling_metric_specs: The metric specifications that overrides a resource
1283
+ utilization metric (CPU utilization, accelerator's duty cycle, and so on)
1284
+ target value (default to 60 if not set). At most one entry is allowed per
1285
+ metric.
1286
+ If machine_spec.accelerator_count is
1287
+ above 0, the autoscaling will be based on both CPU utilization and
1288
+ accelerator's duty cycle metrics and scale up when either metrics exceeds
1289
+ its target value while scale down if both metrics are under their target
1290
+ value. The default target value is 60 for both metrics.
1291
+ If machine_spec.accelerator_count is
1292
+ 0, the autoscaling will be based on CPU utilization metric only with
1293
+ default target value 60 if not explicitly set.
1294
+ For example, in the case of Online Prediction, if you want to override
1295
+ target CPU utilization to 80, you should set
1296
+ autoscaling_metric_specs.metric_name
1297
+ to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and
1298
+ autoscaling_metric_specs.target to `80`.
1299
+ Structure is documented below.
1300
+ :param _builtins.int max_replica_count: The maximum number of replicas that may be deployed on when the traffic
1301
+ against it increases. If the requested value is too large, the deployment
1302
+ will error, but if deployment succeeds then the ability to scale to that
1303
+ many replicas is guaranteed (barring service outages). If traffic increases
1304
+ beyond what its replicas at maximum may handle, a portion of the traffic
1305
+ will be dropped. If this value is not provided, will use
1306
+ min_replica_count as the default value.
1307
+ The value of this field impacts the charge against Vertex CPU and GPU
1308
+ quotas. Specifically, you will be charged for (max_replica_count *
1309
+ number of cores in the selected machine type) and (max_replica_count *
1310
+ number of GPUs per replica in the selected machine type).
1311
+ :param _builtins.int required_replica_count: Number of required available replicas for the deployment to succeed.
1312
+ This field is only needed when partial deployment/mutation is
1313
+ desired. If set, the deploy/mutate operation will succeed once
1314
+ available_replica_count reaches required_replica_count, and the rest of
1315
+ the replicas will be retried. If not set, the default
1316
+ required_replica_count will be min_replica_count.
1317
+ :param _builtins.bool spot: If true, schedule the deployment workload on [spot
1318
+ VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms).
1319
+ """
1320
+ pulumi.set(__self__, "machine_spec", machine_spec)
1321
+ pulumi.set(__self__, "min_replica_count", min_replica_count)
1322
+ if autoscaling_metric_specs is not None:
1323
+ pulumi.set(__self__, "autoscaling_metric_specs", autoscaling_metric_specs)
1324
+ if max_replica_count is not None:
1325
+ pulumi.set(__self__, "max_replica_count", max_replica_count)
1326
+ if required_replica_count is not None:
1327
+ pulumi.set(__self__, "required_replica_count", required_replica_count)
1328
+ if spot is not None:
1329
+ pulumi.set(__self__, "spot", spot)
1330
+
1331
+ @_builtins.property
1332
+ @pulumi.getter(name="machineSpec")
1333
+ def machine_spec(self) -> 'outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec':
1334
+ """
1335
+ Specification of a single machine.
1336
+ Structure is documented below.
1337
+ """
1338
+ return pulumi.get(self, "machine_spec")
1339
+
1340
+ @_builtins.property
1341
+ @pulumi.getter(name="minReplicaCount")
1342
+ def min_replica_count(self) -> _builtins.int:
1343
+ """
1344
+ The minimum number of machine replicas that will be always deployed on.
1345
+ This value must be greater than or equal to 1.
1346
+ If traffic increases, it may dynamically be deployed onto more replicas,
1347
+ and as traffic decreases, some of these extra replicas may be freed.
1348
+ """
1349
+ return pulumi.get(self, "min_replica_count")
1350
+
1351
+ @_builtins.property
1352
+ @pulumi.getter(name="autoscalingMetricSpecs")
1353
+ def autoscaling_metric_specs(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec']]:
1354
+ """
1355
+ The metric specifications that overrides a resource
1356
+ utilization metric (CPU utilization, accelerator's duty cycle, and so on)
1357
+ target value (default to 60 if not set). At most one entry is allowed per
1358
+ metric.
1359
+ If machine_spec.accelerator_count is
1360
+ above 0, the autoscaling will be based on both CPU utilization and
1361
+ accelerator's duty cycle metrics and scale up when either metrics exceeds
1362
+ its target value while scale down if both metrics are under their target
1363
+ value. The default target value is 60 for both metrics.
1364
+ If machine_spec.accelerator_count is
1365
+ 0, the autoscaling will be based on CPU utilization metric only with
1366
+ default target value 60 if not explicitly set.
1367
+ For example, in the case of Online Prediction, if you want to override
1368
+ target CPU utilization to 80, you should set
1369
+ autoscaling_metric_specs.metric_name
1370
+ to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and
1371
+ autoscaling_metric_specs.target to `80`.
1372
+ Structure is documented below.
1373
+ """
1374
+ return pulumi.get(self, "autoscaling_metric_specs")
1375
+
1376
+ @_builtins.property
1377
+ @pulumi.getter(name="maxReplicaCount")
1378
+ def max_replica_count(self) -> Optional[_builtins.int]:
1379
+ """
1380
+ The maximum number of replicas that may be deployed on when the traffic
1381
+ against it increases. If the requested value is too large, the deployment
1382
+ will error, but if deployment succeeds then the ability to scale to that
1383
+ many replicas is guaranteed (barring service outages). If traffic increases
1384
+ beyond what its replicas at maximum may handle, a portion of the traffic
1385
+ will be dropped. If this value is not provided, will use
1386
+ min_replica_count as the default value.
1387
+ The value of this field impacts the charge against Vertex CPU and GPU
1388
+ quotas. Specifically, you will be charged for (max_replica_count *
1389
+ number of cores in the selected machine type) and (max_replica_count *
1390
+ number of GPUs per replica in the selected machine type).
1391
+ """
1392
+ return pulumi.get(self, "max_replica_count")
1393
+
1394
+ @_builtins.property
1395
+ @pulumi.getter(name="requiredReplicaCount")
1396
+ def required_replica_count(self) -> Optional[_builtins.int]:
1397
+ """
1398
+ Number of required available replicas for the deployment to succeed.
1399
+ This field is only needed when partial deployment/mutation is
1400
+ desired. If set, the deploy/mutate operation will succeed once
1401
+ available_replica_count reaches required_replica_count, and the rest of
1402
+ the replicas will be retried. If not set, the default
1403
+ required_replica_count will be min_replica_count.
1404
+ """
1405
+ return pulumi.get(self, "required_replica_count")
1406
+
1407
+ @_builtins.property
1408
+ @pulumi.getter
1409
+ def spot(self) -> Optional[_builtins.bool]:
1410
+ """
1411
+ If true, schedule the deployment workload on [spot
1412
+ VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms).
1413
+ """
1414
+ return pulumi.get(self, "spot")
1415
+
1416
+
1417
+ @pulumi.output_type
1418
+ class AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec(dict):
1419
+ @staticmethod
1420
+ def __key_warning(key: str):
1421
+ suggest = None
1422
+ if key == "metricName":
1423
+ suggest = "metric_name"
1424
+
1425
+ if suggest:
1426
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec. Access the value via the '{suggest}' property getter instead.")
1427
+
1428
+ def __getitem__(self, key: str) -> Any:
1429
+ AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec.__key_warning(key)
1430
+ return super().__getitem__(key)
1431
+
1432
+ def get(self, key: str, default = None) -> Any:
1433
+ AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesAutoscalingMetricSpec.__key_warning(key)
1434
+ return super().get(key, default)
1435
+
1436
+ def __init__(__self__, *,
1437
+ metric_name: _builtins.str,
1438
+ target: Optional[_builtins.int] = None):
1439
+ """
1440
+ :param _builtins.str metric_name: The resource metric name.
1441
+ Supported metrics:
1442
+ * For Online Prediction:
1443
+ * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`
1444
+ * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
1445
+ :param _builtins.int target: The target resource utilization in percentage (1% - 100%) for the given
1446
+ metric; once the real usage deviates from the target by a certain
1447
+ percentage, the machine replicas change. The default value is 60
1448
+ (representing 60%) if not provided.
1449
+ """
1450
+ pulumi.set(__self__, "metric_name", metric_name)
1451
+ if target is not None:
1452
+ pulumi.set(__self__, "target", target)
1453
+
1454
+ @_builtins.property
1455
+ @pulumi.getter(name="metricName")
1456
+ def metric_name(self) -> _builtins.str:
1457
+ """
1458
+ The resource metric name.
1459
+ Supported metrics:
1460
+ * For Online Prediction:
1461
+ * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`
1462
+ * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
1463
+ """
1464
+ return pulumi.get(self, "metric_name")
1465
+
1466
+ @_builtins.property
1467
+ @pulumi.getter
1468
+ def target(self) -> Optional[_builtins.int]:
1469
+ """
1470
+ The target resource utilization in percentage (1% - 100%) for the given
1471
+ metric; once the real usage deviates from the target by a certain
1472
+ percentage, the machine replicas change. The default value is 60
1473
+ (representing 60%) if not provided.
1474
+ """
1475
+ return pulumi.get(self, "target")
1476
+
1477
+
1478
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec(dict):
    """
    Specification of a single machine for a dedicated-resources Model Garden
    deployment: machine type, attached accelerators, reservation affinity and
    (for TPUs) topology.
    """

    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase wire keys to the snake_case property getters that
        # should be used instead of raw dict access.
        suggest = {
            "acceleratorCount": "accelerator_count",
            "acceleratorType": "accelerator_type",
            "machineType": "machine_type",
            "multihostGpuNodeCount": "multihost_gpu_node_count",
            "reservationAffinity": "reservation_affinity",
            "tpuTopology": "tpu_topology",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpec.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 accelerator_count: Optional[_builtins.int] = None,
                 accelerator_type: Optional[_builtins.str] = None,
                 machine_type: Optional[_builtins.str] = None,
                 multihost_gpu_node_count: Optional[_builtins.int] = None,
                 reservation_affinity: Optional['outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity'] = None,
                 tpu_topology: Optional[_builtins.str] = None):
        """
        :param _builtins.int accelerator_count: The number of accelerators to attach to the machine.
        :param _builtins.str accelerator_type: Possible values:
               ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100,
               NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4,
               NVIDIA_TESLA_A100, NVIDIA_A100_80GB, NVIDIA_L4, NVIDIA_H100_80GB,
               NVIDIA_H100_MEGA_80GB, NVIDIA_H200_141GB, NVIDIA_B200,
               TPU_V2, TPU_V3, TPU_V4_POD, TPU_V5_LITEPOD
        :param _builtins.str machine_type: The type of the machine. See the lists of machine types
               supported for
               [prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types)
               and for [custom
               training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types).
               For DeployedModel this field is optional and defaults to
               `n1-standard-2`; for BatchPredictionJob or as part of
               WorkerPoolSpec it is required.
        :param _builtins.int multihost_gpu_node_count: The number of nodes per replica for multihost GPU deployments.
        :param 'AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinityArgs' reservation_affinity: A ReservationAffinity can be used to configure a Vertex AI resource (e.g., a
               DeployedModel) to draw its Compute Engine resources from a Shared
               Reservation, or exclusively from on-demand capacity.
               Structure is documented below.
        :param _builtins.str tpu_topology: The topology of the TPUs, corresponding to the TPU topologies
               available from GKE. (Example: tpu_topology: "2x2x1").
        """
        # Only record fields the caller actually supplied.
        for prop, value in (
            ("accelerator_count", accelerator_count),
            ("accelerator_type", accelerator_type),
            ("machine_type", machine_type),
            ("multihost_gpu_node_count", multihost_gpu_node_count),
            ("reservation_affinity", reservation_affinity),
            ("tpu_topology", tpu_topology),
        ):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @_builtins.property
    @pulumi.getter(name="acceleratorCount")
    def accelerator_count(self) -> Optional[_builtins.int]:
        """
        The number of accelerators to attach to the machine.
        """
        return pulumi.get(self, "accelerator_count")

    @_builtins.property
    @pulumi.getter(name="acceleratorType")
    def accelerator_type(self) -> Optional[_builtins.str]:
        """
        Possible values:
        ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100,
        NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4, NVIDIA_TESLA_A100,
        NVIDIA_A100_80GB, NVIDIA_L4, NVIDIA_H100_80GB, NVIDIA_H100_MEGA_80GB,
        NVIDIA_H200_141GB, NVIDIA_B200, TPU_V2, TPU_V3, TPU_V4_POD,
        TPU_V5_LITEPOD
        """
        return pulumi.get(self, "accelerator_type")

    @_builtins.property
    @pulumi.getter(name="machineType")
    def machine_type(self) -> Optional[_builtins.str]:
        """
        The type of the machine. See the lists of machine types supported for
        [prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types)
        and for [custom
        training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types).
        For DeployedModel this field is optional and defaults to
        `n1-standard-2`; for BatchPredictionJob or as part of WorkerPoolSpec it
        is required.
        """
        return pulumi.get(self, "machine_type")

    @_builtins.property
    @pulumi.getter(name="multihostGpuNodeCount")
    def multihost_gpu_node_count(self) -> Optional[_builtins.int]:
        """
        The number of nodes per replica for multihost GPU deployments.
        """
        return pulumi.get(self, "multihost_gpu_node_count")

    @_builtins.property
    @pulumi.getter(name="reservationAffinity")
    def reservation_affinity(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity']:
        """
        A ReservationAffinity can be used to configure a Vertex AI resource
        (e.g., a DeployedModel) to draw its Compute Engine resources from a
        Shared Reservation, or exclusively from on-demand capacity.
        Structure is documented below.
        """
        return pulumi.get(self, "reservation_affinity")

    @_builtins.property
    @pulumi.getter(name="tpuTopology")
    def tpu_topology(self) -> Optional[_builtins.str]:
        """
        The topology of the TPUs, corresponding to the TPU topologies available
        from GKE. (Example: tpu_topology: "2x2x1").
        """
        return pulumi.get(self, "tpu_topology")

@pulumi.output_type
class AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity(dict):
    """
    Reservation affinity for a machine spec: controls whether a Vertex AI
    resource draws Compute Engine capacity from a shared reservation, a
    specific reservation, or on-demand capacity.
    """

    @staticmethod
    def __key_warning(key: str):
        # Steer dict-style access on the camelCase wire key toward the
        # snake_case property getter.
        suggest = {"reservationAffinityType": "reservation_affinity_type"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        AiEndpointWithModelGardenDeploymentDeployConfigDedicatedResourcesMachineSpecReservationAffinity.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 reservation_affinity_type: _builtins.str,
                 key: Optional[_builtins.str] = None,
                 values: Optional[Sequence[_builtins.str]] = None):
        """
        :param _builtins.str reservation_affinity_type: Specifies the reservation affinity type.
               Possible values:
               TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION,
               SPECIFIC_RESERVATION
        :param _builtins.str key: Corresponds to the label key of a reservation resource. To target a
               SPECIFIC_RESERVATION by name, use `compute.googleapis.com/reservation-name`
               as the key and specify the name of your reservation as its value.
        :param Sequence[_builtins.str] values: Corresponds to the label values of a reservation resource. This must be the
               full resource name of the reservation or reservation block.
        """
        # The affinity type is required; key/values only apply to specific
        # reservations and are stored when supplied.
        pulumi.set(__self__, "reservation_affinity_type", reservation_affinity_type)
        for prop, value in (("key", key), ("values", values)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @_builtins.property
    @pulumi.getter(name="reservationAffinityType")
    def reservation_affinity_type(self) -> _builtins.str:
        """
        Specifies the reservation affinity type.
        Possible values:
        TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION
        """
        return pulumi.get(self, "reservation_affinity_type")

    @_builtins.property
    @pulumi.getter
    def key(self) -> Optional[_builtins.str]:
        """
        Corresponds to the label key of a reservation resource. To target a
        SPECIFIC_RESERVATION by name, use `compute.googleapis.com/reservation-name`
        as the key and specify the name of your reservation as its value.
        """
        return pulumi.get(self, "key")

    @_builtins.property
    @pulumi.getter
    def values(self) -> Optional[Sequence[_builtins.str]]:
        """
        Corresponds to the label values of a reservation resource. This must be
        the full resource name of the reservation or reservation block.
        """
        return pulumi.get(self, "values")

@pulumi.output_type
class AiEndpointWithModelGardenDeploymentEndpointConfig(dict):
    """
    Endpoint-level settings for a Model Garden deployment: display name and
    whether a dedicated DNS endpoint is provisioned.
    """

    @staticmethod
    def __key_warning(key: str):
        # Steer dict-style access on camelCase wire keys toward the snake_case
        # property getters.
        suggest = {
            "dedicatedEndpointEnabled": "dedicated_endpoint_enabled",
            "endpointDisplayName": "endpoint_display_name",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentEndpointConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointWithModelGardenDeploymentEndpointConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        AiEndpointWithModelGardenDeploymentEndpointConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 dedicated_endpoint_enabled: Optional[_builtins.bool] = None,
                 endpoint_display_name: Optional[_builtins.str] = None):
        """
        :param _builtins.bool dedicated_endpoint_enabled: If true, the endpoint will be exposed through a dedicated
               DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS
               will be isolated from other users' traffic and will have better
               performance and reliability. Note: Once you enabled dedicated endpoint,
               you won't be able to send request to the shared DNS
               {region}-aiplatform.googleapis.com. The limitations will be removed soon.
        :param _builtins.str endpoint_display_name: The user-specified display name of the endpoint. If not set, a
               default name will be used.
        """
        # Only record fields the caller actually supplied.
        for prop, value in (
            ("dedicated_endpoint_enabled", dedicated_endpoint_enabled),
            ("endpoint_display_name", endpoint_display_name),
        ):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @_builtins.property
    @pulumi.getter(name="dedicatedEndpointEnabled")
    def dedicated_endpoint_enabled(self) -> Optional[_builtins.bool]:
        """
        If true, the endpoint will be exposed through a dedicated
        DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS
        will be isolated from other users' traffic and will have better
        performance and reliability. Note: Once you enabled dedicated endpoint,
        you won't be able to send request to the shared DNS
        {region}-aiplatform.googleapis.com. The limitations will be removed soon.
        """
        return pulumi.get(self, "dedicated_endpoint_enabled")

    @_builtins.property
    @pulumi.getter(name="endpointDisplayName")
    def endpoint_display_name(self) -> Optional[_builtins.str]:
        """
        The user-specified display name of the endpoint. If not set, a
        default name will be used.
        """
        return pulumi.get(self, "endpoint_display_name")

@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfig(dict):
    """
    Model-level settings for a Model Garden deployment: EULA acceptance,
    serving-container specification, Hugging Face access/caching, and the
    uploaded model's display name.
    """

    @staticmethod
    def __key_warning(key: str):
        # Steer dict-style access on camelCase wire keys toward the snake_case
        # property getters.
        suggest = {
            "acceptEula": "accept_eula",
            "containerSpec": "container_spec",
            "huggingFaceAccessToken": "hugging_face_access_token",
            "huggingFaceCacheEnabled": "hugging_face_cache_enabled",
            "modelDisplayName": "model_display_name",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 accept_eula: Optional[_builtins.bool] = None,
                 container_spec: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpec'] = None,
                 hugging_face_access_token: Optional[_builtins.str] = None,
                 hugging_face_cache_enabled: Optional[_builtins.bool] = None,
                 model_display_name: Optional[_builtins.str] = None):
        """
        :param _builtins.bool accept_eula: Whether the user accepts the End User License Agreement (EULA)
               for the model.
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecArgs' container_spec: Specification of a container for serving predictions. Some fields in this
               message correspond to fields in the [Kubernetes Container v1 core
               specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
               Structure is documented below.
        :param _builtins.str hugging_face_access_token: The Hugging Face read access token used to access the model
               artifacts of gated models.
        :param _builtins.bool hugging_face_cache_enabled: If true, the model will deploy with a cached version instead of directly
               downloading the model artifacts from Hugging Face. This is suitable for
               VPC-SC users with limited internet access.
        :param _builtins.str model_display_name: The user-specified display name of the uploaded model. If not
               set, a default name will be used.
        """
        # Only record fields the caller actually supplied.
        for prop, value in (
            ("accept_eula", accept_eula),
            ("container_spec", container_spec),
            ("hugging_face_access_token", hugging_face_access_token),
            ("hugging_face_cache_enabled", hugging_face_cache_enabled),
            ("model_display_name", model_display_name),
        ):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @_builtins.property
    @pulumi.getter(name="acceptEula")
    def accept_eula(self) -> Optional[_builtins.bool]:
        """
        Whether the user accepts the End User License Agreement (EULA)
        for the model.
        """
        return pulumi.get(self, "accept_eula")

    @_builtins.property
    @pulumi.getter(name="containerSpec")
    def container_spec(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpec']:
        """
        Specification of a container for serving predictions. Some fields in
        this message correspond to fields in the [Kubernetes Container v1 core
        specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
        Structure is documented below.
        """
        return pulumi.get(self, "container_spec")

    @_builtins.property
    @pulumi.getter(name="huggingFaceAccessToken")
    def hugging_face_access_token(self) -> Optional[_builtins.str]:
        """
        The Hugging Face read access token used to access the model
        artifacts of gated models.
        """
        return pulumi.get(self, "hugging_face_access_token")

    @_builtins.property
    @pulumi.getter(name="huggingFaceCacheEnabled")
    def hugging_face_cache_enabled(self) -> Optional[_builtins.bool]:
        """
        If true, the model will deploy with a cached version instead of
        directly downloading the model artifacts from Hugging Face. This is
        suitable for VPC-SC users with limited internet access.
        """
        return pulumi.get(self, "hugging_face_cache_enabled")

    @_builtins.property
    @pulumi.getter(name="modelDisplayName")
    def model_display_name(self) -> Optional[_builtins.str]:
        """
        The user-specified display name of the uploaded model. If not
        set, a default name will be used.
        """
        return pulumi.get(self, "model_display_name")

+ @pulumi.output_type
1886
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpec(dict):
1887
+ @staticmethod
1888
+ def __key_warning(key: str):
1889
+ suggest = None
1890
+ if key == "imageUri":
1891
+ suggest = "image_uri"
1892
+ elif key == "deploymentTimeout":
1893
+ suggest = "deployment_timeout"
1894
+ elif key == "grpcPorts":
1895
+ suggest = "grpc_ports"
1896
+ elif key == "healthProbe":
1897
+ suggest = "health_probe"
1898
+ elif key == "healthRoute":
1899
+ suggest = "health_route"
1900
+ elif key == "livenessProbe":
1901
+ suggest = "liveness_probe"
1902
+ elif key == "predictRoute":
1903
+ suggest = "predict_route"
1904
+ elif key == "sharedMemorySizeMb":
1905
+ suggest = "shared_memory_size_mb"
1906
+ elif key == "startupProbe":
1907
+ suggest = "startup_probe"
1908
+
1909
+ if suggest:
1910
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpec. Access the value via the '{suggest}' property getter instead.")
1911
+
1912
+ def __getitem__(self, key: str) -> Any:
1913
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpec.__key_warning(key)
1914
+ return super().__getitem__(key)
1915
+
1916
+ def get(self, key: str, default = None) -> Any:
1917
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpec.__key_warning(key)
1918
+ return super().get(key, default)
1919
+
1920
+ def __init__(__self__, *,
1921
+ image_uri: _builtins.str,
1922
+ args: Optional[Sequence[_builtins.str]] = None,
1923
+ commands: Optional[Sequence[_builtins.str]] = None,
1924
+ deployment_timeout: Optional[_builtins.str] = None,
1925
+ envs: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecEnv']] = None,
1926
+ grpc_ports: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort']] = None,
1927
+ health_probe: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe'] = None,
1928
+ health_route: Optional[_builtins.str] = None,
1929
+ liveness_probe: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe'] = None,
1930
+ ports: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort']] = None,
1931
+ predict_route: Optional[_builtins.str] = None,
1932
+ shared_memory_size_mb: Optional[_builtins.str] = None,
1933
+ startup_probe: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe'] = None):
1934
+ """
1935
+ :param _builtins.str image_uri: URI of the Docker image to be used as the custom container for serving
1936
+ predictions. This URI must identify an image in Artifact Registry or
1937
+ Container Registry. Learn more about the [container publishing
1938
+ requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing),
1939
+ including permissions requirements for the Vertex AI Service Agent.
1940
+ The container image is ingested upon ModelService.UploadModel, stored
1941
+ internally, and this original path is afterwards not used.
1942
+ To learn about the requirements for the Docker image itself, see
1943
+ [Custom container
1944
+ requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#).
1945
+ You can use the URI to one of Vertex AI's [pre-built container images for
1946
+ prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)
1947
+ in this field.
1948
+ :param Sequence[_builtins.str] args: Specifies arguments for the command that runs when the container starts.
1949
+ This overrides the container's
1950
+ [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify
1951
+ this field as an array of executable and arguments, similar to a Docker
1952
+ `CMD`'s "default parameters" form.
1953
+ If you don't specify this field but do specify the
1954
+ command field, then the command from the
1955
+ `command` field runs without any additional arguments. See the
1956
+ [Kubernetes documentation about how the
1957
+ `command` and `args` fields interact with a container's `ENTRYPOINT` and
1958
+ `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
1959
+ If you don't specify this field and don't specify the `command` field,
1960
+ then the container's
1961
+ [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and
1962
+ `CMD` determine what runs based on their default behavior. See the Docker
1963
+ documentation about [how `CMD` and `ENTRYPOINT`
1964
+ interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
1965
+ In this field, you can reference [environment variables
1966
+ set by Vertex
1967
+ AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
1968
+ and environment variables set in the env field.
1969
+ You cannot reference environment variables set in the Docker image. In
1970
+ order for environment variables to be expanded, reference them by using the
1971
+ following syntax:$(VARIABLE_NAME)
1972
+ Note that this differs from Bash variable expansion, which does not use
1973
+ parentheses. If a variable cannot be resolved, the reference in the input
1974
+ string is used unchanged. To avoid variable expansion, you can escape this
1975
+ syntax with `$$`; for example:$$(VARIABLE_NAME)
1976
+ This field corresponds to the `args` field of the Kubernetes Containers
1977
+ [v1 core
1978
+ API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
1979
+ :param Sequence[_builtins.str] commands: Specifies the command that runs when the container starts. This overrides
1980
+ the container's
1981
+ [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint).
1982
+ Specify this field as an array of executable and arguments, similar to a
1983
+ Docker `ENTRYPOINT`'s "exec" form, not its "shell" form.
1984
+ If you do not specify this field, then the container's `ENTRYPOINT` runs,
1985
+ in conjunction with the args field or the
1986
+ container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd),
1987
+ if either exists. If this field is not specified and the container does not
1988
+ have an `ENTRYPOINT`, then refer to the Docker documentation about [how
1989
+ `CMD` and `ENTRYPOINT`
1990
+ interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
1991
+ If you specify this field, then you can also specify the `args` field to
1992
+ provide additional arguments for this command. However, if you specify this
1993
+ field, then the container's `CMD` is ignored. See the
1994
+ [Kubernetes documentation about how the
1995
+ `command` and `args` fields interact with a container's `ENTRYPOINT` and
1996
+ `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
1997
+ In this field, you can reference [environment variables set by Vertex
1998
+ AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
1999
+ and environment variables set in the env field.
2000
+ You cannot reference environment variables set in the Docker image. In
2001
+ order for environment variables to be expanded, reference them by using the
2002
+ following syntax:$(VARIABLE_NAME)
2003
+ Note that this differs from Bash variable expansion, which does not use
2004
+ parentheses. If a variable cannot be resolved, the reference in the input
2005
+ string is used unchanged. To avoid variable expansion, you can escape this
2006
+ syntax with `$$`; for example:$$(VARIABLE_NAME)
2007
+ This field corresponds to the `command` field of the Kubernetes Containers
2008
+ [v1 core
2009
+ API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
2010
+ :param _builtins.str deployment_timeout: Deployment timeout.
2011
+ Limit for deployment timeout is 2 hours.
2012
+ :param Sequence['AiEndpointWithModelGardenDeploymentModelConfigContainerSpecEnvArgs'] envs: List of environment variables to set in the container. After the container
2013
+ starts running, code running in the container can read these environment
2014
+ variables.
2015
+ Additionally, the command and
2016
+ args fields can reference these variables. Later
2017
+ entries in this list can also reference earlier entries. For example, the
2018
+ following example sets the variable `VAR_2` to have the value `foo bar`:
2019
+ ```json
2020
+ [
2021
+ {
2022
+ "name": "VAR_1",
2023
+ "value": "foo"
2024
+ },
2025
+ {
2026
+ "name": "VAR_2",
2027
+ "value": "$(VAR_1) bar"
2028
+ }
2029
+ ]
2030
+ ```
2031
+ If you switch the order of the variables in the example, then the expansion
2032
+ does not occur.
2033
+ This field corresponds to the `env` field of the Kubernetes Containers
2034
+ [v1 core
2035
+ API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
2036
+ Structure is documented below.
2037
+ :param Sequence['AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPortArgs'] grpc_ports: List of ports to expose from the container. Vertex AI sends gRPC
2038
+ prediction requests that it receives to the first port on this list. Vertex
2039
+ AI also sends liveness and health checks to this port.
2040
+ If you do not specify this field, gRPC requests to the container will be
2041
+ disabled.
2042
+ Vertex AI does not use ports other than the first one listed. This field
2043
+ corresponds to the `ports` field of the Kubernetes Containers v1 core API.
2044
+ Structure is documented below.
2045
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeArgs' health_probe: Probe describes a health check to be performed against a container to
2046
+ determine whether it is alive or ready to receive traffic.
2047
+ Structure is documented below.
2048
+ :param _builtins.str health_route: HTTP path on the container to send health checks to. Vertex AI
2049
+ intermittently sends GET requests to this path on the container's IP
2050
+ address and port to check that the container is healthy. Read more about
2051
+ [health
2052
+ checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health).
2053
+ For example, if you set this field to `/bar`, then Vertex AI
2054
+ intermittently sends a GET request to the `/bar` path on the port of your
2055
+ container specified by the first value of this `ModelContainerSpec`'s
2056
+ ports field.
2057
+ If you don't specify this field, it defaults to the following value when
2058
+ you deploy this Model to an Endpoint:/v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict
2059
+ The placeholders in this value are replaced as follows:
2060
+ * ENDPOINT: The last segment (following `endpoints/`)of the
2061
+ Endpoint.name][] field of the Endpoint where this Model has been
2062
+ deployed. (Vertex AI makes this value available to your container code
2063
+ as the [`AIP_ENDPOINT_ID` environment
2064
+ variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
2065
+ * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`.
2066
+ (Vertex AI makes this value available to your container code as the
2067
+ [`AIP_DEPLOYED_MODEL_ID` environment
2068
+ variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
2069
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeArgs' liveness_probe: Probe describes a health check to be performed against a container to
2070
+ determine whether it is alive or ready to receive traffic.
2071
+ Structure is documented below.
2072
+ :param Sequence['AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPortArgs'] ports: List of ports to expose from the container. Vertex AI sends any
2073
+ prediction requests that it receives to the first port on this list. Vertex
2074
+ AI also sends
2075
+ [liveness and health
2076
+ checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness)
2077
+ to this port.
2078
+ If you do not specify this field, it defaults to following value:
2079
+ ```json
2080
+ [
2081
+ {
2082
+ "containerPort": 8080
2083
+ }
2084
+ ]
2085
+ ```
2086
+ Vertex AI does not use ports other than the first one listed. This field
2087
+ corresponds to the `ports` field of the Kubernetes Containers
2088
+ [v1 core
2089
+ API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
2090
+ Structure is documented below.
2091
+ :param _builtins.str predict_route: HTTP path on the container to send prediction requests to. Vertex AI
2092
+ forwards requests sent using
2093
+ projects.locations.endpoints.predict to this
2094
+ path on the container's IP address and port. Vertex AI then returns the
2095
+ container's response in the API response.
2096
+ For example, if you set this field to `/foo`, then when Vertex AI
2097
+ receives a prediction request, it forwards the request body in a POST
2098
+ request to the `/foo` path on the port of your container specified by the
2099
+ first value of this `ModelContainerSpec`'s
2100
+ ports field.
2101
+ If you don't specify this field, it defaults to the following value when
2102
+ you deploy this Model to an Endpoint:/v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict
2103
+ The placeholders in this value are replaced as follows:
2104
+ * ENDPOINT: The last segment (following `endpoints/`)of the
2105
+ Endpoint.name][] field of the Endpoint where this Model has been
2106
+ deployed. (Vertex AI makes this value available to your container code
2107
+ as the [`AIP_ENDPOINT_ID` environment
2108
+ variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
2109
+ * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`.
2110
+ (Vertex AI makes this value available to your container code
2111
+ as the [`AIP_DEPLOYED_MODEL_ID` environment
2112
+ variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
2113
+ :param _builtins.str shared_memory_size_mb: The amount of the VM memory to reserve as the shared memory for the model
2114
+ in megabytes.
2115
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeArgs' startup_probe: Probe describes a health check to be performed against a container to
2116
+ determine whether it is alive or ready to receive traffic.
2117
+ Structure is documented below.
2118
+ """
2119
+ pulumi.set(__self__, "image_uri", image_uri)
2120
+ if args is not None:
2121
+ pulumi.set(__self__, "args", args)
2122
+ if commands is not None:
2123
+ pulumi.set(__self__, "commands", commands)
2124
+ if deployment_timeout is not None:
2125
+ pulumi.set(__self__, "deployment_timeout", deployment_timeout)
2126
+ if envs is not None:
2127
+ pulumi.set(__self__, "envs", envs)
2128
+ if grpc_ports is not None:
2129
+ pulumi.set(__self__, "grpc_ports", grpc_ports)
2130
+ if health_probe is not None:
2131
+ pulumi.set(__self__, "health_probe", health_probe)
2132
+ if health_route is not None:
2133
+ pulumi.set(__self__, "health_route", health_route)
2134
+ if liveness_probe is not None:
2135
+ pulumi.set(__self__, "liveness_probe", liveness_probe)
2136
+ if ports is not None:
2137
+ pulumi.set(__self__, "ports", ports)
2138
+ if predict_route is not None:
2139
+ pulumi.set(__self__, "predict_route", predict_route)
2140
+ if shared_memory_size_mb is not None:
2141
+ pulumi.set(__self__, "shared_memory_size_mb", shared_memory_size_mb)
2142
+ if startup_probe is not None:
2143
+ pulumi.set(__self__, "startup_probe", startup_probe)
2144
+
2145
+ @_builtins.property
2146
+ @pulumi.getter(name="imageUri")
2147
+ def image_uri(self) -> _builtins.str:
2148
+ """
2149
+ URI of the Docker image to be used as the custom container for serving
2150
+ predictions. This URI must identify an image in Artifact Registry or
2151
+ Container Registry. Learn more about the [container publishing
2152
+ requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing),
2153
+ including permissions requirements for the Vertex AI Service Agent.
2154
+ The container image is ingested upon ModelService.UploadModel, stored
2155
+ internally, and this original path is afterwards not used.
2156
+ To learn about the requirements for the Docker image itself, see
2157
+ [Custom container
2158
+ requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#).
2159
+ You can use the URI to one of Vertex AI's [pre-built container images for
2160
+ prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)
2161
+ in this field.
2162
+ """
2163
+ return pulumi.get(self, "image_uri")
2164
+
2165
+ @_builtins.property
2166
+ @pulumi.getter
2167
+ def args(self) -> Optional[Sequence[_builtins.str]]:
2168
+ """
2169
+ Specifies arguments for the command that runs when the container starts.
2170
+ This overrides the container's
2171
+ [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify
2172
+ this field as an array of executable and arguments, similar to a Docker
2173
+ `CMD`'s "default parameters" form.
2174
+ If you don't specify this field but do specify the
2175
+ command field, then the command from the
2176
+ `command` field runs without any additional arguments. See the
2177
+ [Kubernetes documentation about how the
2178
+ `command` and `args` fields interact with a container's `ENTRYPOINT` and
2179
+ `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
2180
+ If you don't specify this field and don't specify the `command` field,
2181
+ then the container's
2182
+ [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and
2183
+ `CMD` determine what runs based on their default behavior. See the Docker
2184
+ documentation about [how `CMD` and `ENTRYPOINT`
2185
+ interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
2186
+ In this field, you can reference [environment variables
2187
+ set by Vertex
2188
+ AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
2189
+ and environment variables set in the env field.
2190
+ You cannot reference environment variables set in the Docker image. In
2191
+ order for environment variables to be expanded, reference them by using the
2192
+ following syntax:$(VARIABLE_NAME)
2193
+ Note that this differs from Bash variable expansion, which does not use
2194
+ parentheses. If a variable cannot be resolved, the reference in the input
2195
+ string is used unchanged. To avoid variable expansion, you can escape this
2196
+ syntax with `$$`; for example:$$(VARIABLE_NAME)
2197
+ This field corresponds to the `args` field of the Kubernetes Containers
2198
+ [v1 core
2199
+ API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
2200
+ """
2201
+ return pulumi.get(self, "args")
2202
+
2203
+ @_builtins.property
2204
+ @pulumi.getter
2205
+ def commands(self) -> Optional[Sequence[_builtins.str]]:
2206
+ """
2207
+ Specifies the command that runs when the container starts. This overrides
2208
+ the container's
2209
+ [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint).
2210
+ Specify this field as an array of executable and arguments, similar to a
2211
+ Docker `ENTRYPOINT`'s "exec" form, not its "shell" form.
2212
+ If you do not specify this field, then the container's `ENTRYPOINT` runs,
2213
+ in conjunction with the args field or the
2214
+ container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd),
2215
+ if either exists. If this field is not specified and the container does not
2216
+ have an `ENTRYPOINT`, then refer to the Docker documentation about [how
2217
+ `CMD` and `ENTRYPOINT`
2218
+ interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
2219
+ If you specify this field, then you can also specify the `args` field to
2220
+ provide additional arguments for this command. However, if you specify this
2221
+ field, then the container's `CMD` is ignored. See the
2222
+ [Kubernetes documentation about how the
2223
+ `command` and `args` fields interact with a container's `ENTRYPOINT` and
2224
+ `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
2225
+ In this field, you can reference [environment variables set by Vertex
2226
+ AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
2227
+ and environment variables set in the env field.
2228
+ You cannot reference environment variables set in the Docker image. In
2229
+ order for environment variables to be expanded, reference them by using the
2230
+ following syntax:$(VARIABLE_NAME)
2231
+ Note that this differs from Bash variable expansion, which does not use
2232
+ parentheses. If a variable cannot be resolved, the reference in the input
2233
+ string is used unchanged. To avoid variable expansion, you can escape this
2234
+ syntax with `$$`; for example:$$(VARIABLE_NAME)
2235
+ This field corresponds to the `command` field of the Kubernetes Containers
2236
+ [v1 core
2237
+ API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
2238
+ """
2239
+ return pulumi.get(self, "commands")
2240
+
2241
+ @_builtins.property
2242
+ @pulumi.getter(name="deploymentTimeout")
2243
+ def deployment_timeout(self) -> Optional[_builtins.str]:
2244
+ """
2245
+ Deployment timeout.
2246
+ Limit for deployment timeout is 2 hours.
2247
+ """
2248
+ return pulumi.get(self, "deployment_timeout")
2249
+
2250
+ @_builtins.property
2251
+ @pulumi.getter
2252
+ def envs(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecEnv']]:
2253
+ """
2254
+ List of environment variables to set in the container. After the container
2255
+ starts running, code running in the container can read these environment
2256
+ variables.
2257
+ Additionally, the command and
2258
+ args fields can reference these variables. Later
2259
+ entries in this list can also reference earlier entries. For example, the
2260
+ following example sets the variable `VAR_2` to have the value `foo bar`:
2261
+ ```json
2262
+ [
2263
+ {
2264
+ "name": "VAR_1",
2265
+ "value": "foo"
2266
+ },
2267
+ {
2268
+ "name": "VAR_2",
2269
+ "value": "$(VAR_1) bar"
2270
+ }
2271
+ ]
2272
+ ```
2273
+ If you switch the order of the variables in the example, then the expansion
2274
+ does not occur.
2275
+ This field corresponds to the `env` field of the Kubernetes Containers
2276
+ [v1 core
2277
+ API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
2278
+ Structure is documented below.
2279
+ """
2280
+ return pulumi.get(self, "envs")
2281
+
2282
+ @_builtins.property
2283
+ @pulumi.getter(name="grpcPorts")
2284
+ def grpc_ports(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort']]:
2285
+ """
2286
+ List of ports to expose from the container. Vertex AI sends gRPC
2287
+ prediction requests that it receives to the first port on this list. Vertex
2288
+ AI also sends liveness and health checks to this port.
2289
+ If you do not specify this field, gRPC requests to the container will be
2290
+ disabled.
2291
+ Vertex AI does not use ports other than the first one listed. This field
2292
+ corresponds to the `ports` field of the Kubernetes Containers v1 core API.
2293
+ Structure is documented below.
2294
+ """
2295
+ return pulumi.get(self, "grpc_ports")
2296
+
2297
+ @_builtins.property
2298
+ @pulumi.getter(name="healthProbe")
2299
+ def health_probe(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe']:
2300
+ """
2301
+ Probe describes a health check to be performed against a container to
2302
+ determine whether it is alive or ready to receive traffic.
2303
+ Structure is documented below.
2304
+ """
2305
+ return pulumi.get(self, "health_probe")
2306
+
2307
+ @_builtins.property
2308
+ @pulumi.getter(name="healthRoute")
2309
+ def health_route(self) -> Optional[_builtins.str]:
2310
+ """
2311
+ HTTP path on the container to send health checks to. Vertex AI
2312
+ intermittently sends GET requests to this path on the container's IP
2313
+ address and port to check that the container is healthy. Read more about
2314
+ [health
2315
+ checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health).
2316
+ For example, if you set this field to `/bar`, then Vertex AI
2317
+ intermittently sends a GET request to the `/bar` path on the port of your
2318
+ container specified by the first value of this `ModelContainerSpec`'s
2319
+ ports field.
2320
+ If you don't specify this field, it defaults to the following value when
2321
+ you deploy this Model to an Endpoint:/v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict
2322
+ The placeholders in this value are replaced as follows:
2323
+ * ENDPOINT: The last segment (following `endpoints/`)of the
2324
+ Endpoint.name][] field of the Endpoint where this Model has been
2325
+ deployed. (Vertex AI makes this value available to your container code
2326
+ as the [`AIP_ENDPOINT_ID` environment
2327
+ variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
2328
+ * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`.
2329
+ (Vertex AI makes this value available to your container code as the
2330
+ [`AIP_DEPLOYED_MODEL_ID` environment
2331
+ variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
2332
+ """
2333
+ return pulumi.get(self, "health_route")
2334
+
2335
+ @_builtins.property
2336
+ @pulumi.getter(name="livenessProbe")
2337
+ def liveness_probe(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe']:
2338
+ """
2339
+ Probe describes a health check to be performed against a container to
2340
+ determine whether it is alive or ready to receive traffic.
2341
+ Structure is documented below.
2342
+ """
2343
+ return pulumi.get(self, "liveness_probe")
2344
+
2345
+ @_builtins.property
2346
+ @pulumi.getter
2347
+ def ports(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort']]:
2348
+ """
2349
+ List of ports to expose from the container. Vertex AI sends any
2350
+ prediction requests that it receives to the first port on this list. Vertex
2351
+ AI also sends
2352
+ [liveness and health
2353
+ checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness)
2354
+ to this port.
2355
+ If you do not specify this field, it defaults to following value:
2356
+ ```json
2357
+ [
2358
+ {
2359
+ "containerPort": 8080
2360
+ }
2361
+ ]
2362
+ ```
2363
+ Vertex AI does not use ports other than the first one listed. This field
2364
+ corresponds to the `ports` field of the Kubernetes Containers
2365
+ [v1 core
2366
+ API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
2367
+ Structure is documented below.
2368
+ """
2369
+ return pulumi.get(self, "ports")
2370
+
2371
+ @_builtins.property
2372
+ @pulumi.getter(name="predictRoute")
2373
+ def predict_route(self) -> Optional[_builtins.str]:
2374
+ """
2375
+ HTTP path on the container to send prediction requests to. Vertex AI
2376
+ forwards requests sent using
2377
+ projects.locations.endpoints.predict to this
2378
+ path on the container's IP address and port. Vertex AI then returns the
2379
+ container's response in the API response.
2380
+ For example, if you set this field to `/foo`, then when Vertex AI
2381
+ receives a prediction request, it forwards the request body in a POST
2382
+ request to the `/foo` path on the port of your container specified by the
2383
+ first value of this `ModelContainerSpec`'s
2384
+ ports field.
2385
+ If you don't specify this field, it defaults to the following value when
2386
+ you deploy this Model to an Endpoint:/v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict
2387
+ The placeholders in this value are replaced as follows:
2388
+ * ENDPOINT: The last segment (following `endpoints/`)of the
2389
+ Endpoint.name][] field of the Endpoint where this Model has been
2390
+ deployed. (Vertex AI makes this value available to your container code
2391
+ as the [`AIP_ENDPOINT_ID` environment
2392
+ variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
2393
+ * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`.
2394
+ (Vertex AI makes this value available to your container code
2395
+ as the [`AIP_DEPLOYED_MODEL_ID` environment
2396
+ variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
2397
+ """
2398
+ return pulumi.get(self, "predict_route")
2399
+
2400
+ @_builtins.property
2401
+ @pulumi.getter(name="sharedMemorySizeMb")
2402
+ def shared_memory_size_mb(self) -> Optional[_builtins.str]:
2403
+ """
2404
+ The amount of the VM memory to reserve as the shared memory for the model
2405
+ in megabytes.
2406
+ """
2407
+ return pulumi.get(self, "shared_memory_size_mb")
2408
+
2409
+ @_builtins.property
2410
+ @pulumi.getter(name="startupProbe")
2411
+ def startup_probe(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe']:
2412
+ """
2413
+ Probe describes a health check to be performed against a container to
2414
+ determine whether it is alive or ready to receive traffic.
2415
+ Structure is documented below.
2416
+ """
2417
+ return pulumi.get(self, "startup_probe")
2418
+
2419
+
2420
+ @pulumi.output_type
2421
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecEnv(dict):
2422
+ def __init__(__self__, *,
2423
+ name: _builtins.str,
2424
+ value: _builtins.str):
2425
+ """
2426
+ :param _builtins.str name: Name of the environment variable. Must be a valid C identifier.
2427
+ :param _builtins.str value: Variables that reference a $(VAR_NAME) are expanded
2428
+ using the previous defined environment variables in the container and
2429
+ any service environment variables. If a variable cannot be resolved,
2430
+ the reference in the input string will be unchanged. The $(VAR_NAME)
2431
+ syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
2432
+ references will never be expanded, regardless of whether the variable
2433
+ exists or not.
2434
+ """
2435
+ pulumi.set(__self__, "name", name)
2436
+ pulumi.set(__self__, "value", value)
2437
+
2438
+ @_builtins.property
2439
+ @pulumi.getter
2440
+ def name(self) -> _builtins.str:
2441
+ """
2442
+ Name of the environment variable. Must be a valid C identifier.
2443
+ """
2444
+ return pulumi.get(self, "name")
2445
+
2446
+ @_builtins.property
2447
+ @pulumi.getter
2448
+ def value(self) -> _builtins.str:
2449
+ """
2450
+ Variables that reference a $(VAR_NAME) are expanded
2451
+ using the previous defined environment variables in the container and
2452
+ any service environment variables. If a variable cannot be resolved,
2453
+ the reference in the input string will be unchanged. The $(VAR_NAME)
2454
+ syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
2455
+ references will never be expanded, regardless of whether the variable
2456
+ exists or not.
2457
+ """
2458
+ return pulumi.get(self, "value")
2459
+
2460
+
2461
+ @pulumi.output_type
2462
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort(dict):
2463
+ @staticmethod
2464
+ def __key_warning(key: str):
2465
+ suggest = None
2466
+ if key == "containerPort":
2467
+ suggest = "container_port"
2468
+
2469
+ if suggest:
2470
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort. Access the value via the '{suggest}' property getter instead.")
2471
+
2472
+ def __getitem__(self, key: str) -> Any:
2473
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort.__key_warning(key)
2474
+ return super().__getitem__(key)
2475
+
2476
+ def get(self, key: str, default = None) -> Any:
2477
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecGrpcPort.__key_warning(key)
2478
+ return super().get(key, default)
2479
+
2480
+ def __init__(__self__, *,
2481
+ container_port: Optional[_builtins.int] = None):
2482
+ """
2483
+ :param _builtins.int container_port: The number of the port to expose on the pod's IP address.
2484
+ Must be a valid port number, between 1 and 65535 inclusive.
2485
+ """
2486
+ if container_port is not None:
2487
+ pulumi.set(__self__, "container_port", container_port)
2488
+
2489
+ @_builtins.property
2490
+ @pulumi.getter(name="containerPort")
2491
+ def container_port(self) -> Optional[_builtins.int]:
2492
+ """
2493
+ The number of the port to expose on the pod's IP address.
2494
+ Must be a valid port number, between 1 and 65535 inclusive.
2495
+ """
2496
+ return pulumi.get(self, "container_port")
2497
+
2498
+
2499
+ @pulumi.output_type
2500
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe(dict):
2501
+ @staticmethod
2502
+ def __key_warning(key: str):
2503
+ suggest = None
2504
+ if key == "exec":
2505
+ suggest = "exec_"
2506
+ elif key == "failureThreshold":
2507
+ suggest = "failure_threshold"
2508
+ elif key == "httpGet":
2509
+ suggest = "http_get"
2510
+ elif key == "initialDelaySeconds":
2511
+ suggest = "initial_delay_seconds"
2512
+ elif key == "periodSeconds":
2513
+ suggest = "period_seconds"
2514
+ elif key == "successThreshold":
2515
+ suggest = "success_threshold"
2516
+ elif key == "tcpSocket":
2517
+ suggest = "tcp_socket"
2518
+ elif key == "timeoutSeconds":
2519
+ suggest = "timeout_seconds"
2520
+
2521
+ if suggest:
2522
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe. Access the value via the '{suggest}' property getter instead.")
2523
+
2524
+ def __getitem__(self, key: str) -> Any:
2525
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe.__key_warning(key)
2526
+ return super().__getitem__(key)
2527
+
2528
+ def get(self, key: str, default = None) -> Any:
2529
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbe.__key_warning(key)
2530
+ return super().get(key, default)
2531
+
2532
+ def __init__(__self__, *,
2533
+ exec_: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeExec'] = None,
2534
+ failure_threshold: Optional[_builtins.int] = None,
2535
+ grpc: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeGrpc'] = None,
2536
+ http_get: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet'] = None,
2537
+ initial_delay_seconds: Optional[_builtins.int] = None,
2538
+ period_seconds: Optional[_builtins.int] = None,
2539
+ success_threshold: Optional[_builtins.int] = None,
2540
+ tcp_socket: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeTcpSocket'] = None,
2541
+ timeout_seconds: Optional[_builtins.int] = None):
2542
+ """
2543
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeExecArgs' exec_: ExecAction specifies a command to execute.
2544
+ Structure is documented below.
2545
+ :param _builtins.int failure_threshold: Number of consecutive failures before the probe is considered failed.
2546
+ Defaults to 3. Minimum value is 1.
2547
+ Maps to Kubernetes probe argument 'failureThreshold'.
2548
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeGrpcArgs' grpc: GrpcAction checks the health of a container using a gRPC service.
2549
+ Structure is documented below.
2550
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetArgs' http_get: HttpGetAction describes an action based on HTTP Get requests.
2551
+ Structure is documented below.
2552
+ :param _builtins.int initial_delay_seconds: Number of seconds to wait before starting the probe. Defaults to 0.
2553
+ Minimum value is 0.
2554
+ Maps to Kubernetes probe argument 'initialDelaySeconds'.
2555
+ :param _builtins.int period_seconds: How often (in seconds) to perform the probe. Default to 10 seconds.
2556
+ Minimum value is 1. Must be less than timeout_seconds.
2557
+ Maps to Kubernetes probe argument 'periodSeconds'.
2558
+ :param _builtins.int success_threshold: Number of consecutive successes before the probe is considered successful.
2559
+ Defaults to 1. Minimum value is 1.
2560
+ Maps to Kubernetes probe argument 'successThreshold'.
2561
+ :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeTcpSocketArgs' tcp_socket: TcpSocketAction probes the health of a container by opening a TCP socket
2562
+ connection.
2563
+ Structure is documented below.
2564
+ :param _builtins.int timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second.
2565
+ Minimum value is 1. Must be greater or equal to period_seconds.
2566
+ Maps to Kubernetes probe argument 'timeoutSeconds'.
2567
+ """
2568
+ if exec_ is not None:
2569
+ pulumi.set(__self__, "exec_", exec_)
2570
+ if failure_threshold is not None:
2571
+ pulumi.set(__self__, "failure_threshold", failure_threshold)
2572
+ if grpc is not None:
2573
+ pulumi.set(__self__, "grpc", grpc)
2574
+ if http_get is not None:
2575
+ pulumi.set(__self__, "http_get", http_get)
2576
+ if initial_delay_seconds is not None:
2577
+ pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
2578
+ if period_seconds is not None:
2579
+ pulumi.set(__self__, "period_seconds", period_seconds)
2580
+ if success_threshold is not None:
2581
+ pulumi.set(__self__, "success_threshold", success_threshold)
2582
+ if tcp_socket is not None:
2583
+ pulumi.set(__self__, "tcp_socket", tcp_socket)
2584
+ if timeout_seconds is not None:
2585
+ pulumi.set(__self__, "timeout_seconds", timeout_seconds)
2586
+
2587
+ @_builtins.property
2588
+ @pulumi.getter(name="exec")
2589
+ def exec_(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeExec']:
2590
+ """
2591
+ ExecAction specifies a command to execute.
2592
+ Structure is documented below.
2593
+ """
2594
+ return pulumi.get(self, "exec_")
2595
+
2596
+ @_builtins.property
2597
+ @pulumi.getter(name="failureThreshold")
2598
+ def failure_threshold(self) -> Optional[_builtins.int]:
2599
+ """
2600
+ Number of consecutive failures before the probe is considered failed.
2601
+ Defaults to 3. Minimum value is 1.
2602
+ Maps to Kubernetes probe argument 'failureThreshold'.
2603
+ """
2604
+ return pulumi.get(self, "failure_threshold")
2605
+
2606
+ @_builtins.property
2607
+ @pulumi.getter
2608
+ def grpc(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeGrpc']:
2609
+ """
2610
+ GrpcAction checks the health of a container using a gRPC service.
2611
+ Structure is documented below.
2612
+ """
2613
+ return pulumi.get(self, "grpc")
2614
+
2615
+ @_builtins.property
2616
+ @pulumi.getter(name="httpGet")
2617
+ def http_get(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet']:
2618
+ """
2619
+ HttpGetAction describes an action based on HTTP Get requests.
2620
+ Structure is documented below.
2621
+ """
2622
+ return pulumi.get(self, "http_get")
2623
+
2624
+ @_builtins.property
2625
+ @pulumi.getter(name="initialDelaySeconds")
2626
+ def initial_delay_seconds(self) -> Optional[_builtins.int]:
2627
+ """
2628
+ Number of seconds to wait before starting the probe. Defaults to 0.
2629
+ Minimum value is 0.
2630
+ Maps to Kubernetes probe argument 'initialDelaySeconds'.
2631
+ """
2632
+ return pulumi.get(self, "initial_delay_seconds")
2633
+
2634
+ @_builtins.property
2635
+ @pulumi.getter(name="periodSeconds")
2636
+ def period_seconds(self) -> Optional[_builtins.int]:
2637
+ """
2638
+ How often (in seconds) to perform the probe. Default to 10 seconds.
2639
+ Minimum value is 1. Must be less than timeout_seconds.
2640
+ Maps to Kubernetes probe argument 'periodSeconds'.
2641
+ """
2642
+ return pulumi.get(self, "period_seconds")
2643
+
2644
+ @_builtins.property
2645
+ @pulumi.getter(name="successThreshold")
2646
+ def success_threshold(self) -> Optional[_builtins.int]:
2647
+ """
2648
+ Number of consecutive successes before the probe is considered successful.
2649
+ Defaults to 1. Minimum value is 1.
2650
+ Maps to Kubernetes probe argument 'successThreshold'.
2651
+ """
2652
+ return pulumi.get(self, "success_threshold")
2653
+
2654
+ @_builtins.property
2655
+ @pulumi.getter(name="tcpSocket")
2656
+ def tcp_socket(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeTcpSocket']:
2657
+ """
2658
+ TcpSocketAction probes the health of a container by opening a TCP socket
2659
+ connection.
2660
+ Structure is documented below.
2661
+ """
2662
+ return pulumi.get(self, "tcp_socket")
2663
+
2664
+ @_builtins.property
2665
+ @pulumi.getter(name="timeoutSeconds")
2666
+ def timeout_seconds(self) -> Optional[_builtins.int]:
2667
+ """
2668
+ Number of seconds after which the probe times out. Defaults to 1 second.
2669
+ Minimum value is 1. Must be greater or equal to period_seconds.
2670
+ Maps to Kubernetes probe argument 'timeoutSeconds'.
2671
+ """
2672
+ return pulumi.get(self, "timeout_seconds")
2673
+
2674
+
2675
+ @pulumi.output_type
2676
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeExec(dict):
2677
+ def __init__(__self__, *,
2678
+ commands: Optional[Sequence[_builtins.str]] = None):
2679
+ """
2680
+ :param Sequence[_builtins.str] commands: Command is the command line to execute inside the container, the working
2681
+ directory for the command is root ('/') in the container's filesystem.
2682
+ The command is simply exec'd, it is not run inside a shell, so
2683
+ traditional shell instructions ('|', etc) won't work. To use a shell, you
2684
+ need to explicitly call out to that shell. Exit status of 0 is treated as
2685
+ live/healthy and non-zero is unhealthy.
2686
+ """
2687
+ if commands is not None:
2688
+ pulumi.set(__self__, "commands", commands)
2689
+
2690
+ @_builtins.property
2691
+ @pulumi.getter
2692
+ def commands(self) -> Optional[Sequence[_builtins.str]]:
2693
+ """
2694
+ Command is the command line to execute inside the container, the working
2695
+ directory for the command is root ('/') in the container's filesystem.
2696
+ The command is simply exec'd, it is not run inside a shell, so
2697
+ traditional shell instructions ('|', etc) won't work. To use a shell, you
2698
+ need to explicitly call out to that shell. Exit status of 0 is treated as
2699
+ live/healthy and non-zero is unhealthy.
2700
+ """
2701
+ return pulumi.get(self, "commands")
2702
+
2703
+
2704
+ @pulumi.output_type
2705
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeGrpc(dict):
2706
+ def __init__(__self__, *,
2707
+ port: Optional[_builtins.int] = None,
2708
+ service: Optional[_builtins.str] = None):
2709
+ """
2710
+ :param _builtins.int port: Port number of the gRPC service. Number must be in the range 1 to 65535.
2711
+ :param _builtins.str service: Service is the name of the service to place in the gRPC
2712
+ HealthCheckRequest. See
2713
+ https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
2714
+ If this is not specified, the default behavior is defined by gRPC.
2715
+ """
2716
+ if port is not None:
2717
+ pulumi.set(__self__, "port", port)
2718
+ if service is not None:
2719
+ pulumi.set(__self__, "service", service)
2720
+
2721
+ @_builtins.property
2722
+ @pulumi.getter
2723
+ def port(self) -> Optional[_builtins.int]:
2724
+ """
2725
+ Port number of the gRPC service. Number must be in the range 1 to 65535.
2726
+ """
2727
+ return pulumi.get(self, "port")
2728
+
2729
+ @_builtins.property
2730
+ @pulumi.getter
2731
+ def service(self) -> Optional[_builtins.str]:
2732
+ """
2733
+ Service is the name of the service to place in the gRPC
2734
+ HealthCheckRequest. See
2735
+ https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
2736
+ If this is not specified, the default behavior is defined by gRPC.
2737
+ """
2738
+ return pulumi.get(self, "service")
2739
+
2740
+
2741
+ @pulumi.output_type
2742
+ class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet(dict):
2743
+ @staticmethod
2744
+ def __key_warning(key: str):
2745
+ suggest = None
2746
+ if key == "httpHeaders":
2747
+ suggest = "http_headers"
2748
+
2749
+ if suggest:
2750
+ pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet. Access the value via the '{suggest}' property getter instead.")
2751
+
2752
+ def __getitem__(self, key: str) -> Any:
2753
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet.__key_warning(key)
2754
+ return super().__getitem__(key)
2755
+
2756
+ def get(self, key: str, default = None) -> Any:
2757
+ AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGet.__key_warning(key)
2758
+ return super().get(key, default)
2759
+
2760
+ def __init__(__self__, *,
2761
+ host: Optional[_builtins.str] = None,
2762
+ http_headers: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetHttpHeader']] = None,
2763
+ path: Optional[_builtins.str] = None,
2764
+ port: Optional[_builtins.int] = None,
2765
+ scheme: Optional[_builtins.str] = None):
2766
+ """
2767
+ :param _builtins.str host: Host name to connect to, defaults to the model serving container's IP.
2768
+ You probably want to set "Host" in httpHeaders instead.
2769
+ :param Sequence['AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetHttpHeaderArgs'] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
2770
+ Structure is documented below.
2771
+ :param _builtins.str path: Path to access on the HTTP server.
2772
+ :param _builtins.int port: Number of the port to access on the container.
2773
+ Number must be in the range 1 to 65535.
2774
+ :param _builtins.str scheme: Scheme to use for connecting to the host.
2775
+ Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS".
2776
+ """
2777
+ if host is not None:
2778
+ pulumi.set(__self__, "host", host)
2779
+ if http_headers is not None:
2780
+ pulumi.set(__self__, "http_headers", http_headers)
2781
+ if path is not None:
2782
+ pulumi.set(__self__, "path", path)
2783
+ if port is not None:
2784
+ pulumi.set(__self__, "port", port)
2785
+ if scheme is not None:
2786
+ pulumi.set(__self__, "scheme", scheme)
2787
+
2788
+ @_builtins.property
2789
+ @pulumi.getter
2790
+ def host(self) -> Optional[_builtins.str]:
2791
+ """
2792
+ Host name to connect to, defaults to the model serving container's IP.
2793
+ You probably want to set "Host" in httpHeaders instead.
2794
+ """
2795
+ return pulumi.get(self, "host")
2796
+
2797
+ @_builtins.property
2798
+ @pulumi.getter(name="httpHeaders")
2799
+ def http_headers(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetHttpHeader']]:
2800
+ """
2801
+ Custom headers to set in the request. HTTP allows repeated headers.
2802
+ Structure is documented below.
2803
+ """
2804
+ return pulumi.get(self, "http_headers")
2805
+
2806
+ @_builtins.property
2807
+ @pulumi.getter
2808
+ def path(self) -> Optional[_builtins.str]:
2809
+ """
2810
+ Path to access on the HTTP server.
2811
+ """
2812
+ return pulumi.get(self, "path")
2813
+
2814
+ @_builtins.property
2815
+ @pulumi.getter
2816
+ def port(self) -> Optional[_builtins.int]:
2817
+ """
2818
+ Number of the port to access on the container.
2819
+ Number must be in the range 1 to 65535.
2820
+ """
2821
+ return pulumi.get(self, "port")
2822
+
2823
+ @_builtins.property
2824
+ @pulumi.getter
2825
+ def scheme(self) -> Optional[_builtins.str]:
2826
+ """
2827
+ Scheme to use for connecting to the host.
2828
+ Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS".
2829
+ """
2830
+ return pulumi.get(self, "scheme")
2831
+
2832
+
2833
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeHttpGetHttpHeader(dict):
    def __init__(__self__, *,
                 name: Optional[_builtins.str] = None,
                 value: Optional[_builtins.str] = None):
        """
        :param _builtins.str name: The header field name.
               This will be canonicalized upon output, so case-variant names will be
               understood as the same header.
        :param _builtins.str value: The header field value
        """
        # Store only the fields that were actually supplied, so dict-style
        # access reflects exactly what the API returned.
        for prop, val in (("name", name), ("value", value)):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @_builtins.property
    @pulumi.getter
    def name(self) -> Optional[_builtins.str]:
        """
        The header field name. Canonicalized upon output, so case-variant
        names are understood as the same header.
        """
        return pulumi.get(self, "name")

    @_builtins.property
    @pulumi.getter
    def value(self) -> Optional[_builtins.str]:
        """
        The header field value.
        """
        return pulumi.get(self, "value")
2868
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecHealthProbeTcpSocket(dict):
    def __init__(__self__, *,
                 host: Optional[_builtins.str] = None,
                 port: Optional[_builtins.int] = None):
        """
        :param _builtins.str host: Optional: Host name to connect to, defaults to the model serving
               container's IP.
        :param _builtins.int port: Number of the port to access on the container.
               Number must be in the range 1 to 65535.
        """
        # Only persist fields that were explicitly provided.
        for prop, val in (("host", host), ("port", port)):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @_builtins.property
    @pulumi.getter
    def host(self) -> Optional[_builtins.str]:
        """
        Optional: host name to connect to; defaults to the model serving
        container's IP.
        """
        return pulumi.get(self, "host")

    @_builtins.property
    @pulumi.getter
    def port(self) -> Optional[_builtins.int]:
        """
        Number of the port to access on the container.
        Must be in the range 1 to 65535.
        """
        return pulumi.get(self, "port")
2903
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe(dict):
    @staticmethod
    def __key_warning(key: str):
        # Map raw camelCase API keys to the snake_case property getters that
        # callers should use instead.
        suggest = {
            "exec": "exec_",
            "failureThreshold": "failure_threshold",
            "httpGet": "http_get",
            "initialDelaySeconds": "initial_delay_seconds",
            "periodSeconds": "period_seconds",
            "successThreshold": "success_threshold",
            "tcpSocket": "tcp_socket",
            "timeoutSeconds": "timeout_seconds",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbe.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 exec_: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeExec'] = None,
                 failure_threshold: Optional[_builtins.int] = None,
                 grpc: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeGrpc'] = None,
                 http_get: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet'] = None,
                 initial_delay_seconds: Optional[_builtins.int] = None,
                 period_seconds: Optional[_builtins.int] = None,
                 success_threshold: Optional[_builtins.int] = None,
                 tcp_socket: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeTcpSocket'] = None,
                 timeout_seconds: Optional[_builtins.int] = None):
        """
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeExecArgs' exec_: ExecAction specifies a command to execute.
        :param _builtins.int failure_threshold: Number of consecutive failures before the probe is considered failed.
               Defaults to 3. Minimum value is 1. Maps to Kubernetes probe argument 'failureThreshold'.
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeGrpcArgs' grpc: GrpcAction checks the health of a container using a gRPC service.
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetArgs' http_get: HttpGetAction describes an action based on HTTP Get requests.
        :param _builtins.int initial_delay_seconds: Number of seconds to wait before starting the probe. Defaults to 0.
               Minimum value is 0. Maps to Kubernetes probe argument 'initialDelaySeconds'.
        :param _builtins.int period_seconds: How often (in seconds) to perform the probe. Default to 10 seconds.
               Minimum value is 1. Must be less than timeout_seconds.
               Maps to Kubernetes probe argument 'periodSeconds'.
        :param _builtins.int success_threshold: Number of consecutive successes before the probe is considered successful.
               Defaults to 1. Minimum value is 1. Maps to Kubernetes probe argument 'successThreshold'.
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeTcpSocketArgs' tcp_socket: TcpSocketAction probes the health of a container by opening a TCP socket
               connection.
        :param _builtins.int timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second.
               Minimum value is 1. Must be greater or equal to period_seconds.
               Maps to Kubernetes probe argument 'timeoutSeconds'.
        """
        # Assign in declaration order, skipping anything left unset.
        for prop, val in (
            ("exec_", exec_),
            ("failure_threshold", failure_threshold),
            ("grpc", grpc),
            ("http_get", http_get),
            ("initial_delay_seconds", initial_delay_seconds),
            ("period_seconds", period_seconds),
            ("success_threshold", success_threshold),
            ("tcp_socket", tcp_socket),
            ("timeout_seconds", timeout_seconds),
        ):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @_builtins.property
    @pulumi.getter(name="exec")
    def exec_(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeExec']:
        """
        ExecAction specifies a command to execute.
        """
        return pulumi.get(self, "exec_")

    @_builtins.property
    @pulumi.getter(name="failureThreshold")
    def failure_threshold(self) -> Optional[_builtins.int]:
        """
        Consecutive failures before the probe counts as failed.
        Defaults to 3; minimum 1. Kubernetes probe argument 'failureThreshold'.
        """
        return pulumi.get(self, "failure_threshold")

    @_builtins.property
    @pulumi.getter
    def grpc(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeGrpc']:
        """
        GrpcAction checks the health of a container using a gRPC service.
        """
        return pulumi.get(self, "grpc")

    @_builtins.property
    @pulumi.getter(name="httpGet")
    def http_get(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet']:
        """
        HttpGetAction describes an action based on HTTP Get requests.
        """
        return pulumi.get(self, "http_get")

    @_builtins.property
    @pulumi.getter(name="initialDelaySeconds")
    def initial_delay_seconds(self) -> Optional[_builtins.int]:
        """
        Seconds to wait before starting the probe. Defaults to 0; minimum 0.
        Kubernetes probe argument 'initialDelaySeconds'.
        """
        return pulumi.get(self, "initial_delay_seconds")

    @_builtins.property
    @pulumi.getter(name="periodSeconds")
    def period_seconds(self) -> Optional[_builtins.int]:
        """
        How often (seconds) to run the probe. Defaults to 10; minimum 1.
        Must be less than timeout_seconds. Kubernetes probe argument 'periodSeconds'.
        """
        return pulumi.get(self, "period_seconds")

    @_builtins.property
    @pulumi.getter(name="successThreshold")
    def success_threshold(self) -> Optional[_builtins.int]:
        """
        Consecutive successes before the probe counts as successful.
        Defaults to 1; minimum 1. Kubernetes probe argument 'successThreshold'.
        """
        return pulumi.get(self, "success_threshold")

    @_builtins.property
    @pulumi.getter(name="tcpSocket")
    def tcp_socket(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeTcpSocket']:
        """
        TcpSocketAction probes container health by opening a TCP socket connection.
        """
        return pulumi.get(self, "tcp_socket")

    @_builtins.property
    @pulumi.getter(name="timeoutSeconds")
    def timeout_seconds(self) -> Optional[_builtins.int]:
        """
        Seconds after which the probe times out. Defaults to 1; minimum 1.
        Must be greater or equal to period_seconds. Kubernetes probe argument 'timeoutSeconds'.
        """
        return pulumi.get(self, "timeout_seconds")
3079
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeExec(dict):
    def __init__(__self__, *,
                 commands: Optional[Sequence[_builtins.str]] = None):
        """
        :param Sequence[_builtins.str] commands: Command is the command line to execute inside the container, the working
               directory for the command is root ('/') in the container's filesystem.
               The command is simply exec'd, it is not run inside a shell, so
               traditional shell instructions ('|', etc) won't work. To use a shell, you
               need to explicitly call out to that shell. Exit status of 0 is treated as
               live/healthy and non-zero is unhealthy.
        """
        if commands is not None:
            pulumi.set(__self__, "commands", commands)

    @_builtins.property
    @pulumi.getter
    def commands(self) -> Optional[Sequence[_builtins.str]]:
        """
        The command line to execute inside the container; the working directory
        is root ('/') in the container's filesystem. The command is exec'd
        directly (no shell), so shell constructs ('|', etc.) won't work unless
        you explicitly invoke a shell. Exit status 0 means live/healthy;
        non-zero means unhealthy.
        """
        return pulumi.get(self, "commands")
3108
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeGrpc(dict):
    def __init__(__self__, *,
                 port: Optional[_builtins.int] = None,
                 service: Optional[_builtins.str] = None):
        """
        :param _builtins.int port: Port number of the gRPC service. Number must be in the range 1 to 65535.
        :param _builtins.str service: Service is the name of the service to place in the gRPC
               HealthCheckRequest. See
               https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
               If this is not specified, the default behavior is defined by gRPC.
        """
        # Persist only explicitly supplied fields.
        for prop, val in (("port", port), ("service", service)):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @_builtins.property
    @pulumi.getter
    def port(self) -> Optional[_builtins.int]:
        """
        Port number of the gRPC service; must be in the range 1 to 65535.
        """
        return pulumi.get(self, "port")

    @_builtins.property
    @pulumi.getter
    def service(self) -> Optional[_builtins.str]:
        """
        Name of the service to place in the gRPC HealthCheckRequest. See
        https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
        If unspecified, the default behavior is defined by gRPC.
        """
        return pulumi.get(self, "service")
3145
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet(dict):
    @staticmethod
    def __key_warning(key: str):
        # The only camelCase API key that differs from its property name.
        suggest = {"httpHeaders": "http_headers"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGet.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 host: Optional[_builtins.str] = None,
                 http_headers: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetHttpHeader']] = None,
                 path: Optional[_builtins.str] = None,
                 port: Optional[_builtins.int] = None,
                 scheme: Optional[_builtins.str] = None):
        """
        :param _builtins.str host: Host name to connect to, defaults to the model serving container's IP.
               You probably want to set "Host" in httpHeaders instead.
        :param Sequence['AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetHttpHeaderArgs'] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
        :param _builtins.str path: Path to access on the HTTP server.
        :param _builtins.int port: Number of the port to access on the container.
               Number must be in the range 1 to 65535.
        :param _builtins.str scheme: Scheme to use for connecting to the host.
               Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS".
        """
        # Assign in declaration order, skipping anything left unset.
        for prop, val in (
            ("host", host),
            ("http_headers", http_headers),
            ("path", path),
            ("port", port),
            ("scheme", scheme),
        ):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @_builtins.property
    @pulumi.getter
    def host(self) -> Optional[_builtins.str]:
        """
        Host name to connect to; defaults to the model serving container's IP.
        You probably want to set "Host" in httpHeaders instead.
        """
        return pulumi.get(self, "host")

    @_builtins.property
    @pulumi.getter(name="httpHeaders")
    def http_headers(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetHttpHeader']]:
        """
        Custom headers to set in the request. HTTP allows repeated headers.
        """
        return pulumi.get(self, "http_headers")

    @_builtins.property
    @pulumi.getter
    def path(self) -> Optional[_builtins.str]:
        """
        Path to access on the HTTP server.
        """
        return pulumi.get(self, "path")

    @_builtins.property
    @pulumi.getter
    def port(self) -> Optional[_builtins.int]:
        """
        Number of the port to access on the container.
        Must be in the range 1 to 65535.
        """
        return pulumi.get(self, "port")

    @_builtins.property
    @pulumi.getter
    def scheme(self) -> Optional[_builtins.str]:
        """
        Scheme to use for connecting to the host.
        Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS".
        """
        return pulumi.get(self, "scheme")
3237
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeHttpGetHttpHeader(dict):
    def __init__(__self__, *,
                 name: Optional[_builtins.str] = None,
                 value: Optional[_builtins.str] = None):
        """
        :param _builtins.str name: The header field name.
               This will be canonicalized upon output, so case-variant names will be
               understood as the same header.
        :param _builtins.str value: The header field value
        """
        # Persist only explicitly supplied fields.
        for prop, val in (("name", name), ("value", value)):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @_builtins.property
    @pulumi.getter
    def name(self) -> Optional[_builtins.str]:
        """
        The header field name. Canonicalized upon output, so case-variant
        names are understood as the same header.
        """
        return pulumi.get(self, "name")

    @_builtins.property
    @pulumi.getter
    def value(self) -> Optional[_builtins.str]:
        """
        The header field value.
        """
        return pulumi.get(self, "value")
3272
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecLivenessProbeTcpSocket(dict):
    def __init__(__self__, *,
                 host: Optional[_builtins.str] = None,
                 port: Optional[_builtins.int] = None):
        """
        :param _builtins.str host: Optional: Host name to connect to, defaults to the model serving
               container's IP.
        :param _builtins.int port: Number of the port to access on the container.
               Number must be in the range 1 to 65535.
        """
        # Persist only explicitly supplied fields.
        for prop, val in (("host", host), ("port", port)):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @_builtins.property
    @pulumi.getter
    def host(self) -> Optional[_builtins.str]:
        """
        Optional: host name to connect to; defaults to the model serving
        container's IP.
        """
        return pulumi.get(self, "host")

    @_builtins.property
    @pulumi.getter
    def port(self) -> Optional[_builtins.int]:
        """
        Number of the port to access on the container.
        Must be in the range 1 to 65535.
        """
        return pulumi.get(self, "port")
3307
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort(dict):
    @staticmethod
    def __key_warning(key: str):
        # The only camelCase API key that differs from its property name.
        suggest = {"containerPort": "container_port"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfigContainerSpecPort.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 container_port: Optional[_builtins.int] = None):
        """
        :param _builtins.int container_port: The number of the port to expose on the pod's IP address.
               Must be a valid port number, between 1 and 65535 inclusive.
        """
        if container_port is not None:
            pulumi.set(__self__, "container_port", container_port)

    @_builtins.property
    @pulumi.getter(name="containerPort")
    def container_port(self) -> Optional[_builtins.int]:
        """
        The number of the port to expose on the pod's IP address.
        Must be a valid port number, between 1 and 65535 inclusive.
        """
        return pulumi.get(self, "container_port")
3345
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe(dict):
    @staticmethod
    def __key_warning(key: str):
        # Map raw camelCase API keys to the snake_case property getters that
        # callers should use instead.
        suggest = {
            "exec": "exec_",
            "failureThreshold": "failure_threshold",
            "httpGet": "http_get",
            "initialDelaySeconds": "initial_delay_seconds",
            "periodSeconds": "period_seconds",
            "successThreshold": "success_threshold",
            "tcpSocket": "tcp_socket",
            "timeoutSeconds": "timeout_seconds",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbe.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 exec_: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeExec'] = None,
                 failure_threshold: Optional[_builtins.int] = None,
                 grpc: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeGrpc'] = None,
                 http_get: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet'] = None,
                 initial_delay_seconds: Optional[_builtins.int] = None,
                 period_seconds: Optional[_builtins.int] = None,
                 success_threshold: Optional[_builtins.int] = None,
                 tcp_socket: Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeTcpSocket'] = None,
                 timeout_seconds: Optional[_builtins.int] = None):
        """
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeExecArgs' exec_: ExecAction specifies a command to execute.
        :param _builtins.int failure_threshold: Number of consecutive failures before the probe is considered failed.
               Defaults to 3. Minimum value is 1. Maps to Kubernetes probe argument 'failureThreshold'.
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeGrpcArgs' grpc: GrpcAction checks the health of a container using a gRPC service.
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetArgs' http_get: HttpGetAction describes an action based on HTTP Get requests.
        :param _builtins.int initial_delay_seconds: Number of seconds to wait before starting the probe. Defaults to 0.
               Minimum value is 0. Maps to Kubernetes probe argument 'initialDelaySeconds'.
        :param _builtins.int period_seconds: How often (in seconds) to perform the probe. Default to 10 seconds.
               Minimum value is 1. Must be less than timeout_seconds.
               Maps to Kubernetes probe argument 'periodSeconds'.
        :param _builtins.int success_threshold: Number of consecutive successes before the probe is considered successful.
               Defaults to 1. Minimum value is 1. Maps to Kubernetes probe argument 'successThreshold'.
        :param 'AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeTcpSocketArgs' tcp_socket: TcpSocketAction probes the health of a container by opening a TCP socket
               connection.
        :param _builtins.int timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second.
               Minimum value is 1. Must be greater or equal to period_seconds.
               Maps to Kubernetes probe argument 'timeoutSeconds'.
        """
        # Assign in declaration order, skipping anything left unset.
        for prop, val in (
            ("exec_", exec_),
            ("failure_threshold", failure_threshold),
            ("grpc", grpc),
            ("http_get", http_get),
            ("initial_delay_seconds", initial_delay_seconds),
            ("period_seconds", period_seconds),
            ("success_threshold", success_threshold),
            ("tcp_socket", tcp_socket),
            ("timeout_seconds", timeout_seconds),
        ):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @_builtins.property
    @pulumi.getter(name="exec")
    def exec_(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeExec']:
        """
        ExecAction specifies a command to execute.
        """
        return pulumi.get(self, "exec_")

    @_builtins.property
    @pulumi.getter(name="failureThreshold")
    def failure_threshold(self) -> Optional[_builtins.int]:
        """
        Consecutive failures before the probe counts as failed.
        Defaults to 3; minimum 1. Kubernetes probe argument 'failureThreshold'.
        """
        return pulumi.get(self, "failure_threshold")

    @_builtins.property
    @pulumi.getter
    def grpc(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeGrpc']:
        """
        GrpcAction checks the health of a container using a gRPC service.
        """
        return pulumi.get(self, "grpc")

    @_builtins.property
    @pulumi.getter(name="httpGet")
    def http_get(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet']:
        """
        HttpGetAction describes an action based on HTTP Get requests.
        """
        return pulumi.get(self, "http_get")

    @_builtins.property
    @pulumi.getter(name="initialDelaySeconds")
    def initial_delay_seconds(self) -> Optional[_builtins.int]:
        """
        Seconds to wait before starting the probe. Defaults to 0; minimum 0.
        Kubernetes probe argument 'initialDelaySeconds'.
        """
        return pulumi.get(self, "initial_delay_seconds")

    @_builtins.property
    @pulumi.getter(name="periodSeconds")
    def period_seconds(self) -> Optional[_builtins.int]:
        """
        How often (seconds) to run the probe. Defaults to 10; minimum 1.
        Must be less than timeout_seconds. Kubernetes probe argument 'periodSeconds'.
        """
        return pulumi.get(self, "period_seconds")

    @_builtins.property
    @pulumi.getter(name="successThreshold")
    def success_threshold(self) -> Optional[_builtins.int]:
        """
        Consecutive successes before the probe counts as successful.
        Defaults to 1; minimum 1. Kubernetes probe argument 'successThreshold'.
        """
        return pulumi.get(self, "success_threshold")

    @_builtins.property
    @pulumi.getter(name="tcpSocket")
    def tcp_socket(self) -> Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeTcpSocket']:
        """
        TcpSocketAction probes container health by opening a TCP socket connection.
        """
        return pulumi.get(self, "tcp_socket")

    @_builtins.property
    @pulumi.getter(name="timeoutSeconds")
    def timeout_seconds(self) -> Optional[_builtins.int]:
        """
        Seconds after which the probe times out. Defaults to 1; minimum 1.
        Must be greater or equal to period_seconds. Kubernetes probe argument 'timeoutSeconds'.
        """
        return pulumi.get(self, "timeout_seconds")
3521
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeExec(dict):
    def __init__(__self__, *,
                 commands: Optional[Sequence[_builtins.str]] = None):
        """
        :param Sequence[_builtins.str] commands: Command is the command line to execute inside the container, the working
               directory for the command is root ('/') in the container's filesystem.
               The command is simply exec'd, it is not run inside a shell, so
               traditional shell instructions ('|', etc) won't work. To use a shell, you
               need to explicitly call out to that shell. Exit status of 0 is treated as
               live/healthy and non-zero is unhealthy.
        """
        if commands is not None:
            pulumi.set(__self__, "commands", commands)

    @_builtins.property
    @pulumi.getter
    def commands(self) -> Optional[Sequence[_builtins.str]]:
        """
        The command line to execute inside the container; the working directory
        is root ('/') in the container's filesystem. The command is exec'd
        directly (no shell), so shell constructs ('|', etc.) won't work unless
        you explicitly invoke a shell. Exit status 0 means live/healthy;
        non-zero means unhealthy.
        """
        return pulumi.get(self, "commands")
3550
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeGrpc(dict):
    """
    gRPC action for a startup probe: issues a gRPC health check against the
    given port (and, optionally, a named service).
    """
    def __init__(__self__, *,
                 port: Optional[_builtins.int] = None,
                 service: Optional[_builtins.str] = None):
        """
        :param _builtins.int port: Port number of the gRPC service. Number must be in the range 1 to 65535.
        :param _builtins.str service: Service is the name of the service to place in the gRPC
               HealthCheckRequest. See
               https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
               If this is not specified, the default behavior is defined by gRPC.
        """
        # Only keys that were explicitly supplied are recorded on the dict.
        for _key, _value in (("port", port), ("service", service)):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @_builtins.property
    @pulumi.getter
    def port(self) -> Optional[_builtins.int]:
        """
        Port number of the gRPC service. Number must be in the range 1 to 65535.
        """
        return pulumi.get(self, "port")

    @_builtins.property
    @pulumi.getter
    def service(self) -> Optional[_builtins.str]:
        """
        Service is the name of the service to place in the gRPC
        HealthCheckRequest. See
        https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
        If this is not specified, the default behavior is defined by gRPC.
        """
        return pulumi.get(self, "service")
3585
+
3586
+
3587
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet(dict):
    """
    HTTP GET action for a startup probe: performs an HTTP request against the
    container and treats a successful response as healthy.
    """
    @staticmethod
    def __key_warning(key: str):
        # Camel-case dict keys are legacy; steer callers to the snake-case
        # property getters instead.
        suggest = "http_headers" if key == "httpHeaders" else None
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGet.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 host: Optional[_builtins.str] = None,
                 http_headers: Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetHttpHeader']] = None,
                 path: Optional[_builtins.str] = None,
                 port: Optional[_builtins.int] = None,
                 scheme: Optional[_builtins.str] = None):
        """
        :param _builtins.str host: Host name to connect to, defaults to the model serving container's IP.
               You probably want to set "Host" in httpHeaders instead.
        :param Sequence['AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetHttpHeaderArgs'] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
               Structure is documented below.
        :param _builtins.str path: Path to access on the HTTP server.
        :param _builtins.int port: Number of the port to access on the container.
               Number must be in the range 1 to 65535.
        :param _builtins.str scheme: Scheme to use for connecting to the host.
               Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS".
        """
        # Only keys that were explicitly supplied are recorded on the dict.
        for _key, _value in (
                ("host", host),
                ("http_headers", http_headers),
                ("path", path),
                ("port", port),
                ("scheme", scheme)):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @_builtins.property
    @pulumi.getter
    def host(self) -> Optional[_builtins.str]:
        """
        Host name to connect to, defaults to the model serving container's IP.
        You probably want to set "Host" in httpHeaders instead.
        """
        return pulumi.get(self, "host")

    @_builtins.property
    @pulumi.getter(name="httpHeaders")
    def http_headers(self) -> Optional[Sequence['outputs.AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetHttpHeader']]:
        """
        Custom headers to set in the request. HTTP allows repeated headers.
        Structure is documented below.
        """
        return pulumi.get(self, "http_headers")

    @_builtins.property
    @pulumi.getter
    def path(self) -> Optional[_builtins.str]:
        """
        Path to access on the HTTP server.
        """
        return pulumi.get(self, "path")

    @_builtins.property
    @pulumi.getter
    def port(self) -> Optional[_builtins.int]:
        """
        Number of the port to access on the container.
        Number must be in the range 1 to 65535.
        """
        return pulumi.get(self, "port")

    @_builtins.property
    @pulumi.getter
    def scheme(self) -> Optional[_builtins.str]:
        """
        Scheme to use for connecting to the host.
        Defaults to HTTP. Acceptable values are "HTTP" or "HTTPS".
        """
        return pulumi.get(self, "scheme")
3677
+
3678
+
3679
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeHttpGetHttpHeader(dict):
    """
    A single custom HTTP header (name/value pair) sent with the probe's
    HTTP GET request.
    """
    def __init__(__self__, *,
                 name: Optional[_builtins.str] = None,
                 value: Optional[_builtins.str] = None):
        """
        :param _builtins.str name: The header field name.
               This will be canonicalized upon output, so case-variant names will be
               understood as the same header.
        :param _builtins.str value: The header field value
        """
        # Only keys that were explicitly supplied are recorded on the dict.
        for _key, _value in (("name", name), ("value", value)):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @_builtins.property
    @pulumi.getter
    def name(self) -> Optional[_builtins.str]:
        """
        The header field name.
        This will be canonicalized upon output, so case-variant names will be
        understood as the same header.
        """
        return pulumi.get(self, "name")

    @_builtins.property
    @pulumi.getter
    def value(self) -> Optional[_builtins.str]:
        """
        The header field value
        """
        return pulumi.get(self, "value")
3712
+
3713
+
3714
@pulumi.output_type
class AiEndpointWithModelGardenDeploymentModelConfigContainerSpecStartupProbeTcpSocket(dict):
    """
    TCP socket action for a startup probe: a successful connection to the
    given host/port counts as healthy.
    """
    def __init__(__self__, *,
                 host: Optional[_builtins.str] = None,
                 port: Optional[_builtins.int] = None):
        """
        :param _builtins.str host: Optional: Host name to connect to, defaults to the model serving
               container's IP.
        :param _builtins.int port: Number of the port to access on the container.
               Number must be in the range 1 to 65535.
        """
        # Only keys that were explicitly supplied are recorded on the dict.
        for _key, _value in (("host", host), ("port", port)):
            if _value is not None:
                pulumi.set(__self__, _key, _value)

    @_builtins.property
    @pulumi.getter
    def host(self) -> Optional[_builtins.str]:
        """
        Optional: Host name to connect to, defaults to the model serving
        container's IP.
        """
        return pulumi.get(self, "host")

    @_builtins.property
    @pulumi.getter
    def port(self) -> Optional[_builtins.int]:
        """
        Number of the port to access on the container.
        Number must be in the range 1 to 65535.
        """
        return pulumi.get(self, "port")
3747
+
3748
+
1142
3749
  @pulumi.output_type
1143
3750
  class AiFeatureGroupBigQuery(dict):
1144
3751
  @staticmethod