pulumi-gcp 8.9.0a1731221331__py3-none-any.whl → 8.9.0a1731432418__py3-none-any.whl

This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions exactly as they appear in their respective public registries.
Files changed (114)
  1. pulumi_gcp/__init__.py +64 -0
  2. pulumi_gcp/activedirectory/domain.py +24 -0
  3. pulumi_gcp/apigee/__init__.py +1 -0
  4. pulumi_gcp/apigee/_inputs.py +74 -0
  5. pulumi_gcp/apigee/api.py +456 -0
  6. pulumi_gcp/apigee/outputs.py +65 -0
  7. pulumi_gcp/applicationintegration/auth_config.py +24 -0
  8. pulumi_gcp/backupdisasterrecovery/__init__.py +4 -0
  9. pulumi_gcp/backupdisasterrecovery/_inputs.py +533 -0
  10. pulumi_gcp/backupdisasterrecovery/backup_plan.py +653 -0
  11. pulumi_gcp/backupdisasterrecovery/backup_plan_association.py +766 -0
  12. pulumi_gcp/backupdisasterrecovery/backup_vault.py +146 -6
  13. pulumi_gcp/backupdisasterrecovery/get_backup_plan.py +204 -0
  14. pulumi_gcp/backupdisasterrecovery/get_backup_plan_association.py +243 -0
  15. pulumi_gcp/backupdisasterrecovery/outputs.py +702 -0
  16. pulumi_gcp/bigquery/data_transfer_config.py +24 -0
  17. pulumi_gcp/cloudrunv2/_inputs.py +46 -0
  18. pulumi_gcp/cloudrunv2/job.py +0 -2
  19. pulumi_gcp/cloudrunv2/outputs.py +60 -2
  20. pulumi_gcp/cloudrunv2/service.py +0 -2
  21. pulumi_gcp/compute/__init__.py +1 -0
  22. pulumi_gcp/compute/_inputs.py +2163 -256
  23. pulumi_gcp/compute/disk.py +7 -7
  24. pulumi_gcp/compute/firewall_policy_rule.py +108 -54
  25. pulumi_gcp/compute/get_region_instance_group_manager.py +12 -1
  26. pulumi_gcp/compute/health_check.py +42 -42
  27. pulumi_gcp/compute/network_firewall_policy_rule.py +4 -4
  28. pulumi_gcp/compute/network_firewall_policy_with_rules.py +10 -10
  29. pulumi_gcp/compute/node_template.py +95 -0
  30. pulumi_gcp/compute/outputs.py +1639 -213
  31. pulumi_gcp/compute/region_disk.py +7 -7
  32. pulumi_gcp/compute/region_health_check.py +42 -42
  33. pulumi_gcp/compute/region_instance_group_manager.py +54 -14
  34. pulumi_gcp/compute/region_network_firewall_policy_rule.py +4 -4
  35. pulumi_gcp/compute/region_network_firewall_policy_with_rules.py +10 -10
  36. pulumi_gcp/compute/region_resize_request.py +772 -0
  37. pulumi_gcp/compute/region_security_policy.py +120 -0
  38. pulumi_gcp/compute/region_security_policy_rule.py +6 -6
  39. pulumi_gcp/compute/router_peer.py +56 -35
  40. pulumi_gcp/compute/security_scan_config.py +8 -8
  41. pulumi_gcp/config/__init__.pyi +4 -0
  42. pulumi_gcp/config/vars.py +8 -0
  43. pulumi_gcp/container/_inputs.py +345 -10
  44. pulumi_gcp/container/cluster.py +101 -0
  45. pulumi_gcp/container/get_cluster.py +23 -1
  46. pulumi_gcp/container/outputs.py +456 -8
  47. pulumi_gcp/dataloss/prevention_discovery_config.py +7 -7
  48. pulumi_gcp/dataproc/__init__.py +2 -0
  49. pulumi_gcp/dataproc/_inputs.py +101 -0
  50. pulumi_gcp/dataproc/gdc_application_environment.py +931 -0
  51. pulumi_gcp/dataproc/gdc_service_instance.py +1022 -0
  52. pulumi_gcp/dataproc/outputs.py +94 -0
  53. pulumi_gcp/edgecontainer/vpn_connection.py +4 -4
  54. pulumi_gcp/firebase/android_app.py +2 -2
  55. pulumi_gcp/firebase/apple_app.py +2 -2
  56. pulumi_gcp/firebase/web_app.py +2 -2
  57. pulumi_gcp/firestore/index.py +44 -0
  58. pulumi_gcp/gkeonprem/_inputs.py +15 -15
  59. pulumi_gcp/gkeonprem/outputs.py +10 -10
  60. pulumi_gcp/healthcare/dataset.py +7 -7
  61. pulumi_gcp/healthcare/dicom_store.py +7 -7
  62. pulumi_gcp/healthcare/fhir_store.py +7 -7
  63. pulumi_gcp/healthcare/hl7_store.py +14 -14
  64. pulumi_gcp/iam/__init__.py +1 -0
  65. pulumi_gcp/iam/_inputs.py +161 -0
  66. pulumi_gcp/iam/outputs.py +114 -0
  67. pulumi_gcp/iam/principal_access_boundary_policy.py +679 -0
  68. pulumi_gcp/logging/metric.py +2 -2
  69. pulumi_gcp/looker/_inputs.py +6 -0
  70. pulumi_gcp/looker/instance.py +169 -321
  71. pulumi_gcp/looker/outputs.py +4 -0
  72. pulumi_gcp/memorystore/instance.py +4 -0
  73. pulumi_gcp/monitoring/alert_policy.py +24 -0
  74. pulumi_gcp/monitoring/custom_service.py +24 -0
  75. pulumi_gcp/monitoring/group.py +24 -0
  76. pulumi_gcp/monitoring/metric_descriptor.py +24 -0
  77. pulumi_gcp/monitoring/slo.py +24 -0
  78. pulumi_gcp/monitoring/uptime_check_config.py +24 -0
  79. pulumi_gcp/networkmanagement/__init__.py +1 -0
  80. pulumi_gcp/networkmanagement/vpc_flow_logs_config.py +1358 -0
  81. pulumi_gcp/osconfig/patch_deployment.py +8 -8
  82. pulumi_gcp/provider.py +40 -0
  83. pulumi_gcp/pulumi-plugin.json +1 -1
  84. pulumi_gcp/redis/_inputs.py +3 -3
  85. pulumi_gcp/redis/outputs.py +2 -2
  86. pulumi_gcp/securitycenter/notification_config.py +4 -16
  87. pulumi_gcp/securitycenter/project_notification_config.py +0 -24
  88. pulumi_gcp/securitycenter/v2_organization_notification_config.py +4 -16
  89. pulumi_gcp/spanner/__init__.py +1 -0
  90. pulumi_gcp/spanner/_inputs.py +9 -9
  91. pulumi_gcp/spanner/get_database.py +229 -0
  92. pulumi_gcp/spanner/get_instance.py +12 -1
  93. pulumi_gcp/spanner/instance.py +70 -0
  94. pulumi_gcp/spanner/outputs.py +46 -12
  95. pulumi_gcp/sql/_inputs.py +26 -0
  96. pulumi_gcp/sql/database_instance.py +124 -11
  97. pulumi_gcp/sql/get_database_instance.py +12 -1
  98. pulumi_gcp/sql/outputs.py +51 -0
  99. pulumi_gcp/storage/_inputs.py +3 -3
  100. pulumi_gcp/storage/outputs.py +2 -2
  101. pulumi_gcp/tags/tag_binding.py +4 -4
  102. pulumi_gcp/tags/tag_value.py +2 -2
  103. pulumi_gcp/transcoder/job.py +24 -0
  104. pulumi_gcp/vertex/_inputs.py +184 -0
  105. pulumi_gcp/vertex/ai_endpoint.py +394 -8
  106. pulumi_gcp/vertex/ai_feature_online_store_featureview.py +4 -4
  107. pulumi_gcp/vertex/outputs.py +166 -0
  108. pulumi_gcp/workbench/instance.py +21 -7
  109. pulumi_gcp/workflows/workflow.py +36 -0
  110. pulumi_gcp/workstations/workstation_config.py +8 -8
  111. {pulumi_gcp-8.9.0a1731221331.dist-info → pulumi_gcp-8.9.0a1731432418.dist-info}/METADATA +1 -1
  112. {pulumi_gcp-8.9.0a1731221331.dist-info → pulumi_gcp-8.9.0a1731432418.dist-info}/RECORD +114 -103
  113. {pulumi_gcp-8.9.0a1731221331.dist-info → pulumi_gcp-8.9.0a1731432418.dist-info}/WHEEL +1 -1
  114. {pulumi_gcp-8.9.0a1731221331.dist-info → pulumi_gcp-8.9.0a1731432418.dist-info}/top_level.txt +0 -0
@@ -23,13 +23,17 @@ class AiEndpointArgs:
  def __init__(__self__, *,
  display_name: pulumi.Input[str],
  location: pulumi.Input[str],
+ dedicated_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
  description: Optional[pulumi.Input[str]] = None,
  encryption_spec: Optional[pulumi.Input['AiEndpointEncryptionSpecArgs']] = None,
  labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
  name: Optional[pulumi.Input[str]] = None,
  network: Optional[pulumi.Input[str]] = None,
+ predict_request_response_logging_config: Optional[pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs']] = None,
+ private_service_connect_config: Optional[pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs']] = None,
  project: Optional[pulumi.Input[str]] = None,
- region: Optional[pulumi.Input[str]] = None):
+ region: Optional[pulumi.Input[str]] = None,
+ traffic_split: Optional[pulumi.Input[str]] = None):
  """
  The set of arguments for constructing a AiEndpoint resource.
  :param pulumi.Input[str] display_name: Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters.
@@ -37,6 +41,7 @@ class AiEndpointArgs:


  - - -
+ :param pulumi.Input[bool] dedicated_endpoint_enabled: If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
  :param pulumi.Input[str] description: The description of the Endpoint.
  :param pulumi.Input['AiEndpointEncryptionSpecArgs'] encryption_spec: Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key.
  Structure is documented below.
@@ -44,13 +49,26 @@ class AiEndpointArgs:
  **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
  Please refer to the field `effective_labels` for all of the labels present on the resource.
  :param pulumi.Input[str] name: The resource name of the Endpoint. The name must be numeric with no leading zeros and can be at most 10 digits.
- :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
+ :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
+ :param pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs'] predict_request_response_logging_config: Configures the request-response logging for online prediction.
+ Structure is documented below.
+ :param pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs'] private_service_connect_config: Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
+ Structure is documented below.
  :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
  If it is not provided, the provider project is used.
  :param pulumi.Input[str] region: The region for the resource
+ :param pulumi.Input[str] traffic_split: A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
+ If a DeployedModel's id is not listed in this map, then it receives no traffic.
+ The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
+ > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
+ resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
+ the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
+ [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
  """
  pulumi.set(__self__, "display_name", display_name)
  pulumi.set(__self__, "location", location)
+ if dedicated_endpoint_enabled is not None:
+ pulumi.set(__self__, "dedicated_endpoint_enabled", dedicated_endpoint_enabled)
  if description is not None:
  pulumi.set(__self__, "description", description)
  if encryption_spec is not None:
@@ -61,10 +79,16 @@ class AiEndpointArgs:
  pulumi.set(__self__, "name", name)
  if network is not None:
  pulumi.set(__self__, "network", network)
+ if predict_request_response_logging_config is not None:
+ pulumi.set(__self__, "predict_request_response_logging_config", predict_request_response_logging_config)
+ if private_service_connect_config is not None:
+ pulumi.set(__self__, "private_service_connect_config", private_service_connect_config)
  if project is not None:
  pulumi.set(__self__, "project", project)
  if region is not None:
  pulumi.set(__self__, "region", region)
+ if traffic_split is not None:
+ pulumi.set(__self__, "traffic_split", traffic_split)

  @property
  @pulumi.getter(name="displayName")
@@ -93,6 +117,18 @@ class AiEndpointArgs:
  def location(self, value: pulumi.Input[str]):
  pulumi.set(self, "location", value)

+ @property
+ @pulumi.getter(name="dedicatedEndpointEnabled")
+ def dedicated_endpoint_enabled(self) -> Optional[pulumi.Input[bool]]:
+ """
+ If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
+ """
+ return pulumi.get(self, "dedicated_endpoint_enabled")
+
+ @dedicated_endpoint_enabled.setter
+ def dedicated_endpoint_enabled(self, value: Optional[pulumi.Input[bool]]):
+ pulumi.set(self, "dedicated_endpoint_enabled", value)
+
  @property
  @pulumi.getter
  def description(self) -> Optional[pulumi.Input[str]]:
@@ -148,7 +184,7 @@ class AiEndpointArgs:
  @pulumi.getter
  def network(self) -> Optional[pulumi.Input[str]]:
  """
- The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
+ The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
  """
  return pulumi.get(self, "network")

@@ -156,6 +192,32 @@ class AiEndpointArgs:
  def network(self, value: Optional[pulumi.Input[str]]):
  pulumi.set(self, "network", value)

+ @property
+ @pulumi.getter(name="predictRequestResponseLoggingConfig")
+ def predict_request_response_logging_config(self) -> Optional[pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs']]:
+ """
+ Configures the request-response logging for online prediction.
+ Structure is documented below.
+ """
+ return pulumi.get(self, "predict_request_response_logging_config")
+
+ @predict_request_response_logging_config.setter
+ def predict_request_response_logging_config(self, value: Optional[pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs']]):
+ pulumi.set(self, "predict_request_response_logging_config", value)
+
+ @property
+ @pulumi.getter(name="privateServiceConnectConfig")
+ def private_service_connect_config(self) -> Optional[pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs']]:
+ """
+ Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
+ Structure is documented below.
+ """
+ return pulumi.get(self, "private_service_connect_config")
+
+ @private_service_connect_config.setter
+ def private_service_connect_config(self, value: Optional[pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs']]):
+ pulumi.set(self, "private_service_connect_config", value)
+
  @property
  @pulumi.getter
  def project(self) -> Optional[pulumi.Input[str]]:
@@ -181,11 +243,31 @@ class AiEndpointArgs:
  def region(self, value: Optional[pulumi.Input[str]]):
  pulumi.set(self, "region", value)

+ @property
+ @pulumi.getter(name="trafficSplit")
+ def traffic_split(self) -> Optional[pulumi.Input[str]]:
+ """
+ A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
+ If a DeployedModel's id is not listed in this map, then it receives no traffic.
+ The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
+ > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
+ resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
+ the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
+ [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
+ """
+ return pulumi.get(self, "traffic_split")
+
+ @traffic_split.setter
+ def traffic_split(self, value: Optional[pulumi.Input[str]]):
+ pulumi.set(self, "traffic_split", value)
+

  @pulumi.input_type
  class _AiEndpointState:
  def __init__(__self__, *,
  create_time: Optional[pulumi.Input[str]] = None,
+ dedicated_endpoint_dns: Optional[pulumi.Input[str]] = None,
+ dedicated_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
  deployed_models: Optional[pulumi.Input[Sequence[pulumi.Input['AiEndpointDeployedModelArgs']]]] = None,
  description: Optional[pulumi.Input[str]] = None,
  display_name: Optional[pulumi.Input[str]] = None,
@@ -197,14 +279,19 @@ class _AiEndpointState:
  model_deployment_monitoring_job: Optional[pulumi.Input[str]] = None,
  name: Optional[pulumi.Input[str]] = None,
  network: Optional[pulumi.Input[str]] = None,
+ predict_request_response_logging_config: Optional[pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs']] = None,
+ private_service_connect_config: Optional[pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs']] = None,
  project: Optional[pulumi.Input[str]] = None,
  pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
  region: Optional[pulumi.Input[str]] = None,
+ traffic_split: Optional[pulumi.Input[str]] = None,
  update_time: Optional[pulumi.Input[str]] = None):
  """
  Input properties used for looking up and filtering AiEndpoint resources.
  :param pulumi.Input[str] create_time: (Output)
  Output only. Timestamp when the DeployedModel was created.
+ :param pulumi.Input[str] dedicated_endpoint_dns: Output only. DNS of the dedicated endpoint. Will only be populated if dedicatedEndpointEnabled is true. Format: `https://{endpointId}.{region}-{projectNumber}.prediction.vertexai.goog`.
+ :param pulumi.Input[bool] dedicated_endpoint_enabled: If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
  :param pulumi.Input[Sequence[pulumi.Input['AiEndpointDeployedModelArgs']]] deployed_models: Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. Models can also be deployed and undeployed using the [Cloud Console](https://console.cloud.google.com/vertex-ai/).
  Structure is documented below.
  :param pulumi.Input[str] description: The description of the Endpoint.
@@ -222,16 +309,31 @@ class _AiEndpointState:
  - - -
  :param pulumi.Input[str] model_deployment_monitoring_job: Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by CreateModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`
  :param pulumi.Input[str] name: The resource name of the Endpoint. The name must be numeric with no leading zeros and can be at most 10 digits.
- :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
+ :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
+ :param pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs'] predict_request_response_logging_config: Configures the request-response logging for online prediction.
+ Structure is documented below.
+ :param pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs'] private_service_connect_config: Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
+ Structure is documented below.
  :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
  If it is not provided, the provider project is used.
  :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource
  and default labels configured on the provider.
  :param pulumi.Input[str] region: The region for the resource
+ :param pulumi.Input[str] traffic_split: A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
+ If a DeployedModel's id is not listed in this map, then it receives no traffic.
+ The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
+ > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
+ resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
+ the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
+ [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
  :param pulumi.Input[str] update_time: Output only. Timestamp when this Endpoint was last updated.
  """
  if create_time is not None:
  pulumi.set(__self__, "create_time", create_time)
+ if dedicated_endpoint_dns is not None:
+ pulumi.set(__self__, "dedicated_endpoint_dns", dedicated_endpoint_dns)
+ if dedicated_endpoint_enabled is not None:
+ pulumi.set(__self__, "dedicated_endpoint_enabled", dedicated_endpoint_enabled)
  if deployed_models is not None:
  pulumi.set(__self__, "deployed_models", deployed_models)
  if description is not None:
@@ -254,12 +356,18 @@ class _AiEndpointState:
  pulumi.set(__self__, "name", name)
  if network is not None:
  pulumi.set(__self__, "network", network)
+ if predict_request_response_logging_config is not None:
+ pulumi.set(__self__, "predict_request_response_logging_config", predict_request_response_logging_config)
+ if private_service_connect_config is not None:
+ pulumi.set(__self__, "private_service_connect_config", private_service_connect_config)
  if project is not None:
  pulumi.set(__self__, "project", project)
  if pulumi_labels is not None:
  pulumi.set(__self__, "pulumi_labels", pulumi_labels)
  if region is not None:
  pulumi.set(__self__, "region", region)
+ if traffic_split is not None:
+ pulumi.set(__self__, "traffic_split", traffic_split)
  if update_time is not None:
  pulumi.set(__self__, "update_time", update_time)

@@ -276,6 +384,30 @@ class _AiEndpointState:
  def create_time(self, value: Optional[pulumi.Input[str]]):
  pulumi.set(self, "create_time", value)

+ @property
+ @pulumi.getter(name="dedicatedEndpointDns")
+ def dedicated_endpoint_dns(self) -> Optional[pulumi.Input[str]]:
+ """
+ Output only. DNS of the dedicated endpoint. Will only be populated if dedicatedEndpointEnabled is true. Format: `https://{endpointId}.{region}-{projectNumber}.prediction.vertexai.goog`.
+ """
+ return pulumi.get(self, "dedicated_endpoint_dns")
+
+ @dedicated_endpoint_dns.setter
+ def dedicated_endpoint_dns(self, value: Optional[pulumi.Input[str]]):
+ pulumi.set(self, "dedicated_endpoint_dns", value)
+
+ @property
+ @pulumi.getter(name="dedicatedEndpointEnabled")
+ def dedicated_endpoint_enabled(self) -> Optional[pulumi.Input[bool]]:
+ """
+ If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
+ """
+ return pulumi.get(self, "dedicated_endpoint_enabled")
+
+ @dedicated_endpoint_enabled.setter
+ def dedicated_endpoint_enabled(self, value: Optional[pulumi.Input[bool]]):
+ pulumi.set(self, "dedicated_endpoint_enabled", value)
+
  @property
  @pulumi.getter(name="deployedModels")
  def deployed_models(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AiEndpointDeployedModelArgs']]]]:
@@ -407,7 +539,7 @@ class _AiEndpointState:
  @pulumi.getter
  def network(self) -> Optional[pulumi.Input[str]]:
  """
- The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
+ The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
  """
  return pulumi.get(self, "network")

@@ -415,6 +547,32 @@ class _AiEndpointState:
  def network(self, value: Optional[pulumi.Input[str]]):
  pulumi.set(self, "network", value)

+ @property
+ @pulumi.getter(name="predictRequestResponseLoggingConfig")
+ def predict_request_response_logging_config(self) -> Optional[pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs']]:
+ """
+ Configures the request-response logging for online prediction.
+ Structure is documented below.
+ """
+ return pulumi.get(self, "predict_request_response_logging_config")
+
+ @predict_request_response_logging_config.setter
+ def predict_request_response_logging_config(self, value: Optional[pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs']]):
+ pulumi.set(self, "predict_request_response_logging_config", value)
+
+ @property
+ @pulumi.getter(name="privateServiceConnectConfig")
+ def private_service_connect_config(self) -> Optional[pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs']]:
+ """
+ Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
+ Structure is documented below.
+ """
+ return pulumi.get(self, "private_service_connect_config")
+
+ @private_service_connect_config.setter
+ def private_service_connect_config(self, value: Optional[pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs']]):
+ pulumi.set(self, "private_service_connect_config", value)
+
  @property
  @pulumi.getter
  def project(self) -> Optional[pulumi.Input[str]]:
@@ -453,6 +611,24 @@ class _AiEndpointState:
  def region(self, value: Optional[pulumi.Input[str]]):
  pulumi.set(self, "region", value)

+ @property
+ @pulumi.getter(name="trafficSplit")
+ def traffic_split(self) -> Optional[pulumi.Input[str]]:
+ """
+ A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
+ If a DeployedModel's id is not listed in this map, then it receives no traffic.
+ The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
+ > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
+ resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
+ the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
+ [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
+ """
+ return pulumi.get(self, "traffic_split")
+
+ @traffic_split.setter
+ def traffic_split(self, value: Optional[pulumi.Input[str]]):
+ pulumi.set(self, "traffic_split", value)
+
  @property
  @pulumi.getter(name="updateTime")
  def update_time(self) -> Optional[pulumi.Input[str]]:
@@ -471,6 +647,7 @@ class AiEndpoint(pulumi.CustomResource):
  def __init__(__self__,
  resource_name: str,
  opts: Optional[pulumi.ResourceOptions] = None,
+ dedicated_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
  description: Optional[pulumi.Input[str]] = None,
  display_name: Optional[pulumi.Input[str]] = None,
  encryption_spec: Optional[pulumi.Input[Union['AiEndpointEncryptionSpecArgs', 'AiEndpointEncryptionSpecArgsDict']]] = None,
@@ -478,8 +655,11 @@ class AiEndpoint(pulumi.CustomResource):
  location: Optional[pulumi.Input[str]] = None,
  name: Optional[pulumi.Input[str]] = None,
  network: Optional[pulumi.Input[str]] = None,
+ predict_request_response_logging_config: Optional[pulumi.Input[Union['AiEndpointPredictRequestResponseLoggingConfigArgs', 'AiEndpointPredictRequestResponseLoggingConfigArgsDict']]] = None,
+ private_service_connect_config: Optional[pulumi.Input[Union['AiEndpointPrivateServiceConnectConfigArgs', 'AiEndpointPrivateServiceConnectConfigArgsDict']]] = None,
  project: Optional[pulumi.Input[str]] = None,
  region: Optional[pulumi.Input[str]] = None,
+ traffic_split: Optional[pulumi.Input[str]] = None,
  __props__=None):
  """
  Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations.
@@ -496,6 +676,7 @@ class AiEndpoint(pulumi.CustomResource):

  ```python
  import pulumi
+ import json
  import pulumi_gcp as gcp

  vertex_network = gcp.compute.Network("vertex_network", name="network-name")
@@ -509,6 +690,12 @@ class AiEndpoint(pulumi.CustomResource):
  network=vertex_network.id,
  service="servicenetworking.googleapis.com",
  reserved_peering_ranges=[vertex_range.name])
+ bq_dataset = gcp.bigquery.Dataset("bq_dataset",
+ dataset_id="some_dataset",
+ friendly_name="logging dataset",
+ description="This is a dataset that requests are logged to",
+ location="US",
+ delete_contents_on_destroy=True)
  project = gcp.organizations.get_project()
  endpoint = gcp.vertex.AiEndpoint("endpoint",
  name="endpoint-name",
@@ -523,12 +710,62 @@ class AiEndpoint(pulumi.CustomResource):
  encryption_spec={
  "kms_key_name": "kms-name",
  },
+ predict_request_response_logging_config={
+ "bigquery_destination": {
+ "output_uri": bq_dataset.dataset_id.apply(lambda dataset_id: f"bq://{project.project_id}.{dataset_id}.request_response_logging"),
+ },
+ "enabled": True,
+ "sampling_rate": 0.1,
+ },
+ traffic_split=json.dumps({
+ "12345": 100,
+ }),
  opts = pulumi.ResourceOptions(depends_on=[vertex_vpc_connection]))
  crypto_key = gcp.kms.CryptoKeyIAMMember("crypto_key",
  crypto_key_id="kms-name",
  role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
  member=f"serviceAccount:service-{project.number}@gcp-sa-aiplatform.iam.gserviceaccount.com")
  ```
+ ### Vertex Ai Endpoint Private Service Connect
+
+ ```python
+ import pulumi
+ import pulumi_gcp as gcp
+
+ project = gcp.organizations.get_project()
+ endpoint = gcp.vertex.AiEndpoint("endpoint",
+ name="endpoint-name_69391",
+ display_name="sample-endpoint",
+ description="A sample vertex endpoint",
+ location="us-central1",
+ region="us-central1",
+ labels={
+ "label-one": "value-one",
+ },
+ private_service_connect_config={
+ "enable_private_service_connect": True,
+ "project_allowlists": [project.project_id],
+ "enable_secure_private_service_connect": False,
+ })
+ ```
+ ### Vertex Ai Endpoint Dedicated Endpoint
+
+ ```python
+ import pulumi
+ import pulumi_gcp as gcp
+
+ endpoint = gcp.vertex.AiEndpoint("endpoint",
+ name="endpoint-name_8270",
+ display_name="sample-endpoint",
+ description="A sample vertex endpoint",
+ location="us-central1",
+ region="us-central1",
+ labels={
+ "label-one": "value-one",
+ },
+ dedicated_endpoint_enabled=True)
+ project = gcp.organizations.get_project()
+ ```

  ## Import

@@ -556,6 +793,7 @@ class AiEndpoint(pulumi.CustomResource):

  :param str resource_name: The name of the resource.
  :param pulumi.ResourceOptions opts: Options for the resource.
+ :param pulumi.Input[bool] dedicated_endpoint_enabled: If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
  :param pulumi.Input[str] description: The description of the Endpoint.
  :param pulumi.Input[str] display_name: Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters.
  :param pulumi.Input[Union['AiEndpointEncryptionSpecArgs', 'AiEndpointEncryptionSpecArgsDict']] encryption_spec: Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key.
@@ -568,10 +806,21 @@ class AiEndpoint(pulumi.CustomResource):

  - - -
  :param pulumi.Input[str] name: The resource name of the Endpoint. The name must be numeric with no leading zeros and can be at most 10 digits.
- :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
+ :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
+ :param pulumi.Input[Union['AiEndpointPredictRequestResponseLoggingConfigArgs', 'AiEndpointPredictRequestResponseLoggingConfigArgsDict']] predict_request_response_logging_config: Configures the request-response logging for online prediction.
+ Structure is documented below.
+ :param pulumi.Input[Union['AiEndpointPrivateServiceConnectConfigArgs', 'AiEndpointPrivateServiceConnectConfigArgsDict']] private_service_connect_config: Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
+ Structure is documented below.
  :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
  If it is not provided, the provider project is used.
  :param pulumi.Input[str] region: The region for the resource
+ :param pulumi.Input[str] traffic_split: A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
+ If a DeployedModel's id is not listed in this map, then it receives no traffic.
+ The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
+ > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
+ resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
+ the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
+ [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
  """
  ...
  @overload
@@ -594,6 +843,7 @@ class AiEndpoint(pulumi.CustomResource):

  ```python
  import pulumi
+ import json
  import pulumi_gcp as gcp

  vertex_network = gcp.compute.Network("vertex_network", name="network-name")
@@ -607,6 +857,12 @@ class AiEndpoint(pulumi.CustomResource):
  network=vertex_network.id,
  service="servicenetworking.googleapis.com",
  reserved_peering_ranges=[vertex_range.name])
+ bq_dataset = gcp.bigquery.Dataset("bq_dataset",
+ dataset_id="some_dataset",
+ friendly_name="logging dataset",
+ description="This is a dataset that requests are logged to",
+ location="US",
+ delete_contents_on_destroy=True)
  project = gcp.organizations.get_project()
  endpoint = gcp.vertex.AiEndpoint("endpoint",
  name="endpoint-name",
@@ -621,12 +877,62 @@ class AiEndpoint(pulumi.CustomResource):
  encryption_spec={
  "kms_key_name": "kms-name",
  },
+ predict_request_response_logging_config={
+ "bigquery_destination": {
+ "output_uri": bq_dataset.dataset_id.apply(lambda dataset_id: f"bq://{project.project_id}.{dataset_id}.request_response_logging"),
+ },
+ "enabled": True,
+ "sampling_rate": 0.1,
+ },
+ traffic_split=json.dumps({
+ "12345": 100,
+ }),
  opts = pulumi.ResourceOptions(depends_on=[vertex_vpc_connection]))
  crypto_key = gcp.kms.CryptoKeyIAMMember("crypto_key",
  crypto_key_id="kms-name",
  role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
  member=f"serviceAccount:service-{project.number}@gcp-sa-aiplatform.iam.gserviceaccount.com")
  ```
+ ### Vertex Ai Endpoint Private Service Connect
+
+ ```python
+ import pulumi
+ import pulumi_gcp as gcp
+
+ project = gcp.organizations.get_project()
+ endpoint = gcp.vertex.AiEndpoint("endpoint",
+ name="endpoint-name_69391",
+ display_name="sample-endpoint",
+ description="A sample vertex endpoint",
+ location="us-central1",
+ region="us-central1",
+ labels={
+ "label-one": "value-one",
+ },
+ private_service_connect_config={
+ "enable_private_service_connect": True,
+ "project_allowlists": [project.project_id],
+ "enable_secure_private_service_connect": False,
+ })
+ ```
+ ### Vertex Ai Endpoint Dedicated Endpoint
+
+ ```python
+ import pulumi
+ import pulumi_gcp as gcp
+
+ endpoint = gcp.vertex.AiEndpoint("endpoint",
+ name="endpoint-name_8270",
+ display_name="sample-endpoint",
+ description="A sample vertex endpoint",
+ location="us-central1",
+ region="us-central1",
+ labels={
+ "label-one": "value-one",
+ },
+ dedicated_endpoint_enabled=True)
+ project = gcp.organizations.get_project()
+ ```

  ## Import

@@ -667,6 +973,7 @@ class AiEndpoint(pulumi.CustomResource):
  def _internal_init(__self__,
  resource_name: str,
  opts: Optional[pulumi.ResourceOptions] = None,
+ dedicated_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
  description: Optional[pulumi.Input[str]] = None,
  display_name: Optional[pulumi.Input[str]] = None,
  encryption_spec: Optional[pulumi.Input[Union['AiEndpointEncryptionSpecArgs', 'AiEndpointEncryptionSpecArgsDict']]] = None,
@@ -674,8 +981,11 @@ class AiEndpoint(pulumi.CustomResource):
  location: Optional[pulumi.Input[str]] = None,
  name: Optional[pulumi.Input[str]] = None,
  network: Optional[pulumi.Input[str]] = None,
+ predict_request_response_logging_config: Optional[pulumi.Input[Union['AiEndpointPredictRequestResponseLoggingConfigArgs', 'AiEndpointPredictRequestResponseLoggingConfigArgsDict']]] = None,
+ private_service_connect_config: Optional[pulumi.Input[Union['AiEndpointPrivateServiceConnectConfigArgs', 'AiEndpointPrivateServiceConnectConfigArgsDict']]] = None,
  project: Optional[pulumi.Input[str]] = None,
  region: Optional[pulumi.Input[str]] = None,
+ traffic_split: Optional[pulumi.Input[str]] = None,
  __props__=None):
  opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
  if not isinstance(opts, pulumi.ResourceOptions):
@@ -685,6 +995,7 @@ class AiEndpoint(pulumi.CustomResource):
  raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
  __props__ = AiEndpointArgs.__new__(AiEndpointArgs)

+ __props__.__dict__["dedicated_endpoint_enabled"] = dedicated_endpoint_enabled
  __props__.__dict__["description"] = description
  if display_name is None and not opts.urn:
  raise TypeError("Missing required property 'display_name'")
@@ -696,9 +1007,13 @@ class AiEndpoint(pulumi.CustomResource):
  __props__.__dict__["location"] = location
  __props__.__dict__["name"] = name
  __props__.__dict__["network"] = network
+ __props__.__dict__["predict_request_response_logging_config"] = predict_request_response_logging_config
+ __props__.__dict__["private_service_connect_config"] = private_service_connect_config
  __props__.__dict__["project"] = project
  __props__.__dict__["region"] = region
+ __props__.__dict__["traffic_split"] = traffic_split
  __props__.__dict__["create_time"] = None
+ __props__.__dict__["dedicated_endpoint_dns"] = None
  __props__.__dict__["deployed_models"] = None
  __props__.__dict__["effective_labels"] = None
  __props__.__dict__["etag"] = None
@@ -718,6 +1033,8 @@ class AiEndpoint(pulumi.CustomResource):
  id: pulumi.Input[str],
  opts: Optional[pulumi.ResourceOptions] = None,
  create_time: Optional[pulumi.Input[str]] = None,
+ dedicated_endpoint_dns: Optional[pulumi.Input[str]] = None,
+ dedicated_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
  deployed_models: Optional[pulumi.Input[Sequence[pulumi.Input[Union['AiEndpointDeployedModelArgs', 'AiEndpointDeployedModelArgsDict']]]]] = None,
  description: Optional[pulumi.Input[str]] = None,
  display_name: Optional[pulumi.Input[str]] = None,
@@ -729,9 +1046,12 @@ class AiEndpoint(pulumi.CustomResource):
  model_deployment_monitoring_job: Optional[pulumi.Input[str]] = None,
  name: Optional[pulumi.Input[str]] = None,
  network: Optional[pulumi.Input[str]] = None,
+ predict_request_response_logging_config: Optional[pulumi.Input[Union['AiEndpointPredictRequestResponseLoggingConfigArgs', 'AiEndpointPredictRequestResponseLoggingConfigArgsDict']]] = None,
+ private_service_connect_config: Optional[pulumi.Input[Union['AiEndpointPrivateServiceConnectConfigArgs', 'AiEndpointPrivateServiceConnectConfigArgsDict']]] = None,
  project: Optional[pulumi.Input[str]] = None,
  pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
  region: Optional[pulumi.Input[str]] = None,
+ traffic_split: Optional[pulumi.Input[str]] = None,
  update_time: Optional[pulumi.Input[str]] = None) -> 'AiEndpoint':
  """
  Get an existing AiEndpoint resource's state with the given name, id, and optional extra
@@ -742,6 +1062,8 @@ class AiEndpoint(pulumi.CustomResource):
  :param pulumi.ResourceOptions opts: Options for the resource.
  :param pulumi.Input[str] create_time: (Output)
  Output only. Timestamp when the DeployedModel was created.
+ :param pulumi.Input[str] dedicated_endpoint_dns: Output only. DNS of the dedicated endpoint. Will only be populated if dedicatedEndpointEnabled is true. Format: `https://{endpointId}.{region}-{projectNumber}.prediction.vertexai.goog`.
+ :param pulumi.Input[bool] dedicated_endpoint_enabled: If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
  :param pulumi.Input[Sequence[pulumi.Input[Union['AiEndpointDeployedModelArgs', 'AiEndpointDeployedModelArgsDict']]]] deployed_models: Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. Models can also be deployed and undeployed using the [Cloud Console](https://console.cloud.google.com/vertex-ai/).
  Structure is documented below.
  :param pulumi.Input[str] description: The description of the Endpoint.
@@ -759,12 +1081,23 @@ class AiEndpoint(pulumi.CustomResource):
  - - -
  :param pulumi.Input[str] model_deployment_monitoring_job: Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by CreateModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`
  :param pulumi.Input[str] name: The resource name of the Endpoint. The name must be numeric with no leading zeros and can be at most 10 digits.
- :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
+ :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
+ :param pulumi.Input[Union['AiEndpointPredictRequestResponseLoggingConfigArgs', 'AiEndpointPredictRequestResponseLoggingConfigArgsDict']] predict_request_response_logging_config: Configures the request-response logging for online prediction.
+ Structure is documented below.
+ :param pulumi.Input[Union['AiEndpointPrivateServiceConnectConfigArgs', 'AiEndpointPrivateServiceConnectConfigArgsDict']] private_service_connect_config: Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
+ Structure is documented below.
  :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
  If it is not provided, the provider project is used.
  :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource
  and default labels configured on the provider.
  :param pulumi.Input[str] region: The region for the resource
+ :param pulumi.Input[str] traffic_split: A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
+ If a DeployedModel's id is not listed in this map, then it receives no traffic.
+ The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
+ > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
+ resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
+ the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
+ [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
  :param pulumi.Input[str] update_time: Output only. Timestamp when this Endpoint was last updated.
  """
  opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
@@ -772,6 +1105,8 @@ class AiEndpoint(pulumi.CustomResource):
  __props__ = _AiEndpointState.__new__(_AiEndpointState)

  __props__.__dict__["create_time"] = create_time
+ __props__.__dict__["dedicated_endpoint_dns"] = dedicated_endpoint_dns
+ __props__.__dict__["dedicated_endpoint_enabled"] = dedicated_endpoint_enabled
  __props__.__dict__["deployed_models"] = deployed_models
  __props__.__dict__["description"] = description
  __props__.__dict__["display_name"] = display_name
@@ -783,9 +1118,12 @@ class AiEndpoint(pulumi.CustomResource):
  __props__.__dict__["model_deployment_monitoring_job"] = model_deployment_monitoring_job
  __props__.__dict__["name"] = name
  __props__.__dict__["network"] = network
+ __props__.__dict__["predict_request_response_logging_config"] = predict_request_response_logging_config
+ __props__.__dict__["private_service_connect_config"] = private_service_connect_config
  __props__.__dict__["project"] = project
  __props__.__dict__["pulumi_labels"] = pulumi_labels
  __props__.__dict__["region"] = region
+ __props__.__dict__["traffic_split"] = traffic_split
  __props__.__dict__["update_time"] = update_time
  return AiEndpoint(resource_name, opts=opts, __props__=__props__)

@@ -798,6 +1136,22 @@ class AiEndpoint(pulumi.CustomResource):
  """
  return pulumi.get(self, "create_time")

+ @property
+ @pulumi.getter(name="dedicatedEndpointDns")
+ def dedicated_endpoint_dns(self) -> pulumi.Output[str]:
+ """
+ Output only. DNS of the dedicated endpoint. Will only be populated if dedicatedEndpointEnabled is true. Format: `https://{endpointId}.{region}-{projectNumber}.prediction.vertexai.goog`.
+ """
+ return pulumi.get(self, "dedicated_endpoint_dns")
+
+ @property
+ @pulumi.getter(name="dedicatedEndpointEnabled")
+ def dedicated_endpoint_enabled(self) -> pulumi.Output[Optional[bool]]:
+ """
+ If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
+ """
+ return pulumi.get(self, "dedicated_endpoint_enabled")
+
  @property
  @pulumi.getter(name="deployedModels")
  def deployed_models(self) -> pulumi.Output[Sequence['outputs.AiEndpointDeployedModel']]:
@@ -889,10 +1243,28 @@ class AiEndpoint(pulumi.CustomResource):
  @pulumi.getter
  def network(self) -> pulumi.Output[Optional[str]]:
  """
- The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
+ The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
  """
  return pulumi.get(self, "network")

+ @property
+ @pulumi.getter(name="predictRequestResponseLoggingConfig")
+ def predict_request_response_logging_config(self) -> pulumi.Output[Optional['outputs.AiEndpointPredictRequestResponseLoggingConfig']]:
+ """
+ Configures the request-response logging for online prediction.
+ Structure is documented below.
+ """
+ return pulumi.get(self, "predict_request_response_logging_config")
+
+ @property
+ @pulumi.getter(name="privateServiceConnectConfig")
+ def private_service_connect_config(self) -> pulumi.Output[Optional['outputs.AiEndpointPrivateServiceConnectConfig']]:
+ """
+ Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
+ Structure is documented below.
+ """
+ return pulumi.get(self, "private_service_connect_config")
+
  @property
  @pulumi.getter
  def project(self) -> pulumi.Output[str]:
@@ -919,6 +1291,20 @@ class AiEndpoint(pulumi.CustomResource):
  """
  return pulumi.get(self, "region")

+ @property
+ @pulumi.getter(name="trafficSplit")
+ def traffic_split(self) -> pulumi.Output[Optional[str]]:
+ """
+ A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
+ If a DeployedModel's id is not listed in this map, then it receives no traffic.
+ The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
+ > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
+ resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
+ the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
+ [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
+ """
+ return pulumi.get(self, "traffic_split")
+
  @property
  @pulumi.getter(name="updateTime")
  def update_time(self) -> pulumi.Output[str]:
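Taken together, the new `AiEndpoint` arguments in this release can be exercised as in the minimal sketch below. It mirrors the examples embedded in the diff above; the project ID, dataset name, numeric endpoint name, deployed-model ID `12345`, and the 0.1 sampling rate are placeholder assumptions, not values required by the provider, and `traffic_split` only takes effect once a model with that ID has actually been deployed to the endpoint.

```python
import json

import pulumi
import pulumi_gcp as gcp

# A dedicated-DNS endpoint whose online prediction requests are logged to BigQuery.
# Assumes a BigQuery dataset "request_logs" already exists in project "my-project".
endpoint = gcp.vertex.AiEndpoint(
    "endpoint",
    name="1234567890",                # resource name must be numeric, per the docs above
    display_name="sample-endpoint",
    location="us-central1",
    region="us-central1",
    dedicated_endpoint_enabled=True,  # new field: exposes Endpoint.dedicated_endpoint_dns
    predict_request_response_logging_config={
        "enabled": True,
        "sampling_rate": 0.1,
        "bigquery_destination": {
            "output_uri": "bq://my-project.request_logs.request_response_logging",
        },
    },
    # traffic_split is a JSON-encoded map from a DeployedModel id to a traffic percentage.
    traffic_split=json.dumps({"12345": 100}),
)

# The dedicated DNS name is an output-only field populated when the endpoint is enabled.
pulumi.export("dedicated_dns", endpoint.dedicated_endpoint_dns)
```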