pulumi-gcp 8.8.2a1731515595__py3-none-any.whl → 8.9.0a1731097924__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (114)
  1. pulumi_gcp/__init__.py +0 -64
  2. pulumi_gcp/activedirectory/domain.py +0 -24
  3. pulumi_gcp/apigee/__init__.py +0 -1
  4. pulumi_gcp/apigee/_inputs.py +0 -74
  5. pulumi_gcp/apigee/outputs.py +0 -65
  6. pulumi_gcp/applicationintegration/auth_config.py +0 -24
  7. pulumi_gcp/backupdisasterrecovery/__init__.py +0 -4
  8. pulumi_gcp/backupdisasterrecovery/_inputs.py +0 -533
  9. pulumi_gcp/backupdisasterrecovery/backup_vault.py +6 -146
  10. pulumi_gcp/backupdisasterrecovery/outputs.py +0 -702
  11. pulumi_gcp/bigquery/data_transfer_config.py +0 -24
  12. pulumi_gcp/cloudrunv2/_inputs.py +0 -46
  13. pulumi_gcp/cloudrunv2/job.py +2 -0
  14. pulumi_gcp/cloudrunv2/outputs.py +2 -60
  15. pulumi_gcp/cloudrunv2/service.py +2 -0
  16. pulumi_gcp/compute/__init__.py +0 -1
  17. pulumi_gcp/compute/_inputs.py +255 -2162
  18. pulumi_gcp/compute/disk.py +7 -7
  19. pulumi_gcp/compute/firewall_policy_rule.py +54 -108
  20. pulumi_gcp/compute/get_region_instance_group_manager.py +1 -12
  21. pulumi_gcp/compute/health_check.py +42 -42
  22. pulumi_gcp/compute/network_firewall_policy_rule.py +4 -4
  23. pulumi_gcp/compute/network_firewall_policy_with_rules.py +10 -10
  24. pulumi_gcp/compute/node_template.py +0 -95
  25. pulumi_gcp/compute/outputs.py +213 -1639
  26. pulumi_gcp/compute/region_disk.py +7 -7
  27. pulumi_gcp/compute/region_health_check.py +42 -42
  28. pulumi_gcp/compute/region_instance_group_manager.py +14 -54
  29. pulumi_gcp/compute/region_network_firewall_policy_rule.py +4 -4
  30. pulumi_gcp/compute/region_network_firewall_policy_with_rules.py +10 -10
  31. pulumi_gcp/compute/region_security_policy.py +0 -120
  32. pulumi_gcp/compute/region_security_policy_rule.py +6 -6
  33. pulumi_gcp/compute/router_peer.py +35 -56
  34. pulumi_gcp/compute/security_scan_config.py +8 -8
  35. pulumi_gcp/config/__init__.pyi +0 -4
  36. pulumi_gcp/config/vars.py +0 -8
  37. pulumi_gcp/container/_inputs.py +10 -345
  38. pulumi_gcp/container/cluster.py +0 -101
  39. pulumi_gcp/container/get_cluster.py +1 -23
  40. pulumi_gcp/container/outputs.py +8 -456
  41. pulumi_gcp/dataloss/prevention_discovery_config.py +7 -7
  42. pulumi_gcp/dataproc/__init__.py +0 -2
  43. pulumi_gcp/dataproc/_inputs.py +0 -101
  44. pulumi_gcp/dataproc/outputs.py +0 -94
  45. pulumi_gcp/edgecontainer/vpn_connection.py +4 -4
  46. pulumi_gcp/firebase/android_app.py +2 -2
  47. pulumi_gcp/firebase/apple_app.py +2 -2
  48. pulumi_gcp/firebase/web_app.py +2 -2
  49. pulumi_gcp/firestore/index.py +0 -44
  50. pulumi_gcp/gkeonprem/_inputs.py +15 -15
  51. pulumi_gcp/gkeonprem/outputs.py +10 -10
  52. pulumi_gcp/healthcare/dataset.py +7 -7
  53. pulumi_gcp/healthcare/dicom_store.py +7 -7
  54. pulumi_gcp/healthcare/fhir_store.py +7 -7
  55. pulumi_gcp/healthcare/hl7_store.py +14 -14
  56. pulumi_gcp/iam/__init__.py +0 -1
  57. pulumi_gcp/iam/_inputs.py +0 -161
  58. pulumi_gcp/iam/outputs.py +0 -114
  59. pulumi_gcp/logging/metric.py +2 -2
  60. pulumi_gcp/looker/_inputs.py +0 -6
  61. pulumi_gcp/looker/instance.py +321 -169
  62. pulumi_gcp/looker/outputs.py +0 -4
  63. pulumi_gcp/memorystore/instance.py +0 -4
  64. pulumi_gcp/monitoring/alert_policy.py +0 -24
  65. pulumi_gcp/monitoring/custom_service.py +0 -24
  66. pulumi_gcp/monitoring/group.py +0 -24
  67. pulumi_gcp/monitoring/metric_descriptor.py +0 -24
  68. pulumi_gcp/monitoring/slo.py +0 -24
  69. pulumi_gcp/monitoring/uptime_check_config.py +0 -24
  70. pulumi_gcp/networkmanagement/__init__.py +0 -1
  71. pulumi_gcp/osconfig/patch_deployment.py +8 -8
  72. pulumi_gcp/provider.py +0 -40
  73. pulumi_gcp/pulumi-plugin.json +1 -1
  74. pulumi_gcp/redis/_inputs.py +3 -3
  75. pulumi_gcp/redis/outputs.py +2 -2
  76. pulumi_gcp/securitycenter/notification_config.py +16 -4
  77. pulumi_gcp/securitycenter/project_notification_config.py +24 -0
  78. pulumi_gcp/securitycenter/v2_organization_notification_config.py +16 -4
  79. pulumi_gcp/spanner/__init__.py +0 -1
  80. pulumi_gcp/spanner/_inputs.py +9 -9
  81. pulumi_gcp/spanner/get_instance.py +1 -12
  82. pulumi_gcp/spanner/instance.py +0 -70
  83. pulumi_gcp/spanner/outputs.py +12 -46
  84. pulumi_gcp/sql/_inputs.py +0 -26
  85. pulumi_gcp/sql/database_instance.py +11 -124
  86. pulumi_gcp/sql/get_database_instance.py +1 -12
  87. pulumi_gcp/sql/outputs.py +0 -51
  88. pulumi_gcp/storage/_inputs.py +3 -3
  89. pulumi_gcp/storage/outputs.py +2 -2
  90. pulumi_gcp/tags/tag_binding.py +4 -4
  91. pulumi_gcp/tags/tag_value.py +2 -2
  92. pulumi_gcp/transcoder/job.py +0 -24
  93. pulumi_gcp/vertex/_inputs.py +0 -184
  94. pulumi_gcp/vertex/ai_endpoint.py +8 -394
  95. pulumi_gcp/vertex/ai_feature_online_store_featureview.py +4 -4
  96. pulumi_gcp/vertex/outputs.py +0 -166
  97. pulumi_gcp/workbench/instance.py +7 -21
  98. pulumi_gcp/workflows/workflow.py +0 -36
  99. pulumi_gcp/workstations/workstation_config.py +8 -8
  100. {pulumi_gcp-8.8.2a1731515595.dist-info → pulumi_gcp-8.9.0a1731097924.dist-info}/METADATA +1 -1
  101. {pulumi_gcp-8.8.2a1731515595.dist-info → pulumi_gcp-8.9.0a1731097924.dist-info}/RECORD +103 -114
  102. {pulumi_gcp-8.8.2a1731515595.dist-info → pulumi_gcp-8.9.0a1731097924.dist-info}/WHEEL +1 -1
  103. pulumi_gcp/apigee/api.py +0 -456
  104. pulumi_gcp/backupdisasterrecovery/backup_plan.py +0 -653
  105. pulumi_gcp/backupdisasterrecovery/backup_plan_association.py +0 -766
  106. pulumi_gcp/backupdisasterrecovery/get_backup_plan.py +0 -204
  107. pulumi_gcp/backupdisasterrecovery/get_backup_plan_association.py +0 -243
  108. pulumi_gcp/compute/region_resize_request.py +0 -772
  109. pulumi_gcp/dataproc/gdc_application_environment.py +0 -931
  110. pulumi_gcp/dataproc/gdc_service_instance.py +0 -1022
  111. pulumi_gcp/iam/principal_access_boundary_policy.py +0 -679
  112. pulumi_gcp/networkmanagement/vpc_flow_logs_config.py +0 -1358
  113. pulumi_gcp/spanner/get_database.py +0 -229
  114. {pulumi_gcp-8.8.2a1731515595.dist-info → pulumi_gcp-8.9.0a1731097924.dist-info}/top_level.txt +0 -0
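The largest single change below is to `pulumi_gcp/vertex/ai_endpoint.py`, where the right-hand build drops the `dedicated_endpoint_enabled`, `predict_request_response_logging_config`, `private_service_connect_config`, and `traffic_split` inputs (and the `dedicated_endpoint_dns` output) from `vertex.AiEndpoint`. As a rough orientation aid, and not part of the diff itself, the sketch below shows a minimal endpoint declaration that uses only arguments still present on the right-hand side; the resource name and field values are placeholders taken from the examples in the diff.

```python
import pulumi
import pulumi_gcp as gcp

# Hypothetical minimal AiEndpoint using only arguments the right-hand
# version still accepts (display_name and location are required; the
# removed inputs such as traffic_split are intentionally absent).
endpoint = gcp.vertex.AiEndpoint(
    "example-endpoint",  # placeholder resource name
    display_name="sample-endpoint",
    location="us-central1",
    region="us-central1",
    description="A sample vertex endpoint",
    labels={"label-one": "value-one"},
)

pulumi.export("endpoint_name", endpoint.name)
```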
@@ -23,17 +23,13 @@ class AiEndpointArgs:
  def __init__(__self__, *,
  display_name: pulumi.Input[str],
  location: pulumi.Input[str],
- dedicated_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
  description: Optional[pulumi.Input[str]] = None,
  encryption_spec: Optional[pulumi.Input['AiEndpointEncryptionSpecArgs']] = None,
  labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
  name: Optional[pulumi.Input[str]] = None,
  network: Optional[pulumi.Input[str]] = None,
- predict_request_response_logging_config: Optional[pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs']] = None,
- private_service_connect_config: Optional[pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs']] = None,
  project: Optional[pulumi.Input[str]] = None,
- region: Optional[pulumi.Input[str]] = None,
- traffic_split: Optional[pulumi.Input[str]] = None):
+ region: Optional[pulumi.Input[str]] = None):
  """
  The set of arguments for constructing a AiEndpoint resource.
  :param pulumi.Input[str] display_name: Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters.
@@ -41,7 +37,6 @@ class AiEndpointArgs:


  - - -
- :param pulumi.Input[bool] dedicated_endpoint_enabled: If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
  :param pulumi.Input[str] description: The description of the Endpoint.
  :param pulumi.Input['AiEndpointEncryptionSpecArgs'] encryption_spec: Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key.
  Structure is documented below.
@@ -49,26 +44,13 @@ class AiEndpointArgs:
  **Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
  Please refer to the field `effective_labels` for all of the labels present on the resource.
  :param pulumi.Input[str] name: The resource name of the Endpoint. The name must be numeric with no leading zeros and can be at most 10 digits.
- :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
- :param pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs'] predict_request_response_logging_config: Configures the request-response logging for online prediction.
- Structure is documented below.
- :param pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs'] private_service_connect_config: Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
- Structure is documented below.
+ :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
  :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
  If it is not provided, the provider project is used.
  :param pulumi.Input[str] region: The region for the resource
- :param pulumi.Input[str] traffic_split: A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
- If a DeployedModel's id is not listed in this map, then it receives no traffic.
- The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
- > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
- resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
- the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
- [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
  """
  pulumi.set(__self__, "display_name", display_name)
  pulumi.set(__self__, "location", location)
- if dedicated_endpoint_enabled is not None:
- pulumi.set(__self__, "dedicated_endpoint_enabled", dedicated_endpoint_enabled)
  if description is not None:
  pulumi.set(__self__, "description", description)
  if encryption_spec is not None:
@@ -79,16 +61,10 @@ class AiEndpointArgs:
  pulumi.set(__self__, "name", name)
  if network is not None:
  pulumi.set(__self__, "network", network)
- if predict_request_response_logging_config is not None:
- pulumi.set(__self__, "predict_request_response_logging_config", predict_request_response_logging_config)
- if private_service_connect_config is not None:
- pulumi.set(__self__, "private_service_connect_config", private_service_connect_config)
  if project is not None:
  pulumi.set(__self__, "project", project)
  if region is not None:
  pulumi.set(__self__, "region", region)
- if traffic_split is not None:
- pulumi.set(__self__, "traffic_split", traffic_split)

  @property
  @pulumi.getter(name="displayName")
@@ -117,18 +93,6 @@ class AiEndpointArgs:
  def location(self, value: pulumi.Input[str]):
  pulumi.set(self, "location", value)

- @property
- @pulumi.getter(name="dedicatedEndpointEnabled")
- def dedicated_endpoint_enabled(self) -> Optional[pulumi.Input[bool]]:
- """
- If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
- """
- return pulumi.get(self, "dedicated_endpoint_enabled")
-
- @dedicated_endpoint_enabled.setter
- def dedicated_endpoint_enabled(self, value: Optional[pulumi.Input[bool]]):
- pulumi.set(self, "dedicated_endpoint_enabled", value)
-
  @property
  @pulumi.getter
  def description(self) -> Optional[pulumi.Input[str]]:
@@ -184,7 +148,7 @@ class AiEndpointArgs:
  @pulumi.getter
  def network(self) -> Optional[pulumi.Input[str]]:
  """
- The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
+ The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
  """
  return pulumi.get(self, "network")

@@ -192,32 +156,6 @@ class AiEndpointArgs:
  def network(self, value: Optional[pulumi.Input[str]]):
  pulumi.set(self, "network", value)

- @property
- @pulumi.getter(name="predictRequestResponseLoggingConfig")
- def predict_request_response_logging_config(self) -> Optional[pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs']]:
- """
- Configures the request-response logging for online prediction.
- Structure is documented below.
- """
- return pulumi.get(self, "predict_request_response_logging_config")
-
- @predict_request_response_logging_config.setter
- def predict_request_response_logging_config(self, value: Optional[pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs']]):
- pulumi.set(self, "predict_request_response_logging_config", value)
-
- @property
- @pulumi.getter(name="privateServiceConnectConfig")
- def private_service_connect_config(self) -> Optional[pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs']]:
- """
- Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
- Structure is documented below.
- """
- return pulumi.get(self, "private_service_connect_config")
-
- @private_service_connect_config.setter
- def private_service_connect_config(self, value: Optional[pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs']]):
- pulumi.set(self, "private_service_connect_config", value)
-
  @property
  @pulumi.getter
  def project(self) -> Optional[pulumi.Input[str]]:
@@ -243,31 +181,11 @@ class AiEndpointArgs:
  def region(self, value: Optional[pulumi.Input[str]]):
  pulumi.set(self, "region", value)

- @property
- @pulumi.getter(name="trafficSplit")
- def traffic_split(self) -> Optional[pulumi.Input[str]]:
- """
- A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
- If a DeployedModel's id is not listed in this map, then it receives no traffic.
- The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
- > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
- resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
- the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
- [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
- """
- return pulumi.get(self, "traffic_split")
-
- @traffic_split.setter
- def traffic_split(self, value: Optional[pulumi.Input[str]]):
- pulumi.set(self, "traffic_split", value)
-

  @pulumi.input_type
  class _AiEndpointState:
  def __init__(__self__, *,
  create_time: Optional[pulumi.Input[str]] = None,
- dedicated_endpoint_dns: Optional[pulumi.Input[str]] = None,
- dedicated_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
  deployed_models: Optional[pulumi.Input[Sequence[pulumi.Input['AiEndpointDeployedModelArgs']]]] = None,
  description: Optional[pulumi.Input[str]] = None,
  display_name: Optional[pulumi.Input[str]] = None,
@@ -279,19 +197,14 @@ class _AiEndpointState:
  model_deployment_monitoring_job: Optional[pulumi.Input[str]] = None,
  name: Optional[pulumi.Input[str]] = None,
  network: Optional[pulumi.Input[str]] = None,
- predict_request_response_logging_config: Optional[pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs']] = None,
- private_service_connect_config: Optional[pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs']] = None,
  project: Optional[pulumi.Input[str]] = None,
  pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
  region: Optional[pulumi.Input[str]] = None,
- traffic_split: Optional[pulumi.Input[str]] = None,
  update_time: Optional[pulumi.Input[str]] = None):
  """
  Input properties used for looking up and filtering AiEndpoint resources.
  :param pulumi.Input[str] create_time: (Output)
  Output only. Timestamp when the DeployedModel was created.
- :param pulumi.Input[str] dedicated_endpoint_dns: Output only. DNS of the dedicated endpoint. Will only be populated if dedicatedEndpointEnabled is true. Format: `https://{endpointId}.{region}-{projectNumber}.prediction.vertexai.goog`.
- :param pulumi.Input[bool] dedicated_endpoint_enabled: If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
  :param pulumi.Input[Sequence[pulumi.Input['AiEndpointDeployedModelArgs']]] deployed_models: Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. Models can also be deployed and undeployed using the [Cloud Console](https://console.cloud.google.com/vertex-ai/).
  Structure is documented below.
  :param pulumi.Input[str] description: The description of the Endpoint.
@@ -309,31 +222,16 @@ class _AiEndpointState:
  - - -
  :param pulumi.Input[str] model_deployment_monitoring_job: Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by CreateModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`
  :param pulumi.Input[str] name: The resource name of the Endpoint. The name must be numeric with no leading zeros and can be at most 10 digits.
- :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
- :param pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs'] predict_request_response_logging_config: Configures the request-response logging for online prediction.
- Structure is documented below.
- :param pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs'] private_service_connect_config: Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
- Structure is documented below.
+ :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
  :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
  If it is not provided, the provider project is used.
  :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource
  and default labels configured on the provider.
  :param pulumi.Input[str] region: The region for the resource
- :param pulumi.Input[str] traffic_split: A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
- If a DeployedModel's id is not listed in this map, then it receives no traffic.
- The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
- > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
- resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
- the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
- [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
  :param pulumi.Input[str] update_time: Output only. Timestamp when this Endpoint was last updated.
  """
  if create_time is not None:
  pulumi.set(__self__, "create_time", create_time)
- if dedicated_endpoint_dns is not None:
- pulumi.set(__self__, "dedicated_endpoint_dns", dedicated_endpoint_dns)
- if dedicated_endpoint_enabled is not None:
- pulumi.set(__self__, "dedicated_endpoint_enabled", dedicated_endpoint_enabled)
  if deployed_models is not None:
  pulumi.set(__self__, "deployed_models", deployed_models)
  if description is not None:
@@ -356,18 +254,12 @@ class _AiEndpointState:
  pulumi.set(__self__, "name", name)
  if network is not None:
  pulumi.set(__self__, "network", network)
- if predict_request_response_logging_config is not None:
- pulumi.set(__self__, "predict_request_response_logging_config", predict_request_response_logging_config)
- if private_service_connect_config is not None:
- pulumi.set(__self__, "private_service_connect_config", private_service_connect_config)
  if project is not None:
  pulumi.set(__self__, "project", project)
  if pulumi_labels is not None:
  pulumi.set(__self__, "pulumi_labels", pulumi_labels)
  if region is not None:
  pulumi.set(__self__, "region", region)
- if traffic_split is not None:
- pulumi.set(__self__, "traffic_split", traffic_split)
  if update_time is not None:
  pulumi.set(__self__, "update_time", update_time)

@@ -384,30 +276,6 @@ class _AiEndpointState:
  def create_time(self, value: Optional[pulumi.Input[str]]):
  pulumi.set(self, "create_time", value)

- @property
- @pulumi.getter(name="dedicatedEndpointDns")
- def dedicated_endpoint_dns(self) -> Optional[pulumi.Input[str]]:
- """
- Output only. DNS of the dedicated endpoint. Will only be populated if dedicatedEndpointEnabled is true. Format: `https://{endpointId}.{region}-{projectNumber}.prediction.vertexai.goog`.
- """
- return pulumi.get(self, "dedicated_endpoint_dns")
-
- @dedicated_endpoint_dns.setter
- def dedicated_endpoint_dns(self, value: Optional[pulumi.Input[str]]):
- pulumi.set(self, "dedicated_endpoint_dns", value)
-
- @property
- @pulumi.getter(name="dedicatedEndpointEnabled")
- def dedicated_endpoint_enabled(self) -> Optional[pulumi.Input[bool]]:
- """
- If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
- """
- return pulumi.get(self, "dedicated_endpoint_enabled")
-
- @dedicated_endpoint_enabled.setter
- def dedicated_endpoint_enabled(self, value: Optional[pulumi.Input[bool]]):
- pulumi.set(self, "dedicated_endpoint_enabled", value)
-
  @property
  @pulumi.getter(name="deployedModels")
  def deployed_models(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AiEndpointDeployedModelArgs']]]]:
@@ -539,7 +407,7 @@ class _AiEndpointState:
  @pulumi.getter
  def network(self) -> Optional[pulumi.Input[str]]:
  """
- The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
+ The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
  """
  return pulumi.get(self, "network")

@@ -547,32 +415,6 @@ class _AiEndpointState:
  def network(self, value: Optional[pulumi.Input[str]]):
  pulumi.set(self, "network", value)

- @property
- @pulumi.getter(name="predictRequestResponseLoggingConfig")
- def predict_request_response_logging_config(self) -> Optional[pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs']]:
- """
- Configures the request-response logging for online prediction.
- Structure is documented below.
- """
- return pulumi.get(self, "predict_request_response_logging_config")
-
- @predict_request_response_logging_config.setter
- def predict_request_response_logging_config(self, value: Optional[pulumi.Input['AiEndpointPredictRequestResponseLoggingConfigArgs']]):
- pulumi.set(self, "predict_request_response_logging_config", value)
-
- @property
- @pulumi.getter(name="privateServiceConnectConfig")
- def private_service_connect_config(self) -> Optional[pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs']]:
- """
- Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
- Structure is documented below.
- """
- return pulumi.get(self, "private_service_connect_config")
-
- @private_service_connect_config.setter
- def private_service_connect_config(self, value: Optional[pulumi.Input['AiEndpointPrivateServiceConnectConfigArgs']]):
- pulumi.set(self, "private_service_connect_config", value)
-
  @property
  @pulumi.getter
  def project(self) -> Optional[pulumi.Input[str]]:
@@ -611,24 +453,6 @@ class _AiEndpointState:
  def region(self, value: Optional[pulumi.Input[str]]):
  pulumi.set(self, "region", value)

- @property
- @pulumi.getter(name="trafficSplit")
- def traffic_split(self) -> Optional[pulumi.Input[str]]:
- """
- A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
- If a DeployedModel's id is not listed in this map, then it receives no traffic.
- The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
- > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
- resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
- the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
- [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
- """
- return pulumi.get(self, "traffic_split")
-
- @traffic_split.setter
- def traffic_split(self, value: Optional[pulumi.Input[str]]):
- pulumi.set(self, "traffic_split", value)
-
  @property
  @pulumi.getter(name="updateTime")
  def update_time(self) -> Optional[pulumi.Input[str]]:
@@ -647,7 +471,6 @@ class AiEndpoint(pulumi.CustomResource):
  def __init__(__self__,
  resource_name: str,
  opts: Optional[pulumi.ResourceOptions] = None,
- dedicated_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
  description: Optional[pulumi.Input[str]] = None,
  display_name: Optional[pulumi.Input[str]] = None,
  encryption_spec: Optional[pulumi.Input[Union['AiEndpointEncryptionSpecArgs', 'AiEndpointEncryptionSpecArgsDict']]] = None,
@@ -655,11 +478,8 @@ class AiEndpoint(pulumi.CustomResource):
  location: Optional[pulumi.Input[str]] = None,
  name: Optional[pulumi.Input[str]] = None,
  network: Optional[pulumi.Input[str]] = None,
- predict_request_response_logging_config: Optional[pulumi.Input[Union['AiEndpointPredictRequestResponseLoggingConfigArgs', 'AiEndpointPredictRequestResponseLoggingConfigArgsDict']]] = None,
- private_service_connect_config: Optional[pulumi.Input[Union['AiEndpointPrivateServiceConnectConfigArgs', 'AiEndpointPrivateServiceConnectConfigArgsDict']]] = None,
  project: Optional[pulumi.Input[str]] = None,
  region: Optional[pulumi.Input[str]] = None,
- traffic_split: Optional[pulumi.Input[str]] = None,
  __props__=None):
  """
  Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations.
@@ -676,7 +496,6 @@ class AiEndpoint(pulumi.CustomResource):

  ```python
  import pulumi
- import json
  import pulumi_gcp as gcp

  vertex_network = gcp.compute.Network("vertex_network", name="network-name")
@@ -690,12 +509,6 @@ class AiEndpoint(pulumi.CustomResource):
  network=vertex_network.id,
  service="servicenetworking.googleapis.com",
  reserved_peering_ranges=[vertex_range.name])
- bq_dataset = gcp.bigquery.Dataset("bq_dataset",
- dataset_id="some_dataset",
- friendly_name="logging dataset",
- description="This is a dataset that requests are logged to",
- location="US",
- delete_contents_on_destroy=True)
  project = gcp.organizations.get_project()
  endpoint = gcp.vertex.AiEndpoint("endpoint",
  name="endpoint-name",
@@ -710,62 +523,12 @@ class AiEndpoint(pulumi.CustomResource):
  encryption_spec={
  "kms_key_name": "kms-name",
  },
- predict_request_response_logging_config={
- "bigquery_destination": {
- "output_uri": bq_dataset.dataset_id.apply(lambda dataset_id: f"bq://{project.project_id}.{dataset_id}.request_response_logging"),
- },
- "enabled": True,
- "sampling_rate": 0.1,
- },
- traffic_split=json.dumps({
- "12345": 100,
- }),
  opts = pulumi.ResourceOptions(depends_on=[vertex_vpc_connection]))
  crypto_key = gcp.kms.CryptoKeyIAMMember("crypto_key",
  crypto_key_id="kms-name",
  role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
  member=f"serviceAccount:service-{project.number}@gcp-sa-aiplatform.iam.gserviceaccount.com")
  ```
- ### Vertex Ai Endpoint Private Service Connect
-
- ```python
- import pulumi
- import pulumi_gcp as gcp
-
- project = gcp.organizations.get_project()
- endpoint = gcp.vertex.AiEndpoint("endpoint",
- name="endpoint-name_69391",
- display_name="sample-endpoint",
- description="A sample vertex endpoint",
- location="us-central1",
- region="us-central1",
- labels={
- "label-one": "value-one",
- },
- private_service_connect_config={
- "enable_private_service_connect": True,
- "project_allowlists": [project.project_id],
- "enable_secure_private_service_connect": False,
- })
- ```
- ### Vertex Ai Endpoint Dedicated Endpoint
-
- ```python
- import pulumi
- import pulumi_gcp as gcp
-
- endpoint = gcp.vertex.AiEndpoint("endpoint",
- name="endpoint-name_8270",
- display_name="sample-endpoint",
- description="A sample vertex endpoint",
- location="us-central1",
- region="us-central1",
- labels={
- "label-one": "value-one",
- },
- dedicated_endpoint_enabled=True)
- project = gcp.organizations.get_project()
- ```

  ## Import

@@ -793,7 +556,6 @@ class AiEndpoint(pulumi.CustomResource):

  :param str resource_name: The name of the resource.
  :param pulumi.ResourceOptions opts: Options for the resource.
- :param pulumi.Input[bool] dedicated_endpoint_enabled: If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
  :param pulumi.Input[str] description: The description of the Endpoint.
  :param pulumi.Input[str] display_name: Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters.
  :param pulumi.Input[Union['AiEndpointEncryptionSpecArgs', 'AiEndpointEncryptionSpecArgsDict']] encryption_spec: Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key.
@@ -806,21 +568,10 @@ class AiEndpoint(pulumi.CustomResource):

  - - -
  :param pulumi.Input[str] name: The resource name of the Endpoint. The name must be numeric with no leading zeros and can be at most 10 digits.
- :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
- :param pulumi.Input[Union['AiEndpointPredictRequestResponseLoggingConfigArgs', 'AiEndpointPredictRequestResponseLoggingConfigArgsDict']] predict_request_response_logging_config: Configures the request-response logging for online prediction.
- Structure is documented below.
- :param pulumi.Input[Union['AiEndpointPrivateServiceConnectConfigArgs', 'AiEndpointPrivateServiceConnectConfigArgsDict']] private_service_connect_config: Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
- Structure is documented below.
+ :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
  :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
  If it is not provided, the provider project is used.
  :param pulumi.Input[str] region: The region for the resource
- :param pulumi.Input[str] traffic_split: A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
- If a DeployedModel's id is not listed in this map, then it receives no traffic.
- The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
- > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
- resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
- the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
- [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
  """
  ...
  @overload
@@ -843,7 +594,6 @@ class AiEndpoint(pulumi.CustomResource):

  ```python
  import pulumi
- import json
  import pulumi_gcp as gcp

  vertex_network = gcp.compute.Network("vertex_network", name="network-name")
@@ -857,12 +607,6 @@ class AiEndpoint(pulumi.CustomResource):
  network=vertex_network.id,
  service="servicenetworking.googleapis.com",
  reserved_peering_ranges=[vertex_range.name])
- bq_dataset = gcp.bigquery.Dataset("bq_dataset",
- dataset_id="some_dataset",
- friendly_name="logging dataset",
- description="This is a dataset that requests are logged to",
- location="US",
- delete_contents_on_destroy=True)
  project = gcp.organizations.get_project()
  endpoint = gcp.vertex.AiEndpoint("endpoint",
  name="endpoint-name",
@@ -877,62 +621,12 @@ class AiEndpoint(pulumi.CustomResource):
  encryption_spec={
  "kms_key_name": "kms-name",
  },
- predict_request_response_logging_config={
- "bigquery_destination": {
- "output_uri": bq_dataset.dataset_id.apply(lambda dataset_id: f"bq://{project.project_id}.{dataset_id}.request_response_logging"),
- },
- "enabled": True,
- "sampling_rate": 0.1,
- },
- traffic_split=json.dumps({
- "12345": 100,
- }),
  opts = pulumi.ResourceOptions(depends_on=[vertex_vpc_connection]))
  crypto_key = gcp.kms.CryptoKeyIAMMember("crypto_key",
  crypto_key_id="kms-name",
  role="roles/cloudkms.cryptoKeyEncrypterDecrypter",
  member=f"serviceAccount:service-{project.number}@gcp-sa-aiplatform.iam.gserviceaccount.com")
  ```
- ### Vertex Ai Endpoint Private Service Connect
-
- ```python
- import pulumi
- import pulumi_gcp as gcp
-
- project = gcp.organizations.get_project()
- endpoint = gcp.vertex.AiEndpoint("endpoint",
- name="endpoint-name_69391",
- display_name="sample-endpoint",
- description="A sample vertex endpoint",
- location="us-central1",
- region="us-central1",
- labels={
- "label-one": "value-one",
- },
- private_service_connect_config={
- "enable_private_service_connect": True,
- "project_allowlists": [project.project_id],
- "enable_secure_private_service_connect": False,
- })
- ```
- ### Vertex Ai Endpoint Dedicated Endpoint
-
- ```python
- import pulumi
- import pulumi_gcp as gcp
-
- endpoint = gcp.vertex.AiEndpoint("endpoint",
- name="endpoint-name_8270",
- display_name="sample-endpoint",
- description="A sample vertex endpoint",
- location="us-central1",
- region="us-central1",
- labels={
- "label-one": "value-one",
- },
- dedicated_endpoint_enabled=True)
- project = gcp.organizations.get_project()
- ```

  ## Import

@@ -973,7 +667,6 @@ class AiEndpoint(pulumi.CustomResource):
  def _internal_init(__self__,
  resource_name: str,
  opts: Optional[pulumi.ResourceOptions] = None,
- dedicated_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
  description: Optional[pulumi.Input[str]] = None,
  display_name: Optional[pulumi.Input[str]] = None,
  encryption_spec: Optional[pulumi.Input[Union['AiEndpointEncryptionSpecArgs', 'AiEndpointEncryptionSpecArgsDict']]] = None,
@@ -981,11 +674,8 @@ class AiEndpoint(pulumi.CustomResource):
  location: Optional[pulumi.Input[str]] = None,
  name: Optional[pulumi.Input[str]] = None,
  network: Optional[pulumi.Input[str]] = None,
- predict_request_response_logging_config: Optional[pulumi.Input[Union['AiEndpointPredictRequestResponseLoggingConfigArgs', 'AiEndpointPredictRequestResponseLoggingConfigArgsDict']]] = None,
- private_service_connect_config: Optional[pulumi.Input[Union['AiEndpointPrivateServiceConnectConfigArgs', 'AiEndpointPrivateServiceConnectConfigArgsDict']]] = None,
  project: Optional[pulumi.Input[str]] = None,
  region: Optional[pulumi.Input[str]] = None,
- traffic_split: Optional[pulumi.Input[str]] = None,
  __props__=None):
  opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
  if not isinstance(opts, pulumi.ResourceOptions):
@@ -995,7 +685,6 @@ class AiEndpoint(pulumi.CustomResource):
  raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
  __props__ = AiEndpointArgs.__new__(AiEndpointArgs)

- __props__.__dict__["dedicated_endpoint_enabled"] = dedicated_endpoint_enabled
  __props__.__dict__["description"] = description
  if display_name is None and not opts.urn:
  raise TypeError("Missing required property 'display_name'")
@@ -1007,13 +696,9 @@ class AiEndpoint(pulumi.CustomResource):
  __props__.__dict__["location"] = location
  __props__.__dict__["name"] = name
  __props__.__dict__["network"] = network
- __props__.__dict__["predict_request_response_logging_config"] = predict_request_response_logging_config
- __props__.__dict__["private_service_connect_config"] = private_service_connect_config
  __props__.__dict__["project"] = project
  __props__.__dict__["region"] = region
- __props__.__dict__["traffic_split"] = traffic_split
  __props__.__dict__["create_time"] = None
- __props__.__dict__["dedicated_endpoint_dns"] = None
  __props__.__dict__["deployed_models"] = None
  __props__.__dict__["effective_labels"] = None
  __props__.__dict__["etag"] = None
@@ -1033,8 +718,6 @@ class AiEndpoint(pulumi.CustomResource):
  id: pulumi.Input[str],
  opts: Optional[pulumi.ResourceOptions] = None,
  create_time: Optional[pulumi.Input[str]] = None,
- dedicated_endpoint_dns: Optional[pulumi.Input[str]] = None,
- dedicated_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
  deployed_models: Optional[pulumi.Input[Sequence[pulumi.Input[Union['AiEndpointDeployedModelArgs', 'AiEndpointDeployedModelArgsDict']]]]] = None,
  description: Optional[pulumi.Input[str]] = None,
  display_name: Optional[pulumi.Input[str]] = None,
@@ -1046,12 +729,9 @@ class AiEndpoint(pulumi.CustomResource):
  model_deployment_monitoring_job: Optional[pulumi.Input[str]] = None,
  name: Optional[pulumi.Input[str]] = None,
  network: Optional[pulumi.Input[str]] = None,
- predict_request_response_logging_config: Optional[pulumi.Input[Union['AiEndpointPredictRequestResponseLoggingConfigArgs', 'AiEndpointPredictRequestResponseLoggingConfigArgsDict']]] = None,
- private_service_connect_config: Optional[pulumi.Input[Union['AiEndpointPrivateServiceConnectConfigArgs', 'AiEndpointPrivateServiceConnectConfigArgsDict']]] = None,
  project: Optional[pulumi.Input[str]] = None,
  pulumi_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
  region: Optional[pulumi.Input[str]] = None,
- traffic_split: Optional[pulumi.Input[str]] = None,
  update_time: Optional[pulumi.Input[str]] = None) -> 'AiEndpoint':
  """
  Get an existing AiEndpoint resource's state with the given name, id, and optional extra
@@ -1062,8 +742,6 @@ class AiEndpoint(pulumi.CustomResource):
  :param pulumi.ResourceOptions opts: Options for the resource.
  :param pulumi.Input[str] create_time: (Output)
  Output only. Timestamp when the DeployedModel was created.
- :param pulumi.Input[str] dedicated_endpoint_dns: Output only. DNS of the dedicated endpoint. Will only be populated if dedicatedEndpointEnabled is true. Format: `https://{endpointId}.{region}-{projectNumber}.prediction.vertexai.goog`.
- :param pulumi.Input[bool] dedicated_endpoint_enabled: If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
  :param pulumi.Input[Sequence[pulumi.Input[Union['AiEndpointDeployedModelArgs', 'AiEndpointDeployedModelArgsDict']]]] deployed_models: Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. Models can also be deployed and undeployed using the [Cloud Console](https://console.cloud.google.com/vertex-ai/).
  Structure is documented below.
  :param pulumi.Input[str] description: The description of the Endpoint.
@@ -1081,23 +759,12 @@ class AiEndpoint(pulumi.CustomResource):
  - - -
  :param pulumi.Input[str] model_deployment_monitoring_job: Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by CreateModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`
  :param pulumi.Input[str] name: The resource name of the Endpoint. The name must be numeric with no leading zeros and can be at most 10 digits.
- :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
- :param pulumi.Input[Union['AiEndpointPredictRequestResponseLoggingConfigArgs', 'AiEndpointPredictRequestResponseLoggingConfigArgsDict']] predict_request_response_logging_config: Configures the request-response logging for online prediction.
- Structure is documented below.
- :param pulumi.Input[Union['AiEndpointPrivateServiceConnectConfigArgs', 'AiEndpointPrivateServiceConnectConfigArgsDict']] private_service_connect_config: Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
- Structure is documented below.
+ :param pulumi.Input[str] network: The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
  :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
  If it is not provided, the provider project is used.
  :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pulumi_labels: The combination of labels configured directly on the resource
  and default labels configured on the provider.
  :param pulumi.Input[str] region: The region for the resource
- :param pulumi.Input[str] traffic_split: A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
- If a DeployedModel's id is not listed in this map, then it receives no traffic.
- The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
- > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
- resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
- the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
- [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
  :param pulumi.Input[str] update_time: Output only. Timestamp when this Endpoint was last updated.
  """
  opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
@@ -1105,8 +772,6 @@ class AiEndpoint(pulumi.CustomResource):
  __props__ = _AiEndpointState.__new__(_AiEndpointState)

  __props__.__dict__["create_time"] = create_time
- __props__.__dict__["dedicated_endpoint_dns"] = dedicated_endpoint_dns
- __props__.__dict__["dedicated_endpoint_enabled"] = dedicated_endpoint_enabled
  __props__.__dict__["deployed_models"] = deployed_models
  __props__.__dict__["description"] = description
  __props__.__dict__["display_name"] = display_name
@@ -1118,12 +783,9 @@ class AiEndpoint(pulumi.CustomResource):
  __props__.__dict__["model_deployment_monitoring_job"] = model_deployment_monitoring_job
  __props__.__dict__["name"] = name
  __props__.__dict__["network"] = network
- __props__.__dict__["predict_request_response_logging_config"] = predict_request_response_logging_config
- __props__.__dict__["private_service_connect_config"] = private_service_connect_config
  __props__.__dict__["project"] = project
  __props__.__dict__["pulumi_labels"] = pulumi_labels
  __props__.__dict__["region"] = region
- __props__.__dict__["traffic_split"] = traffic_split
  __props__.__dict__["update_time"] = update_time
  return AiEndpoint(resource_name, opts=opts, __props__=__props__)

@@ -1136,22 +798,6 @@ class AiEndpoint(pulumi.CustomResource):
  """
  return pulumi.get(self, "create_time")

- @property
- @pulumi.getter(name="dedicatedEndpointDns")
- def dedicated_endpoint_dns(self) -> pulumi.Output[str]:
- """
- Output only. DNS of the dedicated endpoint. Will only be populated if dedicatedEndpointEnabled is true. Format: `https://{endpointId}.{region}-{projectNumber}.prediction.vertexai.goog`.
- """
- return pulumi.get(self, "dedicated_endpoint_dns")
-
- @property
- @pulumi.getter(name="dedicatedEndpointEnabled")
- def dedicated_endpoint_enabled(self) -> pulumi.Output[Optional[bool]]:
- """
- If true, the endpoint will be exposed through a dedicated DNS [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will be isolated from other users' traffic and will have better performance and reliability. Note: Once you enabled dedicated endpoint, you won't be able to send request to the shared DNS {region}-aiplatform.googleapis.com. The limitation will be removed soon.
- """
- return pulumi.get(self, "dedicated_endpoint_enabled")
-
  @property
  @pulumi.getter(name="deployedModels")
  def deployed_models(self) -> pulumi.Output[Sequence['outputs.AiEndpointDeployedModel']]:
@@ -1243,28 +889,10 @@ class AiEndpoint(pulumi.CustomResource):
  @pulumi.getter
  def network(self) -> pulumi.Output[Optional[str]]:
  """
- The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name. Only one of the fields, `network` or `privateServiceConnectConfig`, can be set.
+ The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is network name.
  """
  return pulumi.get(self, "network")

- @property
- @pulumi.getter(name="predictRequestResponseLoggingConfig")
- def predict_request_response_logging_config(self) -> pulumi.Output[Optional['outputs.AiEndpointPredictRequestResponseLoggingConfig']]:
- """
- Configures the request-response logging for online prediction.
- Structure is documented below.
- """
- return pulumi.get(self, "predict_request_response_logging_config")
-
- @property
- @pulumi.getter(name="privateServiceConnectConfig")
- def private_service_connect_config(self) -> pulumi.Output[Optional['outputs.AiEndpointPrivateServiceConnectConfig']]:
- """
- Configuration for private service connect. `network` and `privateServiceConnectConfig` are mutually exclusive.
- Structure is documented below.
- """
- return pulumi.get(self, "private_service_connect_config")
-
  @property
  @pulumi.getter
  def project(self) -> pulumi.Output[str]:
@@ -1291,20 +919,6 @@ class AiEndpoint(pulumi.CustomResource):
  """
  return pulumi.get(self, "region")

- @property
- @pulumi.getter(name="trafficSplit")
- def traffic_split(self) -> pulumi.Output[Optional[str]]:
- """
- A map from a DeployedModel's id to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel.
- If a DeployedModel's id is not listed in this map, then it receives no traffic.
- The traffic percentage values must add up to 100, or map must be empty if the Endpoint is to not accept any traffic at a moment.
- > **Note:** The `traffic_split` setting only applies after a model has been deployed to the endpoint. Re-applying a `vertex.AiEndpoint`
- resource without updating the `traffic_split` post-deployment may lead to your deployed `traffic_split` being lost; see
- the `deployModel` [example](https://cloud.google.com/vertex-ai/docs/general/deployment#deploy_a_model_to_an_endpoint) and
- [documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints/deployModel) for details.
- """
- return pulumi.get(self, "traffic_split")
-
  @property
  @pulumi.getter(name="updateTime")
  def update_time(self) -> pulumi.Output[str]: