pulumi-gcp 7.17.0a1712402830__py3-none-any.whl → 7.17.0a1712602552__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pulumi_gcp/__init__.py +27 -0
- pulumi_gcp/apphub/__init__.py +1 -0
- pulumi_gcp/apphub/get_application.py +220 -0
- pulumi_gcp/apphub/outputs.py +214 -0
- pulumi_gcp/applicationintegration/__init__.py +10 -0
- pulumi_gcp/applicationintegration/_inputs.py +119 -0
- pulumi_gcp/applicationintegration/client.py +566 -0
- pulumi_gcp/applicationintegration/outputs.py +122 -0
- pulumi_gcp/bigquery/_inputs.py +16 -0
- pulumi_gcp/bigquery/outputs.py +14 -0
- pulumi_gcp/bigquery/routine.py +98 -0
- pulumi_gcp/billing/project_info.py +4 -4
- pulumi_gcp/cloudquota/__init__.py +2 -0
- pulumi_gcp/cloudquota/_inputs.py +131 -0
- pulumi_gcp/cloudquota/outputs.py +118 -0
- pulumi_gcp/cloudquota/s_quota_preference.py +777 -0
- pulumi_gcp/compute/interconnect_attachment.py +64 -0
- pulumi_gcp/compute/network_endpoint.py +8 -0
- pulumi_gcp/compute/network_endpoint_list.py +8 -0
- pulumi_gcp/compute/region_backend_service.py +28 -0
- pulumi_gcp/compute/target_instance.py +4 -4
- pulumi_gcp/config/__init__.pyi +2 -0
- pulumi_gcp/config/vars.py +4 -0
- pulumi_gcp/databasemigrationservice/connection_profile.py +6 -6
- pulumi_gcp/dataflow/flex_template_job.py +84 -112
- pulumi_gcp/dataplex/task.py +16 -16
- pulumi_gcp/dataproc/_inputs.py +71 -0
- pulumi_gcp/dataproc/get_metastore_service.py +11 -1
- pulumi_gcp/dataproc/metastore_service.py +120 -0
- pulumi_gcp/dataproc/outputs.py +128 -0
- pulumi_gcp/firebase/app_check_service_config.py +2 -2
- pulumi_gcp/firestore/backup_schedule.py +14 -14
- pulumi_gcp/firestore/field.py +4 -4
- pulumi_gcp/gkehub/membership_binding.py +6 -6
- pulumi_gcp/gkehub/membership_rbac_role_binding.py +4 -4
- pulumi_gcp/gkehub/namespace.py +4 -4
- pulumi_gcp/gkehub/scope_rbac_role_binding.py +4 -4
- pulumi_gcp/iap/tunnel_dest_group.py +2 -2
- pulumi_gcp/kms/_inputs.py +46 -0
- pulumi_gcp/kms/crypto_key.py +54 -0
- pulumi_gcp/kms/crypto_key_version.py +54 -0
- pulumi_gcp/kms/get_kms_crypto_key.py +11 -1
- pulumi_gcp/kms/outputs.py +54 -0
- pulumi_gcp/orgpolicy/policy.py +2 -2
- pulumi_gcp/provider.py +20 -0
- pulumi_gcp/pubsub/subscription.py +4 -4
- pulumi_gcp/vertex/__init__.py +1 -0
- pulumi_gcp/vertex/_inputs.py +169 -0
- pulumi_gcp/vertex/ai_deployment_resource_pool.py +477 -0
- pulumi_gcp/vertex/outputs.py +194 -0
- pulumi_gcp/vpcaccess/connector.py +77 -28
- {pulumi_gcp-7.17.0a1712402830.dist-info → pulumi_gcp-7.17.0a1712602552.dist-info}/METADATA +1 -1
- {pulumi_gcp-7.17.0a1712402830.dist-info → pulumi_gcp-7.17.0a1712602552.dist-info}/RECORD +55 -47
- {pulumi_gcp-7.17.0a1712402830.dist-info → pulumi_gcp-7.17.0a1712602552.dist-info}/WHEEL +0 -0
- {pulumi_gcp-7.17.0a1712402830.dist-info → pulumi_gcp-7.17.0a1712602552.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,477 @@
|
|
1
|
+
# coding=utf-8
|
2
|
+
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
|
3
|
+
# *** Do not edit by hand unless you're certain you know what you are doing! ***
|
4
|
+
|
5
|
+
import copy
|
6
|
+
import warnings
|
7
|
+
import pulumi
|
8
|
+
import pulumi.runtime
|
9
|
+
from typing import Any, Mapping, Optional, Sequence, Union, overload
|
10
|
+
from .. import _utilities
|
11
|
+
from . import outputs
|
12
|
+
from ._inputs import *
|
13
|
+
|
14
|
+
__all__ = ['AiDeploymentResourcePoolArgs', 'AiDeploymentResourcePool']
|
15
|
+
|
16
|
+
@pulumi.input_type
|
17
|
+
class AiDeploymentResourcePoolArgs:
|
18
|
+
def __init__(__self__, *,
|
19
|
+
dedicated_resources: Optional[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs']] = None,
|
20
|
+
name: Optional[pulumi.Input[str]] = None,
|
21
|
+
project: Optional[pulumi.Input[str]] = None,
|
22
|
+
region: Optional[pulumi.Input[str]] = None):
|
23
|
+
"""
|
24
|
+
The set of arguments for constructing a AiDeploymentResourcePool resource.
|
25
|
+
:param pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs'] dedicated_resources: The underlying dedicated resources that the deployment resource pool uses.
|
26
|
+
Structure is documented below.
|
27
|
+
:param pulumi.Input[str] name: The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
|
28
|
+
|
29
|
+
|
30
|
+
- - -
|
31
|
+
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
|
32
|
+
If it is not provided, the provider project is used.
|
33
|
+
:param pulumi.Input[str] region: The region of deployment resource pool. eg us-central1
|
34
|
+
"""
|
35
|
+
if dedicated_resources is not None:
|
36
|
+
pulumi.set(__self__, "dedicated_resources", dedicated_resources)
|
37
|
+
if name is not None:
|
38
|
+
pulumi.set(__self__, "name", name)
|
39
|
+
if project is not None:
|
40
|
+
pulumi.set(__self__, "project", project)
|
41
|
+
if region is not None:
|
42
|
+
pulumi.set(__self__, "region", region)
|
43
|
+
|
44
|
+
@property
|
45
|
+
@pulumi.getter(name="dedicatedResources")
|
46
|
+
def dedicated_resources(self) -> Optional[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs']]:
|
47
|
+
"""
|
48
|
+
The underlying dedicated resources that the deployment resource pool uses.
|
49
|
+
Structure is documented below.
|
50
|
+
"""
|
51
|
+
return pulumi.get(self, "dedicated_resources")
|
52
|
+
|
53
|
+
@dedicated_resources.setter
|
54
|
+
def dedicated_resources(self, value: Optional[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs']]):
|
55
|
+
pulumi.set(self, "dedicated_resources", value)
|
56
|
+
|
57
|
+
@property
|
58
|
+
@pulumi.getter
|
59
|
+
def name(self) -> Optional[pulumi.Input[str]]:
|
60
|
+
"""
|
61
|
+
The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
|
62
|
+
|
63
|
+
|
64
|
+
- - -
|
65
|
+
"""
|
66
|
+
return pulumi.get(self, "name")
|
67
|
+
|
68
|
+
@name.setter
|
69
|
+
def name(self, value: Optional[pulumi.Input[str]]):
|
70
|
+
pulumi.set(self, "name", value)
|
71
|
+
|
72
|
+
@property
|
73
|
+
@pulumi.getter
|
74
|
+
def project(self) -> Optional[pulumi.Input[str]]:
|
75
|
+
"""
|
76
|
+
The ID of the project in which the resource belongs.
|
77
|
+
If it is not provided, the provider project is used.
|
78
|
+
"""
|
79
|
+
return pulumi.get(self, "project")
|
80
|
+
|
81
|
+
@project.setter
|
82
|
+
def project(self, value: Optional[pulumi.Input[str]]):
|
83
|
+
pulumi.set(self, "project", value)
|
84
|
+
|
85
|
+
@property
|
86
|
+
@pulumi.getter
|
87
|
+
def region(self) -> Optional[pulumi.Input[str]]:
|
88
|
+
"""
|
89
|
+
The region of deployment resource pool. eg us-central1
|
90
|
+
"""
|
91
|
+
return pulumi.get(self, "region")
|
92
|
+
|
93
|
+
@region.setter
|
94
|
+
def region(self, value: Optional[pulumi.Input[str]]):
|
95
|
+
pulumi.set(self, "region", value)
|
96
|
+
|
97
|
+
|
98
|
+
@pulumi.input_type
|
99
|
+
class _AiDeploymentResourcePoolState:
|
100
|
+
def __init__(__self__, *,
|
101
|
+
create_time: Optional[pulumi.Input[str]] = None,
|
102
|
+
dedicated_resources: Optional[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs']] = None,
|
103
|
+
name: Optional[pulumi.Input[str]] = None,
|
104
|
+
project: Optional[pulumi.Input[str]] = None,
|
105
|
+
region: Optional[pulumi.Input[str]] = None):
|
106
|
+
"""
|
107
|
+
Input properties used for looking up and filtering AiDeploymentResourcePool resources.
|
108
|
+
:param pulumi.Input[str] create_time: A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
|
109
|
+
:param pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs'] dedicated_resources: The underlying dedicated resources that the deployment resource pool uses.
|
110
|
+
Structure is documented below.
|
111
|
+
:param pulumi.Input[str] name: The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
|
112
|
+
|
113
|
+
|
114
|
+
- - -
|
115
|
+
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
|
116
|
+
If it is not provided, the provider project is used.
|
117
|
+
:param pulumi.Input[str] region: The region of deployment resource pool. eg us-central1
|
118
|
+
"""
|
119
|
+
if create_time is not None:
|
120
|
+
pulumi.set(__self__, "create_time", create_time)
|
121
|
+
if dedicated_resources is not None:
|
122
|
+
pulumi.set(__self__, "dedicated_resources", dedicated_resources)
|
123
|
+
if name is not None:
|
124
|
+
pulumi.set(__self__, "name", name)
|
125
|
+
if project is not None:
|
126
|
+
pulumi.set(__self__, "project", project)
|
127
|
+
if region is not None:
|
128
|
+
pulumi.set(__self__, "region", region)
|
129
|
+
|
130
|
+
@property
|
131
|
+
@pulumi.getter(name="createTime")
|
132
|
+
def create_time(self) -> Optional[pulumi.Input[str]]:
|
133
|
+
"""
|
134
|
+
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
|
135
|
+
"""
|
136
|
+
return pulumi.get(self, "create_time")
|
137
|
+
|
138
|
+
@create_time.setter
|
139
|
+
def create_time(self, value: Optional[pulumi.Input[str]]):
|
140
|
+
pulumi.set(self, "create_time", value)
|
141
|
+
|
142
|
+
@property
|
143
|
+
@pulumi.getter(name="dedicatedResources")
|
144
|
+
def dedicated_resources(self) -> Optional[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs']]:
|
145
|
+
"""
|
146
|
+
The underlying dedicated resources that the deployment resource pool uses.
|
147
|
+
Structure is documented below.
|
148
|
+
"""
|
149
|
+
return pulumi.get(self, "dedicated_resources")
|
150
|
+
|
151
|
+
@dedicated_resources.setter
|
152
|
+
def dedicated_resources(self, value: Optional[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs']]):
|
153
|
+
pulumi.set(self, "dedicated_resources", value)
|
154
|
+
|
155
|
+
@property
|
156
|
+
@pulumi.getter
|
157
|
+
def name(self) -> Optional[pulumi.Input[str]]:
|
158
|
+
"""
|
159
|
+
The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
|
160
|
+
|
161
|
+
|
162
|
+
- - -
|
163
|
+
"""
|
164
|
+
return pulumi.get(self, "name")
|
165
|
+
|
166
|
+
@name.setter
|
167
|
+
def name(self, value: Optional[pulumi.Input[str]]):
|
168
|
+
pulumi.set(self, "name", value)
|
169
|
+
|
170
|
+
@property
|
171
|
+
@pulumi.getter
|
172
|
+
def project(self) -> Optional[pulumi.Input[str]]:
|
173
|
+
"""
|
174
|
+
The ID of the project in which the resource belongs.
|
175
|
+
If it is not provided, the provider project is used.
|
176
|
+
"""
|
177
|
+
return pulumi.get(self, "project")
|
178
|
+
|
179
|
+
@project.setter
|
180
|
+
def project(self, value: Optional[pulumi.Input[str]]):
|
181
|
+
pulumi.set(self, "project", value)
|
182
|
+
|
183
|
+
@property
|
184
|
+
@pulumi.getter
|
185
|
+
def region(self) -> Optional[pulumi.Input[str]]:
|
186
|
+
"""
|
187
|
+
The region of deployment resource pool. eg us-central1
|
188
|
+
"""
|
189
|
+
return pulumi.get(self, "region")
|
190
|
+
|
191
|
+
@region.setter
|
192
|
+
def region(self, value: Optional[pulumi.Input[str]]):
|
193
|
+
pulumi.set(self, "region", value)
|
194
|
+
|
195
|
+
|
196
|
+
class AiDeploymentResourcePool(pulumi.CustomResource):
|
197
|
+
@overload
|
198
|
+
def __init__(__self__,
|
199
|
+
resource_name: str,
|
200
|
+
opts: Optional[pulumi.ResourceOptions] = None,
|
201
|
+
dedicated_resources: Optional[pulumi.Input[pulumi.InputType['AiDeploymentResourcePoolDedicatedResourcesArgs']]] = None,
|
202
|
+
name: Optional[pulumi.Input[str]] = None,
|
203
|
+
project: Optional[pulumi.Input[str]] = None,
|
204
|
+
region: Optional[pulumi.Input[str]] = None,
|
205
|
+
__props__=None):
|
206
|
+
"""
|
207
|
+
'DeploymentResourcePool can be shared by multiple deployed models,
|
208
|
+
whose underlying specification consists of dedicated resources.'
|
209
|
+
|
210
|
+
To get more information about DeploymentResourcePool, see:
|
211
|
+
|
212
|
+
* [API documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.deploymentResourcePools)
|
213
|
+
|
214
|
+
## Example Usage
|
215
|
+
|
216
|
+
### Vertex Ai Deployment Resource Pool
|
217
|
+
|
218
|
+
<!--Start PulumiCodeChooser -->
|
219
|
+
```python
|
220
|
+
import pulumi
|
221
|
+
import pulumi_gcp as gcp
|
222
|
+
|
223
|
+
deployment_resource_pool = gcp.vertex.AiDeploymentResourcePool("deployment_resource_pool",
|
224
|
+
region="us-central1",
|
225
|
+
name="example-deployment-resource-pool",
|
226
|
+
dedicated_resources=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesArgs(
|
227
|
+
machine_spec=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs(
|
228
|
+
machine_type="n1-standard-4",
|
229
|
+
accelerator_type="NVIDIA_TESLA_K80",
|
230
|
+
accelerator_count=1,
|
231
|
+
),
|
232
|
+
min_replica_count=1,
|
233
|
+
max_replica_count=2,
|
234
|
+
autoscaling_metric_specs=[gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs(
|
235
|
+
metric_name="aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
|
236
|
+
target=60,
|
237
|
+
)],
|
238
|
+
))
|
239
|
+
```
|
240
|
+
<!--End PulumiCodeChooser -->
|
241
|
+
|
242
|
+
## Import
|
243
|
+
|
244
|
+
DeploymentResourcePool can be imported using any of these accepted formats:
|
245
|
+
|
246
|
+
* `projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}`
|
247
|
+
|
248
|
+
* `{{project}}/{{region}}/{{name}}`
|
249
|
+
|
250
|
+
* `{{region}}/{{name}}`
|
251
|
+
|
252
|
+
* `{{name}}`
|
253
|
+
|
254
|
+
When using the `pulumi import` command, DeploymentResourcePool can be imported using one of the formats above. For example:
|
255
|
+
|
256
|
+
```sh
|
257
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}
|
258
|
+
```
|
259
|
+
|
260
|
+
```sh
|
261
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{project}}/{{region}}/{{name}}
|
262
|
+
```
|
263
|
+
|
264
|
+
```sh
|
265
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{region}}/{{name}}
|
266
|
+
```
|
267
|
+
|
268
|
+
```sh
|
269
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{name}}
|
270
|
+
```
|
271
|
+
|
272
|
+
:param str resource_name: The name of the resource.
|
273
|
+
:param pulumi.ResourceOptions opts: Options for the resource.
|
274
|
+
:param pulumi.Input[pulumi.InputType['AiDeploymentResourcePoolDedicatedResourcesArgs']] dedicated_resources: The underlying dedicated resources that the deployment resource pool uses.
|
275
|
+
Structure is documented below.
|
276
|
+
:param pulumi.Input[str] name: The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
|
277
|
+
|
278
|
+
|
279
|
+
- - -
|
280
|
+
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
|
281
|
+
If it is not provided, the provider project is used.
|
282
|
+
:param pulumi.Input[str] region: The region of deployment resource pool. eg us-central1
|
283
|
+
"""
|
284
|
+
...
|
285
|
+
@overload
|
286
|
+
def __init__(__self__,
|
287
|
+
resource_name: str,
|
288
|
+
args: Optional[AiDeploymentResourcePoolArgs] = None,
|
289
|
+
opts: Optional[pulumi.ResourceOptions] = None):
|
290
|
+
"""
|
291
|
+
'DeploymentResourcePool can be shared by multiple deployed models,
|
292
|
+
whose underlying specification consists of dedicated resources.'
|
293
|
+
|
294
|
+
To get more information about DeploymentResourcePool, see:
|
295
|
+
|
296
|
+
* [API documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.deploymentResourcePools)
|
297
|
+
|
298
|
+
## Example Usage
|
299
|
+
|
300
|
+
### Vertex Ai Deployment Resource Pool
|
301
|
+
|
302
|
+
<!--Start PulumiCodeChooser -->
|
303
|
+
```python
|
304
|
+
import pulumi
|
305
|
+
import pulumi_gcp as gcp
|
306
|
+
|
307
|
+
deployment_resource_pool = gcp.vertex.AiDeploymentResourcePool("deployment_resource_pool",
|
308
|
+
region="us-central1",
|
309
|
+
name="example-deployment-resource-pool",
|
310
|
+
dedicated_resources=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesArgs(
|
311
|
+
machine_spec=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs(
|
312
|
+
machine_type="n1-standard-4",
|
313
|
+
accelerator_type="NVIDIA_TESLA_K80",
|
314
|
+
accelerator_count=1,
|
315
|
+
),
|
316
|
+
min_replica_count=1,
|
317
|
+
max_replica_count=2,
|
318
|
+
autoscaling_metric_specs=[gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs(
|
319
|
+
metric_name="aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
|
320
|
+
target=60,
|
321
|
+
)],
|
322
|
+
))
|
323
|
+
```
|
324
|
+
<!--End PulumiCodeChooser -->
|
325
|
+
|
326
|
+
## Import
|
327
|
+
|
328
|
+
DeploymentResourcePool can be imported using any of these accepted formats:
|
329
|
+
|
330
|
+
* `projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}`
|
331
|
+
|
332
|
+
* `{{project}}/{{region}}/{{name}}`
|
333
|
+
|
334
|
+
* `{{region}}/{{name}}`
|
335
|
+
|
336
|
+
* `{{name}}`
|
337
|
+
|
338
|
+
When using the `pulumi import` command, DeploymentResourcePool can be imported using one of the formats above. For example:
|
339
|
+
|
340
|
+
```sh
|
341
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}
|
342
|
+
```
|
343
|
+
|
344
|
+
```sh
|
345
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{project}}/{{region}}/{{name}}
|
346
|
+
```
|
347
|
+
|
348
|
+
```sh
|
349
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{region}}/{{name}}
|
350
|
+
```
|
351
|
+
|
352
|
+
```sh
|
353
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{name}}
|
354
|
+
```
|
355
|
+
|
356
|
+
:param str resource_name: The name of the resource.
|
357
|
+
:param AiDeploymentResourcePoolArgs args: The arguments to use to populate this resource's properties.
|
358
|
+
:param pulumi.ResourceOptions opts: Options for the resource.
|
359
|
+
"""
|
360
|
+
...
|
361
|
+
def __init__(__self__, resource_name: str, *args, **kwargs):
|
362
|
+
resource_args, opts = _utilities.get_resource_args_opts(AiDeploymentResourcePoolArgs, pulumi.ResourceOptions, *args, **kwargs)
|
363
|
+
if resource_args is not None:
|
364
|
+
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
|
365
|
+
else:
|
366
|
+
__self__._internal_init(resource_name, *args, **kwargs)
|
367
|
+
|
368
|
+
def _internal_init(__self__,
|
369
|
+
resource_name: str,
|
370
|
+
opts: Optional[pulumi.ResourceOptions] = None,
|
371
|
+
dedicated_resources: Optional[pulumi.Input[pulumi.InputType['AiDeploymentResourcePoolDedicatedResourcesArgs']]] = None,
|
372
|
+
name: Optional[pulumi.Input[str]] = None,
|
373
|
+
project: Optional[pulumi.Input[str]] = None,
|
374
|
+
region: Optional[pulumi.Input[str]] = None,
|
375
|
+
__props__=None):
|
376
|
+
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
|
377
|
+
if not isinstance(opts, pulumi.ResourceOptions):
|
378
|
+
raise TypeError('Expected resource options to be a ResourceOptions instance')
|
379
|
+
if opts.id is None:
|
380
|
+
if __props__ is not None:
|
381
|
+
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
|
382
|
+
__props__ = AiDeploymentResourcePoolArgs.__new__(AiDeploymentResourcePoolArgs)
|
383
|
+
|
384
|
+
__props__.__dict__["dedicated_resources"] = dedicated_resources
|
385
|
+
__props__.__dict__["name"] = name
|
386
|
+
__props__.__dict__["project"] = project
|
387
|
+
__props__.__dict__["region"] = region
|
388
|
+
__props__.__dict__["create_time"] = None
|
389
|
+
super(AiDeploymentResourcePool, __self__).__init__(
|
390
|
+
'gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool',
|
391
|
+
resource_name,
|
392
|
+
__props__,
|
393
|
+
opts)
|
394
|
+
|
395
|
+
@staticmethod
|
396
|
+
def get(resource_name: str,
|
397
|
+
id: pulumi.Input[str],
|
398
|
+
opts: Optional[pulumi.ResourceOptions] = None,
|
399
|
+
create_time: Optional[pulumi.Input[str]] = None,
|
400
|
+
dedicated_resources: Optional[pulumi.Input[pulumi.InputType['AiDeploymentResourcePoolDedicatedResourcesArgs']]] = None,
|
401
|
+
name: Optional[pulumi.Input[str]] = None,
|
402
|
+
project: Optional[pulumi.Input[str]] = None,
|
403
|
+
region: Optional[pulumi.Input[str]] = None) -> 'AiDeploymentResourcePool':
|
404
|
+
"""
|
405
|
+
Get an existing AiDeploymentResourcePool resource's state with the given name, id, and optional extra
|
406
|
+
properties used to qualify the lookup.
|
407
|
+
|
408
|
+
:param str resource_name: The unique name of the resulting resource.
|
409
|
+
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
|
410
|
+
:param pulumi.ResourceOptions opts: Options for the resource.
|
411
|
+
:param pulumi.Input[str] create_time: A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
|
412
|
+
:param pulumi.Input[pulumi.InputType['AiDeploymentResourcePoolDedicatedResourcesArgs']] dedicated_resources: The underlying dedicated resources that the deployment resource pool uses.
|
413
|
+
Structure is documented below.
|
414
|
+
:param pulumi.Input[str] name: The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
|
415
|
+
|
416
|
+
|
417
|
+
- - -
|
418
|
+
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
|
419
|
+
If it is not provided, the provider project is used.
|
420
|
+
:param pulumi.Input[str] region: The region of deployment resource pool. eg us-central1
|
421
|
+
"""
|
422
|
+
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
|
423
|
+
|
424
|
+
__props__ = _AiDeploymentResourcePoolState.__new__(_AiDeploymentResourcePoolState)
|
425
|
+
|
426
|
+
__props__.__dict__["create_time"] = create_time
|
427
|
+
__props__.__dict__["dedicated_resources"] = dedicated_resources
|
428
|
+
__props__.__dict__["name"] = name
|
429
|
+
__props__.__dict__["project"] = project
|
430
|
+
__props__.__dict__["region"] = region
|
431
|
+
return AiDeploymentResourcePool(resource_name, opts=opts, __props__=__props__)
|
432
|
+
|
433
|
+
@property
|
434
|
+
@pulumi.getter(name="createTime")
|
435
|
+
def create_time(self) -> pulumi.Output[str]:
|
436
|
+
"""
|
437
|
+
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
|
438
|
+
"""
|
439
|
+
return pulumi.get(self, "create_time")
|
440
|
+
|
441
|
+
@property
|
442
|
+
@pulumi.getter(name="dedicatedResources")
|
443
|
+
def dedicated_resources(self) -> pulumi.Output[Optional['outputs.AiDeploymentResourcePoolDedicatedResources']]:
|
444
|
+
"""
|
445
|
+
The underlying dedicated resources that the deployment resource pool uses.
|
446
|
+
Structure is documented below.
|
447
|
+
"""
|
448
|
+
return pulumi.get(self, "dedicated_resources")
|
449
|
+
|
450
|
+
@property
|
451
|
+
@pulumi.getter
|
452
|
+
def name(self) -> pulumi.Output[str]:
|
453
|
+
"""
|
454
|
+
The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
|
455
|
+
|
456
|
+
|
457
|
+
- - -
|
458
|
+
"""
|
459
|
+
return pulumi.get(self, "name")
|
460
|
+
|
461
|
+
@property
|
462
|
+
@pulumi.getter
|
463
|
+
def project(self) -> pulumi.Output[str]:
|
464
|
+
"""
|
465
|
+
The ID of the project in which the resource belongs.
|
466
|
+
If it is not provided, the provider project is used.
|
467
|
+
"""
|
468
|
+
return pulumi.get(self, "project")
|
469
|
+
|
470
|
+
@property
|
471
|
+
@pulumi.getter
|
472
|
+
def region(self) -> pulumi.Output[Optional[str]]:
|
473
|
+
"""
|
474
|
+
The region of deployment resource pool. eg us-central1
|
475
|
+
"""
|
476
|
+
return pulumi.get(self, "region")
|
477
|
+
|
pulumi_gcp/vertex/outputs.py
CHANGED
@@ -12,6 +12,9 @@ from . import outputs
|
|
12
12
|
|
13
13
|
__all__ = [
|
14
14
|
'AiDatasetEncryptionSpec',
|
15
|
+
'AiDeploymentResourcePoolDedicatedResources',
|
16
|
+
'AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec',
|
17
|
+
'AiDeploymentResourcePoolDedicatedResourcesMachineSpec',
|
15
18
|
'AiEndpointDeployedModel',
|
16
19
|
'AiEndpointDeployedModelAutomaticResource',
|
17
20
|
'AiEndpointDeployedModelDedicatedResource',
|
@@ -106,6 +109,197 @@ class AiDatasetEncryptionSpec(dict):
|
|
106
109
|
return pulumi.get(self, "kms_key_name")
|
107
110
|
|
108
111
|
|
112
|
+
@pulumi.output_type
|
113
|
+
class AiDeploymentResourcePoolDedicatedResources(dict):
|
114
|
+
@staticmethod
|
115
|
+
def __key_warning(key: str):
|
116
|
+
suggest = None
|
117
|
+
if key == "machineSpec":
|
118
|
+
suggest = "machine_spec"
|
119
|
+
elif key == "minReplicaCount":
|
120
|
+
suggest = "min_replica_count"
|
121
|
+
elif key == "autoscalingMetricSpecs":
|
122
|
+
suggest = "autoscaling_metric_specs"
|
123
|
+
elif key == "maxReplicaCount":
|
124
|
+
suggest = "max_replica_count"
|
125
|
+
|
126
|
+
if suggest:
|
127
|
+
pulumi.log.warn(f"Key '{key}' not found in AiDeploymentResourcePoolDedicatedResources. Access the value via the '{suggest}' property getter instead.")
|
128
|
+
|
129
|
+
def __getitem__(self, key: str) -> Any:
|
130
|
+
AiDeploymentResourcePoolDedicatedResources.__key_warning(key)
|
131
|
+
return super().__getitem__(key)
|
132
|
+
|
133
|
+
def get(self, key: str, default = None) -> Any:
|
134
|
+
AiDeploymentResourcePoolDedicatedResources.__key_warning(key)
|
135
|
+
return super().get(key, default)
|
136
|
+
|
137
|
+
def __init__(__self__, *,
|
138
|
+
machine_spec: 'outputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpec',
|
139
|
+
min_replica_count: int,
|
140
|
+
autoscaling_metric_specs: Optional[Sequence['outputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec']] = None,
|
141
|
+
max_replica_count: Optional[int] = None):
|
142
|
+
"""
|
143
|
+
:param 'AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs' machine_spec: The specification of a single machine used by the prediction
|
144
|
+
Structure is documented below.
|
145
|
+
:param int min_replica_count: The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
|
146
|
+
:param Sequence['AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs'] autoscaling_metric_specs: A list of the metric specifications that overrides a resource utilization metric.
|
147
|
+
Structure is documented below.
|
148
|
+
:param int max_replica_count: The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
|
149
|
+
"""
|
150
|
+
pulumi.set(__self__, "machine_spec", machine_spec)
|
151
|
+
pulumi.set(__self__, "min_replica_count", min_replica_count)
|
152
|
+
if autoscaling_metric_specs is not None:
|
153
|
+
pulumi.set(__self__, "autoscaling_metric_specs", autoscaling_metric_specs)
|
154
|
+
if max_replica_count is not None:
|
155
|
+
pulumi.set(__self__, "max_replica_count", max_replica_count)
|
156
|
+
|
157
|
+
@property
|
158
|
+
@pulumi.getter(name="machineSpec")
|
159
|
+
def machine_spec(self) -> 'outputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpec':
|
160
|
+
"""
|
161
|
+
The specification of a single machine used by the prediction
|
162
|
+
Structure is documented below.
|
163
|
+
"""
|
164
|
+
return pulumi.get(self, "machine_spec")
|
165
|
+
|
166
|
+
@property
|
167
|
+
@pulumi.getter(name="minReplicaCount")
|
168
|
+
def min_replica_count(self) -> int:
|
169
|
+
"""
|
170
|
+
The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
|
171
|
+
"""
|
172
|
+
return pulumi.get(self, "min_replica_count")
|
173
|
+
|
174
|
+
@property
|
175
|
+
@pulumi.getter(name="autoscalingMetricSpecs")
|
176
|
+
def autoscaling_metric_specs(self) -> Optional[Sequence['outputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec']]:
|
177
|
+
"""
|
178
|
+
A list of the metric specifications that overrides a resource utilization metric.
|
179
|
+
Structure is documented below.
|
180
|
+
"""
|
181
|
+
return pulumi.get(self, "autoscaling_metric_specs")
|
182
|
+
|
183
|
+
@property
|
184
|
+
@pulumi.getter(name="maxReplicaCount")
|
185
|
+
def max_replica_count(self) -> Optional[int]:
|
186
|
+
"""
|
187
|
+
The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
|
188
|
+
"""
|
189
|
+
return pulumi.get(self, "max_replica_count")
|
190
|
+
|
191
|
+
|
192
|
+
@pulumi.output_type
class AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec(dict):
    """
    An autoscaling metric specification: the resource metric to observe and the
    target utilization percentage at which replica counts are adjusted.
    """

    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case property name, if known.
        suggest = {"metricName": "metric_name"}.get(key)

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 metric_name: str,
                 target: Optional[int] = None):
        """
        :param str metric_name: The resource metric name. Supported metrics: For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
        :param int target: Target utilization for the metric as a percentage (1% - 100%); replicas are scaled once real usage deviates from this target by a certain percentage. Defaults to 60 (i.e. 60%) when not provided.
        """
        pulumi.set(__self__, "metric_name", metric_name)
        if target is not None:
            pulumi.set(__self__, "target", target)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> str:
        """
        The resource metric name. Supported metrics: For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
        """
        return pulumi.get(self, "metric_name")

    @property
    @pulumi.getter
    def target(self) -> Optional[int]:
        """
        Target utilization for the metric as a percentage (1% - 100%); replicas are
        scaled once real usage deviates from this target by a certain percentage.
        Defaults to 60 (i.e. 60%) when not provided.
        """
        return pulumi.get(self, "target")
|
237
|
+
|
238
|
+
|
239
|
+
@pulumi.output_type
class AiDeploymentResourcePoolDedicatedResourcesMachineSpec(dict):
    """
    The machine configuration for a deployment resource pool: machine type plus an
    optional accelerator type and count.
    """

    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case property name, if known.
        suggest = {
            "acceleratorCount": "accelerator_count",
            "acceleratorType": "accelerator_type",
            "machineType": "machine_type",
        }.get(key)

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AiDeploymentResourcePoolDedicatedResourcesMachineSpec. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        AiDeploymentResourcePoolDedicatedResourcesMachineSpec.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        AiDeploymentResourcePoolDedicatedResourcesMachineSpec.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 accelerator_count: Optional[int] = None,
                 accelerator_type: Optional[str] = None,
                 machine_type: Optional[str] = None):
        """
        :param int accelerator_count: The number of accelerators to attach to the machine.
        :param str accelerator_type: The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
        :param str machine_type: The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types).
        """
        # Only set the properties that were explicitly provided.
        for prop, value in (("accelerator_count", accelerator_count),
                            ("accelerator_type", accelerator_type),
                            ("machine_type", machine_type)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="acceleratorCount")
    def accelerator_count(self) -> Optional[int]:
        """
        The number of accelerators to attach to the machine.
        """
        return pulumi.get(self, "accelerator_count")

    @property
    @pulumi.getter(name="acceleratorType")
    def accelerator_type(self) -> Optional[str]:
        """
        The type of accelerator(s) that may be attached to the machine as per
        accelerator_count. See possible values
        [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
        """
        return pulumi.get(self, "accelerator_type")

    @property
    @pulumi.getter(name="machineType")
    def machine_type(self) -> Optional[str]:
        """
        The type of the machine. See the
        [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types).
        """
        return pulumi.get(self, "machine_type")
|
301
|
+
|
302
|
+
|
109
303
|
@pulumi.output_type
|
110
304
|
class AiEndpointDeployedModel(dict):
|
111
305
|
@staticmethod
|