pulumi-gcp 7.17.0a1712402830__py3-none-any.whl → 7.18.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pulumi_gcp/__init__.py +27 -0
- pulumi_gcp/apphub/__init__.py +1 -0
- pulumi_gcp/apphub/get_application.py +220 -0
- pulumi_gcp/apphub/outputs.py +214 -0
- pulumi_gcp/applicationintegration/__init__.py +10 -0
- pulumi_gcp/applicationintegration/_inputs.py +119 -0
- pulumi_gcp/applicationintegration/client.py +574 -0
- pulumi_gcp/applicationintegration/outputs.py +122 -0
- pulumi_gcp/bigquery/_inputs.py +16 -0
- pulumi_gcp/bigquery/outputs.py +14 -0
- pulumi_gcp/bigquery/routine.py +98 -0
- pulumi_gcp/billing/project_info.py +4 -4
- pulumi_gcp/cloudquota/__init__.py +2 -0
- pulumi_gcp/cloudquota/_inputs.py +131 -0
- pulumi_gcp/cloudquota/outputs.py +118 -0
- pulumi_gcp/cloudquota/s_quota_preference.py +777 -0
- pulumi_gcp/cloudrunv2/_inputs.py +73 -0
- pulumi_gcp/cloudrunv2/outputs.py +126 -0
- pulumi_gcp/compute/interconnect_attachment.py +64 -0
- pulumi_gcp/compute/network_endpoint.py +8 -0
- pulumi_gcp/compute/network_endpoint_list.py +8 -0
- pulumi_gcp/compute/region_backend_service.py +28 -0
- pulumi_gcp/compute/target_instance.py +4 -4
- pulumi_gcp/config/__init__.pyi +2 -0
- pulumi_gcp/config/vars.py +4 -0
- pulumi_gcp/container/_inputs.py +17 -1
- pulumi_gcp/container/cluster.py +47 -0
- pulumi_gcp/container/get_cluster.py +11 -1
- pulumi_gcp/container/outputs.py +27 -2
- pulumi_gcp/databasemigrationservice/connection_profile.py +6 -6
- pulumi_gcp/dataflow/flex_template_job.py +84 -112
- pulumi_gcp/dataplex/task.py +16 -16
- pulumi_gcp/dataproc/_inputs.py +71 -0
- pulumi_gcp/dataproc/get_metastore_service.py +11 -1
- pulumi_gcp/dataproc/metastore_service.py +120 -0
- pulumi_gcp/dataproc/outputs.py +128 -0
- pulumi_gcp/firebase/app_check_service_config.py +2 -2
- pulumi_gcp/firestore/backup_schedule.py +23 -23
- pulumi_gcp/firestore/field.py +4 -4
- pulumi_gcp/gkehub/membership_binding.py +6 -6
- pulumi_gcp/gkehub/membership_rbac_role_binding.py +4 -4
- pulumi_gcp/gkehub/namespace.py +4 -4
- pulumi_gcp/gkehub/scope_rbac_role_binding.py +4 -4
- pulumi_gcp/gkeonprem/v_mware_cluster.py +49 -0
- pulumi_gcp/iap/tunnel_dest_group.py +2 -2
- pulumi_gcp/kms/_inputs.py +46 -0
- pulumi_gcp/kms/crypto_key.py +54 -0
- pulumi_gcp/kms/crypto_key_version.py +54 -0
- pulumi_gcp/kms/get_kms_crypto_key.py +11 -1
- pulumi_gcp/kms/outputs.py +54 -0
- pulumi_gcp/looker/instance.py +20 -30
- pulumi_gcp/orgpolicy/policy.py +2 -2
- pulumi_gcp/provider.py +20 -0
- pulumi_gcp/pubsub/subscription.py +4 -4
- pulumi_gcp/redis/cluster.py +91 -0
- pulumi_gcp/storage/bucket.py +28 -0
- pulumi_gcp/storage/get_bucket.py +17 -2
- pulumi_gcp/vertex/__init__.py +1 -0
- pulumi_gcp/vertex/_inputs.py +169 -0
- pulumi_gcp/vertex/ai_deployment_resource_pool.py +477 -0
- pulumi_gcp/vertex/outputs.py +194 -0
- pulumi_gcp/vpcaccess/connector.py +77 -28
- {pulumi_gcp-7.17.0a1712402830.dist-info → pulumi_gcp-7.18.0.dist-info}/METADATA +1 -1
- {pulumi_gcp-7.17.0a1712402830.dist-info → pulumi_gcp-7.18.0.dist-info}/RECORD +66 -58
- {pulumi_gcp-7.17.0a1712402830.dist-info → pulumi_gcp-7.18.0.dist-info}/WHEEL +0 -0
- {pulumi_gcp-7.17.0a1712402830.dist-info → pulumi_gcp-7.18.0.dist-info}/top_level.txt +0 -0
pulumi_gcp/vertex/_inputs.py
CHANGED
@@ -11,6 +11,9 @@ from .. import _utilities
|
|
11
11
|
|
12
12
|
__all__ = [
|
13
13
|
'AiDatasetEncryptionSpecArgs',
|
14
|
+
'AiDeploymentResourcePoolDedicatedResourcesArgs',
|
15
|
+
'AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs',
|
16
|
+
'AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs',
|
14
17
|
'AiEndpointDeployedModelArgs',
|
15
18
|
'AiEndpointDeployedModelAutomaticResourceArgs',
|
16
19
|
'AiEndpointDeployedModelDedicatedResourceArgs',
|
@@ -85,6 +88,172 @@ class AiDatasetEncryptionSpecArgs:
|
|
85
88
|
pulumi.set(self, "kms_key_name", value)
|
86
89
|
|
87
90
|
|
91
|
+
@pulumi.input_type
|
92
|
+
class AiDeploymentResourcePoolDedicatedResourcesArgs:
|
93
|
+
def __init__(__self__, *,
|
94
|
+
machine_spec: pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs'],
|
95
|
+
min_replica_count: pulumi.Input[int],
|
96
|
+
autoscaling_metric_specs: Optional[pulumi.Input[Sequence[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs']]]] = None,
|
97
|
+
max_replica_count: Optional[pulumi.Input[int]] = None):
|
98
|
+
"""
|
99
|
+
:param pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs'] machine_spec: The specification of a single machine used by the prediction
|
100
|
+
Structure is documented below.
|
101
|
+
:param pulumi.Input[int] min_replica_count: The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
|
102
|
+
:param pulumi.Input[Sequence[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs']]] autoscaling_metric_specs: A list of the metric specifications that overrides a resource utilization metric.
|
103
|
+
Structure is documented below.
|
104
|
+
:param pulumi.Input[int] max_replica_count: The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
|
105
|
+
"""
|
106
|
+
pulumi.set(__self__, "machine_spec", machine_spec)
|
107
|
+
pulumi.set(__self__, "min_replica_count", min_replica_count)
|
108
|
+
if autoscaling_metric_specs is not None:
|
109
|
+
pulumi.set(__self__, "autoscaling_metric_specs", autoscaling_metric_specs)
|
110
|
+
if max_replica_count is not None:
|
111
|
+
pulumi.set(__self__, "max_replica_count", max_replica_count)
|
112
|
+
|
113
|
+
@property
|
114
|
+
@pulumi.getter(name="machineSpec")
|
115
|
+
def machine_spec(self) -> pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs']:
|
116
|
+
"""
|
117
|
+
The specification of a single machine used by the prediction
|
118
|
+
Structure is documented below.
|
119
|
+
"""
|
120
|
+
return pulumi.get(self, "machine_spec")
|
121
|
+
|
122
|
+
@machine_spec.setter
|
123
|
+
def machine_spec(self, value: pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs']):
|
124
|
+
pulumi.set(self, "machine_spec", value)
|
125
|
+
|
126
|
+
@property
|
127
|
+
@pulumi.getter(name="minReplicaCount")
|
128
|
+
def min_replica_count(self) -> pulumi.Input[int]:
|
129
|
+
"""
|
130
|
+
The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
|
131
|
+
"""
|
132
|
+
return pulumi.get(self, "min_replica_count")
|
133
|
+
|
134
|
+
@min_replica_count.setter
|
135
|
+
def min_replica_count(self, value: pulumi.Input[int]):
|
136
|
+
pulumi.set(self, "min_replica_count", value)
|
137
|
+
|
138
|
+
@property
|
139
|
+
@pulumi.getter(name="autoscalingMetricSpecs")
|
140
|
+
def autoscaling_metric_specs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs']]]]:
|
141
|
+
"""
|
142
|
+
A list of the metric specifications that overrides a resource utilization metric.
|
143
|
+
Structure is documented below.
|
144
|
+
"""
|
145
|
+
return pulumi.get(self, "autoscaling_metric_specs")
|
146
|
+
|
147
|
+
@autoscaling_metric_specs.setter
|
148
|
+
def autoscaling_metric_specs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs']]]]):
|
149
|
+
pulumi.set(self, "autoscaling_metric_specs", value)
|
150
|
+
|
151
|
+
@property
|
152
|
+
@pulumi.getter(name="maxReplicaCount")
|
153
|
+
def max_replica_count(self) -> Optional[pulumi.Input[int]]:
|
154
|
+
"""
|
155
|
+
The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
|
156
|
+
"""
|
157
|
+
return pulumi.get(self, "max_replica_count")
|
158
|
+
|
159
|
+
@max_replica_count.setter
|
160
|
+
def max_replica_count(self, value: Optional[pulumi.Input[int]]):
|
161
|
+
pulumi.set(self, "max_replica_count", value)
|
162
|
+
|
163
|
+
|
164
|
+
@pulumi.input_type
|
165
|
+
class AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs:
|
166
|
+
def __init__(__self__, *,
|
167
|
+
metric_name: pulumi.Input[str],
|
168
|
+
target: Optional[pulumi.Input[int]] = None):
|
169
|
+
"""
|
170
|
+
:param pulumi.Input[str] metric_name: The resource metric name. Supported metrics: For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
|
171
|
+
:param pulumi.Input[int] target: The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
|
172
|
+
"""
|
173
|
+
pulumi.set(__self__, "metric_name", metric_name)
|
174
|
+
if target is not None:
|
175
|
+
pulumi.set(__self__, "target", target)
|
176
|
+
|
177
|
+
@property
|
178
|
+
@pulumi.getter(name="metricName")
|
179
|
+
def metric_name(self) -> pulumi.Input[str]:
|
180
|
+
"""
|
181
|
+
The resource metric name. Supported metrics: For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
|
182
|
+
"""
|
183
|
+
return pulumi.get(self, "metric_name")
|
184
|
+
|
185
|
+
@metric_name.setter
|
186
|
+
def metric_name(self, value: pulumi.Input[str]):
|
187
|
+
pulumi.set(self, "metric_name", value)
|
188
|
+
|
189
|
+
@property
|
190
|
+
@pulumi.getter
|
191
|
+
def target(self) -> Optional[pulumi.Input[int]]:
|
192
|
+
"""
|
193
|
+
The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
|
194
|
+
"""
|
195
|
+
return pulumi.get(self, "target")
|
196
|
+
|
197
|
+
@target.setter
|
198
|
+
def target(self, value: Optional[pulumi.Input[int]]):
|
199
|
+
pulumi.set(self, "target", value)
|
200
|
+
|
201
|
+
|
202
|
+
@pulumi.input_type
|
203
|
+
class AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs:
|
204
|
+
def __init__(__self__, *,
|
205
|
+
accelerator_count: Optional[pulumi.Input[int]] = None,
|
206
|
+
accelerator_type: Optional[pulumi.Input[str]] = None,
|
207
|
+
machine_type: Optional[pulumi.Input[str]] = None):
|
208
|
+
"""
|
209
|
+
:param pulumi.Input[int] accelerator_count: The number of accelerators to attach to the machine.
|
210
|
+
:param pulumi.Input[str] accelerator_type: The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
|
211
|
+
:param pulumi.Input[str] machine_type: The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types).
|
212
|
+
"""
|
213
|
+
if accelerator_count is not None:
|
214
|
+
pulumi.set(__self__, "accelerator_count", accelerator_count)
|
215
|
+
if accelerator_type is not None:
|
216
|
+
pulumi.set(__self__, "accelerator_type", accelerator_type)
|
217
|
+
if machine_type is not None:
|
218
|
+
pulumi.set(__self__, "machine_type", machine_type)
|
219
|
+
|
220
|
+
@property
|
221
|
+
@pulumi.getter(name="acceleratorCount")
|
222
|
+
def accelerator_count(self) -> Optional[pulumi.Input[int]]:
|
223
|
+
"""
|
224
|
+
The number of accelerators to attach to the machine.
|
225
|
+
"""
|
226
|
+
return pulumi.get(self, "accelerator_count")
|
227
|
+
|
228
|
+
@accelerator_count.setter
|
229
|
+
def accelerator_count(self, value: Optional[pulumi.Input[int]]):
|
230
|
+
pulumi.set(self, "accelerator_count", value)
|
231
|
+
|
232
|
+
@property
|
233
|
+
@pulumi.getter(name="acceleratorType")
|
234
|
+
def accelerator_type(self) -> Optional[pulumi.Input[str]]:
|
235
|
+
"""
|
236
|
+
The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
|
237
|
+
"""
|
238
|
+
return pulumi.get(self, "accelerator_type")
|
239
|
+
|
240
|
+
@accelerator_type.setter
|
241
|
+
def accelerator_type(self, value: Optional[pulumi.Input[str]]):
|
242
|
+
pulumi.set(self, "accelerator_type", value)
|
243
|
+
|
244
|
+
@property
|
245
|
+
@pulumi.getter(name="machineType")
|
246
|
+
def machine_type(self) -> Optional[pulumi.Input[str]]:
|
247
|
+
"""
|
248
|
+
The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types).
|
249
|
+
"""
|
250
|
+
return pulumi.get(self, "machine_type")
|
251
|
+
|
252
|
+
@machine_type.setter
|
253
|
+
def machine_type(self, value: Optional[pulumi.Input[str]]):
|
254
|
+
pulumi.set(self, "machine_type", value)
|
255
|
+
|
256
|
+
|
88
257
|
@pulumi.input_type
|
89
258
|
class AiEndpointDeployedModelArgs:
|
90
259
|
def __init__(__self__, *,
|
@@ -0,0 +1,477 @@
|
|
1
|
+
# coding=utf-8
|
2
|
+
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
|
3
|
+
# *** Do not edit by hand unless you're certain you know what you are doing! ***
|
4
|
+
|
5
|
+
import copy
|
6
|
+
import warnings
|
7
|
+
import pulumi
|
8
|
+
import pulumi.runtime
|
9
|
+
from typing import Any, Mapping, Optional, Sequence, Union, overload
|
10
|
+
from .. import _utilities
|
11
|
+
from . import outputs
|
12
|
+
from ._inputs import *
|
13
|
+
|
14
|
+
__all__ = ['AiDeploymentResourcePoolArgs', 'AiDeploymentResourcePool']
|
15
|
+
|
16
|
+
@pulumi.input_type
|
17
|
+
class AiDeploymentResourcePoolArgs:
|
18
|
+
def __init__(__self__, *,
|
19
|
+
dedicated_resources: Optional[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs']] = None,
|
20
|
+
name: Optional[pulumi.Input[str]] = None,
|
21
|
+
project: Optional[pulumi.Input[str]] = None,
|
22
|
+
region: Optional[pulumi.Input[str]] = None):
|
23
|
+
"""
|
24
|
+
The set of arguments for constructing a AiDeploymentResourcePool resource.
|
25
|
+
:param pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs'] dedicated_resources: The underlying dedicated resources that the deployment resource pool uses.
|
26
|
+
Structure is documented below.
|
27
|
+
:param pulumi.Input[str] name: The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
|
28
|
+
|
29
|
+
|
30
|
+
- - -
|
31
|
+
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
|
32
|
+
If it is not provided, the provider project is used.
|
33
|
+
:param pulumi.Input[str] region: The region of deployment resource pool. eg us-central1
|
34
|
+
"""
|
35
|
+
if dedicated_resources is not None:
|
36
|
+
pulumi.set(__self__, "dedicated_resources", dedicated_resources)
|
37
|
+
if name is not None:
|
38
|
+
pulumi.set(__self__, "name", name)
|
39
|
+
if project is not None:
|
40
|
+
pulumi.set(__self__, "project", project)
|
41
|
+
if region is not None:
|
42
|
+
pulumi.set(__self__, "region", region)
|
43
|
+
|
44
|
+
@property
|
45
|
+
@pulumi.getter(name="dedicatedResources")
|
46
|
+
def dedicated_resources(self) -> Optional[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs']]:
|
47
|
+
"""
|
48
|
+
The underlying dedicated resources that the deployment resource pool uses.
|
49
|
+
Structure is documented below.
|
50
|
+
"""
|
51
|
+
return pulumi.get(self, "dedicated_resources")
|
52
|
+
|
53
|
+
@dedicated_resources.setter
|
54
|
+
def dedicated_resources(self, value: Optional[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs']]):
|
55
|
+
pulumi.set(self, "dedicated_resources", value)
|
56
|
+
|
57
|
+
@property
|
58
|
+
@pulumi.getter
|
59
|
+
def name(self) -> Optional[pulumi.Input[str]]:
|
60
|
+
"""
|
61
|
+
The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
|
62
|
+
|
63
|
+
|
64
|
+
- - -
|
65
|
+
"""
|
66
|
+
return pulumi.get(self, "name")
|
67
|
+
|
68
|
+
@name.setter
|
69
|
+
def name(self, value: Optional[pulumi.Input[str]]):
|
70
|
+
pulumi.set(self, "name", value)
|
71
|
+
|
72
|
+
@property
|
73
|
+
@pulumi.getter
|
74
|
+
def project(self) -> Optional[pulumi.Input[str]]:
|
75
|
+
"""
|
76
|
+
The ID of the project in which the resource belongs.
|
77
|
+
If it is not provided, the provider project is used.
|
78
|
+
"""
|
79
|
+
return pulumi.get(self, "project")
|
80
|
+
|
81
|
+
@project.setter
|
82
|
+
def project(self, value: Optional[pulumi.Input[str]]):
|
83
|
+
pulumi.set(self, "project", value)
|
84
|
+
|
85
|
+
@property
|
86
|
+
@pulumi.getter
|
87
|
+
def region(self) -> Optional[pulumi.Input[str]]:
|
88
|
+
"""
|
89
|
+
The region of deployment resource pool. eg us-central1
|
90
|
+
"""
|
91
|
+
return pulumi.get(self, "region")
|
92
|
+
|
93
|
+
@region.setter
|
94
|
+
def region(self, value: Optional[pulumi.Input[str]]):
|
95
|
+
pulumi.set(self, "region", value)
|
96
|
+
|
97
|
+
|
98
|
+
@pulumi.input_type
|
99
|
+
class _AiDeploymentResourcePoolState:
|
100
|
+
def __init__(__self__, *,
|
101
|
+
create_time: Optional[pulumi.Input[str]] = None,
|
102
|
+
dedicated_resources: Optional[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs']] = None,
|
103
|
+
name: Optional[pulumi.Input[str]] = None,
|
104
|
+
project: Optional[pulumi.Input[str]] = None,
|
105
|
+
region: Optional[pulumi.Input[str]] = None):
|
106
|
+
"""
|
107
|
+
Input properties used for looking up and filtering AiDeploymentResourcePool resources.
|
108
|
+
:param pulumi.Input[str] create_time: A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
|
109
|
+
:param pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs'] dedicated_resources: The underlying dedicated resources that the deployment resource pool uses.
|
110
|
+
Structure is documented below.
|
111
|
+
:param pulumi.Input[str] name: The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
|
112
|
+
|
113
|
+
|
114
|
+
- - -
|
115
|
+
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
|
116
|
+
If it is not provided, the provider project is used.
|
117
|
+
:param pulumi.Input[str] region: The region of deployment resource pool. eg us-central1
|
118
|
+
"""
|
119
|
+
if create_time is not None:
|
120
|
+
pulumi.set(__self__, "create_time", create_time)
|
121
|
+
if dedicated_resources is not None:
|
122
|
+
pulumi.set(__self__, "dedicated_resources", dedicated_resources)
|
123
|
+
if name is not None:
|
124
|
+
pulumi.set(__self__, "name", name)
|
125
|
+
if project is not None:
|
126
|
+
pulumi.set(__self__, "project", project)
|
127
|
+
if region is not None:
|
128
|
+
pulumi.set(__self__, "region", region)
|
129
|
+
|
130
|
+
@property
|
131
|
+
@pulumi.getter(name="createTime")
|
132
|
+
def create_time(self) -> Optional[pulumi.Input[str]]:
|
133
|
+
"""
|
134
|
+
A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
|
135
|
+
"""
|
136
|
+
return pulumi.get(self, "create_time")
|
137
|
+
|
138
|
+
@create_time.setter
|
139
|
+
def create_time(self, value: Optional[pulumi.Input[str]]):
|
140
|
+
pulumi.set(self, "create_time", value)
|
141
|
+
|
142
|
+
@property
|
143
|
+
@pulumi.getter(name="dedicatedResources")
|
144
|
+
def dedicated_resources(self) -> Optional[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs']]:
|
145
|
+
"""
|
146
|
+
The underlying dedicated resources that the deployment resource pool uses.
|
147
|
+
Structure is documented below.
|
148
|
+
"""
|
149
|
+
return pulumi.get(self, "dedicated_resources")
|
150
|
+
|
151
|
+
@dedicated_resources.setter
|
152
|
+
def dedicated_resources(self, value: Optional[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesArgs']]):
|
153
|
+
pulumi.set(self, "dedicated_resources", value)
|
154
|
+
|
155
|
+
@property
|
156
|
+
@pulumi.getter
|
157
|
+
def name(self) -> Optional[pulumi.Input[str]]:
|
158
|
+
"""
|
159
|
+
The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
|
160
|
+
|
161
|
+
|
162
|
+
- - -
|
163
|
+
"""
|
164
|
+
return pulumi.get(self, "name")
|
165
|
+
|
166
|
+
@name.setter
|
167
|
+
def name(self, value: Optional[pulumi.Input[str]]):
|
168
|
+
pulumi.set(self, "name", value)
|
169
|
+
|
170
|
+
@property
|
171
|
+
@pulumi.getter
|
172
|
+
def project(self) -> Optional[pulumi.Input[str]]:
|
173
|
+
"""
|
174
|
+
The ID of the project in which the resource belongs.
|
175
|
+
If it is not provided, the provider project is used.
|
176
|
+
"""
|
177
|
+
return pulumi.get(self, "project")
|
178
|
+
|
179
|
+
@project.setter
|
180
|
+
def project(self, value: Optional[pulumi.Input[str]]):
|
181
|
+
pulumi.set(self, "project", value)
|
182
|
+
|
183
|
+
@property
|
184
|
+
@pulumi.getter
|
185
|
+
def region(self) -> Optional[pulumi.Input[str]]:
|
186
|
+
"""
|
187
|
+
The region of deployment resource pool. eg us-central1
|
188
|
+
"""
|
189
|
+
return pulumi.get(self, "region")
|
190
|
+
|
191
|
+
@region.setter
|
192
|
+
def region(self, value: Optional[pulumi.Input[str]]):
|
193
|
+
pulumi.set(self, "region", value)
|
194
|
+
|
195
|
+
|
196
|
+
class AiDeploymentResourcePool(pulumi.CustomResource):
|
197
|
+
@overload
|
198
|
+
def __init__(__self__,
|
199
|
+
resource_name: str,
|
200
|
+
opts: Optional[pulumi.ResourceOptions] = None,
|
201
|
+
dedicated_resources: Optional[pulumi.Input[pulumi.InputType['AiDeploymentResourcePoolDedicatedResourcesArgs']]] = None,
|
202
|
+
name: Optional[pulumi.Input[str]] = None,
|
203
|
+
project: Optional[pulumi.Input[str]] = None,
|
204
|
+
region: Optional[pulumi.Input[str]] = None,
|
205
|
+
__props__=None):
|
206
|
+
"""
|
207
|
+
'DeploymentResourcePool can be shared by multiple deployed models,
|
208
|
+
whose underlying specification consists of dedicated resources.'
|
209
|
+
|
210
|
+
To get more information about DeploymentResourcePool, see:
|
211
|
+
|
212
|
+
* [API documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.deploymentResourcePools)
|
213
|
+
|
214
|
+
## Example Usage
|
215
|
+
|
216
|
+
### Vertex Ai Deployment Resource Pool
|
217
|
+
|
218
|
+
<!--Start PulumiCodeChooser -->
|
219
|
+
```python
|
220
|
+
import pulumi
|
221
|
+
import pulumi_gcp as gcp
|
222
|
+
|
223
|
+
deployment_resource_pool = gcp.vertex.AiDeploymentResourcePool("deployment_resource_pool",
|
224
|
+
region="us-central1",
|
225
|
+
name="example-deployment-resource-pool",
|
226
|
+
dedicated_resources=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesArgs(
|
227
|
+
machine_spec=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs(
|
228
|
+
machine_type="n1-standard-4",
|
229
|
+
accelerator_type="NVIDIA_TESLA_K80",
|
230
|
+
accelerator_count=1,
|
231
|
+
),
|
232
|
+
min_replica_count=1,
|
233
|
+
max_replica_count=2,
|
234
|
+
autoscaling_metric_specs=[gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs(
|
235
|
+
metric_name="aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
|
236
|
+
target=60,
|
237
|
+
)],
|
238
|
+
))
|
239
|
+
```
|
240
|
+
<!--End PulumiCodeChooser -->
|
241
|
+
|
242
|
+
## Import
|
243
|
+
|
244
|
+
DeploymentResourcePool can be imported using any of these accepted formats:
|
245
|
+
|
246
|
+
* `projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}`
|
247
|
+
|
248
|
+
* `{{project}}/{{region}}/{{name}}`
|
249
|
+
|
250
|
+
* `{{region}}/{{name}}`
|
251
|
+
|
252
|
+
* `{{name}}`
|
253
|
+
|
254
|
+
When using the `pulumi import` command, DeploymentResourcePool can be imported using one of the formats above. For example:
|
255
|
+
|
256
|
+
```sh
|
257
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}
|
258
|
+
```
|
259
|
+
|
260
|
+
```sh
|
261
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{project}}/{{region}}/{{name}}
|
262
|
+
```
|
263
|
+
|
264
|
+
```sh
|
265
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{region}}/{{name}}
|
266
|
+
```
|
267
|
+
|
268
|
+
```sh
|
269
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{name}}
|
270
|
+
```
|
271
|
+
|
272
|
+
:param str resource_name: The name of the resource.
|
273
|
+
:param pulumi.ResourceOptions opts: Options for the resource.
|
274
|
+
:param pulumi.Input[pulumi.InputType['AiDeploymentResourcePoolDedicatedResourcesArgs']] dedicated_resources: The underlying dedicated resources that the deployment resource pool uses.
|
275
|
+
Structure is documented below.
|
276
|
+
:param pulumi.Input[str] name: The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
|
277
|
+
|
278
|
+
|
279
|
+
- - -
|
280
|
+
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
|
281
|
+
If it is not provided, the provider project is used.
|
282
|
+
:param pulumi.Input[str] region: The region of deployment resource pool. eg us-central1
|
283
|
+
"""
|
284
|
+
...
|
285
|
+
@overload
|
286
|
+
def __init__(__self__,
|
287
|
+
resource_name: str,
|
288
|
+
args: Optional[AiDeploymentResourcePoolArgs] = None,
|
289
|
+
opts: Optional[pulumi.ResourceOptions] = None):
|
290
|
+
"""
|
291
|
+
'DeploymentResourcePool can be shared by multiple deployed models,
|
292
|
+
whose underlying specification consists of dedicated resources.'
|
293
|
+
|
294
|
+
To get more information about DeploymentResourcePool, see:
|
295
|
+
|
296
|
+
* [API documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.deploymentResourcePools)
|
297
|
+
|
298
|
+
## Example Usage
|
299
|
+
|
300
|
+
### Vertex Ai Deployment Resource Pool
|
301
|
+
|
302
|
+
<!--Start PulumiCodeChooser -->
|
303
|
+
```python
|
304
|
+
import pulumi
|
305
|
+
import pulumi_gcp as gcp
|
306
|
+
|
307
|
+
deployment_resource_pool = gcp.vertex.AiDeploymentResourcePool("deployment_resource_pool",
|
308
|
+
region="us-central1",
|
309
|
+
name="example-deployment-resource-pool",
|
310
|
+
dedicated_resources=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesArgs(
|
311
|
+
machine_spec=gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs(
|
312
|
+
machine_type="n1-standard-4",
|
313
|
+
accelerator_type="NVIDIA_TESLA_K80",
|
314
|
+
accelerator_count=1,
|
315
|
+
),
|
316
|
+
min_replica_count=1,
|
317
|
+
max_replica_count=2,
|
318
|
+
autoscaling_metric_specs=[gcp.vertex.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs(
|
319
|
+
metric_name="aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
|
320
|
+
target=60,
|
321
|
+
)],
|
322
|
+
))
|
323
|
+
```
|
324
|
+
<!--End PulumiCodeChooser -->
|
325
|
+
|
326
|
+
## Import
|
327
|
+
|
328
|
+
DeploymentResourcePool can be imported using any of these accepted formats:
|
329
|
+
|
330
|
+
* `projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}`
|
331
|
+
|
332
|
+
* `{{project}}/{{region}}/{{name}}`
|
333
|
+
|
334
|
+
* `{{region}}/{{name}}`
|
335
|
+
|
336
|
+
* `{{name}}`
|
337
|
+
|
338
|
+
When using the `pulumi import` command, DeploymentResourcePool can be imported using one of the formats above. For example:
|
339
|
+
|
340
|
+
```sh
|
341
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default projects/{{project}}/locations/{{region}}/deploymentResourcePools/{{name}}
|
342
|
+
```
|
343
|
+
|
344
|
+
```sh
|
345
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{project}}/{{region}}/{{name}}
|
346
|
+
```
|
347
|
+
|
348
|
+
```sh
|
349
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{region}}/{{name}}
|
350
|
+
```
|
351
|
+
|
352
|
+
```sh
|
353
|
+
$ pulumi import gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool default {{name}}
|
354
|
+
```
|
355
|
+
|
356
|
+
:param str resource_name: The name of the resource.
|
357
|
+
:param AiDeploymentResourcePoolArgs args: The arguments to use to populate this resource's properties.
|
358
|
+
:param pulumi.ResourceOptions opts: Options for the resource.
|
359
|
+
"""
|
360
|
+
...
|
361
|
+
def __init__(__self__, resource_name: str, *args, **kwargs):
|
362
|
+
resource_args, opts = _utilities.get_resource_args_opts(AiDeploymentResourcePoolArgs, pulumi.ResourceOptions, *args, **kwargs)
|
363
|
+
if resource_args is not None:
|
364
|
+
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
|
365
|
+
else:
|
366
|
+
__self__._internal_init(resource_name, *args, **kwargs)
|
367
|
+
|
368
|
+
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   dedicated_resources: Optional[pulumi.Input[pulumi.InputType['AiDeploymentResourcePoolDedicatedResourcesArgs']]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   project: Optional[pulumi.Input[str]] = None,
                   region: Optional[pulumi.Input[str]] = None,
                   __props__=None):
    """
    Shared constructor body: validate options, assemble the input property
    bag, and register the resource with the Pulumi engine.

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param dedicated_resources: The underlying dedicated resources that the
           deployment resource pool uses.
    :param name: The resource name of deployment resource pool.
    :param project: The ID of the project in which the resource belongs.
    :param region: The region of deployment resource pool. eg us-central1
    :param __props__: Internal: a pre-built state bag; only valid together
           with ``opts.id`` (i.e. when rehydrating an existing resource).
    """
    # Layer provider-wide option defaults underneath whatever the caller gave us.
    opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.id is None:
        # Creating a brand-new resource: the property bag must be built here,
        # never supplied by the caller.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = AiDeploymentResourcePoolArgs.__new__(AiDeploymentResourcePoolArgs)
        # Populate inputs in declaration order; create_time is output-only and
        # therefore starts as None.
        for prop_key, prop_value in (
                ("dedicated_resources", dedicated_resources),
                ("name", name),
                ("project", project),
                ("region", region),
                ("create_time", None)):
            __props__.__dict__[prop_key] = prop_value
    super(AiDeploymentResourcePool, __self__).__init__(
        'gcp:vertex/aiDeploymentResourcePool:AiDeploymentResourcePool',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        create_time: Optional[pulumi.Input[str]] = None,
        dedicated_resources: Optional[pulumi.Input[pulumi.InputType['AiDeploymentResourcePoolDedicatedResourcesArgs']]] = None,
        name: Optional[pulumi.Input[str]] = None,
        project: Optional[pulumi.Input[str]] = None,
        region: Optional[pulumi.Input[str]] = None) -> 'AiDeploymentResourcePool':
    """
    Look up an existing AiDeploymentResourcePool resource by name and
    provider ID, optionally seeding known state properties.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] create_time: A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.
    :param pulumi.Input[pulumi.InputType['AiDeploymentResourcePoolDedicatedResourcesArgs']] dedicated_resources: The underlying dedicated resources that the deployment resource pool uses.
           Structure is documented below.
    :param pulumi.Input[str] name: The resource name of deployment resource pool. The maximum length is 63 characters, and valid characters are `/^a-z?$/`.
    :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
           If it is not provided, the provider project is used.
    :param pulumi.Input[str] region: The region of deployment resource pool. eg us-central1
    """
    # Fold the provider ID into the options so the engine performs a read
    # instead of a create.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = _AiDeploymentResourcePoolState.__new__(_AiDeploymentResourcePoolState)
    # Seed whichever state properties the caller already knows.
    for state_key, state_value in (
            ("create_time", create_time),
            ("dedicated_resources", dedicated_resources),
            ("name", name),
            ("project", project),
            ("region", region)):
        __props__.__dict__[state_key] = state_value
    return AiDeploymentResourcePool(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
    """
    Creation timestamp in RFC3339 UTC "Zulu" format, with nanosecond
    resolution and up to nine fractional digits.
    """
    # Output-only value tracked by the Pulumi runtime under this property name.
    timestamp = pulumi.get(self, "create_time")
    return timestamp
@property
@pulumi.getter(name="dedicatedResources")
def dedicated_resources(self) -> pulumi.Output[Optional['outputs.AiDeploymentResourcePoolDedicatedResources']]:
    """
    The underlying dedicated resources that the deployment resource pool
    uses. Structure is documented below.
    """
    # Delegate to the Pulumi runtime's property store.
    resources = pulumi.get(self, "dedicated_resources")
    return resources
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    The resource name of the deployment resource pool. Maximum length is 63
    characters; valid characters are `/^a-z?$/`.
    """
    # Delegate to the Pulumi runtime's property store.
    resource_pool_name = pulumi.get(self, "name")
    return resource_pool_name
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
    """
    The ID of the project in which the resource belongs. If it is not
    provided, the provider project is used.
    """
    # Delegate to the Pulumi runtime's property store.
    project_id = pulumi.get(self, "project")
    return project_id
@property
@pulumi.getter
def region(self) -> pulumi.Output[Optional[str]]:
    """
    The region of the deployment resource pool, e.g. us-central1.
    """
    # Delegate to the Pulumi runtime's property store.
    region_value = pulumi.get(self, "region")
    return region_value
|
477
|
+
|