pulumi-gcp 8.40.0a1754721948__py3-none-any.whl → 8.40.0a1754951145__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (157)
  1. pulumi_gcp/__init__.py +128 -0
  2. pulumi_gcp/accesscontextmanager/_inputs.py +24 -4
  3. pulumi_gcp/accesscontextmanager/outputs.py +15 -3
  4. pulumi_gcp/apigee/__init__.py +2 -0
  5. pulumi_gcp/apigee/_inputs.py +1435 -0
  6. pulumi_gcp/apigee/api_product.py +1698 -0
  7. pulumi_gcp/apigee/outputs.py +1081 -0
  8. pulumi_gcp/apigee/security_action.py +1010 -0
  9. pulumi_gcp/artifactregistry/__init__.py +1 -0
  10. pulumi_gcp/artifactregistry/get_docker_images.py +164 -0
  11. pulumi_gcp/artifactregistry/outputs.py +109 -2
  12. pulumi_gcp/artifactregistry/repository.py +6 -6
  13. pulumi_gcp/backupdisasterrecovery/backup_vault.py +56 -0
  14. pulumi_gcp/backupdisasterrecovery/get_backup_vault.py +12 -1
  15. pulumi_gcp/bigquery/_inputs.py +6 -0
  16. pulumi_gcp/bigquery/get_table.py +23 -1
  17. pulumi_gcp/bigquery/outputs.py +4 -0
  18. pulumi_gcp/bigquery/table.py +62 -0
  19. pulumi_gcp/bigqueryanalyticshub/_inputs.py +180 -0
  20. pulumi_gcp/bigqueryanalyticshub/data_exchange.py +80 -0
  21. pulumi_gcp/bigqueryanalyticshub/listing.py +322 -2
  22. pulumi_gcp/bigqueryanalyticshub/listing_subscription.py +32 -0
  23. pulumi_gcp/bigqueryanalyticshub/outputs.py +159 -0
  24. pulumi_gcp/bigtable/__init__.py +1 -0
  25. pulumi_gcp/bigtable/_inputs.py +33 -0
  26. pulumi_gcp/bigtable/outputs.py +36 -0
  27. pulumi_gcp/bigtable/schema_bundle.py +568 -0
  28. pulumi_gcp/cloudfunctions/_inputs.py +48 -0
  29. pulumi_gcp/cloudfunctions/function.py +94 -0
  30. pulumi_gcp/cloudfunctions/get_function.py +23 -1
  31. pulumi_gcp/cloudfunctions/outputs.py +70 -0
  32. pulumi_gcp/cloudrunv2/_inputs.py +20 -0
  33. pulumi_gcp/cloudrunv2/job.py +2 -0
  34. pulumi_gcp/cloudrunv2/outputs.py +25 -0
  35. pulumi_gcp/cloudrunv2/worker_pool.py +2 -0
  36. pulumi_gcp/compute/__init__.py +1 -0
  37. pulumi_gcp/compute/_inputs.py +713 -22
  38. pulumi_gcp/compute/firewall_policy_with_rules.py +66 -0
  39. pulumi_gcp/compute/forwarding_rule.py +0 -21
  40. pulumi_gcp/compute/get_router.py +12 -1
  41. pulumi_gcp/compute/outputs.py +562 -22
  42. pulumi_gcp/compute/preview_feature.py +396 -0
  43. pulumi_gcp/compute/region_url_map.py +392 -0
  44. pulumi_gcp/compute/reservation.py +4 -4
  45. pulumi_gcp/compute/router.py +54 -0
  46. pulumi_gcp/compute/storage_pool.py +154 -0
  47. pulumi_gcp/compute/subnetwork.py +54 -0
  48. pulumi_gcp/config/__init__.pyi +2 -0
  49. pulumi_gcp/config/vars.py +4 -0
  50. pulumi_gcp/container/_inputs.py +278 -8
  51. pulumi_gcp/container/cluster.py +61 -21
  52. pulumi_gcp/container/get_cluster.py +12 -1
  53. pulumi_gcp/container/outputs.py +352 -8
  54. pulumi_gcp/dataproc/_inputs.py +249 -14
  55. pulumi_gcp/dataproc/batch.py +6 -0
  56. pulumi_gcp/dataproc/cluster.py +2 -0
  57. pulumi_gcp/dataproc/outputs.py +215 -12
  58. pulumi_gcp/dataproc/session_template.py +14 -2
  59. pulumi_gcp/developerconnect/__init__.py +1 -0
  60. pulumi_gcp/developerconnect/_inputs.py +583 -0
  61. pulumi_gcp/developerconnect/insights_config.py +895 -0
  62. pulumi_gcp/developerconnect/outputs.py +442 -0
  63. pulumi_gcp/diagflow/__init__.py +1 -0
  64. pulumi_gcp/diagflow/_inputs.py +1165 -58
  65. pulumi_gcp/diagflow/cx_generator.py +636 -0
  66. pulumi_gcp/diagflow/cx_tool.py +2 -2
  67. pulumi_gcp/diagflow/cx_webhook.py +380 -36
  68. pulumi_gcp/diagflow/outputs.py +848 -25
  69. pulumi_gcp/discoveryengine/__init__.py +2 -0
  70. pulumi_gcp/discoveryengine/_inputs.py +465 -0
  71. pulumi_gcp/discoveryengine/cmek_config.py +707 -0
  72. pulumi_gcp/discoveryengine/outputs.py +412 -0
  73. pulumi_gcp/discoveryengine/recommendation_engine.py +813 -0
  74. pulumi_gcp/firestore/field.py +6 -6
  75. pulumi_gcp/gemini/gemini_gcp_enablement_setting.py +107 -9
  76. pulumi_gcp/gemini/gemini_gcp_enablement_setting_binding.py +2 -2
  77. pulumi_gcp/gkehub/membership_binding.py +6 -6
  78. pulumi_gcp/gkehub/membership_rbac_role_binding.py +4 -4
  79. pulumi_gcp/gkehub/namespace.py +4 -4
  80. pulumi_gcp/gkehub/scope_rbac_role_binding.py +8 -8
  81. pulumi_gcp/iam/__init__.py +4 -0
  82. pulumi_gcp/iam/_inputs.py +98 -0
  83. pulumi_gcp/iam/get_workforce_pool_iam_policy.py +161 -0
  84. pulumi_gcp/iam/outputs.py +56 -0
  85. pulumi_gcp/iam/workforce_pool_iam_binding.py +761 -0
  86. pulumi_gcp/iam/workforce_pool_iam_member.py +761 -0
  87. pulumi_gcp/iam/workforce_pool_iam_policy.py +600 -0
  88. pulumi_gcp/iap/tunnel_dest_group.py +2 -2
  89. pulumi_gcp/integrationconnectors/managed_zone.py +8 -8
  90. pulumi_gcp/looker/instance.py +28 -7
  91. pulumi_gcp/managedkafka/_inputs.py +127 -0
  92. pulumi_gcp/managedkafka/cluster.py +131 -1
  93. pulumi_gcp/managedkafka/connect_cluster.py +4 -4
  94. pulumi_gcp/managedkafka/connector.py +4 -4
  95. pulumi_gcp/managedkafka/outputs.py +128 -0
  96. pulumi_gcp/memorystore/instance.py +8 -12
  97. pulumi_gcp/modelarmor/__init__.py +1 -0
  98. pulumi_gcp/modelarmor/_inputs.py +683 -0
  99. pulumi_gcp/modelarmor/floorsetting.py +736 -0
  100. pulumi_gcp/modelarmor/outputs.py +618 -0
  101. pulumi_gcp/networkconnectivity/_inputs.py +60 -0
  102. pulumi_gcp/networkconnectivity/internal_range.py +136 -0
  103. pulumi_gcp/networkconnectivity/outputs.py +55 -0
  104. pulumi_gcp/networkconnectivity/spoke.py +14 -14
  105. pulumi_gcp/oracledatabase/__init__.py +2 -0
  106. pulumi_gcp/oracledatabase/autonomous_database.py +262 -38
  107. pulumi_gcp/oracledatabase/cloud_vm_cluster.py +314 -50
  108. pulumi_gcp/oracledatabase/get_autonomous_database.py +23 -1
  109. pulumi_gcp/oracledatabase/get_cloud_vm_cluster.py +34 -1
  110. pulumi_gcp/oracledatabase/odb_network.py +721 -0
  111. pulumi_gcp/oracledatabase/odb_subnet.py +803 -0
  112. pulumi_gcp/oracledatabase/outputs.py +83 -0
  113. pulumi_gcp/orgpolicy/policy.py +2 -2
  114. pulumi_gcp/parametermanager/parameter_version.py +62 -0
  115. pulumi_gcp/parametermanager/regional_parameter_version.py +64 -0
  116. pulumi_gcp/provider.py +20 -0
  117. pulumi_gcp/pubsub/subscription.py +46 -6
  118. pulumi_gcp/pubsub/topic.py +36 -0
  119. pulumi_gcp/pulumi-plugin.json +1 -1
  120. pulumi_gcp/redis/cluster.py +70 -0
  121. pulumi_gcp/redis/get_cluster.py +12 -1
  122. pulumi_gcp/redis/instance.py +8 -12
  123. pulumi_gcp/secretmanager/get_regional_secret.py +12 -1
  124. pulumi_gcp/secretmanager/get_secret.py +12 -1
  125. pulumi_gcp/secretmanager/outputs.py +30 -0
  126. pulumi_gcp/secretmanager/regional_secret.py +61 -0
  127. pulumi_gcp/secretmanager/secret.py +61 -0
  128. pulumi_gcp/securesourcemanager/branch_rule.py +16 -8
  129. pulumi_gcp/securesourcemanager/instance.py +112 -4
  130. pulumi_gcp/securesourcemanager/repository.py +112 -8
  131. pulumi_gcp/serviceaccount/get_account_key.py +1 -0
  132. pulumi_gcp/sql/_inputs.py +6 -6
  133. pulumi_gcp/sql/database.py +0 -12
  134. pulumi_gcp/sql/outputs.py +4 -4
  135. pulumi_gcp/storage/__init__.py +2 -0
  136. pulumi_gcp/storage/_inputs.py +451 -0
  137. pulumi_gcp/storage/bucket.py +7 -7
  138. pulumi_gcp/storage/bucket_object.py +34 -0
  139. pulumi_gcp/storage/get_bucket_object.py +12 -1
  140. pulumi_gcp/storage/get_bucket_object_content.py +12 -1
  141. pulumi_gcp/storage/get_insights_dataset_config.py +363 -0
  142. pulumi_gcp/storage/insights_dataset_config.py +1280 -0
  143. pulumi_gcp/storage/outputs.py +619 -0
  144. pulumi_gcp/vertex/__init__.py +1 -0
  145. pulumi_gcp/vertex/_inputs.py +3646 -3
  146. pulumi_gcp/vertex/ai_endpoint.py +4 -4
  147. pulumi_gcp/vertex/ai_endpoint_with_model_garden_deployment.py +940 -0
  148. pulumi_gcp/vertex/ai_feature_online_store_featureview.py +4 -4
  149. pulumi_gcp/vertex/outputs.py +2609 -2
  150. pulumi_gcp/vmwareengine/network_peering.py +7 -7
  151. pulumi_gcp/workbench/_inputs.py +118 -0
  152. pulumi_gcp/workbench/instance.py +171 -2
  153. pulumi_gcp/workbench/outputs.py +91 -0
  154. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.40.0a1754951145.dist-info}/METADATA +1 -1
  155. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.40.0a1754951145.dist-info}/RECORD +157 -138
  156. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.40.0a1754951145.dist-info}/WHEEL +0 -0
  157. {pulumi_gcp-8.40.0a1754721948.dist-info → pulumi_gcp-8.40.0a1754951145.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,940 @@
1
+ # coding=utf-8
2
+ # *** WARNING: this file was generated by pulumi-language-python. ***
3
+ # *** Do not edit by hand unless you're certain you know what you are doing! ***
4
+
5
+ import builtins as _builtins
6
+ import warnings
7
+ import sys
8
+ import pulumi
9
+ import pulumi.runtime
10
+ from typing import Any, Mapping, Optional, Sequence, Union, overload
11
+ if sys.version_info >= (3, 11):
12
+ from typing import NotRequired, TypedDict, TypeAlias
13
+ else:
14
+ from typing_extensions import NotRequired, TypedDict, TypeAlias
15
+ from .. import _utilities
16
+ from . import outputs
17
+ from ._inputs import *
18
+
19
+ __all__ = ['AiEndpointWithModelGardenDeploymentArgs', 'AiEndpointWithModelGardenDeployment']
20
+
21
@pulumi.input_type
class AiEndpointWithModelGardenDeploymentArgs:
    def __init__(__self__, *,
                 location: pulumi.Input[_builtins.str],
                 deploy_config: Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentDeployConfigArgs']] = None,
                 endpoint_config: Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentEndpointConfigArgs']] = None,
                 hugging_face_model_id: Optional[pulumi.Input[_builtins.str]] = None,
                 model_config: Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentModelConfigArgs']] = None,
                 project: Optional[pulumi.Input[_builtins.str]] = None,
                 publisher_model_name: Optional[pulumi.Input[_builtins.str]] = None):
        """
        The set of arguments for constructing a AiEndpointWithModelGardenDeployment resource.

        :param pulumi.Input[_builtins.str] location: Resource ID segment making up resource `location`. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
        :param pulumi.Input['AiEndpointWithModelGardenDeploymentDeployConfigArgs'] deploy_config: The deploy config to use for the deployment.
               Structure is documented below.
        :param pulumi.Input['AiEndpointWithModelGardenDeploymentEndpointConfigArgs'] endpoint_config: The endpoint config to use for the deployment.
               Structure is documented below.
        :param pulumi.Input[_builtins.str] hugging_face_model_id: The Hugging Face model to deploy.
               Format: Hugging Face model ID like `google/gemma-2-2b-it`.
        :param pulumi.Input['AiEndpointWithModelGardenDeploymentModelConfigArgs'] model_config: The model config to use for the deployment.
               Structure is documented below.
        :param pulumi.Input[_builtins.str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[_builtins.str] publisher_model_name: The Model Garden model to deploy.
               Format:
               `publishers/{publisher}/models/{publisher_model}@{version_id}`, or
               `publishers/hf-{hugging-face-author}/models/{hugging-face-model-name}@001`.
        """
        # `location` is the only required argument; it is always stored.
        pulumi.set(__self__, "location", location)
        # Forward only the optional arguments the caller actually supplied,
        # in the same order the generator would set them.
        for _attr, _value in (
            ("deploy_config", deploy_config),
            ("endpoint_config", endpoint_config),
            ("hugging_face_model_id", hugging_face_model_id),
            ("model_config", model_config),
            ("project", project),
            ("publisher_model_name", publisher_model_name),
        ):
            if _value is not None:
                pulumi.set(__self__, _attr, _value)

    @_builtins.property
    @pulumi.getter
    def location(self) -> pulumi.Input[_builtins.str]:
        """
        Resource ID segment making up resource `location`. It identifies the
        resource within its parent collection as described in
        https://google.aip.dev/122.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: pulumi.Input[_builtins.str]):
        pulumi.set(self, "location", value)

    @_builtins.property
    @pulumi.getter(name="deployConfig")
    def deploy_config(self) -> Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentDeployConfigArgs']]:
        """
        The deploy config to use for the deployment.
        Structure is documented below.
        """
        return pulumi.get(self, "deploy_config")

    @deploy_config.setter
    def deploy_config(self, value: Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentDeployConfigArgs']]):
        pulumi.set(self, "deploy_config", value)

    @_builtins.property
    @pulumi.getter(name="endpointConfig")
    def endpoint_config(self) -> Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentEndpointConfigArgs']]:
        """
        The endpoint config to use for the deployment.
        Structure is documented below.
        """
        return pulumi.get(self, "endpoint_config")

    @endpoint_config.setter
    def endpoint_config(self, value: Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentEndpointConfigArgs']]):
        pulumi.set(self, "endpoint_config", value)

    @_builtins.property
    @pulumi.getter(name="huggingFaceModelId")
    def hugging_face_model_id(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        The Hugging Face model to deploy.
        Format: Hugging Face model ID like `google/gemma-2-2b-it`.
        """
        return pulumi.get(self, "hugging_face_model_id")

    @hugging_face_model_id.setter
    def hugging_face_model_id(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "hugging_face_model_id", value)

    @_builtins.property
    @pulumi.getter(name="modelConfig")
    def model_config(self) -> Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentModelConfigArgs']]:
        """
        The model config to use for the deployment.
        Structure is documented below.
        """
        return pulumi.get(self, "model_config")

    @model_config.setter
    def model_config(self, value: Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentModelConfigArgs']]):
        pulumi.set(self, "model_config", value)

    @_builtins.property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "project", value)

    @_builtins.property
    @pulumi.getter(name="publisherModelName")
    def publisher_model_name(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        The Model Garden model to deploy.
        Format:
        `publishers/{publisher}/models/{publisher_model}@{version_id}`, or
        `publishers/hf-{hugging-face-author}/models/{hugging-face-model-name}@001`.
        """
        return pulumi.get(self, "publisher_model_name")

    @publisher_model_name.setter
    def publisher_model_name(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "publisher_model_name", value)
154
+
155
+
156
@pulumi.input_type
class _AiEndpointWithModelGardenDeploymentState:
    def __init__(__self__, *,
                 deploy_config: Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentDeployConfigArgs']] = None,
                 deployed_model_display_name: Optional[pulumi.Input[_builtins.str]] = None,
                 deployed_model_id: Optional[pulumi.Input[_builtins.str]] = None,
                 endpoint: Optional[pulumi.Input[_builtins.str]] = None,
                 endpoint_config: Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentEndpointConfigArgs']] = None,
                 hugging_face_model_id: Optional[pulumi.Input[_builtins.str]] = None,
                 location: Optional[pulumi.Input[_builtins.str]] = None,
                 model_config: Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentModelConfigArgs']] = None,
                 project: Optional[pulumi.Input[_builtins.str]] = None,
                 publisher_model_name: Optional[pulumi.Input[_builtins.str]] = None):
        """
        Input properties used for looking up and filtering AiEndpointWithModelGardenDeployment resources.

        :param pulumi.Input['AiEndpointWithModelGardenDeploymentDeployConfigArgs'] deploy_config: The deploy config to use for the deployment.
               Structure is documented below.
        :param pulumi.Input[_builtins.str] deployed_model_display_name: Output only. The display name assigned to the model deployed to the endpoint.
               This is not required to delete the resource but is used for debug logging.
        :param pulumi.Input[_builtins.str] deployed_model_id: Output only. The unique numeric ID that Vertex AI assigns to the model at the time it is deployed to the endpoint.
               It is required to undeploy the model from the endpoint during resource deletion as described in
               https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.endpoints/undeployModel.
        :param pulumi.Input[_builtins.str] endpoint: Resource ID segment making up resource `endpoint`. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
        :param pulumi.Input['AiEndpointWithModelGardenDeploymentEndpointConfigArgs'] endpoint_config: The endpoint config to use for the deployment.
               Structure is documented below.
        :param pulumi.Input[_builtins.str] hugging_face_model_id: The Hugging Face model to deploy.
               Format: Hugging Face model ID like `google/gemma-2-2b-it`.
        :param pulumi.Input[_builtins.str] location: Resource ID segment making up resource `location`. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
        :param pulumi.Input['AiEndpointWithModelGardenDeploymentModelConfigArgs'] model_config: The model config to use for the deployment.
               Structure is documented below.
        :param pulumi.Input[_builtins.str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[_builtins.str] publisher_model_name: The Model Garden model to deploy.
               Format:
               `publishers/{publisher}/models/{publisher_model}@{version_id}`, or
               `publishers/hf-{hugging-face-author}/models/{hugging-face-model-name}@001`.
        """
        # Every state property is optional: store only those actually
        # supplied, in the same order the generator would set them.
        for _attr, _value in (
            ("deploy_config", deploy_config),
            ("deployed_model_display_name", deployed_model_display_name),
            ("deployed_model_id", deployed_model_id),
            ("endpoint", endpoint),
            ("endpoint_config", endpoint_config),
            ("hugging_face_model_id", hugging_face_model_id),
            ("location", location),
            ("model_config", model_config),
            ("project", project),
            ("publisher_model_name", publisher_model_name),
        ):
            if _value is not None:
                pulumi.set(__self__, _attr, _value)

    @_builtins.property
    @pulumi.getter(name="deployConfig")
    def deploy_config(self) -> Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentDeployConfigArgs']]:
        """
        The deploy config to use for the deployment.
        Structure is documented below.
        """
        return pulumi.get(self, "deploy_config")

    @deploy_config.setter
    def deploy_config(self, value: Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentDeployConfigArgs']]):
        pulumi.set(self, "deploy_config", value)

    @_builtins.property
    @pulumi.getter(name="deployedModelDisplayName")
    def deployed_model_display_name(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        Output only. The display name assigned to the model deployed to the endpoint.
        This is not required to delete the resource but is used for debug logging.
        """
        return pulumi.get(self, "deployed_model_display_name")

    @deployed_model_display_name.setter
    def deployed_model_display_name(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "deployed_model_display_name", value)

    @_builtins.property
    @pulumi.getter(name="deployedModelId")
    def deployed_model_id(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        Output only. The unique numeric ID that Vertex AI assigns to the model at the time it is deployed to the endpoint.
        It is required to undeploy the model from the endpoint during resource deletion as described in
        https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.endpoints/undeployModel.
        """
        return pulumi.get(self, "deployed_model_id")

    @deployed_model_id.setter
    def deployed_model_id(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "deployed_model_id", value)

    @_builtins.property
    @pulumi.getter
    def endpoint(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        Resource ID segment making up resource `endpoint`. It identifies the
        resource within its parent collection as described in
        https://google.aip.dev/122.
        """
        return pulumi.get(self, "endpoint")

    @endpoint.setter
    def endpoint(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "endpoint", value)

    @_builtins.property
    @pulumi.getter(name="endpointConfig")
    def endpoint_config(self) -> Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentEndpointConfigArgs']]:
        """
        The endpoint config to use for the deployment.
        Structure is documented below.
        """
        return pulumi.get(self, "endpoint_config")

    @endpoint_config.setter
    def endpoint_config(self, value: Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentEndpointConfigArgs']]):
        pulumi.set(self, "endpoint_config", value)

    @_builtins.property
    @pulumi.getter(name="huggingFaceModelId")
    def hugging_face_model_id(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        The Hugging Face model to deploy.
        Format: Hugging Face model ID like `google/gemma-2-2b-it`.
        """
        return pulumi.get(self, "hugging_face_model_id")

    @hugging_face_model_id.setter
    def hugging_face_model_id(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "hugging_face_model_id", value)

    @_builtins.property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        Resource ID segment making up resource `location`. It identifies the
        resource within its parent collection as described in
        https://google.aip.dev/122.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "location", value)

    @_builtins.property
    @pulumi.getter(name="modelConfig")
    def model_config(self) -> Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentModelConfigArgs']]:
        """
        The model config to use for the deployment.
        Structure is documented below.
        """
        return pulumi.get(self, "model_config")

    @model_config.setter
    def model_config(self, value: Optional[pulumi.Input['AiEndpointWithModelGardenDeploymentModelConfigArgs']]):
        pulumi.set(self, "model_config", value)

    @_builtins.property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "project", value)

    @_builtins.property
    @pulumi.getter(name="publisherModelName")
    def publisher_model_name(self) -> Optional[pulumi.Input[_builtins.str]]:
        """
        The Model Garden model to deploy.
        Format:
        `publishers/{publisher}/models/{publisher_model}@{version_id}`, or
        `publishers/hf-{hugging-face-author}/models/{hugging-face-model-name}@001`.
        """
        return pulumi.get(self, "publisher_model_name")

    @publisher_model_name.setter
    def publisher_model_name(self, value: Optional[pulumi.Input[_builtins.str]]):
        pulumi.set(self, "publisher_model_name", value)
344
+
345
+
346
+ @pulumi.type_token("gcp:vertex/aiEndpointWithModelGardenDeployment:AiEndpointWithModelGardenDeployment")
347
+ class AiEndpointWithModelGardenDeployment(pulumi.CustomResource):
348
+ @overload
349
+ def __init__(__self__,
350
+ resource_name: str,
351
+ opts: Optional[pulumi.ResourceOptions] = None,
352
+ deploy_config: Optional[pulumi.Input[Union['AiEndpointWithModelGardenDeploymentDeployConfigArgs', 'AiEndpointWithModelGardenDeploymentDeployConfigArgsDict']]] = None,
353
+ endpoint_config: Optional[pulumi.Input[Union['AiEndpointWithModelGardenDeploymentEndpointConfigArgs', 'AiEndpointWithModelGardenDeploymentEndpointConfigArgsDict']]] = None,
354
+ hugging_face_model_id: Optional[pulumi.Input[_builtins.str]] = None,
355
+ location: Optional[pulumi.Input[_builtins.str]] = None,
356
+ model_config: Optional[pulumi.Input[Union['AiEndpointWithModelGardenDeploymentModelConfigArgs', 'AiEndpointWithModelGardenDeploymentModelConfigArgsDict']]] = None,
357
+ project: Optional[pulumi.Input[_builtins.str]] = None,
358
+ publisher_model_name: Optional[pulumi.Input[_builtins.str]] = None,
359
+ __props__=None):
360
+ """
361
+ Create an Endpoint and deploy a Model Garden model to it.
362
+
363
+ To get more information about EndpointWithModelGardenDeployment, see:
364
+
365
+ * [API documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations/deploy)
366
+ * How-to Guides
367
+ * [Overview of Model Garden](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/explore-models)
368
+ * [Overview of self-deployed models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/self-deployed-models)
369
+ * [Use models in Model Garden](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/use-models)
370
+
371
+ ## Example Usage
372
+
373
+ ### Vertex Ai Deploy Basic
374
+
375
+ ```python
376
+ import pulumi
377
+ import pulumi_gcp as gcp
378
+
379
+ deploy = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy",
380
+ publisher_model_name="publishers/google/models/paligemma@paligemma-224-float32",
381
+ location="us-central1",
382
+ model_config={
383
+ "accept_eula": True,
384
+ })
385
+ ```
386
+ ### Vertex Ai Deploy Huggingface Model
387
+
388
+ ```python
389
+ import pulumi
390
+ import pulumi_gcp as gcp
391
+
392
+ deploy = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy",
393
+ hugging_face_model_id="Qwen/Qwen3-0.6B",
394
+ location="us-central1",
395
+ model_config={
396
+ "accept_eula": True,
397
+ })
398
+ ```
399
+ ### Vertex Ai Deploy With Configs
400
+
401
+ ```python
402
+ import pulumi
403
+ import pulumi_gcp as gcp
404
+
405
+ deploy = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy",
406
+ publisher_model_name="publishers/google/models/paligemma@paligemma-224-float32",
407
+ location="us-central1",
408
+ model_config={
409
+ "accept_eula": True,
410
+ },
411
+ deploy_config={
412
+ "dedicated_resources": {
413
+ "machine_spec": {
414
+ "machine_type": "g2-standard-16",
415
+ "accelerator_type": "NVIDIA_L4",
416
+ "accelerator_count": 1,
417
+ },
418
+ "min_replica_count": 1,
419
+ },
420
+ })
421
+ ```
422
+ ### Vertex Ai Deploy Multiple Models In Parallel
423
+
424
+ ```python
425
+ import pulumi
426
+ import pulumi_gcp as gcp
427
+
428
+ deploy_gemma_11_2b_it = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy-gemma-1_1-2b-it",
429
+ publisher_model_name="publishers/google/models/gemma@gemma-1.1-2b-it",
430
+ location="us-central1",
431
+ model_config={
432
+ "accept_eula": True,
433
+ },
434
+ deploy_config={
435
+ "dedicated_resources": {
436
+ "machine_spec": {
437
+ "machine_type": "g2-standard-12",
438
+ "accelerator_type": "us-central1",
439
+ "accelerator_count": 1,
440
+ },
441
+ "min_replica_count": 1,
442
+ },
443
+ })
444
+ deploy_qwen3_06b = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy-qwen3-0_6b",
445
+ hugging_face_model_id="Qwen/Qwen3-0.6B",
446
+ location="us-central1",
447
+ model_config={
448
+ "accept_eula": True,
449
+ },
450
+ deploy_config={
451
+ "dedicated_resources": {
452
+ "machine_spec": {
453
+ "machine_type": "g2-standard-12",
454
+ "accelerator_type": "NVIDIA_L4",
455
+ "accelerator_count": 1,
456
+ },
457
+ "min_replica_count": 1,
458
+ },
459
+ })
460
+ deploy_llama_32_1b = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy-llama-3_2-1b",
461
+ publisher_model_name="publishers/meta/models/llama3-2@llama-3.2-1b",
462
+ location="us-central1",
463
+ model_config={
464
+ "accept_eula": True,
465
+ },
466
+ deploy_config={
467
+ "dedicated_resources": {
468
+ "machine_spec": {
469
+ "machine_type": "g2-standard-12",
470
+ "accelerator_type": "NVIDIA_L4",
471
+ "accelerator_count": 1,
472
+ },
473
+ "min_replica_count": 1,
474
+ },
475
+ })
476
+ ```
477
+ ### Vertex Ai Deploy Multiple Models In Sequence
478
+
479
+ ```python
480
+ import pulumi
481
+ import pulumi_gcp as gcp
482
+
483
+ deploy_gemma_11_2b_it = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy-gemma-1_1-2b-it",
484
+ publisher_model_name="publishers/google/models/gemma@gemma-1.1-2b-it",
485
+ location="us-central1",
486
+ model_config={
487
+ "accept_eula": True,
488
+ },
489
+ deploy_config={
490
+ "dedicated_resources": {
491
+ "machine_spec": {
492
+ "machine_type": "g2-standard-12",
493
+ "accelerator_type": "NVIDIA_L4",
494
+ "accelerator_count": 1,
495
+ },
496
+ "min_replica_count": 1,
497
+ },
498
+ })
499
+ deploy_qwen3_06b = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy-qwen3-0_6b",
500
+ hugging_face_model_id="Qwen/Qwen3-0.6B",
501
+ location="us-central1",
502
+ model_config={
503
+ "accept_eula": True,
504
+ },
505
+ deploy_config={
506
+ "dedicated_resources": {
507
+ "machine_spec": {
508
+ "machine_type": "g2-standard-12",
509
+ "accelerator_type": "NVIDIA_L4",
510
+ "accelerator_count": 1,
511
+ },
512
+ "min_replica_count": 1,
513
+ },
514
+ },
515
+ opts = pulumi.ResourceOptions(depends_on=[deploy_gemma_11_2b_it]))
516
+ deploy_llama_32_1b = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy-llama-3_2-1b",
517
+ publisher_model_name="publishers/meta/models/llama3-2@llama-3.2-1b",
518
+ location="us-central1",
519
+ model_config={
520
+ "accept_eula": True,
521
+ },
522
+ deploy_config={
523
+ "dedicated_resources": {
524
+ "machine_spec": {
525
+ "machine_type": "g2-standard-12",
526
+ "accelerator_type": "NVIDIA_L4",
527
+ "accelerator_count": 1,
528
+ },
529
+ "min_replica_count": 1,
530
+ },
531
+ },
532
+ opts = pulumi.ResourceOptions(depends_on=[deploy_qwen3_06b]))
533
+ ```
534
+
535
+ ## Import
536
+
537
+ This resource does not support import.
538
+
539
+ :param str resource_name: The name of the resource.
540
+ :param pulumi.ResourceOptions opts: Options for the resource.
541
+ :param pulumi.Input[Union['AiEndpointWithModelGardenDeploymentDeployConfigArgs', 'AiEndpointWithModelGardenDeploymentDeployConfigArgsDict']] deploy_config: The deploy config to use for the deployment.
542
+ Structure is documented below.
543
+ :param pulumi.Input[Union['AiEndpointWithModelGardenDeploymentEndpointConfigArgs', 'AiEndpointWithModelGardenDeploymentEndpointConfigArgsDict']] endpoint_config: The endpoint config to use for the deployment.
544
+ Structure is documented below.
545
+ :param pulumi.Input[_builtins.str] hugging_face_model_id: The Hugging Face model to deploy.
546
+ Format: Hugging Face model ID like `google/gemma-2-2b-it`.
547
+ :param pulumi.Input[_builtins.str] location: Resource ID segment making up resource `location`. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
548
+ :param pulumi.Input[Union['AiEndpointWithModelGardenDeploymentModelConfigArgs', 'AiEndpointWithModelGardenDeploymentModelConfigArgsDict']] model_config: The model config to use for the deployment.
549
+ Structure is documented below.
550
+ :param pulumi.Input[_builtins.str] project: The ID of the project in which the resource belongs.
551
+ If it is not provided, the provider project is used.
552
+ :param pulumi.Input[_builtins.str] publisher_model_name: The Model Garden model to deploy.
553
+ Format:
554
+ `publishers/{publisher}/models/{publisher_model}@{version_id}`, or
555
+ `publishers/hf-{hugging-face-author}/models/{hugging-face-model-name}@001`.
556
+ """
557
+ ...
558
+ @overload
559
+ def __init__(__self__,
560
+ resource_name: str,
561
+ args: AiEndpointWithModelGardenDeploymentArgs,
562
+ opts: Optional[pulumi.ResourceOptions] = None):
563
+ """
564
+ Create an Endpoint and deploy a Model Garden model to it.
565
+
566
+ To get more information about EndpointWithModelGardenDeployment, see:
567
+
568
+ * [API documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations/deploy)
569
+ * How-to Guides
570
+ * [Overview of Model Garden](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/explore-models)
571
+ * [Overview of self-deployed models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/self-deployed-models)
572
+ * [Use models in Model Garden](https://cloud.google.com/vertex-ai/generative-ai/docs/model-garden/use-models)
573
+
574
+ ## Example Usage
575
+
576
+ ### Vertex Ai Deploy Basic
577
+
578
+ ```python
579
+ import pulumi
580
+ import pulumi_gcp as gcp
581
+
582
+ deploy = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy",
583
+ publisher_model_name="publishers/google/models/paligemma@paligemma-224-float32",
584
+ location="us-central1",
585
+ model_config={
586
+ "accept_eula": True,
587
+ })
588
+ ```
589
+ ### Vertex Ai Deploy Huggingface Model
590
+
591
+ ```python
592
+ import pulumi
593
+ import pulumi_gcp as gcp
594
+
595
+ deploy = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy",
596
+ hugging_face_model_id="Qwen/Qwen3-0.6B",
597
+ location="us-central1",
598
+ model_config={
599
+ "accept_eula": True,
600
+ })
601
+ ```
602
+ ### Vertex Ai Deploy With Configs
603
+
604
+ ```python
605
+ import pulumi
606
+ import pulumi_gcp as gcp
607
+
608
+ deploy = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy",
609
+ publisher_model_name="publishers/google/models/paligemma@paligemma-224-float32",
610
+ location="us-central1",
611
+ model_config={
612
+ "accept_eula": True,
613
+ },
614
+ deploy_config={
615
+ "dedicated_resources": {
616
+ "machine_spec": {
617
+ "machine_type": "g2-standard-16",
618
+ "accelerator_type": "NVIDIA_L4",
619
+ "accelerator_count": 1,
620
+ },
621
+ "min_replica_count": 1,
622
+ },
623
+ })
624
+ ```
625
+ ### Vertex Ai Deploy Multiple Models In Parallel
626
+
627
+ ```python
628
+ import pulumi
629
+ import pulumi_gcp as gcp
630
+
631
+ deploy_gemma_11_2b_it = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy-gemma-1_1-2b-it",
632
+ publisher_model_name="publishers/google/models/gemma@gemma-1.1-2b-it",
633
+ location="us-central1",
634
+ model_config={
635
+ "accept_eula": True,
636
+ },
637
+ deploy_config={
638
+ "dedicated_resources": {
639
+ "machine_spec": {
640
+ "machine_type": "g2-standard-12",
641
+ "accelerator_type": "us-central1",
642
+ "accelerator_count": 1,
643
+ },
644
+ "min_replica_count": 1,
645
+ },
646
+ })
647
+ deploy_qwen3_06b = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy-qwen3-0_6b",
648
+ hugging_face_model_id="Qwen/Qwen3-0.6B",
649
+ location="us-central1",
650
+ model_config={
651
+ "accept_eula": True,
652
+ },
653
+ deploy_config={
654
+ "dedicated_resources": {
655
+ "machine_spec": {
656
+ "machine_type": "g2-standard-12",
657
+ "accelerator_type": "NVIDIA_L4",
658
+ "accelerator_count": 1,
659
+ },
660
+ "min_replica_count": 1,
661
+ },
662
+ })
663
+ deploy_llama_32_1b = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy-llama-3_2-1b",
664
+ publisher_model_name="publishers/meta/models/llama3-2@llama-3.2-1b",
665
+ location="us-central1",
666
+ model_config={
667
+ "accept_eula": True,
668
+ },
669
+ deploy_config={
670
+ "dedicated_resources": {
671
+ "machine_spec": {
672
+ "machine_type": "g2-standard-12",
673
+ "accelerator_type": "NVIDIA_L4",
674
+ "accelerator_count": 1,
675
+ },
676
+ "min_replica_count": 1,
677
+ },
678
+ })
679
+ ```
680
+ ### Vertex Ai Deploy Multiple Models In Sequence
681
+
682
+ ```python
683
+ import pulumi
684
+ import pulumi_gcp as gcp
685
+
686
+ deploy_gemma_11_2b_it = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy-gemma-1_1-2b-it",
687
+ publisher_model_name="publishers/google/models/gemma@gemma-1.1-2b-it",
688
+ location="us-central1",
689
+ model_config={
690
+ "accept_eula": True,
691
+ },
692
+ deploy_config={
693
+ "dedicated_resources": {
694
+ "machine_spec": {
695
+ "machine_type": "g2-standard-12",
696
+ "accelerator_type": "NVIDIA_L4",
697
+ "accelerator_count": 1,
698
+ },
699
+ "min_replica_count": 1,
700
+ },
701
+ })
702
+ deploy_qwen3_06b = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy-qwen3-0_6b",
703
+ hugging_face_model_id="Qwen/Qwen3-0.6B",
704
+ location="us-central1",
705
+ model_config={
706
+ "accept_eula": True,
707
+ },
708
+ deploy_config={
709
+ "dedicated_resources": {
710
+ "machine_spec": {
711
+ "machine_type": "g2-standard-12",
712
+ "accelerator_type": "NVIDIA_L4",
713
+ "accelerator_count": 1,
714
+ },
715
+ "min_replica_count": 1,
716
+ },
717
+ },
718
+ opts = pulumi.ResourceOptions(depends_on=[deploy_gemma_11_2b_it]))
719
+ deploy_llama_32_1b = gcp.vertex.AiEndpointWithModelGardenDeployment("deploy-llama-3_2-1b",
720
+ publisher_model_name="publishers/meta/models/llama3-2@llama-3.2-1b",
721
+ location="us-central1",
722
+ model_config={
723
+ "accept_eula": True,
724
+ },
725
+ deploy_config={
726
+ "dedicated_resources": {
727
+ "machine_spec": {
728
+ "machine_type": "g2-standard-12",
729
+ "accelerator_type": "NVIDIA_L4",
730
+ "accelerator_count": 1,
731
+ },
732
+ "min_replica_count": 1,
733
+ },
734
+ },
735
+ opts = pulumi.ResourceOptions(depends_on=[deploy_qwen3_06b]))
736
+ ```
737
+
738
+ ## Import
739
+
740
+ This resource does not support import.
741
+
742
+ :param str resource_name: The name of the resource.
743
+ :param AiEndpointWithModelGardenDeploymentArgs args: The arguments to use to populate this resource's properties.
744
+ :param pulumi.ResourceOptions opts: Options for the resource.
745
+ """
746
+ ...
747
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two typed @overload signatures above.
        # get_resource_args_opts inspects *args/**kwargs and returns either a
        # populated AiEndpointWithModelGardenDeploymentArgs bundle (args-object
        # calling convention) or None (plain keyword-argument convention),
        # together with the resolved ResourceOptions.
        resource_args, opts = _utilities.get_resource_args_opts(AiEndpointWithModelGardenDeploymentArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-object form: expand the bundle's fields into keyword args.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            # Keyword form: forward the original call unchanged.
            __self__._internal_init(resource_name, *args, **kwargs)
753
+
754
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 deploy_config: Optional[pulumi.Input[Union['AiEndpointWithModelGardenDeploymentDeployConfigArgs', 'AiEndpointWithModelGardenDeploymentDeployConfigArgsDict']]] = None,
                 endpoint_config: Optional[pulumi.Input[Union['AiEndpointWithModelGardenDeploymentEndpointConfigArgs', 'AiEndpointWithModelGardenDeploymentEndpointConfigArgsDict']]] = None,
                 hugging_face_model_id: Optional[pulumi.Input[_builtins.str]] = None,
                 location: Optional[pulumi.Input[_builtins.str]] = None,
                 model_config: Optional[pulumi.Input[Union['AiEndpointWithModelGardenDeploymentModelConfigArgs', 'AiEndpointWithModelGardenDeploymentModelConfigArgsDict']]] = None,
                 project: Optional[pulumi.Input[_builtins.str]] = None,
                 publisher_model_name: Optional[pulumi.Input[_builtins.str]] = None,
                 __props__=None):
        """Shared initializer behind both __init__ overloads: validates input,
        builds the property bag, and registers the resource with the engine."""
        # Layer provider-wide option defaults under any caller-supplied opts.
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            # No id => this is a create, not a lookup; __props__ is reserved
            # for the get() path where it carries pre-resolved state.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # __new__ bypasses the args-class __init__; fields are assigned
            # directly into __dict__ below.
            __props__ = AiEndpointWithModelGardenDeploymentArgs.__new__(AiEndpointWithModelGardenDeploymentArgs)

            __props__.__dict__["deploy_config"] = deploy_config
            __props__.__dict__["endpoint_config"] = endpoint_config
            __props__.__dict__["hugging_face_model_id"] = hugging_face_model_id
            # location is the only required input (unless rehydrating by URN).
            if location is None and not opts.urn:
                raise TypeError("Missing required property 'location'")
            __props__.__dict__["location"] = location
            __props__.__dict__["model_config"] = model_config
            __props__.__dict__["project"] = project
            __props__.__dict__["publisher_model_name"] = publisher_model_name
            # Output-only properties start as None; the engine fills them in.
            __props__.__dict__["deployed_model_display_name"] = None
            __props__.__dict__["deployed_model_id"] = None
            __props__.__dict__["endpoint"] = None
        # Register with the Pulumi engine under the provider's type token.
        super(AiEndpointWithModelGardenDeployment, __self__).__init__(
            'gcp:vertex/aiEndpointWithModelGardenDeployment:AiEndpointWithModelGardenDeployment',
            resource_name,
            __props__,
            opts)
790
+
791
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            deploy_config: Optional[pulumi.Input[Union['AiEndpointWithModelGardenDeploymentDeployConfigArgs', 'AiEndpointWithModelGardenDeploymentDeployConfigArgsDict']]] = None,
            deployed_model_display_name: Optional[pulumi.Input[_builtins.str]] = None,
            deployed_model_id: Optional[pulumi.Input[_builtins.str]] = None,
            endpoint: Optional[pulumi.Input[_builtins.str]] = None,
            endpoint_config: Optional[pulumi.Input[Union['AiEndpointWithModelGardenDeploymentEndpointConfigArgs', 'AiEndpointWithModelGardenDeploymentEndpointConfigArgsDict']]] = None,
            hugging_face_model_id: Optional[pulumi.Input[_builtins.str]] = None,
            location: Optional[pulumi.Input[_builtins.str]] = None,
            model_config: Optional[pulumi.Input[Union['AiEndpointWithModelGardenDeploymentModelConfigArgs', 'AiEndpointWithModelGardenDeploymentModelConfigArgsDict']]] = None,
            project: Optional[pulumi.Input[_builtins.str]] = None,
            publisher_model_name: Optional[pulumi.Input[_builtins.str]] = None) -> 'AiEndpointWithModelGardenDeployment':
        """
        Get an existing AiEndpointWithModelGardenDeployment resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Union['AiEndpointWithModelGardenDeploymentDeployConfigArgs', 'AiEndpointWithModelGardenDeploymentDeployConfigArgsDict']] deploy_config: The deploy config to use for the deployment.
               Structure is documented below.
        :param pulumi.Input[_builtins.str] deployed_model_display_name: Output only. The display name assigned to the model deployed to the endpoint.
               This is not required to delete the resource but is used for debug logging.
        :param pulumi.Input[_builtins.str] deployed_model_id: Output only. The unique numeric ID that Vertex AI assigns to the model at the time it is deployed to the endpoint.
               It is required to undeploy the model from the endpoint during resource deletion as described in
               https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.endpoints/undeployModel.
        :param pulumi.Input[_builtins.str] endpoint: Resource ID segment making up resource `endpoint`. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
        :param pulumi.Input[Union['AiEndpointWithModelGardenDeploymentEndpointConfigArgs', 'AiEndpointWithModelGardenDeploymentEndpointConfigArgsDict']] endpoint_config: The endpoint config to use for the deployment.
               Structure is documented below.
        :param pulumi.Input[_builtins.str] hugging_face_model_id: The Hugging Face model to deploy.
               Format: Hugging Face model ID like `google/gemma-2-2b-it`.
        :param pulumi.Input[_builtins.str] location: Resource ID segment making up resource `location`. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
        :param pulumi.Input[Union['AiEndpointWithModelGardenDeploymentModelConfigArgs', 'AiEndpointWithModelGardenDeploymentModelConfigArgsDict']] model_config: The model config to use for the deployment.
               Structure is documented below.
        :param pulumi.Input[_builtins.str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[_builtins.str] publisher_model_name: The Model Garden model to deploy.
               Format:
               `publishers/{publisher}/models/{publisher_model}@{version_id}`, or
               `publishers/hf-{hugging-face-author}/models/{hugging-face-model-name}@001`.
        """
        # Binding the id into opts tells the engine to read existing state
        # rather than create a new resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # Build a state bag directly (bypassing __init__) from the caller's
        # qualifying properties.
        __props__ = _AiEndpointWithModelGardenDeploymentState.__new__(_AiEndpointWithModelGardenDeploymentState)

        __props__.__dict__["deploy_config"] = deploy_config
        __props__.__dict__["deployed_model_display_name"] = deployed_model_display_name
        __props__.__dict__["deployed_model_id"] = deployed_model_id
        __props__.__dict__["endpoint"] = endpoint
        __props__.__dict__["endpoint_config"] = endpoint_config
        __props__.__dict__["hugging_face_model_id"] = hugging_face_model_id
        __props__.__dict__["location"] = location
        __props__.__dict__["model_config"] = model_config
        __props__.__dict__["project"] = project
        __props__.__dict__["publisher_model_name"] = publisher_model_name
        # _internal_init routes this through the get() path because opts.id is set.
        return AiEndpointWithModelGardenDeployment(resource_name, opts=opts, __props__=__props__)
849
+
850
    @_builtins.property
    @pulumi.getter(name="deployConfig")
    def deploy_config(self) -> pulumi.Output[Optional['outputs.AiEndpointWithModelGardenDeploymentDeployConfig']]:
        """
        The deploy config to use for the deployment.
        Structure is documented below.
        """
        # Read from the resource's output property bag via the Pulumi SDK.
        return pulumi.get(self, "deploy_config")
858
+
859
    @_builtins.property
    @pulumi.getter(name="deployedModelDisplayName")
    def deployed_model_display_name(self) -> pulumi.Output[_builtins.str]:
        """
        Output only. The display name assigned to the model deployed to the endpoint.
        This is not required to delete the resource but is used for debug logging.
        """
        # Output-only: populated by the engine, never set by callers.
        return pulumi.get(self, "deployed_model_display_name")
867
+
868
    @_builtins.property
    @pulumi.getter(name="deployedModelId")
    def deployed_model_id(self) -> pulumi.Output[_builtins.str]:
        """
        Output only. The unique numeric ID that Vertex AI assigns to the model at the time it is deployed to the endpoint.
        It is required to undeploy the model from the endpoint during resource deletion as described in
        https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.endpoints/undeployModel.
        """
        # Output-only: populated by the engine, never set by callers.
        return pulumi.get(self, "deployed_model_id")
877
+
878
    @_builtins.property
    @pulumi.getter
    def endpoint(self) -> pulumi.Output[_builtins.str]:
        """
        Resource ID segment making up resource `endpoint`. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
        """
        # Read from the resource's output property bag via the Pulumi SDK.
        return pulumi.get(self, "endpoint")
885
+
886
    @_builtins.property
    @pulumi.getter(name="endpointConfig")
    def endpoint_config(self) -> pulumi.Output[Optional['outputs.AiEndpointWithModelGardenDeploymentEndpointConfig']]:
        """
        The endpoint config to use for the deployment.
        Structure is documented below.
        """
        # Read from the resource's output property bag via the Pulumi SDK.
        return pulumi.get(self, "endpoint_config")
894
+
895
    @_builtins.property
    @pulumi.getter(name="huggingFaceModelId")
    def hugging_face_model_id(self) -> pulumi.Output[Optional[_builtins.str]]:
        """
        The Hugging Face model to deploy.
        Format: Hugging Face model ID like `google/gemma-2-2b-it`.
        """
        # Optional input; None when a publisher model was deployed instead.
        return pulumi.get(self, "hugging_face_model_id")
903
+
904
    @_builtins.property
    @pulumi.getter
    def location(self) -> pulumi.Output[_builtins.str]:
        """
        Resource ID segment making up resource `location`. It identifies the resource within its parent collection as described in https://google.aip.dev/122.
        """
        # Required input; validated in _internal_init.
        return pulumi.get(self, "location")
911
+
912
    @_builtins.property
    @pulumi.getter(name="modelConfig")
    def model_config(self) -> pulumi.Output[Optional['outputs.AiEndpointWithModelGardenDeploymentModelConfig']]:
        """
        The model config to use for the deployment.
        Structure is documented below.
        """
        # Read from the resource's output property bag via the Pulumi SDK.
        return pulumi.get(self, "model_config")
920
+
921
    @_builtins.property
    @pulumi.getter
    def project(self) -> pulumi.Output[_builtins.str]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        # Read from the resource's output property bag via the Pulumi SDK.
        return pulumi.get(self, "project")
929
+
930
    @_builtins.property
    @pulumi.getter(name="publisherModelName")
    def publisher_model_name(self) -> pulumi.Output[Optional[_builtins.str]]:
        """
        The Model Garden model to deploy.
        Format:
        `publishers/{publisher}/models/{publisher_model}@{version_id}`, or
        `publishers/hf-{hugging-face-author}/models/{hugging-face-model-name}@001`.
        """
        # Optional input; None when a Hugging Face model was deployed instead.
        return pulumi.get(self, "publisher_model_name")
940
+