pulumi-gcp 7.17.0__py3-none-any.whl → 7.17.0a1711607165__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91) hide show
  1. pulumi_gcp/__init__.py +0 -27
  2. pulumi_gcp/apphub/__init__.py +0 -1
  3. pulumi_gcp/apphub/outputs.py +0 -214
  4. pulumi_gcp/bigquery/_inputs.py +0 -16
  5. pulumi_gcp/bigquery/outputs.py +0 -14
  6. pulumi_gcp/bigquery/routine.py +0 -98
  7. pulumi_gcp/bigtable/_inputs.py +4 -4
  8. pulumi_gcp/bigtable/gc_policy.py +0 -8
  9. pulumi_gcp/bigtable/outputs.py +4 -4
  10. pulumi_gcp/billing/_inputs.py +4 -4
  11. pulumi_gcp/billing/outputs.py +4 -4
  12. pulumi_gcp/billing/project_info.py +4 -4
  13. pulumi_gcp/cloudfunctionsv2/_inputs.py +2 -2
  14. pulumi_gcp/cloudfunctionsv2/outputs.py +4 -4
  15. pulumi_gcp/cloudquota/__init__.py +0 -2
  16. pulumi_gcp/cloudquota/outputs.py +0 -118
  17. pulumi_gcp/cloudrunv2/service.py +7 -7
  18. pulumi_gcp/composer/_inputs.py +4 -14
  19. pulumi_gcp/composer/outputs.py +10 -22
  20. pulumi_gcp/compute/_inputs.py +44 -40
  21. pulumi_gcp/compute/autoscaler.py +14 -14
  22. pulumi_gcp/compute/interconnect_attachment.py +0 -64
  23. pulumi_gcp/compute/network_endpoint.py +0 -8
  24. pulumi_gcp/compute/network_endpoint_list.py +0 -8
  25. pulumi_gcp/compute/outputs.py +60 -62
  26. pulumi_gcp/compute/region_autoscaler.py +14 -14
  27. pulumi_gcp/compute/region_backend_service.py +0 -28
  28. pulumi_gcp/compute/target_instance.py +4 -4
  29. pulumi_gcp/config/__init__.pyi +0 -2
  30. pulumi_gcp/config/vars.py +0 -4
  31. pulumi_gcp/container/_inputs.py +16 -148
  32. pulumi_gcp/container/outputs.py +16 -148
  33. pulumi_gcp/databasemigrationservice/connection_profile.py +6 -6
  34. pulumi_gcp/dataflow/flex_template_job.py +112 -84
  35. pulumi_gcp/dataloss/_inputs.py +6 -6
  36. pulumi_gcp/dataloss/outputs.py +6 -6
  37. pulumi_gcp/dataplex/task.py +16 -16
  38. pulumi_gcp/dataproc/_inputs.py +10 -85
  39. pulumi_gcp/dataproc/get_metastore_service.py +1 -11
  40. pulumi_gcp/dataproc/metastore_service.py +0 -120
  41. pulumi_gcp/dataproc/outputs.py +10 -142
  42. pulumi_gcp/firebase/app_check_service_config.py +2 -2
  43. pulumi_gcp/firestore/backup_schedule.py +14 -14
  44. pulumi_gcp/firestore/field.py +4 -4
  45. pulumi_gcp/gkehub/membership_binding.py +6 -6
  46. pulumi_gcp/gkehub/membership_rbac_role_binding.py +4 -4
  47. pulumi_gcp/gkehub/namespace.py +4 -4
  48. pulumi_gcp/gkehub/scope_rbac_role_binding.py +4 -4
  49. pulumi_gcp/iam/_inputs.py +0 -76
  50. pulumi_gcp/iam/outputs.py +0 -76
  51. pulumi_gcp/iam/workforce_pool_provider.py +0 -35
  52. pulumi_gcp/iam/workload_identity_pool_provider.py +0 -140
  53. pulumi_gcp/iap/tunnel_dest_group.py +2 -2
  54. pulumi_gcp/kms/_inputs.py +0 -46
  55. pulumi_gcp/kms/crypto_key.py +0 -54
  56. pulumi_gcp/kms/crypto_key_version.py +0 -54
  57. pulumi_gcp/kms/get_kms_crypto_key.py +1 -11
  58. pulumi_gcp/kms/outputs.py +0 -54
  59. pulumi_gcp/logging/_inputs.py +8 -8
  60. pulumi_gcp/logging/metric.py +7 -7
  61. pulumi_gcp/logging/outputs.py +8 -8
  62. pulumi_gcp/monitoring/_inputs.py +2 -2
  63. pulumi_gcp/monitoring/outputs.py +2 -2
  64. pulumi_gcp/monitoring/slo.py +4 -4
  65. pulumi_gcp/networkservices/_inputs.py +6 -6
  66. pulumi_gcp/networkservices/outputs.py +6 -6
  67. pulumi_gcp/orgpolicy/policy.py +2 -2
  68. pulumi_gcp/provider.py +0 -20
  69. pulumi_gcp/pubsub/subscription.py +4 -4
  70. pulumi_gcp/serviceusage/consumer_quota_override.py +7 -7
  71. pulumi_gcp/sql/_inputs.py +2 -20
  72. pulumi_gcp/sql/database_instance.py +2 -2
  73. pulumi_gcp/sql/outputs.py +2 -20
  74. pulumi_gcp/storage/_inputs.py +2 -2
  75. pulumi_gcp/storage/outputs.py +2 -2
  76. pulumi_gcp/vertex/__init__.py +0 -1
  77. pulumi_gcp/vertex/_inputs.py +8 -175
  78. pulumi_gcp/vertex/outputs.py +10 -202
  79. pulumi_gcp/vpcaccess/connector.py +28 -77
  80. {pulumi_gcp-7.17.0.dist-info → pulumi_gcp-7.17.0a1711607165.dist-info}/METADATA +1 -1
  81. {pulumi_gcp-7.17.0.dist-info → pulumi_gcp-7.17.0a1711607165.dist-info}/RECORD +83 -91
  82. pulumi_gcp/apphub/get_application.py +0 -220
  83. pulumi_gcp/applicationintegration/__init__.py +0 -10
  84. pulumi_gcp/applicationintegration/_inputs.py +0 -119
  85. pulumi_gcp/applicationintegration/client.py +0 -566
  86. pulumi_gcp/applicationintegration/outputs.py +0 -122
  87. pulumi_gcp/cloudquota/_inputs.py +0 -131
  88. pulumi_gcp/cloudquota/s_quota_preference.py +0 -777
  89. pulumi_gcp/vertex/ai_deployment_resource_pool.py +0 -477
  90. {pulumi_gcp-7.17.0.dist-info → pulumi_gcp-7.17.0a1711607165.dist-info}/WHEEL +0 -0
  91. {pulumi_gcp-7.17.0.dist-info → pulumi_gcp-7.17.0a1711607165.dist-info}/top_level.txt +0 -0
pulumi_gcp/sql/outputs.py CHANGED
@@ -203,12 +203,6 @@ class DatabaseInstanceIpAddress(dict):
203
203
  ip_address: Optional[str] = None,
204
204
  time_to_retire: Optional[str] = None,
205
205
  type: Optional[str] = None):
206
- """
207
- :param str ip_address: The IPv4 address assigned.
208
- :param str time_to_retire: The time this IP address will be retired, in RFC
209
- 3339 format.
210
- :param str type: The type of this IP address.
211
- """
212
206
  if ip_address is not None:
213
207
  pulumi.set(__self__, "ip_address", ip_address)
214
208
  if time_to_retire is not None:
@@ -219,26 +213,16 @@ class DatabaseInstanceIpAddress(dict):
219
213
  @property
220
214
  @pulumi.getter(name="ipAddress")
221
215
  def ip_address(self) -> Optional[str]:
222
- """
223
- The IPv4 address assigned.
224
- """
225
216
  return pulumi.get(self, "ip_address")
226
217
 
227
218
  @property
228
219
  @pulumi.getter(name="timeToRetire")
229
220
  def time_to_retire(self) -> Optional[str]:
230
- """
231
- The time this IP address will be retired, in RFC
232
- 3339 format.
233
- """
234
221
  return pulumi.get(self, "time_to_retire")
235
222
 
236
223
  @property
237
224
  @pulumi.getter
238
225
  def type(self) -> Optional[str]:
239
- """
240
- The type of this IP address.
241
- """
242
226
  return pulumi.get(self, "type")
243
227
 
244
228
 
@@ -708,8 +692,7 @@ class DatabaseInstanceSettings(dict):
708
692
  :param str pricing_plan: Pricing plan for this instance, can only be `PER_USE`.
709
693
  :param str time_zone: The time_zone to be used by the database engine (supported only for SQL Server), in SQL Server timezone format.
710
694
  :param Mapping[str, str] user_labels: A set of key/value user label pairs to assign to the instance.
711
- :param int version: Used to make sure changes to the `settings` block are
712
- atomic.
695
+ :param int version: Used to make sure changes to the settings block are atomic.
713
696
  """
714
697
  pulumi.set(__self__, "tier", tier)
715
698
  if activation_policy is not None:
@@ -958,8 +941,7 @@ class DatabaseInstanceSettings(dict):
958
941
  @pulumi.getter
959
942
  def version(self) -> Optional[int]:
960
943
  """
961
- Used to make sure changes to the `settings` block are
962
- atomic.
944
+ Used to make sure changes to the settings block are atomic.
963
945
  """
964
946
  return pulumi.get(self, "version")
965
947
 
@@ -1993,7 +1993,7 @@ class TransferJobTransferSpecAzureBlobStorageDataSourceArgs:
1993
1993
  :param pulumi.Input[str] container: The container to transfer from the Azure Storage account.`
1994
1994
  :param pulumi.Input[str] storage_account: The name of the Azure Storage account.
1995
1995
  :param pulumi.Input['TransferJobTransferSpecAzureBlobStorageDataSourceAzureCredentialsArgs'] azure_credentials: Credentials used to authenticate API requests to Azure block.
1996
- :param pulumi.Input[str] credentials_secret: Full Resource name of a secret in Secret Manager containing [SAS Credentials in JSON form](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#azureblobstoragedata:~:text=begin%20with%20a%20%27/%27.-,credentialsSecret,-string). Service Agent for Storage Transfer must have permissions to access secret. If credentials_secret is specified, do not specify azure_credentials.`,
1996
+ :param pulumi.Input[str] credentials_secret: Full Resource name of a secret in Secret Manager containing [SAS Credentials in JSON form](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#azureblobstoragedata:~:text=begin%!w(MISSING)ith%!a(MISSING)%27/%!-(MISSING),credentialsSecret,-string). Service Agent for Storage Transfer must have permissions to access secret. If credentials_secret is specified, do not specify azure_credentials.`,
1997
1997
  :param pulumi.Input[str] path: Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'.
1998
1998
  """
1999
1999
  pulumi.set(__self__, "container", container)
@@ -2045,7 +2045,7 @@ class TransferJobTransferSpecAzureBlobStorageDataSourceArgs:
2045
2045
  @pulumi.getter(name="credentialsSecret")
2046
2046
  def credentials_secret(self) -> Optional[pulumi.Input[str]]:
2047
2047
  """
2048
- Full Resource name of a secret in Secret Manager containing [SAS Credentials in JSON form](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#azureblobstoragedata:~:text=begin%20with%20a%20%27/%27.-,credentialsSecret,-string). Service Agent for Storage Transfer must have permissions to access secret. If credentials_secret is specified, do not specify azure_credentials.`,
2048
+ Full Resource name of a secret in Secret Manager containing [SAS Credentials in JSON form](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#azureblobstoragedata:~:text=begin%!w(MISSING)ith%!a(MISSING)%27/%!-(MISSING),credentialsSecret,-string). Service Agent for Storage Transfer must have permissions to access secret. If credentials_secret is specified, do not specify azure_credentials.`,
2049
2049
  """
2050
2050
  return pulumi.get(self, "credentials_secret")
2051
2051
 
@@ -2122,7 +2122,7 @@ class TransferJobTransferSpecAzureBlobStorageDataSource(dict):
2122
2122
  :param str container: The container to transfer from the Azure Storage account.`
2123
2123
  :param str storage_account: The name of the Azure Storage account.
2124
2124
  :param 'TransferJobTransferSpecAzureBlobStorageDataSourceAzureCredentialsArgs' azure_credentials: Credentials used to authenticate API requests to Azure block.
2125
- :param str credentials_secret: Full Resource name of a secret in Secret Manager containing [SAS Credentials in JSON form](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#azureblobstoragedata:~:text=begin%20with%20a%20%27/%27.-,credentialsSecret,-string). Service Agent for Storage Transfer must have permissions to access secret. If credentials_secret is specified, do not specify azure_credentials.`,
2125
+ :param str credentials_secret: Full Resource name of a secret in Secret Manager containing [SAS Credentials in JSON form](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#azureblobstoragedata:~:text=begin%!w(MISSING)ith%!a(MISSING)%27/%!-(MISSING),credentialsSecret,-string). Service Agent for Storage Transfer must have permissions to access secret. If credentials_secret is specified, do not specify azure_credentials.`,
2126
2126
  :param str path: Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. As such, it should generally not begin with a '/'.
2127
2127
  """
2128
2128
  pulumi.set(__self__, "container", container)
@@ -2162,7 +2162,7 @@ class TransferJobTransferSpecAzureBlobStorageDataSource(dict):
2162
2162
  @pulumi.getter(name="credentialsSecret")
2163
2163
  def credentials_secret(self) -> Optional[str]:
2164
2164
  """
2165
- Full Resource name of a secret in Secret Manager containing [SAS Credentials in JSON form](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#azureblobstoragedata:~:text=begin%20with%20a%20%27/%27.-,credentialsSecret,-string). Service Agent for Storage Transfer must have permissions to access secret. If credentials_secret is specified, do not specify azure_credentials.`,
2165
+ Full Resource name of a secret in Secret Manager containing [SAS Credentials in JSON form](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/TransferSpec#azureblobstoragedata:~:text=begin%!w(MISSING)ith%!a(MISSING)%27/%!-(MISSING),credentialsSecret,-string). Service Agent for Storage Transfer must have permissions to access secret. If credentials_secret is specified, do not specify azure_credentials.`,
2166
2166
  """
2167
2167
  return pulumi.get(self, "credentials_secret")
2168
2168
 
@@ -6,7 +6,6 @@ from .. import _utilities
6
6
  import typing
7
7
  # Export this package's modules as members:
8
8
  from .ai_dataset import *
9
- from .ai_deployment_resource_pool import *
10
9
  from .ai_endpoint import *
11
10
  from .ai_endpoint_iam_binding import *
12
11
  from .ai_endpoint_iam_member import *
@@ -11,9 +11,6 @@ from .. import _utilities
11
11
 
12
12
  __all__ = [
13
13
  'AiDatasetEncryptionSpecArgs',
14
- 'AiDeploymentResourcePoolDedicatedResourcesArgs',
15
- 'AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs',
16
- 'AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs',
17
14
  'AiEndpointDeployedModelArgs',
18
15
  'AiEndpointDeployedModelAutomaticResourceArgs',
19
16
  'AiEndpointDeployedModelDedicatedResourceArgs',
@@ -88,172 +85,6 @@ class AiDatasetEncryptionSpecArgs:
88
85
  pulumi.set(self, "kms_key_name", value)
89
86
 
90
87
 
91
- @pulumi.input_type
92
- class AiDeploymentResourcePoolDedicatedResourcesArgs:
93
- def __init__(__self__, *,
94
- machine_spec: pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs'],
95
- min_replica_count: pulumi.Input[int],
96
- autoscaling_metric_specs: Optional[pulumi.Input[Sequence[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs']]]] = None,
97
- max_replica_count: Optional[pulumi.Input[int]] = None):
98
- """
99
- :param pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs'] machine_spec: The specification of a single machine used by the prediction
100
- Structure is documented below.
101
- :param pulumi.Input[int] min_replica_count: The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
102
- :param pulumi.Input[Sequence[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs']]] autoscaling_metric_specs: A list of the metric specifications that overrides a resource utilization metric.
103
- Structure is documented below.
104
- :param pulumi.Input[int] max_replica_count: The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
105
- """
106
- pulumi.set(__self__, "machine_spec", machine_spec)
107
- pulumi.set(__self__, "min_replica_count", min_replica_count)
108
- if autoscaling_metric_specs is not None:
109
- pulumi.set(__self__, "autoscaling_metric_specs", autoscaling_metric_specs)
110
- if max_replica_count is not None:
111
- pulumi.set(__self__, "max_replica_count", max_replica_count)
112
-
113
- @property
114
- @pulumi.getter(name="machineSpec")
115
- def machine_spec(self) -> pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs']:
116
- """
117
- The specification of a single machine used by the prediction
118
- Structure is documented below.
119
- """
120
- return pulumi.get(self, "machine_spec")
121
-
122
- @machine_spec.setter
123
- def machine_spec(self, value: pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs']):
124
- pulumi.set(self, "machine_spec", value)
125
-
126
- @property
127
- @pulumi.getter(name="minReplicaCount")
128
- def min_replica_count(self) -> pulumi.Input[int]:
129
- """
130
- The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
131
- """
132
- return pulumi.get(self, "min_replica_count")
133
-
134
- @min_replica_count.setter
135
- def min_replica_count(self, value: pulumi.Input[int]):
136
- pulumi.set(self, "min_replica_count", value)
137
-
138
- @property
139
- @pulumi.getter(name="autoscalingMetricSpecs")
140
- def autoscaling_metric_specs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs']]]]:
141
- """
142
- A list of the metric specifications that overrides a resource utilization metric.
143
- Structure is documented below.
144
- """
145
- return pulumi.get(self, "autoscaling_metric_specs")
146
-
147
- @autoscaling_metric_specs.setter
148
- def autoscaling_metric_specs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs']]]]):
149
- pulumi.set(self, "autoscaling_metric_specs", value)
150
-
151
- @property
152
- @pulumi.getter(name="maxReplicaCount")
153
- def max_replica_count(self) -> Optional[pulumi.Input[int]]:
154
- """
155
- The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
156
- """
157
- return pulumi.get(self, "max_replica_count")
158
-
159
- @max_replica_count.setter
160
- def max_replica_count(self, value: Optional[pulumi.Input[int]]):
161
- pulumi.set(self, "max_replica_count", value)
162
-
163
-
164
- @pulumi.input_type
165
- class AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs:
166
- def __init__(__self__, *,
167
- metric_name: pulumi.Input[str],
168
- target: Optional[pulumi.Input[int]] = None):
169
- """
170
- :param pulumi.Input[str] metric_name: The resource metric name. Supported metrics: For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
171
- :param pulumi.Input[int] target: The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
172
- """
173
- pulumi.set(__self__, "metric_name", metric_name)
174
- if target is not None:
175
- pulumi.set(__self__, "target", target)
176
-
177
- @property
178
- @pulumi.getter(name="metricName")
179
- def metric_name(self) -> pulumi.Input[str]:
180
- """
181
- The resource metric name. Supported metrics: For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
182
- """
183
- return pulumi.get(self, "metric_name")
184
-
185
- @metric_name.setter
186
- def metric_name(self, value: pulumi.Input[str]):
187
- pulumi.set(self, "metric_name", value)
188
-
189
- @property
190
- @pulumi.getter
191
- def target(self) -> Optional[pulumi.Input[int]]:
192
- """
193
- The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
194
- """
195
- return pulumi.get(self, "target")
196
-
197
- @target.setter
198
- def target(self, value: Optional[pulumi.Input[int]]):
199
- pulumi.set(self, "target", value)
200
-
201
-
202
- @pulumi.input_type
203
- class AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs:
204
- def __init__(__self__, *,
205
- accelerator_count: Optional[pulumi.Input[int]] = None,
206
- accelerator_type: Optional[pulumi.Input[str]] = None,
207
- machine_type: Optional[pulumi.Input[str]] = None):
208
- """
209
- :param pulumi.Input[int] accelerator_count: The number of accelerators to attach to the machine.
210
- :param pulumi.Input[str] accelerator_type: The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
211
- :param pulumi.Input[str] machine_type: The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types).
212
- """
213
- if accelerator_count is not None:
214
- pulumi.set(__self__, "accelerator_count", accelerator_count)
215
- if accelerator_type is not None:
216
- pulumi.set(__self__, "accelerator_type", accelerator_type)
217
- if machine_type is not None:
218
- pulumi.set(__self__, "machine_type", machine_type)
219
-
220
- @property
221
- @pulumi.getter(name="acceleratorCount")
222
- def accelerator_count(self) -> Optional[pulumi.Input[int]]:
223
- """
224
- The number of accelerators to attach to the machine.
225
- """
226
- return pulumi.get(self, "accelerator_count")
227
-
228
- @accelerator_count.setter
229
- def accelerator_count(self, value: Optional[pulumi.Input[int]]):
230
- pulumi.set(self, "accelerator_count", value)
231
-
232
- @property
233
- @pulumi.getter(name="acceleratorType")
234
- def accelerator_type(self) -> Optional[pulumi.Input[str]]:
235
- """
236
- The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
237
- """
238
- return pulumi.get(self, "accelerator_type")
239
-
240
- @accelerator_type.setter
241
- def accelerator_type(self, value: Optional[pulumi.Input[str]]):
242
- pulumi.set(self, "accelerator_type", value)
243
-
244
- @property
245
- @pulumi.getter(name="machineType")
246
- def machine_type(self) -> Optional[pulumi.Input[str]]:
247
- """
248
- The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types).
249
- """
250
- return pulumi.get(self, "machine_type")
251
-
252
- @machine_type.setter
253
- def machine_type(self, value: Optional[pulumi.Input[str]]):
254
- pulumi.set(self, "machine_type", value)
255
-
256
-
257
88
  @pulumi.input_type
258
89
  class AiEndpointDeployedModelArgs:
259
90
  def __init__(__self__, *,
@@ -616,7 +447,7 @@ class AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpecArgs:
616
447
  :param pulumi.Input[str] metric_name: (Output)
617
448
  The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
618
449
  :param pulumi.Input[int] target: (Output)
619
- The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
450
+ The target resource utilization in percentage (1%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%!)(MISSING) if not provided.
620
451
  """
621
452
  if metric_name is not None:
622
453
  pulumi.set(__self__, "metric_name", metric_name)
@@ -641,7 +472,7 @@ class AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpecArgs:
641
472
  def target(self) -> Optional[pulumi.Input[int]]:
642
473
  """
643
474
  (Output)
644
- The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
475
+ The target resource utilization in percentage (1%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%!)(MISSING) if not provided.
645
476
  """
646
477
  return pulumi.get(self, "target")
647
478
 
@@ -985,7 +816,8 @@ class AiFeatureOnlineStoreBigtableAutoScalingArgs:
985
816
  """
986
817
  :param pulumi.Input[int] max_node_count: The maximum number of nodes to scale up to. Must be greater than or equal to minNodeCount, and less than or equal to 10 times of 'minNodeCount'.
987
818
  :param pulumi.Input[int] min_node_count: The minimum number of nodes to scale down to. Must be greater than or equal to 1.
988
- :param pulumi.Input[int] cpu_utilization_target: A percentage of the cluster's CPU capacity. Can be from 10% to 80%. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%.
819
+ :param pulumi.Input[int] cpu_utilization_target: A percentage of the cluster's CPU capacity. Can be from 10%!t(MISSING)o 80%! (MISSING)When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%!
820
+ (MISSING)
989
821
  """
990
822
  pulumi.set(__self__, "max_node_count", max_node_count)
991
823
  pulumi.set(__self__, "min_node_count", min_node_count)
@@ -1020,7 +852,8 @@ class AiFeatureOnlineStoreBigtableAutoScalingArgs:
1020
852
  @pulumi.getter(name="cpuUtilizationTarget")
1021
853
  def cpu_utilization_target(self) -> Optional[pulumi.Input[int]]:
1022
854
  """
1023
- A percentage of the cluster's CPU capacity. Can be from 10% to 80%. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%.
855
+ A percentage of the cluster's CPU capacity. Can be from 10%!t(MISSING)o 80%! (MISSING)When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%!
856
+ (MISSING)
1024
857
  """
1025
858
  return pulumi.get(self, "cpu_utilization_target")
1026
859
 
@@ -2329,7 +2162,7 @@ class AiIndexMetadataConfigAlgorithmConfigTreeAhConfigArgs:
2329
2162
  """
2330
2163
  :param pulumi.Input[int] leaf_node_embedding_count: Number of embeddings on each leaf node. The default value is 1000 if not set.
2331
2164
  :param pulumi.Input[int] leaf_nodes_to_search_percent: The default percentage of leaf nodes that any query may be searched. Must be in
2332
- range 1-100, inclusive. The default value is 10 (means 10%) if not set.
2165
+ range 1-100, inclusive. The default value is 10 (means 10%!)(MISSING) if not set.
2333
2166
  """
2334
2167
  if leaf_node_embedding_count is not None:
2335
2168
  pulumi.set(__self__, "leaf_node_embedding_count", leaf_node_embedding_count)
@@ -2353,7 +2186,7 @@ class AiIndexMetadataConfigAlgorithmConfigTreeAhConfigArgs:
2353
2186
  def leaf_nodes_to_search_percent(self) -> Optional[pulumi.Input[int]]:
2354
2187
  """
2355
2188
  The default percentage of leaf nodes that any query may be searched. Must be in
2356
- range 1-100, inclusive. The default value is 10 (means 10%) if not set.
2189
+ range 1-100, inclusive. The default value is 10 (means 10%!)(MISSING) if not set.
2357
2190
  """
2358
2191
  return pulumi.get(self, "leaf_nodes_to_search_percent")
2359
2192
 
@@ -12,9 +12,6 @@ from . import outputs
12
12
 
13
13
  __all__ = [
14
14
  'AiDatasetEncryptionSpec',
15
- 'AiDeploymentResourcePoolDedicatedResources',
16
- 'AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec',
17
- 'AiDeploymentResourcePoolDedicatedResourcesMachineSpec',
18
15
  'AiEndpointDeployedModel',
19
16
  'AiEndpointDeployedModelAutomaticResource',
20
17
  'AiEndpointDeployedModelDedicatedResource',
@@ -109,197 +106,6 @@ class AiDatasetEncryptionSpec(dict):
109
106
  return pulumi.get(self, "kms_key_name")
110
107
 
111
108
 
112
- @pulumi.output_type
113
- class AiDeploymentResourcePoolDedicatedResources(dict):
114
- @staticmethod
115
- def __key_warning(key: str):
116
- suggest = None
117
- if key == "machineSpec":
118
- suggest = "machine_spec"
119
- elif key == "minReplicaCount":
120
- suggest = "min_replica_count"
121
- elif key == "autoscalingMetricSpecs":
122
- suggest = "autoscaling_metric_specs"
123
- elif key == "maxReplicaCount":
124
- suggest = "max_replica_count"
125
-
126
- if suggest:
127
- pulumi.log.warn(f"Key '{key}' not found in AiDeploymentResourcePoolDedicatedResources. Access the value via the '{suggest}' property getter instead.")
128
-
129
- def __getitem__(self, key: str) -> Any:
130
- AiDeploymentResourcePoolDedicatedResources.__key_warning(key)
131
- return super().__getitem__(key)
132
-
133
- def get(self, key: str, default = None) -> Any:
134
- AiDeploymentResourcePoolDedicatedResources.__key_warning(key)
135
- return super().get(key, default)
136
-
137
- def __init__(__self__, *,
138
- machine_spec: 'outputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpec',
139
- min_replica_count: int,
140
- autoscaling_metric_specs: Optional[Sequence['outputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec']] = None,
141
- max_replica_count: Optional[int] = None):
142
- """
143
- :param 'AiDeploymentResourcePoolDedicatedResourcesMachineSpecArgs' machine_spec: The specification of a single machine used by the prediction
144
- Structure is documented below.
145
- :param int min_replica_count: The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
146
- :param Sequence['AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpecArgs'] autoscaling_metric_specs: A list of the metric specifications that overrides a resource utilization metric.
147
- Structure is documented below.
148
- :param int max_replica_count: The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
149
- """
150
- pulumi.set(__self__, "machine_spec", machine_spec)
151
- pulumi.set(__self__, "min_replica_count", min_replica_count)
152
- if autoscaling_metric_specs is not None:
153
- pulumi.set(__self__, "autoscaling_metric_specs", autoscaling_metric_specs)
154
- if max_replica_count is not None:
155
- pulumi.set(__self__, "max_replica_count", max_replica_count)
156
-
157
- @property
158
- @pulumi.getter(name="machineSpec")
159
- def machine_spec(self) -> 'outputs.AiDeploymentResourcePoolDedicatedResourcesMachineSpec':
160
- """
161
- The specification of a single machine used by the prediction
162
- Structure is documented below.
163
- """
164
- return pulumi.get(self, "machine_spec")
165
-
166
- @property
167
- @pulumi.getter(name="minReplicaCount")
168
- def min_replica_count(self) -> int:
169
- """
170
- The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.
171
- """
172
- return pulumi.get(self, "min_replica_count")
173
-
174
- @property
175
- @pulumi.getter(name="autoscalingMetricSpecs")
176
- def autoscaling_metric_specs(self) -> Optional[Sequence['outputs.AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec']]:
177
- """
178
- A list of the metric specifications that overrides a resource utilization metric.
179
- Structure is documented below.
180
- """
181
- return pulumi.get(self, "autoscaling_metric_specs")
182
-
183
- @property
184
- @pulumi.getter(name="maxReplicaCount")
185
- def max_replica_count(self) -> Optional[int]:
186
- """
187
- The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use min_replica_count as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).
188
- """
189
- return pulumi.get(self, "max_replica_count")
190
-
191
-
192
- @pulumi.output_type
193
- class AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec(dict):
194
- @staticmethod
195
- def __key_warning(key: str):
196
- suggest = None
197
- if key == "metricName":
198
- suggest = "metric_name"
199
-
200
- if suggest:
201
- pulumi.log.warn(f"Key '{key}' not found in AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec. Access the value via the '{suggest}' property getter instead.")
202
-
203
- def __getitem__(self, key: str) -> Any:
204
- AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec.__key_warning(key)
205
- return super().__getitem__(key)
206
-
207
- def get(self, key: str, default = None) -> Any:
208
- AiDeploymentResourcePoolDedicatedResourcesAutoscalingMetricSpec.__key_warning(key)
209
- return super().get(key, default)
210
-
211
- def __init__(__self__, *,
212
- metric_name: str,
213
- target: Optional[int] = None):
214
- """
215
- :param str metric_name: The resource metric name. Supported metrics: For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
216
- :param int target: The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
217
- """
218
- pulumi.set(__self__, "metric_name", metric_name)
219
- if target is not None:
220
- pulumi.set(__self__, "target", target)
221
-
222
- @property
223
- @pulumi.getter(name="metricName")
224
- def metric_name(self) -> str:
225
- """
226
- The resource metric name. Supported metrics: For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
227
- """
228
- return pulumi.get(self, "metric_name")
229
-
230
- @property
231
- @pulumi.getter
232
- def target(self) -> Optional[int]:
233
- """
234
- The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
235
- """
236
- return pulumi.get(self, "target")
237
-
238
-
239
- @pulumi.output_type
240
- class AiDeploymentResourcePoolDedicatedResourcesMachineSpec(dict):
241
- @staticmethod
242
- def __key_warning(key: str):
243
- suggest = None
244
- if key == "acceleratorCount":
245
- suggest = "accelerator_count"
246
- elif key == "acceleratorType":
247
- suggest = "accelerator_type"
248
- elif key == "machineType":
249
- suggest = "machine_type"
250
-
251
- if suggest:
252
- pulumi.log.warn(f"Key '{key}' not found in AiDeploymentResourcePoolDedicatedResourcesMachineSpec. Access the value via the '{suggest}' property getter instead.")
253
-
254
- def __getitem__(self, key: str) -> Any:
255
- AiDeploymentResourcePoolDedicatedResourcesMachineSpec.__key_warning(key)
256
- return super().__getitem__(key)
257
-
258
- def get(self, key: str, default = None) -> Any:
259
- AiDeploymentResourcePoolDedicatedResourcesMachineSpec.__key_warning(key)
260
- return super().get(key, default)
261
-
262
- def __init__(__self__, *,
263
- accelerator_count: Optional[int] = None,
264
- accelerator_type: Optional[str] = None,
265
- machine_type: Optional[str] = None):
266
- """
267
- :param int accelerator_count: The number of accelerators to attach to the machine.
268
- :param str accelerator_type: The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
269
- :param str machine_type: The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types).
270
- """
271
- if accelerator_count is not None:
272
- pulumi.set(__self__, "accelerator_count", accelerator_count)
273
- if accelerator_type is not None:
274
- pulumi.set(__self__, "accelerator_type", accelerator_type)
275
- if machine_type is not None:
276
- pulumi.set(__self__, "machine_type", machine_type)
277
-
278
- @property
279
- @pulumi.getter(name="acceleratorCount")
280
- def accelerator_count(self) -> Optional[int]:
281
- """
282
- The number of accelerators to attach to the machine.
283
- """
284
- return pulumi.get(self, "accelerator_count")
285
-
286
- @property
287
- @pulumi.getter(name="acceleratorType")
288
- def accelerator_type(self) -> Optional[str]:
289
- """
290
- The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).
291
- """
292
- return pulumi.get(self, "accelerator_type")
293
-
294
- @property
295
- @pulumi.getter(name="machineType")
296
- def machine_type(self) -> Optional[str]:
297
- """
298
- The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types).
299
- """
300
- return pulumi.get(self, "machine_type")
301
-
302
-
303
109
  @pulumi.output_type
304
110
  class AiEndpointDeployedModel(dict):
305
111
  @staticmethod
@@ -684,7 +490,7 @@ class AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec(dict):
684
490
  :param str metric_name: (Output)
685
491
  The resource metric name. Supported metrics: * For Online Prediction: * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * `aiplatform.googleapis.com/prediction/online/cpu/utilization`
686
492
  :param int target: (Output)
687
- The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
493
+ The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
688
494
  """
689
495
  if metric_name is not None:
690
496
  pulumi.set(__self__, "metric_name", metric_name)
@@ -705,7 +511,7 @@ class AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec(dict):
705
511
  def target(self) -> Optional[int]:
706
512
  """
707
513
  (Output)
708
- The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
514
+ The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.
709
515
  """
710
516
  return pulumi.get(self, "target")
711
517
 
@@ -1108,7 +914,8 @@ class AiFeatureOnlineStoreBigtableAutoScaling(dict):
1108
914
  """
1109
915
  :param int max_node_count: The maximum number of nodes to scale up to. Must be greater than or equal to minNodeCount, and less than or equal to 10 times of 'minNodeCount'.
1110
916
  :param int min_node_count: The minimum number of nodes to scale down to. Must be greater than or equal to 1.
1111
- :param int cpu_utilization_target: A percentage of the cluster's CPU capacity. Can be from 10% to 80%. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%.
917
+ :param int cpu_utilization_target: A percentage of the cluster's CPU capacity. Can be from 10% to 80%. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%.
918
+
1112
919
  """
1113
920
  pulumi.set(__self__, "max_node_count", max_node_count)
1114
921
  pulumi.set(__self__, "min_node_count", min_node_count)
@@ -1135,7 +942,8 @@ class AiFeatureOnlineStoreBigtableAutoScaling(dict):
1135
942
  @pulumi.getter(name="cpuUtilizationTarget")
1136
943
  def cpu_utilization_target(self) -> Optional[int]:
1137
944
  """
1138
- A percentage of the cluster's CPU capacity. Can be from 10% to 80%. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%.
945
+ A percentage of the cluster's CPU capacity. Can be from 10% to 80%. When a cluster's CPU utilization exceeds the target that you have set, Bigtable immediately adds nodes to the cluster. When CPU utilization is substantially lower than the target, Bigtable removes nodes. If not set will default to 50%.
946
+
1139
947
  """
1140
948
  return pulumi.get(self, "cpu_utilization_target")
1141
949
 
@@ -2565,7 +2373,7 @@ class AiIndexMetadataConfigAlgorithmConfigTreeAhConfig(dict):
2565
2373
  """
2566
2374
  :param int leaf_node_embedding_count: Number of embeddings on each leaf node. The default value is 1000 if not set.
2567
2375
  :param int leaf_nodes_to_search_percent: The default percentage of leaf nodes that any query may be searched. Must be in
2568
- range 1-100, inclusive. The default value is 10 (means 10%) if not set.
2376
+ range 1-100, inclusive. The default value is 10 (means 10%) if not set.
2569
2377
  """
2570
2378
  if leaf_node_embedding_count is not None:
2571
2379
  pulumi.set(__self__, "leaf_node_embedding_count", leaf_node_embedding_count)
@@ -2585,7 +2393,7 @@ class AiIndexMetadataConfigAlgorithmConfigTreeAhConfig(dict):
2585
2393
  def leaf_nodes_to_search_percent(self) -> Optional[int]:
2586
2394
  """
2587
2395
  The default percentage of leaf nodes that any query may be searched. Must be in
2588
- range 1-100, inclusive. The default value is 10 (means 10%) if not set.
2396
+ range 1-100, inclusive. The default value is 10 (means 10%) if not set.
2589
2397
  """
2590
2398
  return pulumi.get(self, "leaf_nodes_to_search_percent")
2591
2399
 
@@ -2959,7 +2767,7 @@ class GetAiIndexMetadataConfigAlgorithmConfigTreeAhConfigResult(dict):
2959
2767
  """
2960
2768
  :param int leaf_node_embedding_count: Number of embeddings on each leaf node. The default value is 1000 if not set.
2961
2769
  :param int leaf_nodes_to_search_percent: The default percentage of leaf nodes that any query may be searched. Must be in
2962
- range 1-100, inclusive. The default value is 10 (means 10%) if not set.
2770
+ range 1-100, inclusive. The default value is 10 (means 10%) if not set.
2963
2771
  """
2964
2772
  pulumi.set(__self__, "leaf_node_embedding_count", leaf_node_embedding_count)
2965
2773
  pulumi.set(__self__, "leaf_nodes_to_search_percent", leaf_nodes_to_search_percent)
@@ -2977,7 +2785,7 @@ class GetAiIndexMetadataConfigAlgorithmConfigTreeAhConfigResult(dict):
2977
2785
  def leaf_nodes_to_search_percent(self) -> int:
2978
2786
  """
2979
2787
  The default percentage of leaf nodes that any query may be searched. Must be in
2980
- range 1-100, inclusive. The default value is 10 (means 10%) if not set.
2788
+ range 1-100, inclusive. The default value is 10 (means 10%) if not set.
2981
2789
  """
2982
2790
  return pulumi.get(self, "leaf_nodes_to_search_percent")
2983
2791